drm/i915: Replace engine->timeline with a plain list
To continue the onslaught of removing the assumption of a global
execution ordering, another casualty is the engine->timeline. Without an
actual timeline to track, it is overkill and we can replace it with a
much less grand plain list. We still need a list of requests inflight,
for the simple purpose of finding inflight requests (for retiring,
resetting, preemption etc).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-3-chris@chris-wilson.co.uk
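For illustration only (not part of the commit): a minimal, self-contained sketch of the pattern this change adopts, i.e. tracking inflight requests on a plain, spinlock-protected per-engine list instead of a dedicated timeline. The names below (sketch_engine, sketch_request, etc.) are simplified stand-ins invented for this sketch; in the driver itself the list is engine->active.requests and requests are linked through rq->sched.link, as the diff below shows.

/*
 * Illustrative sketch: a per-engine spinlock plus a plain list of
 * inflight requests, used for retiring, reset and preemption decisions.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct sketch_request {
	struct list_head link;		/* entry in engine->active.requests */
	bool completed;
};

struct sketch_engine {
	struct {
		spinlock_t lock;
		struct list_head requests;	/* inflight, in submission order */
	} active;
};

static void sketch_engine_init_active(struct sketch_engine *engine)
{
	spin_lock_init(&engine->active.lock);
	INIT_LIST_HEAD(&engine->active.requests);
}

/* On submission, the request joins the tail of the inflight list. */
static void sketch_submit(struct sketch_engine *engine,
			  struct sketch_request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);
	list_add_tail(&rq->link, &engine->active.requests);
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

/* Finding the oldest incomplete request is a simple walk of the list. */
static struct sketch_request *
sketch_find_active(struct sketch_engine *engine)
{
	struct sketch_request *rq, *active = NULL;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(rq, &engine->active.requests, link) {
		if (!rq->completed) {
			active = rq;
			break;
		}
	}
	spin_unlock_irqrestore(&engine->active.lock, flags);

	return active;
}

The point of the design is that no global execution order has to be maintained: the list only answers "which requests are currently on this engine", which is all that retirement, reset and preemption need.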
parent 9db0c5caa7
commit 422d7df4f0
@@ -565,4 +565,10 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
 
 #endif
 
+void intel_engine_init_active(struct intel_engine_cs *engine,
+			      unsigned int subclass);
+#define ENGINE_PHYSICAL	0
+#define ENGINE_MOCK	1
+#define ENGINE_VIRTUAL	2
+
 #endif /* _INTEL_RINGBUFFER_H_ */

@@ -617,14 +617,7 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 	if (err)
 		return err;
 
-	err = i915_timeline_init(engine->i915,
-				 &engine->timeline,
-				 engine->status_page.vma);
-	if (err)
-		goto err_hwsp;
-
-	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
-
+	intel_engine_init_active(engine, ENGINE_PHYSICAL);
 	intel_engine_init_breadcrumbs(engine);
 	intel_engine_init_execlists(engine);
 	intel_engine_init_hangcheck(engine);
@@ -637,10 +630,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
 
 	return 0;
-
-err_hwsp:
-	cleanup_status_page(engine);
-	return err;
 }
 
 /**
@@ -797,6 +786,27 @@ static int pin_context(struct i915_gem_context *ctx,
 	return 0;
 }
 
+void
+intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
+{
+	INIT_LIST_HEAD(&engine->active.requests);
+
+	spin_lock_init(&engine->active.lock);
+	lockdep_set_subclass(&engine->active.lock, subclass);
+
+	/*
+	 * Due to an interesting quirk in lockdep's internal debug tracking,
+	 * after setting a subclass we must ensure the lock is used. Otherwise,
+	 * nr_unused_locks is incremented once too often.
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	local_irq_disable();
+	lock_map_acquire(&engine->active.lock.dep_map);
+	lock_map_release(&engine->active.lock.dep_map);
+	local_irq_enable();
+#endif
+}
+
 /**
  * intel_engines_init_common - initialize cengine state which might require hw access
  * @engine: Engine to initialize.
@@ -860,6 +870,8 @@ err_unpin:
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
+	GEM_BUG_ON(!list_empty(&engine->active.requests));
+
 	cleanup_status_page(engine);
 
 	intel_engine_fini_breadcrumbs(engine);
@@ -874,8 +886,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	intel_context_unpin(engine->kernel_context);
 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
 
-	i915_timeline_fini(&engine->timeline);
-
 	intel_wa_list_free(&engine->ctx_wa_list);
 	intel_wa_list_free(&engine->wa_list);
 	intel_wa_list_free(&engine->whitelist);
@@ -1482,16 +1492,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 	drm_printf(m, "\tRequests:\n");
 
-	rq = list_first_entry(&engine->timeline.requests,
-			      struct i915_request, link);
-	if (&rq->link != &engine->timeline.requests)
-		print_request(m, rq, "\t\tfirst  ");
-
-	rq = list_last_entry(&engine->timeline.requests,
-			     struct i915_request, link);
-	if (&rq->link != &engine->timeline.requests)
-		print_request(m, rq, "\t\tlast   ");
-
 	rq = intel_engine_find_active_request(engine);
 	if (rq) {
 		print_request(m, rq, "\t\tactive ");
@@ -1572,7 +1572,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	if (!intel_engine_supports_stats(engine))
 		return -ENODEV;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 	write_seqlock(&engine->stats.lock);
 
 	if (unlikely(engine->stats.enabled == ~0)) {
@@ -1598,7 +1598,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 
 unlock:
 	write_sequnlock(&engine->stats.lock);
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	return err;
 }
@@ -1683,22 +1683,22 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	 * At all other times, we must assume the GPU is still running, but
 	 * we only care about the snapshot of this moment.
 	 */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
-	list_for_each_entry(request, &engine->timeline.requests, link) {
+	spin_lock_irqsave(&engine->active.lock, flags);
+	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (i915_request_completed(request))
 			continue;
 
 		if (!i915_request_started(request))
-			break;
+			continue;
 
 		/* More than one preemptible request may match! */
 		if (!match_ring(request))
-			break;
+			continue;
 
 		active = request;
 		break;
 	}
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	return active;
 }

@@ -288,7 +288,11 @@ struct intel_engine_cs {
 
 	struct intel_ring *buffer;
 
-	struct i915_timeline timeline;
+	struct {
+		spinlock_t lock;
+		struct list_head requests;
+	} active;
+
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */

@@ -298,8 +298,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 	 * Check against the first request in ELSP[1], it will, thanks to the
	 * power of PI, be the highest priority of that context.
	 */
-	if (!list_is_last(&rq->link, &engine->timeline.requests) &&
-	    rq_prio(list_next_entry(rq, link)) > last_prio)
+	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
 		return true;
 
 	if (rb) {
@@ -434,11 +434,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	struct list_head *uninitialized_var(pl);
 	int prio = I915_PRIORITY_INVALID;
 
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	list_for_each_entry_safe_reverse(rq, rn,
-					 &engine->timeline.requests,
-					 link) {
+					 &engine->active.requests,
+					 sched.link) {
 		struct intel_engine_cs *owner;
 
 		if (i915_request_completed(rq))
@@ -465,7 +465,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			}
 			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 
-			list_add(&rq->sched.link, pl);
+			list_move(&rq->sched.link, pl);
 			active = rq;
 		} else {
 			rq->engine = owner;
@@ -933,11 +933,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq;
 
-		spin_lock(&ve->base.timeline.lock);
+		spin_lock(&ve->base.active.lock);
 
 		rq = ve->request;
 		if (unlikely(!rq)) { /* lost the race to a sibling */
-			spin_unlock(&ve->base.timeline.lock);
+			spin_unlock(&ve->base.active.lock);
 			rb_erase_cached(rb, &execlists->virtual);
 			RB_CLEAR_NODE(rb);
 			rb = rb_first_cached(&execlists->virtual);
@@ -950,13 +950,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 		if (rq_prio(rq) >= queue_prio(execlists)) {
 			if (!virtual_matches(ve, rq, engine)) {
-				spin_unlock(&ve->base.timeline.lock);
+				spin_unlock(&ve->base.active.lock);
 				rb = rb_next(rb);
 				continue;
 			}
 
 			if (last && !can_merge_rq(last, rq)) {
-				spin_unlock(&ve->base.timeline.lock);
+				spin_unlock(&ve->base.active.lock);
 				return; /* leave this rq for another engine */
 			}
 
@@ -1011,7 +1011,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			last = rq;
 		}
 
-		spin_unlock(&ve->base.timeline.lock);
+		spin_unlock(&ve->base.active.lock);
 		break;
 	}
 
@@ -1068,8 +1068,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(port_isset(port));
 			}
 
-			list_del_init(&rq->sched.link);
-
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
 
@@ -1170,7 +1168,7 @@ static void process_csb(struct intel_engine_cs *engine)
 	const u8 num_entries = execlists->csb_size;
 	u8 head, tail;
 
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	/*
	 * Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1330,7 +1328,7 @@ static void process_csb(struct intel_engine_cs *engine)
 
 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 {
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	process_csb(engine);
 	if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
@@ -1351,15 +1349,16 @@ static void execlists_submission_tasklet(unsigned long data)
 		  !!intel_wakeref_active(&engine->wakeref),
 		  engine->execlists.active);
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 	__execlists_submission_tasklet(engine);
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void queue_request(struct intel_engine_cs *engine,
 			  struct i915_sched_node *node,
 			  int prio)
 {
+	GEM_BUG_ON(!list_empty(&node->link));
 	list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
 }
 
@@ -1390,7 +1389,7 @@ static void execlists_submit_request(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	queue_request(engine, &request->sched, rq_prio(request));
 
@@ -1399,7 +1398,7 @@ static void execlists_submit_request(struct i915_request *request)
 
 	submit_queue(engine, rq_prio(request));
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void __execlists_context_fini(struct intel_context *ce)
@@ -2050,8 +2049,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 	intel_engine_stop_cs(engine);
 
 	/* And flush any current direct submission. */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static bool lrc_regs_ok(const struct i915_request *rq)
@@ -2094,11 +2093,11 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 
 static struct i915_request *active_request(struct i915_request *rq)
 {
-	const struct list_head * const list = &rq->engine->timeline.requests;
+	const struct list_head * const list = &rq->engine->active.requests;
 	const struct intel_context * const context = rq->hw_context;
 	struct i915_request *active = NULL;
 
-	list_for_each_entry_from_reverse(rq, list, link) {
+	list_for_each_entry_from_reverse(rq, list, sched.link) {
 		if (i915_request_completed(rq))
 			break;
 
@@ -2215,11 +2214,11 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 
 	GEM_TRACE("%s\n", engine->name);
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	__execlists_reset(engine, stalled);
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void nop_submission_tasklet(unsigned long data)
@@ -2250,12 +2249,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	__execlists_reset(engine, true);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->timeline.requests, link) {
+	list_for_each_entry(rq, &engine->active.requests, sched.link) {
 		if (!i915_request_signaled(rq))
 			dma_fence_set_error(&rq->fence, -EIO);
 
@@ -2286,7 +2285,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		rb_erase_cached(rb, &execlists->virtual);
 		RB_CLEAR_NODE(rb);
 
-		spin_lock(&ve->base.timeline.lock);
+		spin_lock(&ve->base.active.lock);
 		if (ve->request) {
 			ve->request->engine = engine;
 			__i915_request_submit(ve->request);
@@ -2295,7 +2294,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 			ve->base.execlists.queue_priority_hint = INT_MIN;
 			ve->request = NULL;
 		}
-		spin_unlock(&ve->base.timeline.lock);
+		spin_unlock(&ve->base.active.lock);
 	}
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -2307,7 +2306,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
 	execlists->tasklet.func = nop_submission_tasklet;
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void execlists_reset_finish(struct intel_engine_cs *engine)
@@ -3010,12 +3009,18 @@ error_deref_obj:
 	return ret;
 }
 
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+	return &ve->base.execlists.default_priolist.requests[0];
+}
+
 static void virtual_context_destroy(struct kref *kref)
 {
 	struct virtual_engine *ve =
 		container_of(kref, typeof(*ve), context.ref);
 	unsigned int n;
 
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 	GEM_BUG_ON(ve->request);
 	GEM_BUG_ON(ve->context.inflight);
 
@@ -3026,13 +3031,13 @@ static void virtual_context_destroy(struct kref *kref)
 		if (RB_EMPTY_NODE(node))
 			continue;
 
-		spin_lock_irq(&sibling->timeline.lock);
+		spin_lock_irq(&sibling->active.lock);
 
 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
-		spin_unlock_irq(&sibling->timeline.lock);
+		spin_unlock_irq(&sibling->active.lock);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
 
@@ -3040,8 +3045,6 @@ static void virtual_context_destroy(struct kref *kref)
 		__execlists_context_fini(&ve->context);
 
 	kfree(ve->bonds);
-
-	i915_timeline_fini(&ve->base.timeline);
 	kfree(ve);
 }
 
@@ -3161,16 +3164,16 @@ static void virtual_submission_tasklet(unsigned long data)
 
 		if (unlikely(!(mask & sibling->mask))) {
 			if (!RB_EMPTY_NODE(&node->rb)) {
-				spin_lock(&sibling->timeline.lock);
+				spin_lock(&sibling->active.lock);
 				rb_erase_cached(&node->rb,
 						&sibling->execlists.virtual);
 				RB_CLEAR_NODE(&node->rb);
-				spin_unlock(&sibling->timeline.lock);
+				spin_unlock(&sibling->active.lock);
 			}
 			continue;
 		}
 
-		spin_lock(&sibling->timeline.lock);
+		spin_lock(&sibling->active.lock);
 
 		if (!RB_EMPTY_NODE(&node->rb)) {
 			/*
@@ -3214,7 +3217,7 @@ submit_engine:
 			tasklet_hi_schedule(&sibling->execlists.tasklet);
 		}
 
-		spin_unlock(&sibling->timeline.lock);
+		spin_unlock(&sibling->active.lock);
 	}
 	local_irq_enable();
 }
@@ -3231,9 +3234,13 @@ static void virtual_submit_request(struct i915_request *rq)
 	GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
 	GEM_BUG_ON(ve->request);
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
 	ve->base.execlists.queue_priority_hint = rq_prio(rq);
 	WRITE_ONCE(ve->request, rq);
 
+	list_move_tail(&rq->sched.link, virtual_queue(ve));
+
 	tasklet_schedule(&ve->base.execlists.tasklet);
 }
 
@@ -3297,10 +3304,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 
 	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
 
-	err = i915_timeline_init(ctx->i915, &ve->base.timeline, NULL);
-	if (err)
-		goto err_put;
-	i915_timeline_set_subclass(&ve->base.timeline, TIMELINE_VIRTUAL);
+	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
 
 	intel_engine_init_execlists(&ve->base);
 
@@ -3311,6 +3315,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 	ve->base.submit_request = virtual_submit_request;
 	ve->base.bond_execute = virtual_bond_execute;
 
+	INIT_LIST_HEAD(virtual_queue(ve));
 	ve->base.execlists.queue_priority_hint = INT_MIN;
 	tasklet_init(&ve->base.execlists.tasklet,
 		     virtual_submission_tasklet,
@@ -3465,11 +3470,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 	unsigned int count;
 	struct rb_node *rb;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	last = NULL;
 	count = 0;
-	list_for_each_entry(rq, &engine->timeline.requests, link) {
+	list_for_each_entry(rq, &engine->active.requests, sched.link) {
 		if (count++ < max - 1)
 			show_request(m, rq, "\t\tE ");
 		else
@@ -3532,7 +3537,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		show_request(m, last, "\t\tV ");
 	}
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 void intel_lr_context_reset(struct intel_engine_cs *engine,

@@ -49,12 +49,12 @@ static void engine_skip_context(struct i915_request *rq)
 	struct intel_engine_cs *engine = rq->engine;
 	struct i915_gem_context *hung_ctx = rq->gem_context;
 
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	if (!i915_request_is_active(rq))
 		return;
 
-	list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
 		if (rq->gem_context == hung_ctx)
 			i915_request_skip(rq, -EIO);
 }
@@ -130,7 +130,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
 		  rq->fence.seqno,
 		  yesno(guilty));
 
-	lockdep_assert_held(&rq->engine->timeline.lock);
+	lockdep_assert_held(&rq->engine->active.lock);
 	GEM_BUG_ON(i915_request_completed(rq));
 
 	if (guilty) {
@@ -785,10 +785,10 @@ static void nop_submit_request(struct i915_request *request)
 		  engine->name, request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 	__i915_request_submit(request);
 	i915_request_mark_complete(request);
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	intel_engine_queue_breadcrumbs(engine);
 }

@@ -730,14 +730,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
 
 static void reset_ring(struct intel_engine_cs *engine, bool stalled)
 {
-	struct i915_timeline *tl = &engine->timeline;
 	struct i915_request *pos, *rq;
 	unsigned long flags;
 	u32 head;
 
 	rq = NULL;
-	spin_lock_irqsave(&tl->lock, flags);
-	list_for_each_entry(pos, &tl->requests, link) {
+	spin_lock_irqsave(&engine->active.lock, flags);
+	list_for_each_entry(pos, &engine->active.requests, sched.link) {
 		if (!i915_request_completed(pos)) {
 			rq = pos;
 			break;
@@ -791,7 +790,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
 	}
 	engine->buffer->head = intel_ring_wrap(engine->buffer, head);
 
-	spin_unlock_irqrestore(&tl->lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void reset_finish(struct intel_engine_cs *engine)
@@ -877,10 +876,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
 	struct i915_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->timeline.requests, link) {
+	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (!i915_request_signaled(request))
 			dma_fence_set_error(&request->fence, -EIO);
 
@@ -889,7 +888,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void i9xx_submit_request(struct i915_request *request)
@@ -1267,8 +1266,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 
 	GEM_BUG_ON(!is_power_of_2(size));
 	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-	GEM_BUG_ON(timeline == &engine->timeline);
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)

@@ -229,17 +229,17 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
 	struct i915_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (!i915_request_signaled(request))
 			dma_fence_set_error(&request->fence, -EIO);
 
 		i915_request_mark_complete(request);
 	}
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
@@ -285,28 +285,23 @@ int mock_engine_init(struct intel_engine_cs *engine)
 	struct drm_i915_private *i915 = engine->i915;
 	int err;
 
+	intel_engine_init_active(engine, ENGINE_MOCK);
 	intel_engine_init_breadcrumbs(engine);
 	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 
-	if (i915_timeline_init(i915, &engine->timeline, NULL))
-		goto err_breadcrumbs;
-	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
-
 	engine->kernel_context =
 		i915_gem_context_get_engine(i915->kernel_context, engine->id);
 	if (IS_ERR(engine->kernel_context))
-		goto err_timeline;
+		goto err_breadcrumbs;
 
 	err = intel_context_pin(engine->kernel_context);
 	intel_context_put(engine->kernel_context);
 	if (err)
-		goto err_timeline;
+		goto err_breadcrumbs;
 
 	return 0;
 
-err_timeline:
-	i915_timeline_fini(&engine->timeline);
 err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(engine);
 	return -ENOMEM;
@@ -340,7 +335,6 @@ void mock_engine_free(struct intel_engine_cs *engine)
 	intel_context_unpin(engine->kernel_context);
 
 	intel_engine_fini_breadcrumbs(engine);
-	i915_timeline_fini(&engine->timeline);
 
 	kfree(engine);
 }

@@ -1275,7 +1275,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->timeline.requests, link)
+	list_for_each_entry_from(request, &engine->active.requests, sched.link)
 		count++;
 	if (!count)
 		return;
@@ -1288,7 +1288,8 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->timeline.requests, link) {
+	list_for_each_entry_from(request,
+				 &engine->active.requests, sched.link) {
 		if (count >= ee->num_requests) {
 			/*
			 * If the ring request list was changed in

@@ -232,9 +232,9 @@ static bool i915_request_retire(struct i915_request *rq)
 
 	local_irq_disable();
 
-	spin_lock(&rq->engine->timeline.lock);
-	list_del(&rq->link);
-	spin_unlock(&rq->engine->timeline.lock);
+	spin_lock(&rq->engine->active.lock);
+	list_del(&rq->sched.link);
+	spin_unlock(&rq->engine->active.lock);
 
 	spin_lock(&rq->lock);
 	i915_request_mark_complete(rq);
@@ -254,6 +254,7 @@ static bool i915_request_retire(struct i915_request *rq)
 	intel_context_unpin(rq->hw_context);
 
 	i915_request_remove_from_client(rq);
+	list_del(&rq->link);
 
 	free_capture_list(rq);
 	i915_sched_node_fini(&rq->sched);
@@ -373,28 +374,17 @@ __i915_request_await_execution(struct i915_request *rq,
 	return 0;
 }
 
-static void move_to_timeline(struct i915_request *request,
-			     struct i915_timeline *timeline)
-{
-	GEM_BUG_ON(request->timeline == &request->engine->timeline);
-	lockdep_assert_held(&request->engine->timeline.lock);
-
-	spin_lock(&request->timeline->lock);
-	list_move_tail(&request->link, &timeline->requests);
-	spin_unlock(&request->timeline->lock);
-}
-
 void __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
-	GEM_TRACE("%s fence %llx:%lld -> current %d\n",
+	GEM_TRACE("%s fence %llx:%lld, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  hwsp_seqno(request));
 
 	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	if (i915_gem_context_is_banned(request->gem_context))
 		i915_request_skip(request, -EIO);
@@ -422,6 +412,8 @@ void __i915_request_submit(struct i915_request *request)
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
+	list_move_tail(&request->sched.link, &engine->active.requests);
+
 	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
 	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 
@@ -437,9 +429,6 @@ void __i915_request_submit(struct i915_request *request)
 	engine->emit_fini_breadcrumb(request,
 				     request->ring->vaddr + request->postfix);
 
-	/* Transfer from per-context onto the global per-engine timeline */
-	move_to_timeline(request, &engine->timeline);
-
 	engine->serial++;
 
 	trace_i915_request_execute(request);
@@ -451,11 +440,11 @@ void i915_request_submit(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	__i915_request_submit(request);
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 void __i915_request_unsubmit(struct i915_request *request)
@@ -468,7 +457,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 		  hwsp_seqno(request));
 
 	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	/*
	 * Only unwind in reverse order, required so that the per-context list
@@ -486,9 +475,6 @@ void __i915_request_unsubmit(struct i915_request *request)
 
 	spin_unlock(&request->lock);
 
-	/* Transfer back from the global per-engine timeline to per-context */
-	move_to_timeline(request, request->timeline);
-
 	/* We've already spun, don't charge on resubmitting. */
 	if (request->sched.semaphores && i915_request_started(request)) {
 		request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
@@ -510,11 +496,11 @@ void i915_request_unsubmit(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	__i915_request_unsubmit(request);
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static int __i915_sw_fence_call
@@ -669,7 +655,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->timeline = tl;
-	GEM_BUG_ON(rq->timeline == &ce->engine->timeline);
 	rq->hwsp_seqno = tl->hwsp_seqno;
 	rq->hwsp_cacheline = tl->hwsp_cacheline;
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -1136,9 +1121,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 							 0);
 	}
 
-	spin_lock_irq(&timeline->lock);
 	list_add_tail(&rq->link, &timeline->requests);
-	spin_unlock_irq(&timeline->lock);
 
 	/*
	 * Make sure that no request gazumped us - if it was allocated after

@@ -217,7 +217,7 @@ struct i915_request {
 
 	bool waitboost;
 
-	/** engine->request_list entry for this request */
+	/** timeline->request entry for this request */
 	struct list_head link;
 
 	/** ring->request_list entry for this request */

@@ -77,7 +77,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
 	bool first = true;
 	int idx, i;
 
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 	assert_priolists(execlists);
 
 	/* buckets sorted from highest [in slot 0] to lowest priority */
@@ -162,9 +162,9 @@ sched_lock_engine(const struct i915_sched_node *node,
 	 * check that the rq still belongs to the newly locked engine.
	 */
 	while (locked != (engine = READ_ONCE(rq->engine))) {
-		spin_unlock(&locked->timeline.lock);
+		spin_unlock(&locked->active.lock);
 		memset(cache, 0, sizeof(*cache));
-		spin_lock(&engine->timeline.lock);
+		spin_lock(&engine->active.lock);
 		locked = engine;
 	}
 
@@ -189,7 +189,7 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
 	 * tasklet, i.e. we have not change the priority queue
	 * sufficiently to oust the running context.
	 */
-	if (inflight && !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+	if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
 		return;
 
 	tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -278,7 +278,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 	memset(&cache, 0, sizeof(cache));
 	engine = node_to_request(node)->engine;
-	spin_lock(&engine->timeline.lock);
+	spin_lock(&engine->active.lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	engine = sched_lock_engine(node, engine, &cache);
@@ -287,7 +287,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 		node = dep->signaler;
 		engine = sched_lock_engine(node, engine, &cache);
-		lockdep_assert_held(&engine->timeline.lock);
+		lockdep_assert_held(&engine->active.lock);
 
 		/* Recheck after acquiring the engine->timeline.lock */
 		if (prio <= node->attr.priority || node_signaled(node))
@@ -296,14 +296,8 @@ static void __i915_schedule(struct i915_sched_node *node,
 		GEM_BUG_ON(node_to_request(node)->engine != engine);
 
 		node->attr.priority = prio;
-		if (!list_empty(&node->link)) {
-			GEM_BUG_ON(intel_engine_is_virtual(engine));
-			if (!cache.priolist)
-				cache.priolist =
-					i915_sched_lookup_priolist(engine,
-								   prio);
-			list_move_tail(&node->link, cache.priolist);
-		} else {
+
+		if (list_empty(&node->link)) {
 			/*
			 * If the request is not in the priolist queue because
@@ -312,10 +306,18 @@ static void __i915_schedule(struct i915_sched_node *node,
 			 * queue; but in that case we may still need to reorder
			 * the inflight requests.
			 */
-			if (!i915_sw_fence_done(&node_to_request(node)->submit))
-				continue;
+			continue;
 		}
 
+		if (!intel_engine_is_virtual(engine) &&
+		    !i915_request_is_active(node_to_request(node))) {
+			if (!cache.priolist)
+				cache.priolist =
+					i915_sched_lookup_priolist(engine,
+								   prio);
+			list_move_tail(&node->link, cache.priolist);
+		}
+
 		if (prio <= engine->execlists.queue_priority_hint)
 			continue;
 
@@ -325,7 +327,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 		kick_submission(engine, prio);
 	}
 
-	spin_unlock(&engine->timeline.lock);
+	spin_unlock(&engine->active.lock);
 }
 
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
@@ -439,8 +441,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 {
 	struct i915_dependency *dep, *tmp;
 
-	GEM_BUG_ON(!list_empty(&node->link));
-
 	spin_lock_irq(&schedule_lock);
 
 	/*

@@ -251,7 +251,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 
 	timeline->fence_context = dma_fence_context_alloc(1);
 
-	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
 	INIT_ACTIVE_REQUEST(&timeline->last_request);

@@ -36,25 +36,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 		       struct i915_vma *hwsp);
 void i915_timeline_fini(struct i915_timeline *tl);
 
-static inline void
-i915_timeline_set_subclass(struct i915_timeline *timeline,
-			   unsigned int subclass)
-{
-	lockdep_set_subclass(&timeline->lock, subclass);
-
-	/*
-	 * Due to an interesting quirk in lockdep's internal debug tracking,
-	 * after setting a subclass we must ensure the lock is used. Otherwise,
-	 * nr_unused_locks is incremented once too often.
-	 */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	local_irq_disable();
-	lock_map_acquire(&timeline->lock.dep_map);
-	lock_map_release(&timeline->lock.dep_map);
-	local_irq_enable();
-#endif
-}
-
 struct i915_timeline *
 i915_timeline_create(struct drm_i915_private *i915,
 		     struct i915_vma *global_hwsp);

@@ -23,10 +23,6 @@ struct i915_timeline {
 	u64 fence_context;
 	u32 seqno;
 
-	spinlock_t lock;
-#define TIMELINE_CLIENT 0 /* default subclass */
-#define TIMELINE_ENGINE 1
-#define TIMELINE_VIRTUAL 2
 	struct mutex mutex; /* protects the flow of requests */
 
 	unsigned int pin_count;

@@ -740,7 +740,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 	bool submit = false;
 	struct rb_node *rb;
 
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	if (port_isset(port)) {
 		if (intel_engine_has_preemption(engine)) {
@@ -822,7 +822,7 @@ static void guc_submission_tasklet(unsigned long data)
 	struct i915_request *rq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	rq = port_request(port);
 	while (rq && i915_request_completed(rq)) {
@@ -847,7 +847,7 @@ static void guc_submission_tasklet(unsigned long data)
 	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
 		guc_dequeue(engine);
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void guc_reset_prepare(struct intel_engine_cs *engine)
@@ -884,7 +884,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 	struct i915_request *rq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	execlists_cancel_port_requests(execlists);
 
@@ -900,7 +900,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
 
 out_unlock:
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void guc_cancel_requests(struct intel_engine_cs *engine)
@@ -926,13 +926,13 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
 	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
 	execlists_cancel_port_requests(execlists);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->timeline.requests, link) {
+	list_for_each_entry(rq, &engine->active.requests, sched.link) {
 		if (!i915_request_signaled(rq))
 			dma_fence_set_error(&rq->fence, -EIO);
 
@@ -961,7 +961,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
 	execlists->queue = RB_ROOT_CACHED;
 	GEM_BUG_ON(port_isset(execlists->port));
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void guc_reset_finish(struct intel_engine_cs *engine)

@@ -13,7 +13,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 	timeline->i915 = NULL;
 	timeline->fence_context = context;
 
-	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
 	INIT_ACTIVE_REQUEST(&timeline->last_request);