drm/i915: Store the BIT(engine->id) as the engine's mask
In the next patch, we are introducing a broad virtual engine to encompass multiple physical engines, losing the 1:1 nature of BIT(engine->id). To reflect the broader set of engines implied by the virtual instance, let's store the full bitmask.

v2: Use intel_engine_mask_t (s/ring_mask/engine_mask/)
v3: Tvrtko voted for moah churn so teach everyone to not mention ring and use $class$instance throughout.
v4: Comment upon the disparity in bspec for using VCS1,VCS2 in gen8 and VCS[0-4] in later gen. We opt to keep the code consistent and use 0-index naming throughout.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190305180332.30900-1-chris@chris-wilson.co.uk
This commit is contained in:
    parent c8b502422b
    commit 8a68d46436
				| @ -391,12 +391,12 @@ struct cmd_info { | ||||
| #define F_POST_HANDLE	(1<<2) | ||||
| 	u32 flag; | ||||
| 
 | ||||
| #define R_RCS	(1 << RCS) | ||||
| #define R_VCS1  (1 << VCS) | ||||
| #define R_VCS2  (1 << VCS2) | ||||
| #define R_RCS	BIT(RCS0) | ||||
| #define R_VCS1  BIT(VCS0) | ||||
| #define R_VCS2  BIT(VCS1) | ||||
| #define R_VCS	(R_VCS1 | R_VCS2) | ||||
| #define R_BCS	(1 << BCS) | ||||
| #define R_VECS	(1 << VECS) | ||||
| #define R_BCS	BIT(BCS0) | ||||
| #define R_VECS	BIT(VECS0) | ||||
| #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS) | ||||
| 	/* rings that support this cmd: BLT/RCS/VCS/VECS */ | ||||
| 	u16 rings; | ||||
| @ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = { | ||||
| }; | ||||
| 
 | ||||
| static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { | ||||
| 	[RCS] = { | ||||
| 	[RCS0] = { | ||||
| 		&decode_info_mi, | ||||
| 		NULL, | ||||
| 		NULL, | ||||
| @ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { | ||||
| 		NULL, | ||||
| 	}, | ||||
| 
 | ||||
| 	[VCS] = { | ||||
| 	[VCS0] = { | ||||
| 		&decode_info_mi, | ||||
| 		NULL, | ||||
| 		NULL, | ||||
| @ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { | ||||
| 		NULL, | ||||
| 	}, | ||||
| 
 | ||||
| 	[BCS] = { | ||||
| 	[BCS0] = { | ||||
| 		&decode_info_mi, | ||||
| 		NULL, | ||||
| 		&decode_info_2d, | ||||
| @ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { | ||||
| 		NULL, | ||||
| 	}, | ||||
| 
 | ||||
| 	[VECS] = { | ||||
| 	[VECS0] = { | ||||
| 		&decode_info_mi, | ||||
| 		NULL, | ||||
| 		NULL, | ||||
| @ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { | ||||
| 		NULL, | ||||
| 	}, | ||||
| 
 | ||||
| 	[VCS2] = { | ||||
| 	[VCS1] = { | ||||
| 		&decode_info_mi, | ||||
| 		NULL, | ||||
| 		NULL, | ||||
| @ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt, | ||||
| 	struct cmd_entry *e; | ||||
| 
 | ||||
| 	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) { | ||||
| 		if ((opcode == e->info->opcode) && | ||||
| 				(e->info->rings & (1 << ring_id))) | ||||
| 		if (opcode == e->info->opcode && e->info->rings & BIT(ring_id)) | ||||
| 			return e->info; | ||||
| 	} | ||||
| 	return NULL; | ||||
| @ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s) | ||||
| 	struct intel_gvt *gvt = s->vgpu->gvt; | ||||
| 
 | ||||
| 	for (i = 1; i < cmd_len; i += 2) { | ||||
| 		if (IS_BROADWELL(gvt->dev_priv) && | ||||
| 				(s->ring_id != RCS)) { | ||||
| 			if (s->ring_id == BCS && | ||||
| 					cmd_reg(s, i) == | ||||
| 					i915_mmio_reg_offset(DERRMR)) | ||||
| 		if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) { | ||||
| 			if (s->ring_id == BCS0 && | ||||
| 			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) | ||||
| 				ret |= 0; | ||||
| 			else | ||||
| 				ret |= (cmd_reg_inhibit(s, i)) ? | ||||
| 					-EBADRQC : 0; | ||||
| 				ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; | ||||
| 		} | ||||
| 		if (ret) | ||||
| 			break; | ||||
| @ -1047,27 +1043,27 @@ struct cmd_interrupt_event { | ||||
| }; | ||||
| 
 | ||||
| static struct cmd_interrupt_event cmd_interrupt_events[] = { | ||||
| 	[RCS] = { | ||||
| 	[RCS0] = { | ||||
| 		.pipe_control_notify = RCS_PIPE_CONTROL, | ||||
| 		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED, | ||||
| 		.mi_user_interrupt = RCS_MI_USER_INTERRUPT, | ||||
| 	}, | ||||
| 	[BCS] = { | ||||
| 	[BCS0] = { | ||||
| 		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED, | ||||
| 		.mi_flush_dw = BCS_MI_FLUSH_DW, | ||||
| 		.mi_user_interrupt = BCS_MI_USER_INTERRUPT, | ||||
| 	}, | ||||
| 	[VCS] = { | ||||
| 	[VCS0] = { | ||||
| 		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED, | ||||
| 		.mi_flush_dw = VCS_MI_FLUSH_DW, | ||||
| 		.mi_user_interrupt = VCS_MI_USER_INTERRUPT, | ||||
| 	}, | ||||
| 	[VCS2] = { | ||||
| 	[VCS1] = { | ||||
| 		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED, | ||||
| 		.mi_flush_dw = VCS2_MI_FLUSH_DW, | ||||
| 		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT, | ||||
| 	}, | ||||
| 	[VECS] = { | ||||
| 	[VECS0] = { | ||||
| 		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED, | ||||
| 		.mi_flush_dw = VECS_MI_FLUSH_DW, | ||||
| 		.mi_user_interrupt = VECS_MI_USER_INTERRUPT, | ||||
|  | ||||
| @ -47,17 +47,16 @@ | ||||
| 		((a)->lrca == (b)->lrca)) | ||||
| 
 | ||||
| static int context_switch_events[] = { | ||||
| 	[RCS] = RCS_AS_CONTEXT_SWITCH, | ||||
| 	[BCS] = BCS_AS_CONTEXT_SWITCH, | ||||
| 	[VCS] = VCS_AS_CONTEXT_SWITCH, | ||||
| 	[VCS2] = VCS2_AS_CONTEXT_SWITCH, | ||||
| 	[VECS] = VECS_AS_CONTEXT_SWITCH, | ||||
| 	[RCS0]  = RCS_AS_CONTEXT_SWITCH, | ||||
| 	[BCS0]  = BCS_AS_CONTEXT_SWITCH, | ||||
| 	[VCS0]  = VCS_AS_CONTEXT_SWITCH, | ||||
| 	[VCS1]  = VCS2_AS_CONTEXT_SWITCH, | ||||
| 	[VECS0] = VECS_AS_CONTEXT_SWITCH, | ||||
| }; | ||||
| 
 | ||||
| static int ring_id_to_context_switch_event(int ring_id) | ||||
| static int ring_id_to_context_switch_event(unsigned int ring_id) | ||||
| { | ||||
| 	if (WARN_ON(ring_id < RCS || | ||||
| 		    ring_id >= ARRAY_SIZE(context_switch_events))) | ||||
| 	if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events))) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	return context_switch_events[ring_id]; | ||||
| @ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) | ||||
| 	gvt_dbg_el("complete workload %p status %d\n", workload, | ||||
| 			workload->status); | ||||
| 
 | ||||
| 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) | ||||
| 	if (workload->status || (vgpu->resetting_eng & BIT(ring_id))) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	if (!list_empty(workload_q_head(vgpu, ring_id))) { | ||||
|  | ||||
| @ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | ||||
| 	} else { | ||||
| 		if (data & GEN6_GRDOM_RENDER) { | ||||
| 			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); | ||||
| 			engine_mask |= (1 << RCS); | ||||
| 			engine_mask |= BIT(RCS0); | ||||
| 		} | ||||
| 		if (data & GEN6_GRDOM_MEDIA) { | ||||
| 			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); | ||||
| 			engine_mask |= (1 << VCS); | ||||
| 			engine_mask |= BIT(VCS0); | ||||
| 		} | ||||
| 		if (data & GEN6_GRDOM_BLT) { | ||||
| 			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); | ||||
| 			engine_mask |= (1 << BCS); | ||||
| 			engine_mask |= BIT(BCS0); | ||||
| 		} | ||||
| 		if (data & GEN6_GRDOM_VECS) { | ||||
| 			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); | ||||
| 			engine_mask |= (1 << VECS); | ||||
| 			engine_mask |= BIT(VECS0); | ||||
| 		} | ||||
| 		if (data & GEN8_GRDOM_MEDIA2) { | ||||
| 			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); | ||||
| 			if (HAS_BSD2(vgpu->gvt->dev_priv)) | ||||
| 				engine_mask |= (1 << VCS2); | ||||
| 			engine_mask |= BIT(VCS1); | ||||
| 		} | ||||
| 		engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; | ||||
| 	} | ||||
| 
 | ||||
| 	/* vgpu_lock already hold by emulate mmio r/w */ | ||||
| @ -1704,7 +1704,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | ||||
| 			return 0; | ||||
| 
 | ||||
| 		ret = intel_vgpu_select_submission_ops(vgpu, | ||||
| 			       ENGINE_MASK(ring_id), | ||||
| 			       BIT(ring_id), | ||||
| 			       INTEL_VGPU_EXECLIST_SUBMISSION); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| @ -1724,19 +1724,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, | ||||
| 
 | ||||
| 	switch (offset) { | ||||
| 	case 0x4260: | ||||
| 		id = RCS; | ||||
| 		id = RCS0; | ||||
| 		break; | ||||
| 	case 0x4264: | ||||
| 		id = VCS; | ||||
| 		id = VCS0; | ||||
| 		break; | ||||
| 	case 0x4268: | ||||
| 		id = VCS2; | ||||
| 		id = VCS1; | ||||
| 		break; | ||||
| 	case 0x426c: | ||||
| 		id = BCS; | ||||
| 		id = BCS0; | ||||
| 		break; | ||||
| 	case 0x4270: | ||||
| 		id = VECS; | ||||
| 		id = VECS0; | ||||
| 		break; | ||||
| 	default: | ||||
| 		return -EINVAL; | ||||
| @ -1793,7 +1793,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, | ||||
| 	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ | ||||
| 	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ | ||||
| 	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ | ||||
| 	if (HAS_BSD2(dev_priv)) \ | ||||
| 	if (HAS_ENGINE(dev_priv, VCS1)) \ | ||||
| 		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ | ||||
| } while (0) | ||||
| 
 | ||||
|  | ||||
| @ -536,7 +536,7 @@ static void gen8_init_irq( | ||||
| 	SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); | ||||
| 	SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); | ||||
| 
 | ||||
| 	if (HAS_BSD2(gvt->dev_priv)) { | ||||
| 	if (HAS_ENGINE(gvt->dev_priv, VCS1)) { | ||||
| 		SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, | ||||
| 			INTEL_GVT_IRQ_INFO_GT1); | ||||
| 		SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, | ||||
|  | ||||
| @ -41,102 +41,102 @@ | ||||
| 
 | ||||
| /* Raw offset is appened to each line for convenience. */ | ||||
| static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { | ||||
| 	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ | ||||
| 	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ | ||||
| 	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */ | ||||
| 	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ | ||||
| 	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ | ||||
| 	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ | ||||
| 	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ | ||||
| 	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ | ||||
| 	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ | ||||
| 	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ | ||||
| 	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ | ||||
| 	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ | ||||
| 	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ | ||||
| 	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ | ||||
| 	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ | ||||
| 	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ | ||||
| 	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ | ||||
| 	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ | ||||
| 	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ | ||||
| 	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ | ||||
| 
 | ||||
| 	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ | ||||
| 	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ | ||||
| 	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ | ||||
| 	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ | ||||
| 	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ | ||||
| 	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */ | ||||
| 	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ | ||||
| 	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ | ||||
| 	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ | ||||
| 	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ | ||||
| 	{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ | ||||
| 	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ | ||||
| }; | ||||
| 
 | ||||
| static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { | ||||
| 	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ | ||||
| 	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ | ||||
| 	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */ | ||||
| 	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ | ||||
| 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ | ||||
| 	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ | ||||
| 	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ | ||||
| 	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ | ||||
| 	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ | ||||
| 	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ | ||||
| 	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ | ||||
| 	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ | ||||
| 	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ | ||||
| 	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ | ||||
| 	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ | ||||
| 	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ | ||||
| 	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ | ||||
| 	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ | ||||
| 	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ | ||||
| 	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ | ||||
| 	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ | ||||
| 	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ | ||||
| 
 | ||||
| 	{RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ | ||||
| 	{RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ | ||||
| 	{RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ | ||||
| 	{RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ | ||||
| 	{RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ | ||||
| 	{RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ | ||||
| 	{RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ | ||||
| 	{RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ | ||||
| 	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ | ||||
| 	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ | ||||
| 	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ | ||||
| 	{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ | ||||
| 	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ | ||||
| 	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ | ||||
| 	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */ | ||||
| 	{RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */ | ||||
| 	{RCS, TRVADR, 0, false}, /* 0x4df0 */ | ||||
| 	{RCS, TRTTE, 0, false}, /* 0x4df4 */ | ||||
| 	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ | ||||
| 	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ | ||||
| 	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ | ||||
| 	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ | ||||
| 	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ | ||||
| 	{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ | ||||
| 	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ | ||||
| 	{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ | ||||
| 	{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ | ||||
| 	{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ | ||||
| 	{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ | ||||
| 	{RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ | ||||
| 	{RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ | ||||
| 	{RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ | ||||
| 	{RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */ | ||||
| 	{RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */ | ||||
| 	{RCS0, TRVADR, 0, false}, /* 0x4df0 */ | ||||
| 	{RCS0, TRTTE, 0, false}, /* 0x4df4 */ | ||||
| 
 | ||||
| 	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ | ||||
| 	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ | ||||
| 	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ | ||||
| 	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ | ||||
| 	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ | ||||
| 	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ | ||||
| 	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ | ||||
| 	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ | ||||
| 	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ | ||||
| 	{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ | ||||
| 
 | ||||
| 	{VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ | ||||
| 	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ | ||||
| 
 | ||||
| 	{VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ | ||||
| 	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ | ||||
| 
 | ||||
| 	{RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ | ||||
| 	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ | ||||
| 	{RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ | ||||
| 	{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ | ||||
| 	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ | ||||
| 	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ | ||||
| 	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ | ||||
| 	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ | ||||
| 
 | ||||
| 	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ | ||||
| 	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ | ||||
| 	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ | ||||
| 	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ | ||||
| 
 | ||||
| 	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ | ||||
| 	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ | ||||
| 	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ | ||||
| 	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */ | ||||
| 	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ | ||||
| 	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ | ||||
| 	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ | ||||
| 	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ | ||||
| }; | ||||
| 
 | ||||
| static struct { | ||||
| @ -149,11 +149,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	i915_reg_t offset; | ||||
| 	u32 regs[] = { | ||||
| 		[RCS] = 0xc800, | ||||
| 		[VCS] = 0xc900, | ||||
| 		[VCS2] = 0xca00, | ||||
| 		[BCS] = 0xcc00, | ||||
| 		[VECS] = 0xcb00, | ||||
| 		[RCS0]  = 0xc800, | ||||
| 		[VCS0]  = 0xc900, | ||||
| 		[VCS1]  = 0xca00, | ||||
| 		[BCS0]  = 0xcc00, | ||||
| 		[VECS0] = 0xcb00, | ||||
| 	}; | ||||
| 	int ring_id, i; | ||||
| 
 | ||||
| @ -301,7 +301,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, | ||||
| 		goto out; | ||||
| 
 | ||||
| 	/* no MOCS register in context except render engine */ | ||||
| 	if (req->engine->id != RCS) | ||||
| 	if (req->engine->id != RCS0) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	ret = restore_render_mocs_control_for_inhibit(vgpu, req); | ||||
| @ -331,11 +331,11 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) | ||||
| 	enum forcewake_domains fw; | ||||
| 	i915_reg_t reg; | ||||
| 	u32 regs[] = { | ||||
| 		[RCS] = 0x4260, | ||||
| 		[VCS] = 0x4264, | ||||
| 		[VCS2] = 0x4268, | ||||
| 		[BCS] = 0x426c, | ||||
| 		[VECS] = 0x4270, | ||||
| 		[RCS0]  = 0x4260, | ||||
| 		[VCS0]  = 0x4264, | ||||
| 		[VCS1]  = 0x4268, | ||||
| 		[BCS0]  = 0x426c, | ||||
| 		[VECS0] = 0x4270, | ||||
| 	}; | ||||
| 
 | ||||
| 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) | ||||
| @ -353,7 +353,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) | ||||
| 	 */ | ||||
| 	fw = intel_uncore_forcewake_for_reg(dev_priv, reg, | ||||
| 					    FW_REG_READ | FW_REG_WRITE); | ||||
| 	if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9)) | ||||
| 	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9) | ||||
| 		fw |= FORCEWAKE_RENDER; | ||||
| 
 | ||||
| 	intel_uncore_forcewake_get(dev_priv, fw); | ||||
| @ -378,11 +378,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, | ||||
| 	u32 old_v, new_v; | ||||
| 
 | ||||
| 	u32 regs[] = { | ||||
| 		[RCS] = 0xc800, | ||||
| 		[VCS] = 0xc900, | ||||
| 		[VCS2] = 0xca00, | ||||
| 		[BCS] = 0xcc00, | ||||
| 		[VECS] = 0xcb00, | ||||
| 		[RCS0]  = 0xc800, | ||||
| 		[VCS0]  = 0xc900, | ||||
| 		[VCS1]  = 0xca00, | ||||
| 		[BCS0]  = 0xcc00, | ||||
| 		[VECS0] = 0xcb00, | ||||
| 	}; | ||||
| 	int i; | ||||
| 
 | ||||
| @ -390,8 +390,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, | ||||
| 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) | ||||
| 		return; | ||||
| 
 | ||||
| 	if ((IS_KABYLAKE(dev_priv)  || IS_BROXTON(dev_priv) | ||||
| 		|| IS_COFFEELAKE(dev_priv)) && ring_id == RCS) | ||||
| 	if (ring_id == RCS0 && | ||||
| 	    (IS_KABYLAKE(dev_priv) || | ||||
| 	     IS_BROXTON(dev_priv) || | ||||
| 	     IS_COFFEELAKE(dev_priv))) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (!pre && !gen9_render_mocs.initialized) | ||||
| @ -414,7 +416,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, | ||||
| 		offset.reg += 4; | ||||
| 	} | ||||
| 
 | ||||
| 	if (ring_id == RCS) { | ||||
| 	if (ring_id == RCS0) { | ||||
| 		l3_offset.reg = 0xb020; | ||||
| 		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { | ||||
| 			if (pre) | ||||
|  | ||||
| @ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, | ||||
| 		i915_mmio_reg_offset(EU_PERF_CNTL6), | ||||
| 	}; | ||||
| 
 | ||||
| 	if (workload->ring_id != RCS) | ||||
| 	if (workload->ring_id != RCS0) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (save) { | ||||
| @ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) | ||||
| 	COPY_REG_MASKED(ctx_ctrl); | ||||
| 	COPY_REG(ctx_timestamp); | ||||
| 
 | ||||
| 	if (ring_id == RCS) { | ||||
| 	if (ring_id == RCS0) { | ||||
| 		COPY_REG(bb_per_ctx_ptr); | ||||
| 		COPY_REG(rcs_indirect_ctx); | ||||
| 		COPY_REG(rcs_indirect_ctx_offset); | ||||
| @ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) | ||||
| 
 | ||||
| 	context_page_num = context_page_num >> PAGE_SHIFT; | ||||
| 
 | ||||
| 	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) | ||||
| 	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0) | ||||
| 		context_page_num = 19; | ||||
| 
 | ||||
| 	i = 2; | ||||
| @ -440,8 +440,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | ||||
| 	if (ret) | ||||
| 		goto err_unpin; | ||||
| 
 | ||||
| 	if ((workload->ring_id == RCS) && | ||||
| 	    (workload->wa_ctx.indirect_ctx.size != 0)) { | ||||
| 	if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) { | ||||
| 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); | ||||
| 		if (ret) | ||||
| 			goto err_shadow; | ||||
| @ -791,7 +790,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) | ||||
| 	context_page_num = rq->engine->context_size; | ||||
| 	context_page_num = context_page_num >> PAGE_SHIFT; | ||||
| 
 | ||||
| 	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) | ||||
| 	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0) | ||||
| 		context_page_num = 19; | ||||
| 
 | ||||
| 	i = 2; | ||||
| @ -891,8 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | ||||
| 				workload->status = 0; | ||||
| 		} | ||||
| 
 | ||||
| 		if (!workload->status && !(vgpu->resetting_eng & | ||||
| 					   ENGINE_MASK(ring_id))) { | ||||
| 		if (!workload->status && | ||||
| 		    !(vgpu->resetting_eng & BIT(ring_id))) { | ||||
| 			update_guest_context(workload); | ||||
| 
 | ||||
| 			for_each_set_bit(event, workload->pending_events, | ||||
| @ -915,7 +914,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | ||||
| 
 | ||||
| 	list_del_init(&workload->list); | ||||
| 
 | ||||
| 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { | ||||
| 	if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { | ||||
| 		/* if workload->status is not successful means HW GPU
 | ||||
| 		 * has occurred GPU hang or something wrong with i915/GVT, | ||||
| 		 * and GVT won't inject context switch interrupt to guest. | ||||
| @ -929,7 +928,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | ||||
| 		 * cleaned up during the resetting process later, so doing | ||||
| 		 * the workload clean up here doesn't have any impact. | ||||
| 		 **/ | ||||
| 		intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id)); | ||||
| 		intel_vgpu_clean_workloads(vgpu, BIT(ring_id)); | ||||
| 	} | ||||
| 
 | ||||
| 	workload->complete(workload); | ||||
| @ -1438,7 +1437,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, | ||||
| 	workload->rb_start = start; | ||||
| 	workload->rb_ctl = ctl; | ||||
| 
 | ||||
| 	if (ring_id == RCS) { | ||||
| 	if (ring_id == RCS0) { | ||||
| 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + | ||||
| 			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); | ||||
| 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + | ||||
|  | ||||
| @ -868,8 +868,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) | ||||
| 	if (!IS_GEN(engine->i915, 7)) | ||||
| 		return; | ||||
| 
 | ||||
| 	switch (engine->id) { | ||||
| 	case RCS: | ||||
| 	switch (engine->class) { | ||||
| 	case RENDER_CLASS: | ||||
| 		if (IS_HASWELL(engine->i915)) { | ||||
| 			cmd_tables = hsw_render_ring_cmds; | ||||
| 			cmd_table_count = | ||||
| @ -889,12 +889,12 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) | ||||
| 
 | ||||
| 		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; | ||||
| 		break; | ||||
| 	case VCS: | ||||
| 	case VIDEO_DECODE_CLASS: | ||||
| 		cmd_tables = gen7_video_cmds; | ||||
| 		cmd_table_count = ARRAY_SIZE(gen7_video_cmds); | ||||
| 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; | ||||
| 		break; | ||||
| 	case BCS: | ||||
| 	case COPY_ENGINE_CLASS: | ||||
| 		if (IS_HASWELL(engine->i915)) { | ||||
| 			cmd_tables = hsw_blt_ring_cmds; | ||||
| 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); | ||||
| @ -913,14 +913,14 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) | ||||
| 
 | ||||
| 		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; | ||||
| 		break; | ||||
| 	case VECS: | ||||
| 	case VIDEO_ENHANCEMENT_CLASS: | ||||
| 		cmd_tables = hsw_vebox_cmds; | ||||
| 		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); | ||||
| 		/* VECS can use the same length_mask function as VCS */ | ||||
| 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; | ||||
| 		break; | ||||
| 	default: | ||||
| 		MISSING_CASE(engine->id); | ||||
| 		MISSING_CASE(engine->class); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
|  | ||||
| @ -1298,7 +1298,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) | ||||
| 			seqno[id] = intel_engine_get_hangcheck_seqno(engine); | ||||
| 		} | ||||
| 
 | ||||
| 		intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); | ||||
| 		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone); | ||||
| 	} | ||||
| 
 | ||||
| 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) | ||||
| @ -1325,7 +1325,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) | ||||
| 			   (long long)engine->hangcheck.acthd, | ||||
| 			   (long long)acthd[id]); | ||||
| 
 | ||||
| 		if (engine->id == RCS) { | ||||
| 		if (engine->id == RCS0) { | ||||
| 			seq_puts(m, "\tinstdone read =\n"); | ||||
| 
 | ||||
| 			i915_instdone_info(dev_priv, m, &instdone); | ||||
| @ -3178,7 +3178,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) | ||||
| static int i915_wa_registers(struct seq_file *m, void *unused) | ||||
| { | ||||
| 	struct drm_i915_private *i915 = node_to_i915(m->private); | ||||
| 	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list; | ||||
| 	const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list; | ||||
| 	struct i915_wa *wa; | ||||
| 	unsigned int i; | ||||
| 
 | ||||
|  | ||||
| @ -330,16 +330,16 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, | ||||
| 		value = dev_priv->overlay ? 1 : 0; | ||||
| 		break; | ||||
| 	case I915_PARAM_HAS_BSD: | ||||
| 		value = !!dev_priv->engine[VCS]; | ||||
| 		value = !!dev_priv->engine[VCS0]; | ||||
| 		break; | ||||
| 	case I915_PARAM_HAS_BLT: | ||||
| 		value = !!dev_priv->engine[BCS]; | ||||
| 		value = !!dev_priv->engine[BCS0]; | ||||
| 		break; | ||||
| 	case I915_PARAM_HAS_VEBOX: | ||||
| 		value = !!dev_priv->engine[VECS]; | ||||
| 		value = !!dev_priv->engine[VECS0]; | ||||
| 		break; | ||||
| 	case I915_PARAM_HAS_BSD2: | ||||
| 		value = !!dev_priv->engine[VCS2]; | ||||
| 		value = !!dev_priv->engine[VCS1]; | ||||
| 		break; | ||||
| 	case I915_PARAM_HAS_LLC: | ||||
| 		value = HAS_LLC(dev_priv); | ||||
|  | ||||
| @ -2099,7 +2099,7 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) | ||||
| 
 | ||||
| /* Iterator over subset of engines selected by mask */ | ||||
| #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ | ||||
| 	for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \ | ||||
| 	for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \ | ||||
| 	     (tmp__) ? \ | ||||
| 	     ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \ | ||||
| 	     0;) | ||||
| @ -2420,24 +2420,8 @@ static inline unsigned int i915_sg_segment_size(void) | ||||
| #define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) | ||||
| #define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) | ||||
| 
 | ||||
| #define ENGINE_MASK(id)	BIT(id) | ||||
| #define RENDER_RING	ENGINE_MASK(RCS) | ||||
| #define BSD_RING	ENGINE_MASK(VCS) | ||||
| #define BLT_RING	ENGINE_MASK(BCS) | ||||
| #define VEBOX_RING	ENGINE_MASK(VECS) | ||||
| #define BSD2_RING	ENGINE_MASK(VCS2) | ||||
| #define BSD3_RING	ENGINE_MASK(VCS3) | ||||
| #define BSD4_RING	ENGINE_MASK(VCS4) | ||||
| #define VEBOX2_RING	ENGINE_MASK(VECS2) | ||||
| #define ALL_ENGINES	(~0) | ||||
| 
 | ||||
| #define HAS_ENGINE(dev_priv, id) \ | ||||
| 	(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id))) | ||||
| 
 | ||||
| #define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS) | ||||
| #define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2) | ||||
| #define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS) | ||||
| #define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS) | ||||
| #define ALL_ENGINES	(~0u) | ||||
| #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id)) | ||||
| 
 | ||||
| #define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc) | ||||
| #define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop) | ||||
|  | ||||
| @ -583,7 +583,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) | ||||
| 	GEM_BUG_ON(dev_priv->kernel_context); | ||||
| 	GEM_BUG_ON(dev_priv->preempt_context); | ||||
| 
 | ||||
| 	intel_engine_init_ctx_wa(dev_priv->engine[RCS]); | ||||
| 	intel_engine_init_ctx_wa(dev_priv->engine[RCS0]); | ||||
| 	init_contexts(dev_priv); | ||||
| 
 | ||||
| 	/* lowest priority; idle task */ | ||||
| @ -1089,7 +1089,7 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx, | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8); | ||||
| 	GEM_BUG_ON(engine->id != RCS); | ||||
| 	GEM_BUG_ON(engine->id != RCS0); | ||||
| 
 | ||||
| 	/* Nothing to do if unmodified. */ | ||||
| 	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) | ||||
|  | ||||
| @ -1957,7 +1957,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) | ||||
| 	u32 *cs; | ||||
| 	int i; | ||||
| 
 | ||||
| 	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) { | ||||
| 	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) { | ||||
| 		DRM_DEBUG("sol reset is gen7/rcs only\n"); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| @ -2082,11 +2082,11 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, | ||||
| #define I915_USER_RINGS (4) | ||||
| 
 | ||||
| static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = { | ||||
| 	[I915_EXEC_DEFAULT]	= RCS, | ||||
| 	[I915_EXEC_RENDER]	= RCS, | ||||
| 	[I915_EXEC_BLT]		= BCS, | ||||
| 	[I915_EXEC_BSD]		= VCS, | ||||
| 	[I915_EXEC_VEBOX]	= VECS | ||||
| 	[I915_EXEC_DEFAULT]	= RCS0, | ||||
| 	[I915_EXEC_RENDER]	= RCS0, | ||||
| 	[I915_EXEC_BLT]		= BCS0, | ||||
| 	[I915_EXEC_BSD]		= VCS0, | ||||
| 	[I915_EXEC_VEBOX]	= VECS0 | ||||
| }; | ||||
| 
 | ||||
| static struct intel_engine_cs * | ||||
| @ -2109,7 +2109,7 @@ eb_select_engine(struct drm_i915_private *dev_priv, | ||||
| 		return NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) { | ||||
| 	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) { | ||||
| 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; | ||||
| 
 | ||||
| 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) { | ||||
|  | ||||
| @ -799,7 +799,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm, | ||||
|  */ | ||||
| static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) | ||||
| { | ||||
| 	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask; | ||||
| 	ppgtt->pd_dirty_engines = INTEL_INFO(ppgtt->vm.i915)->engine_mask; | ||||
| } | ||||
| 
 | ||||
| /* Removes entries from a single page table, releasing it if it's empty.
 | ||||
|  | ||||
| @ -390,7 +390,7 @@ struct i915_hw_ppgtt { | ||||
| 	struct i915_address_space vm; | ||||
| 	struct kref ref; | ||||
| 
 | ||||
| 	unsigned long pd_dirty_rings; | ||||
| 	unsigned long pd_dirty_engines; | ||||
| 	union { | ||||
| 		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */ | ||||
| 		struct i915_page_directory_pointer pdp;	/* GEN8+ */ | ||||
|  | ||||
| @ -42,7 +42,7 @@ struct intel_render_state { | ||||
| static const struct intel_renderstate_rodata * | ||||
| render_state_get_rodata(const struct intel_engine_cs *engine) | ||||
| { | ||||
| 	if (engine->id != RCS) | ||||
| 	if (engine->id != RCS0) | ||||
| 		return NULL; | ||||
| 
 | ||||
| 	switch (INTEL_GEN(engine->i915)) { | ||||
|  | ||||
| @ -411,7 +411,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, | ||||
| 	err_printf(m, "  INSTDONE: 0x%08x\n", | ||||
| 		   ee->instdone.instdone); | ||||
| 
 | ||||
| 	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3) | ||||
| 	if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3) | ||||
| 		return; | ||||
| 
 | ||||
| 	err_printf(m, "  SC_INSTDONE: 0x%08x\n", | ||||
| @ -1179,16 +1179,17 @@ static void error_record_engine_registers(struct i915_gpu_state *error, | ||||
| 		if (IS_GEN(dev_priv, 7)) { | ||||
| 			switch (engine->id) { | ||||
| 			default: | ||||
| 			case RCS: | ||||
| 				MISSING_CASE(engine->id); | ||||
| 			case RCS0: | ||||
| 				mmio = RENDER_HWS_PGA_GEN7; | ||||
| 				break; | ||||
| 			case BCS: | ||||
| 			case BCS0: | ||||
| 				mmio = BLT_HWS_PGA_GEN7; | ||||
| 				break; | ||||
| 			case VCS: | ||||
| 			case VCS0: | ||||
| 				mmio = BSD_HWS_PGA_GEN7; | ||||
| 				break; | ||||
| 			case VECS: | ||||
| 			case VECS0: | ||||
| 				mmio = VEBOX_HWS_PGA_GEN7; | ||||
| 				break; | ||||
| 			} | ||||
|  | ||||
| @ -1427,20 +1427,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, | ||||
| 			       u32 gt_iir) | ||||
| { | ||||
| 	if (gt_iir & GT_RENDER_USER_INTERRUPT) | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); | ||||
| 	if (gt_iir & ILK_BSD_USER_INTERRUPT) | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); | ||||
| } | ||||
| 
 | ||||
| static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, | ||||
| 			       u32 gt_iir) | ||||
| { | ||||
| 	if (gt_iir & GT_RENDER_USER_INTERRUPT) | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); | ||||
| 	if (gt_iir & GT_BSD_USER_INTERRUPT) | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); | ||||
| 	if (gt_iir & GT_BLT_USER_INTERRUPT) | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]); | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]); | ||||
| 
 | ||||
| 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | | ||||
| 		      GT_BSD_CS_ERROR_INTERRUPT | | ||||
| @ -1475,8 +1475,8 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915, | ||||
| 
 | ||||
| #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ | ||||
| 		      GEN8_GT_BCS_IRQ | \ | ||||
| 		      GEN8_GT_VCS0_IRQ | \ | ||||
| 		      GEN8_GT_VCS1_IRQ | \ | ||||
| 		      GEN8_GT_VCS2_IRQ | \ | ||||
| 		      GEN8_GT_VECS_IRQ | \ | ||||
| 		      GEN8_GT_PM_IRQ | \ | ||||
| 		      GEN8_GT_GUC_IRQ) | ||||
| @ -1487,7 +1487,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915, | ||||
| 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); | ||||
| 	} | ||||
| 
 | ||||
| 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { | ||||
| 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { | ||||
| 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); | ||||
| 		if (likely(gt_iir[1])) | ||||
| 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); | ||||
| @ -1510,21 +1510,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, | ||||
| 				u32 master_ctl, u32 gt_iir[4]) | ||||
| { | ||||
| 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { | ||||
| 		gen8_cs_irq_handler(i915->engine[RCS], | ||||
| 		gen8_cs_irq_handler(i915->engine[RCS0], | ||||
| 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); | ||||
| 		gen8_cs_irq_handler(i915->engine[BCS], | ||||
| 		gen8_cs_irq_handler(i915->engine[BCS0], | ||||
| 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); | ||||
| 	} | ||||
| 
 | ||||
| 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { | ||||
| 		gen8_cs_irq_handler(i915->engine[VCS], | ||||
| 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { | ||||
| 		gen8_cs_irq_handler(i915->engine[VCS0], | ||||
| 				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT); | ||||
| 		gen8_cs_irq_handler(i915->engine[VCS1], | ||||
| 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); | ||||
| 		gen8_cs_irq_handler(i915->engine[VCS2], | ||||
| 				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); | ||||
| 	} | ||||
| 
 | ||||
| 	if (master_ctl & GEN8_GT_VECS_IRQ) { | ||||
| 		gen8_cs_irq_handler(i915->engine[VECS], | ||||
| 		gen8_cs_irq_handler(i915->engine[VECS0], | ||||
| 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); | ||||
| 	} | ||||
| 
 | ||||
| @ -1802,7 +1802,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (pm_iir & PM_VEBOX_USER_INTERRUPT) | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]); | ||||
| 		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]); | ||||
| 
 | ||||
| 	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) | ||||
| 		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); | ||||
| @ -3780,7 +3780,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) | ||||
| 		 * RPS interrupts will get enabled/disabled on demand when RPS | ||||
| 		 * itself is enabled/disabled. | ||||
| 		 */ | ||||
| 		if (HAS_VEBOX(dev_priv)) { | ||||
| 		if (HAS_ENGINE(dev_priv, VECS0)) { | ||||
| 			pm_irqs |= PM_VEBOX_USER_INTERRUPT; | ||||
| 			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; | ||||
| 		} | ||||
| @ -3892,18 +3892,21 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	/* These are interrupts we'll toggle with the ring mask register */ | ||||
| 	u32 gt_interrupts[] = { | ||||
| 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | ||||
| 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | ||||
| 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | | ||||
| 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, | ||||
| 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | | ||||
| 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | | ||||
| 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | | ||||
| 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, | ||||
| 		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | ||||
| 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | ||||
| 		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | | ||||
| 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT), | ||||
| 
 | ||||
| 		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | | ||||
| 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | | ||||
| 		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | | ||||
| 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT), | ||||
| 
 | ||||
| 		0, | ||||
| 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | | ||||
| 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT | ||||
| 		}; | ||||
| 
 | ||||
| 		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | | ||||
| 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT) | ||||
| 	}; | ||||
| 
 | ||||
| 	dev_priv->pm_ier = 0x0; | ||||
| 	dev_priv->pm_imr = ~dev_priv->pm_ier; | ||||
| @ -4231,7 +4234,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) | ||||
| 		I915_WRITE16(IIR, iir); | ||||
| 
 | ||||
| 		if (iir & I915_USER_INTERRUPT) | ||||
| 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); | ||||
| 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); | ||||
| 
 | ||||
| 		if (iir & I915_MASTER_ERROR_INTERRUPT) | ||||
| 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck); | ||||
| @ -4339,7 +4342,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | ||||
| 		I915_WRITE(IIR, iir); | ||||
| 
 | ||||
| 		if (iir & I915_USER_INTERRUPT) | ||||
| 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); | ||||
| 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); | ||||
| 
 | ||||
| 		if (iir & I915_MASTER_ERROR_INTERRUPT) | ||||
| 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck); | ||||
| @ -4484,10 +4487,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | ||||
| 		I915_WRITE(IIR, iir); | ||||
| 
 | ||||
| 		if (iir & I915_USER_INTERRUPT) | ||||
| 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); | ||||
| 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); | ||||
| 
 | ||||
| 		if (iir & I915_BSD_USER_INTERRUPT) | ||||
| 			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); | ||||
| 			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); | ||||
| 
 | ||||
| 		if (iir & I915_MASTER_ERROR_INTERRUPT) | ||||
| 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck); | ||||
|  | ||||
| @ -94,7 +94,7 @@ | ||||
| 	.gpu_reset_clobbers_display = true, \ | ||||
| 	.hws_needs_physical = 1, \ | ||||
| 	.unfenced_needs_alignment = 1, \ | ||||
| 	.ring_mask = RENDER_RING, \ | ||||
| 	.engine_mask = BIT(RCS0), \ | ||||
| 	.has_snoop = true, \ | ||||
| 	.has_coherent_ggtt = false, \ | ||||
| 	GEN_DEFAULT_PIPEOFFSETS, \ | ||||
| @ -133,7 +133,7 @@ static const struct intel_device_info intel_i865g_info = { | ||||
| 	.num_pipes = 2, \ | ||||
| 	.display.has_gmch = 1, \ | ||||
| 	.gpu_reset_clobbers_display = true, \ | ||||
| 	.ring_mask = RENDER_RING, \ | ||||
| 	.engine_mask = BIT(RCS0), \ | ||||
| 	.has_snoop = true, \ | ||||
| 	.has_coherent_ggtt = true, \ | ||||
| 	GEN_DEFAULT_PIPEOFFSETS, \ | ||||
| @ -210,7 +210,7 @@ static const struct intel_device_info intel_pineview_info = { | ||||
| 	.display.has_hotplug = 1, \ | ||||
| 	.display.has_gmch = 1, \ | ||||
| 	.gpu_reset_clobbers_display = true, \ | ||||
| 	.ring_mask = RENDER_RING, \ | ||||
| 	.engine_mask = BIT(RCS0), \ | ||||
| 	.has_snoop = true, \ | ||||
| 	.has_coherent_ggtt = true, \ | ||||
| 	GEN_DEFAULT_PIPEOFFSETS, \ | ||||
| @ -239,7 +239,7 @@ static const struct intel_device_info intel_i965gm_info = { | ||||
| static const struct intel_device_info intel_g45_info = { | ||||
| 	GEN4_FEATURES, | ||||
| 	PLATFORM(INTEL_G45), | ||||
| 	.ring_mask = RENDER_RING | BSD_RING, | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0), | ||||
| 	.gpu_reset_clobbers_display = false, | ||||
| }; | ||||
| 
 | ||||
| @ -249,7 +249,7 @@ static const struct intel_device_info intel_gm45_info = { | ||||
| 	.is_mobile = 1, | ||||
| 	.display.has_fbc = 1, | ||||
| 	.display.supports_tv = 1, | ||||
| 	.ring_mask = RENDER_RING | BSD_RING, | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0), | ||||
| 	.gpu_reset_clobbers_display = false, | ||||
| }; | ||||
| 
 | ||||
| @ -257,7 +257,7 @@ static const struct intel_device_info intel_gm45_info = { | ||||
| 	GEN(5), \ | ||||
| 	.num_pipes = 2, \ | ||||
| 	.display.has_hotplug = 1, \ | ||||
| 	.ring_mask = RENDER_RING | BSD_RING, \ | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0), \ | ||||
| 	.has_snoop = true, \ | ||||
| 	.has_coherent_ggtt = true, \ | ||||
| 	/* ilk does support rc6, but we do not implement [power] contexts */ \ | ||||
| @ -283,7 +283,7 @@ static const struct intel_device_info intel_ironlake_m_info = { | ||||
| 	.num_pipes = 2, \ | ||||
| 	.display.has_hotplug = 1, \ | ||||
| 	.display.has_fbc = 1, \ | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ | ||||
| 	.has_coherent_ggtt = true, \ | ||||
| 	.has_llc = 1, \ | ||||
| 	.has_rc6 = 1, \ | ||||
| @ -328,7 +328,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { | ||||
| 	.num_pipes = 3, \ | ||||
| 	.display.has_hotplug = 1, \ | ||||
| 	.display.has_fbc = 1, \ | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ | ||||
| 	.has_coherent_ggtt = true, \ | ||||
| 	.has_llc = 1, \ | ||||
| 	.has_rc6 = 1, \ | ||||
| @ -389,7 +389,7 @@ static const struct intel_device_info intel_valleyview_info = { | ||||
| 	.ppgtt = INTEL_PPGTT_FULL, | ||||
| 	.has_snoop = true, | ||||
| 	.has_coherent_ggtt = false, | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), | ||||
| 	.display_mmio_offset = VLV_DISPLAY_BASE, | ||||
| 	GEN_DEFAULT_PAGE_SIZES, | ||||
| 	GEN_DEFAULT_PIPEOFFSETS, | ||||
| @ -398,7 +398,7 @@ static const struct intel_device_info intel_valleyview_info = { | ||||
| 
 | ||||
| #define G75_FEATURES  \ | ||||
| 	GEN7_FEATURES, \ | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ | ||||
| 	.display.has_ddi = 1, \ | ||||
| 	.has_fpga_dbg = 1, \ | ||||
| 	.display.has_psr = 1, \ | ||||
| @ -462,7 +462,8 @@ static const struct intel_device_info intel_broadwell_rsvd_info = { | ||||
| static const struct intel_device_info intel_broadwell_gt3_info = { | ||||
| 	BDW_PLATFORM, | ||||
| 	.gt = 3, | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | ||||
| 	.engine_mask = | ||||
| 		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), | ||||
| }; | ||||
| 
 | ||||
| static const struct intel_device_info intel_cherryview_info = { | ||||
| @ -471,7 +472,7 @@ static const struct intel_device_info intel_cherryview_info = { | ||||
| 	.num_pipes = 3, | ||||
| 	.display.has_hotplug = 1, | ||||
| 	.is_lp = 1, | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), | ||||
| 	.has_64bit_reloc = 1, | ||||
| 	.has_runtime_pm = 1, | ||||
| 	.has_rc6 = 1, | ||||
| @ -521,7 +522,8 @@ static const struct intel_device_info intel_skylake_gt2_info = { | ||||
| 
 | ||||
| #define SKL_GT3_PLUS_PLATFORM \ | ||||
| 	SKL_PLATFORM, \ | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING | ||||
| 	.engine_mask = \ | ||||
| 		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) | ||||
| 
 | ||||
| 
 | ||||
| static const struct intel_device_info intel_skylake_gt3_info = { | ||||
| @ -538,7 +540,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { | ||||
| 	GEN(9), \ | ||||
| 	.is_lp = 1, \ | ||||
| 	.display.has_hotplug = 1, \ | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ | ||||
| 	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ | ||||
| 	.num_pipes = 3, \ | ||||
| 	.has_64bit_reloc = 1, \ | ||||
| 	.display.has_ddi = 1, \ | ||||
| @ -592,7 +594,8 @@ static const struct intel_device_info intel_kabylake_gt2_info = { | ||||
| static const struct intel_device_info intel_kabylake_gt3_info = { | ||||
| 	KBL_PLATFORM, | ||||
| 	.gt = 3, | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | ||||
| 	.engine_mask = | ||||
| 		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), | ||||
| }; | ||||
| 
 | ||||
| #define CFL_PLATFORM \ | ||||
| @ -612,7 +615,8 @@ static const struct intel_device_info intel_coffeelake_gt2_info = { | ||||
| static const struct intel_device_info intel_coffeelake_gt3_info = { | ||||
| 	CFL_PLATFORM, | ||||
| 	.gt = 3, | ||||
| 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | ||||
| 	.engine_mask = | ||||
| 		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), | ||||
| }; | ||||
| 
 | ||||
| #define GEN10_FEATURES \ | ||||
| @ -655,7 +659,8 @@ static const struct intel_device_info intel_icelake_11_info = { | ||||
| 	GEN11_FEATURES, | ||||
| 	PLATFORM(INTEL_ICELAKE), | ||||
| 	.is_alpha_support = 1, | ||||
| 	.ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING, | ||||
| 	.engine_mask = | ||||
| 		BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), | ||||
| }; | ||||
| 
 | ||||
| #undef GEN | ||||
|  | ||||
| @ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream, | ||||
| static struct intel_context *oa_pin_context(struct drm_i915_private *i915, | ||||
| 					    struct i915_gem_context *ctx) | ||||
| { | ||||
| 	struct intel_engine_cs *engine = i915->engine[RCS]; | ||||
| 	struct intel_engine_cs *engine = i915->engine[RCS0]; | ||||
| 	struct intel_context *ce; | ||||
| 	int ret; | ||||
| 
 | ||||
| @ -1681,7 +1681,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, | ||||
| 	CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, | ||||
| 		gen8_make_rpcs(dev_priv, | ||||
| 			       &to_intel_context(ctx, | ||||
| 						 dev_priv->engine[RCS])->sseu)); | ||||
| 						 dev_priv->engine[RCS0])->sseu)); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @ -1711,7 +1711,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, | ||||
| static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, | ||||
| 				       const struct i915_oa_config *oa_config) | ||||
| { | ||||
| 	struct intel_engine_cs *engine = dev_priv->engine[RCS]; | ||||
| 	struct intel_engine_cs *engine = dev_priv->engine[RCS0]; | ||||
| 	unsigned int map_type = i915_coherent_map_type(dev_priv); | ||||
| 	struct i915_gem_context *ctx; | ||||
| 	struct i915_request *rq; | ||||
| @ -2143,7 +2143,7 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine, | ||||
| { | ||||
| 	struct i915_perf_stream *stream; | ||||
| 
 | ||||
| 	if (engine->id != RCS) | ||||
| 	if (engine->class != RENDER_CLASS) | ||||
| 		return; | ||||
| 
 | ||||
| 	stream = engine->i915->perf.oa.exclusive_stream; | ||||
|  | ||||
| @ -101,7 +101,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) | ||||
| 	 * | ||||
| 	 * Use RCS as proxy for all engines. | ||||
| 	 */ | ||||
| 	else if (intel_engine_supports_stats(i915->engine[RCS])) | ||||
| 	else if (intel_engine_supports_stats(i915->engine[RCS0])) | ||||
| 		enable &= ~BIT(I915_SAMPLE_BUSY); | ||||
| 
 | ||||
| 	/*
 | ||||
|  | ||||
| @ -210,14 +210,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | ||||
| 
 | ||||
| /* Engine ID */ | ||||
| 
 | ||||
| #define RCS_HW		0 | ||||
| #define VCS_HW		1 | ||||
| #define BCS_HW		2 | ||||
| #define VECS_HW		3 | ||||
| #define VCS2_HW		4 | ||||
| #define VCS3_HW		6 | ||||
| #define VCS4_HW		7 | ||||
| #define VECS2_HW	12 | ||||
| #define RCS0_HW		0 | ||||
| #define VCS0_HW		1 | ||||
| #define BCS0_HW		2 | ||||
| #define VECS0_HW	3 | ||||
| #define VCS1_HW		4 | ||||
| #define VCS2_HW		6 | ||||
| #define VCS3_HW		7 | ||||
| #define VECS1_HW	12 | ||||
| 
 | ||||
| /* Engine class */ | ||||
| 
 | ||||
| @ -7250,8 +7250,8 @@ enum { | ||||
| #define  GEN8_GT_VECS_IRQ		(1 << 6) | ||||
| #define  GEN8_GT_GUC_IRQ		(1 << 5) | ||||
| #define  GEN8_GT_PM_IRQ			(1 << 4) | ||||
| #define  GEN8_GT_VCS2_IRQ		(1 << 3) | ||||
| #define  GEN8_GT_VCS1_IRQ		(1 << 2) | ||||
| #define  GEN8_GT_VCS1_IRQ		(1 << 3) /* NB: VCS2 in bspec! */ | ||||
| #define  GEN8_GT_VCS0_IRQ		(1 << 2) /* NB: VCS1 in bpsec! */ | ||||
| #define  GEN8_GT_BCS_IRQ		(1 << 1) | ||||
| #define  GEN8_GT_RCS_IRQ		(1 << 0) | ||||
| 
 | ||||
| @ -7272,8 +7272,8 @@ enum { | ||||
| 
 | ||||
| #define GEN8_RCS_IRQ_SHIFT 0 | ||||
| #define GEN8_BCS_IRQ_SHIFT 16 | ||||
| #define GEN8_VCS1_IRQ_SHIFT 0 | ||||
| #define GEN8_VCS2_IRQ_SHIFT 16 | ||||
| #define GEN8_VCS0_IRQ_SHIFT 0  /* NB: VCS1 in bspec! */ | ||||
| #define GEN8_VCS1_IRQ_SHIFT 16 /* NB: VCS2 in bpsec! */ | ||||
| #define GEN8_VECS_IRQ_SHIFT 0 | ||||
| #define GEN8_WD_IRQ_SHIFT 16 | ||||
| 
 | ||||
|  | ||||
| @ -297,12 +297,12 @@ static int gen6_reset_engines(struct drm_i915_private *i915, | ||||
| 			      unsigned int retry) | ||||
| { | ||||
| 	struct intel_engine_cs *engine; | ||||
| 	const u32 hw_engine_mask[I915_NUM_ENGINES] = { | ||||
| 		[RCS] = GEN6_GRDOM_RENDER, | ||||
| 		[BCS] = GEN6_GRDOM_BLT, | ||||
| 		[VCS] = GEN6_GRDOM_MEDIA, | ||||
| 		[VCS2] = GEN8_GRDOM_MEDIA2, | ||||
| 		[VECS] = GEN6_GRDOM_VECS, | ||||
| 	const u32 hw_engine_mask[] = { | ||||
| 		[RCS0]  = GEN6_GRDOM_RENDER, | ||||
| 		[BCS0]  = GEN6_GRDOM_BLT, | ||||
| 		[VCS0]  = GEN6_GRDOM_MEDIA, | ||||
| 		[VCS1]  = GEN8_GRDOM_MEDIA2, | ||||
| 		[VECS0] = GEN6_GRDOM_VECS, | ||||
| 	}; | ||||
| 	u32 hw_mask; | ||||
| 
 | ||||
| @ -312,8 +312,10 @@ static int gen6_reset_engines(struct drm_i915_private *i915, | ||||
| 		unsigned int tmp; | ||||
| 
 | ||||
| 		hw_mask = 0; | ||||
| 		for_each_engine_masked(engine, i915, engine_mask, tmp) | ||||
| 		for_each_engine_masked(engine, i915, engine_mask, tmp) { | ||||
| 			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); | ||||
| 			hw_mask |= hw_engine_mask[engine->id]; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return gen6_hw_domain_reset(i915, hw_mask); | ||||
| @ -420,28 +422,27 @@ static int gen11_reset_engines(struct drm_i915_private *i915, | ||||
| 			       unsigned int engine_mask, | ||||
| 			       unsigned int retry) | ||||
| { | ||||
| 	const u32 hw_engine_mask[I915_NUM_ENGINES] = { | ||||
| 		[RCS] = GEN11_GRDOM_RENDER, | ||||
| 		[BCS] = GEN11_GRDOM_BLT, | ||||
| 		[VCS] = GEN11_GRDOM_MEDIA, | ||||
| 		[VCS2] = GEN11_GRDOM_MEDIA2, | ||||
| 		[VCS3] = GEN11_GRDOM_MEDIA3, | ||||
| 		[VCS4] = GEN11_GRDOM_MEDIA4, | ||||
| 		[VECS] = GEN11_GRDOM_VECS, | ||||
| 		[VECS2] = GEN11_GRDOM_VECS2, | ||||
| 	const u32 hw_engine_mask[] = { | ||||
| 		[RCS0]  = GEN11_GRDOM_RENDER, | ||||
| 		[BCS0]  = GEN11_GRDOM_BLT, | ||||
| 		[VCS0]  = GEN11_GRDOM_MEDIA, | ||||
| 		[VCS1]  = GEN11_GRDOM_MEDIA2, | ||||
| 		[VCS2]  = GEN11_GRDOM_MEDIA3, | ||||
| 		[VCS3]  = GEN11_GRDOM_MEDIA4, | ||||
| 		[VECS0] = GEN11_GRDOM_VECS, | ||||
| 		[VECS1] = GEN11_GRDOM_VECS2, | ||||
| 	}; | ||||
| 	struct intel_engine_cs *engine; | ||||
| 	unsigned int tmp; | ||||
| 	u32 hw_mask; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES); | ||||
| 
 | ||||
| 	if (engine_mask == ALL_ENGINES) { | ||||
| 		hw_mask = GEN11_GRDOM_FULL; | ||||
| 	} else { | ||||
| 		hw_mask = 0; | ||||
| 		for_each_engine_masked(engine, i915, engine_mask, tmp) { | ||||
| 			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); | ||||
| 			hw_mask |= hw_engine_mask[engine->id]; | ||||
| 			hw_mask |= gen11_lock_sfc(i915, engine); | ||||
| 		} | ||||
| @ -692,7 +693,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask) | ||||
| 		return err; | ||||
| 
 | ||||
| 	for_each_engine(engine, i915, id) | ||||
| 		intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id)); | ||||
| 		intel_engine_reset(engine, stalled_mask & engine->mask); | ||||
| 
 | ||||
| 	i915_gem_restore_fences(i915); | ||||
| 
 | ||||
| @ -1057,7 +1058,7 @@ error: | ||||
| static inline int intel_gt_reset_engine(struct drm_i915_private *i915, | ||||
| 					struct intel_engine_cs *engine) | ||||
| { | ||||
| 	return intel_gpu_reset(i915, intel_engine_flag(engine)); | ||||
| 	return intel_gpu_reset(i915, engine->mask); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
| @ -1193,7 +1194,7 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv) | ||||
| 				   I915_READ(RING_FAULT_REG(engine)) & | ||||
| 				   ~RING_FAULT_VALID); | ||||
| 		} | ||||
| 		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); | ||||
| 		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0])); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| @ -1241,7 +1242,7 @@ void i915_handle_error(struct drm_i915_private *i915, | ||||
| 	 */ | ||||
| 	wakeref = intel_runtime_pm_get(i915); | ||||
| 
 | ||||
| 	engine_mask &= INTEL_INFO(i915)->ring_mask; | ||||
| 	engine_mask &= INTEL_INFO(i915)->engine_mask; | ||||
| 
 | ||||
| 	if (flags & I915_ERROR_CAPTURE) { | ||||
| 		i915_capture_error_state(i915, engine_mask, msg); | ||||
| @ -1260,7 +1261,7 @@ void i915_handle_error(struct drm_i915_private *i915, | ||||
| 				continue; | ||||
| 
 | ||||
| 			if (i915_reset_engine(engine, msg) == 0) | ||||
| 				engine_mask &= ~intel_engine_flag(engine); | ||||
| 				engine_mask &= ~engine->mask; | ||||
| 
 | ||||
| 			clear_bit(I915_RESET_ENGINE + engine->id, | ||||
| 				  &error->flags); | ||||
|  | ||||
| @ -738,7 +738,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) | ||||
| 		runtime->num_scalers[PIPE_C] = 1; | ||||
| 	} | ||||
| 
 | ||||
| 	BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t)); | ||||
| 	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES); | ||||
| 
 | ||||
| 	if (IS_GEN(dev_priv, 11)) | ||||
| 		for_each_pipe(dev_priv, pipe) | ||||
| @ -887,7 +887,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) | ||||
| 			continue; | ||||
| 
 | ||||
| 		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) { | ||||
| 			info->ring_mask &= ~ENGINE_MASK(_VCS(i)); | ||||
| 			info->engine_mask &= ~BIT(_VCS(i)); | ||||
| 			DRM_DEBUG_DRIVER("vcs%u fused off\n", i); | ||||
| 			continue; | ||||
| 		} | ||||
| @ -906,7 +906,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) | ||||
| 			continue; | ||||
| 
 | ||||
| 		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) { | ||||
| 			info->ring_mask &= ~ENGINE_MASK(_VECS(i)); | ||||
| 			info->engine_mask &= ~BIT(_VECS(i)); | ||||
| 			DRM_DEBUG_DRIVER("vecs%u fused off\n", i); | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| @ -150,14 +150,14 @@ struct sseu_dev_info { | ||||
| 	u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES]; | ||||
| }; | ||||
| 
 | ||||
| typedef u8 intel_ring_mask_t; | ||||
| typedef u8 intel_engine_mask_t; | ||||
| 
 | ||||
| struct intel_device_info { | ||||
| 	u16 gen_mask; | ||||
| 
 | ||||
| 	u8 gen; | ||||
| 	u8 gt; /* GT number, 0 if undefined */ | ||||
| 	intel_ring_mask_t ring_mask; /* Rings supported by the HW */ | ||||
| 	intel_engine_mask_t engine_mask; /* Engines supported by the HW */ | ||||
| 
 | ||||
| 	enum intel_platform platform; | ||||
| 	u32 platform_mask; | ||||
| @ -200,7 +200,7 @@ struct intel_runtime_info { | ||||
| 	u8 num_sprites[I915_MAX_PIPES]; | ||||
| 	u8 num_scalers[I915_MAX_PIPES]; | ||||
| 
 | ||||
| 	u8 num_rings; | ||||
| 	u8 num_engines; | ||||
| 
 | ||||
| 	/* Slice/subslice/EU info */ | ||||
| 	struct sseu_dev_info sseu; | ||||
|  | ||||
| @ -94,24 +94,24 @@ struct engine_info { | ||||
| }; | ||||
| 
 | ||||
| static const struct engine_info intel_engines[] = { | ||||
| 	[RCS] = { | ||||
| 		.hw_id = RCS_HW, | ||||
| 	[RCS0] = { | ||||
| 		.hw_id = RCS0_HW, | ||||
| 		.class = RENDER_CLASS, | ||||
| 		.instance = 0, | ||||
| 		.mmio_bases = { | ||||
| 			{ .gen = 1, .base = RENDER_RING_BASE } | ||||
| 		}, | ||||
| 	}, | ||||
| 	[BCS] = { | ||||
| 		.hw_id = BCS_HW, | ||||
| 	[BCS0] = { | ||||
| 		.hw_id = BCS0_HW, | ||||
| 		.class = COPY_ENGINE_CLASS, | ||||
| 		.instance = 0, | ||||
| 		.mmio_bases = { | ||||
| 			{ .gen = 6, .base = BLT_RING_BASE } | ||||
| 		}, | ||||
| 	}, | ||||
| 	[VCS] = { | ||||
| 		.hw_id = VCS_HW, | ||||
| 	[VCS0] = { | ||||
| 		.hw_id = VCS0_HW, | ||||
| 		.class = VIDEO_DECODE_CLASS, | ||||
| 		.instance = 0, | ||||
| 		.mmio_bases = { | ||||
| @ -120,8 +120,8 @@ static const struct engine_info intel_engines[] = { | ||||
| 			{ .gen = 4, .base = BSD_RING_BASE } | ||||
| 		}, | ||||
| 	}, | ||||
| 	[VCS2] = { | ||||
| 		.hw_id = VCS2_HW, | ||||
| 	[VCS1] = { | ||||
| 		.hw_id = VCS1_HW, | ||||
| 		.class = VIDEO_DECODE_CLASS, | ||||
| 		.instance = 1, | ||||
| 		.mmio_bases = { | ||||
| @ -129,24 +129,24 @@ static const struct engine_info intel_engines[] = { | ||||
| 			{ .gen = 8, .base = GEN8_BSD2_RING_BASE } | ||||
| 		}, | ||||
| 	}, | ||||
| 	[VCS3] = { | ||||
| 		.hw_id = VCS3_HW, | ||||
| 	[VCS2] = { | ||||
| 		.hw_id = VCS2_HW, | ||||
| 		.class = VIDEO_DECODE_CLASS, | ||||
| 		.instance = 2, | ||||
| 		.mmio_bases = { | ||||
| 			{ .gen = 11, .base = GEN11_BSD3_RING_BASE } | ||||
| 		}, | ||||
| 	}, | ||||
| 	[VCS4] = { | ||||
| 		.hw_id = VCS4_HW, | ||||
| 	[VCS3] = { | ||||
| 		.hw_id = VCS3_HW, | ||||
| 		.class = VIDEO_DECODE_CLASS, | ||||
| 		.instance = 3, | ||||
| 		.mmio_bases = { | ||||
| 			{ .gen = 11, .base = GEN11_BSD4_RING_BASE } | ||||
| 		}, | ||||
| 	}, | ||||
| 	[VECS] = { | ||||
| 		.hw_id = VECS_HW, | ||||
| 	[VECS0] = { | ||||
| 		.hw_id = VECS0_HW, | ||||
| 		.class = VIDEO_ENHANCEMENT_CLASS, | ||||
| 		.instance = 0, | ||||
| 		.mmio_bases = { | ||||
| @ -154,8 +154,8 @@ static const struct engine_info intel_engines[] = { | ||||
| 			{ .gen = 7, .base = VEBOX_RING_BASE } | ||||
| 		}, | ||||
| 	}, | ||||
| 	[VECS2] = { | ||||
| 		.hw_id = VECS2_HW, | ||||
| 	[VECS1] = { | ||||
| 		.hw_id = VECS1_HW, | ||||
| 		.class = VIDEO_ENHANCEMENT_CLASS, | ||||
| 		.instance = 1, | ||||
| 		.mmio_bases = { | ||||
| @ -304,7 +304,10 @@ intel_engine_setup(struct drm_i915_private *dev_priv, | ||||
| 	if (!engine) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); | ||||
| 
 | ||||
| 	engine->id = id; | ||||
| 	engine->mask = BIT(id); | ||||
| 	engine->i915 = dev_priv; | ||||
| 	__sprint_engine_name(engine->name, info); | ||||
| 	engine->hw_id = engine->guc_id = info->hw_id; | ||||
| @ -345,15 +348,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv, | ||||
| int intel_engines_init_mmio(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	struct intel_device_info *device_info = mkwrite_device_info(dev_priv); | ||||
| 	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask; | ||||
| 	const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask; | ||||
| 	struct intel_engine_cs *engine; | ||||
| 	enum intel_engine_id id; | ||||
| 	unsigned int mask = 0; | ||||
| 	unsigned int i; | ||||
| 	int err; | ||||
| 
 | ||||
| 	WARN_ON(ring_mask == 0); | ||||
| 	WARN_ON(ring_mask & | ||||
| 	WARN_ON(engine_mask == 0); | ||||
| 	WARN_ON(engine_mask & | ||||
| 		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); | ||||
| 
 | ||||
| 	if (i915_inject_load_failure()) | ||||
| @ -367,7 +370,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv) | ||||
| 		if (err) | ||||
| 			goto cleanup; | ||||
| 
 | ||||
| 		mask |= ENGINE_MASK(i); | ||||
| 		mask |= BIT(i); | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| @ -375,16 +378,16 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv) | ||||
| 	 * are added to the driver by a warning and disabling the forgotten | ||||
| 	 * engines. | ||||
| 	 */ | ||||
| 	if (WARN_ON(mask != ring_mask)) | ||||
| 		device_info->ring_mask = mask; | ||||
| 	if (WARN_ON(mask != engine_mask)) | ||||
| 		device_info->engine_mask = mask; | ||||
| 
 | ||||
| 	/* We always presume we have at least RCS available for later probing */ | ||||
| 	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) { | ||||
| 	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) { | ||||
| 		err = -ENODEV; | ||||
| 		goto cleanup; | ||||
| 	} | ||||
| 
 | ||||
| 	RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask); | ||||
| 	RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask); | ||||
| 
 | ||||
| 	i915_check_and_clear_faults(dev_priv); | ||||
| 
 | ||||
| @ -954,7 +957,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, | ||||
| 	default: | ||||
| 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); | ||||
| 
 | ||||
| 		if (engine->id != RCS) | ||||
| 		if (engine->id != RCS0) | ||||
| 			break; | ||||
| 
 | ||||
| 		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE); | ||||
| @ -970,7 +973,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, | ||||
| 	case 7: | ||||
| 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); | ||||
| 
 | ||||
| 		if (engine->id != RCS) | ||||
| 		if (engine->id != RCS0) | ||||
| 			break; | ||||
| 
 | ||||
| 		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE); | ||||
| @ -983,7 +986,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, | ||||
| 	case 4: | ||||
| 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); | ||||
| 
 | ||||
| 		if (engine->id == RCS) | ||||
| 		if (engine->id == RCS0) | ||||
| 			/* HACK: Using the wrong struct member */ | ||||
| 			instdone->slice_common = I915_READ(GEN4_INSTDONE1); | ||||
| 		break; | ||||
| @ -1355,7 +1358,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, | ||||
| 		&engine->execlists; | ||||
| 	u64 addr; | ||||
| 
 | ||||
| 	if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7)) | ||||
| 	if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7)) | ||||
| 		drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID)); | ||||
| 	drm_printf(m, "\tRING_START: 0x%08x\n", | ||||
| 		   I915_READ(RING_START(engine->mmio_base))); | ||||
|  | ||||
| @ -122,7 +122,7 @@ int intel_guc_ads_create(struct intel_guc *guc) | ||||
| 	 * because our GuC shared data is there. | ||||
| 	 */ | ||||
| 	kernel_ctx_vma = to_intel_context(dev_priv->kernel_context, | ||||
| 					  dev_priv->engine[RCS])->state; | ||||
| 					  dev_priv->engine[RCS0])->state; | ||||
| 	blob->ads.golden_context_lrca = | ||||
| 		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset; | ||||
| 
 | ||||
|  | ||||
| @ -575,7 +575,7 @@ static void inject_preempt_context(struct work_struct *work) | ||||
| 		u32 *cs; | ||||
| 
 | ||||
| 		cs = ce->ring->vaddr; | ||||
| 		if (engine->id == RCS) { | ||||
| 		if (engine->class == RENDER_CLASS) { | ||||
| 			cs = gen8_emit_ggtt_write_rcs(cs, | ||||
| 						      GUC_PREEMPT_FINISHED, | ||||
| 						      addr, | ||||
| @ -1030,7 +1030,7 @@ static int guc_clients_create(struct intel_guc *guc) | ||||
| 	GEM_BUG_ON(guc->preempt_client); | ||||
| 
 | ||||
| 	client = guc_client_alloc(dev_priv, | ||||
| 				  INTEL_INFO(dev_priv)->ring_mask, | ||||
| 				  INTEL_INFO(dev_priv)->engine_mask, | ||||
| 				  GUC_CLIENT_PRIORITY_KMD_NORMAL, | ||||
| 				  dev_priv->kernel_context); | ||||
| 	if (IS_ERR(client)) { | ||||
| @ -1041,7 +1041,7 @@ static int guc_clients_create(struct intel_guc *guc) | ||||
| 
 | ||||
| 	if (dev_priv->preempt_context) { | ||||
| 		client = guc_client_alloc(dev_priv, | ||||
| 					  INTEL_INFO(dev_priv)->ring_mask, | ||||
| 					  INTEL_INFO(dev_priv)->engine_mask, | ||||
| 					  GUC_CLIENT_PRIORITY_KMD_HIGH, | ||||
| 					  dev_priv->preempt_context); | ||||
| 		if (IS_ERR(client)) { | ||||
|  | ||||
| @ -56,7 +56,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) | ||||
| 	int slice; | ||||
| 	int subslice; | ||||
| 
 | ||||
| 	if (engine->id != RCS) | ||||
| 	if (engine->id != RCS0) | ||||
| 		return true; | ||||
| 
 | ||||
| 	intel_engine_get_instdone(engine, &instdone); | ||||
| @ -120,7 +120,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) | ||||
| 	 */ | ||||
| 	tmp = I915_READ_CTL(engine); | ||||
| 	if (tmp & RING_WAIT) { | ||||
| 		i915_handle_error(dev_priv, BIT(engine->id), 0, | ||||
| 		i915_handle_error(dev_priv, engine->mask, 0, | ||||
| 				  "stuck wait on %s", engine->name); | ||||
| 		I915_WRITE_CTL(engine, tmp); | ||||
| 		return ENGINE_WAIT_KICK; | ||||
| @ -282,13 +282,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work) | ||||
| 		hangcheck_store_sample(engine, &hc); | ||||
| 
 | ||||
| 		if (hc.stalled) { | ||||
| 			hung |= intel_engine_flag(engine); | ||||
| 			hung |= engine->mask; | ||||
| 			if (hc.action != ENGINE_DEAD) | ||||
| 				stuck |= intel_engine_flag(engine); | ||||
| 				stuck |= engine->mask; | ||||
| 		} | ||||
| 
 | ||||
| 		if (hc.wedged) | ||||
| 			wedged |= intel_engine_flag(engine); | ||||
| 			wedged |= engine->mask; | ||||
| 	} | ||||
| 
 | ||||
| 	if (GEM_SHOW_DEBUG() && (hung | stuck)) { | ||||
|  | ||||
| @ -1777,7 +1777,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) | ||||
| 	unsigned int i; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (GEM_DEBUG_WARN_ON(engine->id != RCS)) | ||||
| 	if (GEM_DEBUG_WARN_ON(engine->id != RCS0)) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	switch (INTEL_GEN(engine->i915)) { | ||||
| @ -2376,11 +2376,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) | ||||
| 
 | ||||
| 	if (INTEL_GEN(engine->i915) < 11) { | ||||
| 		const u8 irq_shifts[] = { | ||||
| 			[RCS]  = GEN8_RCS_IRQ_SHIFT, | ||||
| 			[BCS]  = GEN8_BCS_IRQ_SHIFT, | ||||
| 			[VCS]  = GEN8_VCS1_IRQ_SHIFT, | ||||
| 			[VCS2] = GEN8_VCS2_IRQ_SHIFT, | ||||
| 			[VECS] = GEN8_VECS_IRQ_SHIFT, | ||||
| 			[RCS0]  = GEN8_RCS_IRQ_SHIFT, | ||||
| 			[BCS0]  = GEN8_BCS_IRQ_SHIFT, | ||||
| 			[VCS0]  = GEN8_VCS0_IRQ_SHIFT, | ||||
| 			[VCS1]  = GEN8_VCS1_IRQ_SHIFT, | ||||
| 			[VECS0] = GEN8_VECS_IRQ_SHIFT, | ||||
| 		}; | ||||
| 
 | ||||
| 		shift = irq_shifts[engine->id]; | ||||
|  | ||||
| @ -288,17 +288,17 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, | ||||
| static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) | ||||
| { | ||||
| 	switch (engine_id) { | ||||
| 	case RCS: | ||||
| 	case RCS0: | ||||
| 		return GEN9_GFX_MOCS(index); | ||||
| 	case VCS: | ||||
| 	case VCS0: | ||||
| 		return GEN9_MFX0_MOCS(index); | ||||
| 	case BCS: | ||||
| 	case BCS0: | ||||
| 		return GEN9_BLT_MOCS(index); | ||||
| 	case VECS: | ||||
| 	case VECS0: | ||||
| 		return GEN9_VEBOX_MOCS(index); | ||||
| 	case VCS2: | ||||
| 	case VCS1: | ||||
| 		return GEN9_MFX1_MOCS(index); | ||||
| 	case VCS3: | ||||
| 	case VCS2: | ||||
| 		return GEN11_MFX2_MOCS(index); | ||||
| 	default: | ||||
| 		MISSING_CASE(engine_id); | ||||
|  | ||||
| @ -236,7 +236,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | ||||
| static struct i915_request *alloc_request(struct intel_overlay *overlay) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = overlay->i915; | ||||
| 	struct intel_engine_cs *engine = dev_priv->engine[RCS]; | ||||
| 	struct intel_engine_cs *engine = dev_priv->engine[RCS0]; | ||||
| 
 | ||||
| 	return i915_request_alloc(engine, dev_priv->kernel_context); | ||||
| } | ||||
|  | ||||
| @ -552,16 +552,17 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) | ||||
| 		 */ | ||||
| 		default: | ||||
| 			GEM_BUG_ON(engine->id); | ||||
| 		case RCS: | ||||
| 			/* fallthrough */ | ||||
| 		case RCS0: | ||||
| 			hwsp = RENDER_HWS_PGA_GEN7; | ||||
| 			break; | ||||
| 		case BCS: | ||||
| 		case BCS0: | ||||
| 			hwsp = BLT_HWS_PGA_GEN7; | ||||
| 			break; | ||||
| 		case VCS: | ||||
| 		case VCS0: | ||||
| 			hwsp = BSD_HWS_PGA_GEN7; | ||||
| 			break; | ||||
| 		case VECS: | ||||
| 		case VECS0: | ||||
| 			hwsp = VEBOX_HWS_PGA_GEN7; | ||||
| 			break; | ||||
| 		} | ||||
| @ -1692,8 +1693,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) | ||||
| 	struct drm_i915_private *i915 = rq->i915; | ||||
| 	struct intel_engine_cs *engine = rq->engine; | ||||
| 	enum intel_engine_id id; | ||||
| 	const int num_rings = | ||||
| 		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0; | ||||
| 	const int num_engines = | ||||
| 		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; | ||||
| 	bool force_restore = false; | ||||
| 	int len; | ||||
| 	u32 *cs; | ||||
| @ -1707,7 +1708,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) | ||||
| 
 | ||||
| 	len = 4; | ||||
| 	if (IS_GEN(i915, 7)) | ||||
| 		len += 2 + (num_rings ? 4*num_rings + 6 : 0); | ||||
| 		len += 2 + (num_engines ? 4 * num_engines + 6 : 0); | ||||
| 	if (flags & MI_FORCE_RESTORE) { | ||||
| 		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT); | ||||
| 		flags &= ~MI_FORCE_RESTORE; | ||||
| @ -1722,10 +1723,10 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) | ||||
| 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ | ||||
| 	if (IS_GEN(i915, 7)) { | ||||
| 		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; | ||||
| 		if (num_rings) { | ||||
| 		if (num_engines) { | ||||
| 			struct intel_engine_cs *signaller; | ||||
| 
 | ||||
| 			*cs++ = MI_LOAD_REGISTER_IMM(num_rings); | ||||
| 			*cs++ = MI_LOAD_REGISTER_IMM(num_engines); | ||||
| 			for_each_engine(signaller, i915, id) { | ||||
| 				if (signaller == engine) | ||||
| 					continue; | ||||
| @ -1768,11 +1769,11 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) | ||||
| 	*cs++ = MI_NOOP; | ||||
| 
 | ||||
| 	if (IS_GEN(i915, 7)) { | ||||
| 		if (num_rings) { | ||||
| 		if (num_engines) { | ||||
| 			struct intel_engine_cs *signaller; | ||||
| 			i915_reg_t last_reg = {}; /* keep gcc quiet */ | ||||
| 
 | ||||
| 			*cs++ = MI_LOAD_REGISTER_IMM(num_rings); | ||||
| 			*cs++ = MI_LOAD_REGISTER_IMM(num_engines); | ||||
| 			for_each_engine(signaller, i915, id) { | ||||
| 				if (signaller == engine) | ||||
| 					continue; | ||||
| @ -1850,7 +1851,7 @@ static int switch_context(struct i915_request *rq) | ||||
| 		 * explanation. | ||||
| 		 */ | ||||
| 		loops = 1; | ||||
| 		if (engine->id == BCS && IS_VALLEYVIEW(engine->i915)) | ||||
| 		if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915)) | ||||
| 			loops = 32; | ||||
| 
 | ||||
| 		do { | ||||
| @ -1859,15 +1860,15 @@ static int switch_context(struct i915_request *rq) | ||||
| 				goto err; | ||||
| 		} while (--loops); | ||||
| 
 | ||||
| 		if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) { | ||||
| 			unwind_mm = intel_engine_flag(engine); | ||||
| 			ppgtt->pd_dirty_rings &= ~unwind_mm; | ||||
| 		if (ppgtt->pd_dirty_engines & engine->mask) { | ||||
| 			unwind_mm = engine->mask; | ||||
| 			ppgtt->pd_dirty_engines &= ~unwind_mm; | ||||
| 			hw_flags = MI_FORCE_RESTORE; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if (rq->hw_context->state) { | ||||
| 		GEM_BUG_ON(engine->id != RCS); | ||||
| 		GEM_BUG_ON(engine->id != RCS0); | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * The kernel context(s) is treated as pure scratch and is not | ||||
| @ -1927,7 +1928,7 @@ static int switch_context(struct i915_request *rq) | ||||
| 
 | ||||
| err_mm: | ||||
| 	if (unwind_mm) | ||||
| 		ppgtt->pd_dirty_rings |= unwind_mm; | ||||
| 		ppgtt->pd_dirty_engines |= unwind_mm; | ||||
| err: | ||||
| 	return ret; | ||||
| } | ||||
|  | ||||
| @ -10,12 +10,12 @@ | ||||
| #include <linux/seqlock.h> | ||||
| 
 | ||||
| #include "i915_gem_batch_pool.h" | ||||
| 
 | ||||
| #include "i915_reg.h" | ||||
| #include "i915_pmu.h" | ||||
| #include "i915_reg.h" | ||||
| #include "i915_request.h" | ||||
| #include "i915_selftest.h" | ||||
| #include "i915_timeline.h" | ||||
| #include "intel_device_info.h" | ||||
| #include "intel_gpu_commands.h" | ||||
| #include "intel_workarounds.h" | ||||
| 
 | ||||
| @ -175,16 +175,16 @@ struct i915_request; | ||||
|  * Keep instances of the same type engine together. | ||||
|  */ | ||||
| enum intel_engine_id { | ||||
| 	RCS = 0, | ||||
| 	BCS, | ||||
| 	VCS, | ||||
| 	RCS0 = 0, | ||||
| 	BCS0, | ||||
| 	VCS0, | ||||
| 	VCS1, | ||||
| 	VCS2, | ||||
| 	VCS3, | ||||
| 	VCS4, | ||||
| #define _VCS(n) (VCS + (n)) | ||||
| 	VECS, | ||||
| 	VECS2 | ||||
| #define _VECS(n) (VECS + (n)) | ||||
| #define _VCS(n) (VCS0 + (n)) | ||||
| 	VECS0, | ||||
| 	VECS1 | ||||
| #define _VECS(n) (VECS0 + (n)) | ||||
| }; | ||||
| 
 | ||||
| struct st_preempt_hang { | ||||
| @ -334,6 +334,7 @@ struct intel_engine_cs { | ||||
| 	enum intel_engine_id id; | ||||
| 	unsigned int hw_id; | ||||
| 	unsigned int guc_id; | ||||
| 	intel_engine_mask_t mask; | ||||
| 
 | ||||
| 	u8 uabi_class; | ||||
| 
 | ||||
| @ -667,12 +668,6 @@ execlists_port_complete(struct intel_engine_execlists * const execlists, | ||||
| 	return port; | ||||
| } | ||||
| 
 | ||||
| static inline unsigned int | ||||
| intel_engine_flag(const struct intel_engine_cs *engine) | ||||
| { | ||||
| 	return BIT(engine->id); | ||||
| } | ||||
| 
 | ||||
| static inline u32 | ||||
| intel_read_status_page(const struct intel_engine_cs *engine, int reg) | ||||
| { | ||||
|  | ||||
| @ -1055,7 +1055,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) | ||||
| 	struct drm_i915_private *i915 = engine->i915; | ||||
| 	struct i915_wa_list *w = &engine->whitelist; | ||||
| 
 | ||||
| 	GEM_BUG_ON(engine->id != RCS); | ||||
| 	GEM_BUG_ON(engine->id != RCS0); | ||||
| 
 | ||||
| 	wa_init_start(w, "whitelist"); | ||||
| 
 | ||||
| @ -1228,7 +1228,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal | ||||
| 	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8)) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (engine->id == RCS) | ||||
| 	if (engine->id == RCS0) | ||||
| 		rcs_engine_wa_init(engine, wal); | ||||
| 	else | ||||
| 		xcs_engine_wa_init(engine, wal); | ||||
|  | ||||
| @ -1535,7 +1535,7 @@ static int igt_ppgtt_pin_update(void *arg) | ||||
| 	 * land in the now stale 2M page. | ||||
| 	 */ | ||||
| 
 | ||||
| 	err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf); | ||||
| 	err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf); | ||||
| 	if (err) | ||||
| 		goto out_unpin; | ||||
| 
 | ||||
| @ -1653,7 +1653,7 @@ static int igt_shrink_thp(void *arg) | ||||
| 	if (err) | ||||
| 		goto out_unpin; | ||||
| 
 | ||||
| 	err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf); | ||||
| 	err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf); | ||||
| 	if (err) | ||||
| 		goto out_unpin; | ||||
| 
 | ||||
|  | ||||
| @ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, | ||||
| 	if (IS_ERR(vma)) | ||||
| 		return PTR_ERR(vma); | ||||
| 
 | ||||
| 	rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); | ||||
| 	rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context); | ||||
| 	if (IS_ERR(rq)) { | ||||
| 		i915_vma_unpin(vma); | ||||
| 		return PTR_ERR(rq); | ||||
| @ -256,7 +256,7 @@ static bool needs_mi_store_dword(struct drm_i915_private *i915) | ||||
| 	if (i915_terminally_wedged(i915)) | ||||
| 		return false; | ||||
| 
 | ||||
| 	return intel_engine_can_store_dword(i915->engine[RCS]); | ||||
| 	return intel_engine_can_store_dword(i915->engine[RCS0]); | ||||
| } | ||||
| 
 | ||||
| static const struct igt_coherency_mode { | ||||
|  | ||||
| @ -556,7 +556,7 @@ static int igt_ctx_exec(void *arg) | ||||
| 		ncontexts++; | ||||
| 	} | ||||
| 	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n", | ||||
| 		ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords); | ||||
| 		ncontexts, RUNTIME_INFO(i915)->num_engines, ndwords); | ||||
| 
 | ||||
| 	dw = 0; | ||||
| 	list_for_each_entry(obj, &objects, st_link) { | ||||
| @ -923,7 +923,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, | ||||
| 	       unsigned int flags) | ||||
| { | ||||
| 	struct intel_sseu default_sseu = intel_device_default_sseu(i915); | ||||
| 	struct intel_engine_cs *engine = i915->engine[RCS]; | ||||
| 	struct intel_engine_cs *engine = i915->engine[RCS0]; | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| 	struct i915_gem_context *ctx; | ||||
| 	struct intel_sseu pg_sseu; | ||||
| @ -1126,7 +1126,7 @@ static int igt_ctx_readonly(void *arg) | ||||
| 		} | ||||
| 	} | ||||
| 	pr_info("Submitted %lu dwords (across %u engines)\n", | ||||
| 		ndwords, RUNTIME_INFO(i915)->num_rings); | ||||
| 		ndwords, RUNTIME_INFO(i915)->num_engines); | ||||
| 
 | ||||
| 	dw = 0; | ||||
| 	list_for_each_entry(obj, &objects, st_link) { | ||||
| @ -1459,7 +1459,7 @@ static int igt_vm_isolation(void *arg) | ||||
| 		count += this; | ||||
| 	} | ||||
| 	pr_info("Checked %lu scratch offsets across %d engines\n", | ||||
| 		count, RUNTIME_INFO(i915)->num_rings); | ||||
| 		count, RUNTIME_INFO(i915)->num_engines); | ||||
| 
 | ||||
| out_rpm: | ||||
| 	intel_runtime_pm_put(i915, wakeref); | ||||
|  | ||||
| @ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); | ||||
| 	rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context); | ||||
| 	if (IS_ERR(rq)) { | ||||
| 		i915_vma_unpin(vma); | ||||
| 		return PTR_ERR(rq); | ||||
|  | ||||
| @ -42,7 +42,7 @@ static int igt_add_request(void *arg) | ||||
| 	/* Basic preliminary test to create a request and let it loose! */ | ||||
| 
 | ||||
| 	mutex_lock(&i915->drm.struct_mutex); | ||||
| 	request = mock_request(i915->engine[RCS], | ||||
| 	request = mock_request(i915->engine[RCS0], | ||||
| 			       i915->kernel_context, | ||||
| 			       HZ / 10); | ||||
| 	if (!request) | ||||
| @ -66,7 +66,7 @@ static int igt_wait_request(void *arg) | ||||
| 	/* Submit a request, then wait upon it */ | ||||
| 
 | ||||
| 	mutex_lock(&i915->drm.struct_mutex); | ||||
| 	request = mock_request(i915->engine[RCS], i915->kernel_context, T); | ||||
| 	request = mock_request(i915->engine[RCS0], i915->kernel_context, T); | ||||
| 	if (!request) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto out_unlock; | ||||
| @ -136,7 +136,7 @@ static int igt_fence_wait(void *arg) | ||||
| 	/* Submit a request, treat it as a fence and wait upon it */ | ||||
| 
 | ||||
| 	mutex_lock(&i915->drm.struct_mutex); | ||||
| 	request = mock_request(i915->engine[RCS], i915->kernel_context, T); | ||||
| 	request = mock_request(i915->engine[RCS0], i915->kernel_context, T); | ||||
| 	if (!request) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto out_locked; | ||||
| @ -193,7 +193,7 @@ static int igt_request_rewind(void *arg) | ||||
| 
 | ||||
| 	mutex_lock(&i915->drm.struct_mutex); | ||||
| 	ctx[0] = mock_context(i915, "A"); | ||||
| 	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ); | ||||
| 	request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ); | ||||
| 	if (!request) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto err_context_0; | ||||
| @ -203,7 +203,7 @@ static int igt_request_rewind(void *arg) | ||||
| 	i915_request_add(request); | ||||
| 
 | ||||
| 	ctx[1] = mock_context(i915, "B"); | ||||
| 	vip = mock_request(i915->engine[RCS], ctx[1], 0); | ||||
| 	vip = mock_request(i915->engine[RCS0], ctx[1], 0); | ||||
| 	if (!vip) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto err_context_1; | ||||
| @ -415,7 +415,7 @@ static int mock_breadcrumbs_smoketest(void *arg) | ||||
| { | ||||
| 	struct drm_i915_private *i915 = arg; | ||||
| 	struct smoketest t = { | ||||
| 		.engine = i915->engine[RCS], | ||||
| 		.engine = i915->engine[RCS0], | ||||
| 		.ncontexts = 1024, | ||||
| 		.max_batch = 1024, | ||||
| 		.request_alloc = __mock_request_alloc | ||||
| @ -1216,7 +1216,7 @@ out_flush: | ||||
| 		num_fences += atomic_long_read(&t[id].num_fences); | ||||
| 	} | ||||
| 	pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", | ||||
| 		num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus); | ||||
| 		num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus); | ||||
| 
 | ||||
| 	mutex_lock(&i915->drm.struct_mutex); | ||||
| 	ret = igt_live_test_end(&live) ?: ret; | ||||
|  | ||||
| @ -111,7 +111,7 @@ static int validate_client(struct intel_guc_client *client, | ||||
| 			dev_priv->preempt_context : dev_priv->kernel_context; | ||||
| 
 | ||||
| 	if (client->owner != ctx_owner || | ||||
| 	    client->engines != INTEL_INFO(dev_priv)->ring_mask || | ||||
| 	    client->engines != INTEL_INFO(dev_priv)->engine_mask || | ||||
| 	    client->priority != client_priority || | ||||
| 	    client->doorbell_id == GUC_DOORBELL_INVALID) | ||||
| 		return -EINVAL; | ||||
| @ -261,7 +261,7 @@ static int igt_guc_doorbells(void *arg) | ||||
| 
 | ||||
| 	for (i = 0; i < ATTEMPTS; i++) { | ||||
| 		clients[i] = guc_client_alloc(dev_priv, | ||||
| 					      INTEL_INFO(dev_priv)->ring_mask, | ||||
| 					      INTEL_INFO(dev_priv)->engine_mask, | ||||
| 					      i % GUC_CLIENT_PRIORITY_NUM, | ||||
| 					      dev_priv->kernel_context); | ||||
| 
 | ||||
|  | ||||
| @ -1126,7 +1126,7 @@ static int igt_reset_wait(void *arg) | ||||
| 	long timeout; | ||||
| 	int err; | ||||
| 
 | ||||
| 	if (!intel_engine_can_store_dword(i915->engine[RCS])) | ||||
| 	if (!intel_engine_can_store_dword(i915->engine[RCS0])) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	/* Check that we detect a stuck waiter and issue a reset */ | ||||
| @ -1138,7 +1138,7 @@ static int igt_reset_wait(void *arg) | ||||
| 	if (err) | ||||
| 		goto unlock; | ||||
| 
 | ||||
| 	rq = hang_create_request(&h, i915->engine[RCS]); | ||||
| 	rq = hang_create_request(&h, i915->engine[RCS0]); | ||||
| 	if (IS_ERR(rq)) { | ||||
| 		err = PTR_ERR(rq); | ||||
| 		goto fini; | ||||
| @ -1255,7 +1255,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, | ||||
| 	struct hang h; | ||||
| 	int err; | ||||
| 
 | ||||
| 	if (!intel_engine_can_store_dword(i915->engine[RCS])) | ||||
| 	if (!intel_engine_can_store_dword(i915->engine[RCS0])) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	/* Check that we can recover an unbind stuck on a hanging request */ | ||||
| @ -1285,7 +1285,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, | ||||
| 		goto out_obj; | ||||
| 	} | ||||
| 
 | ||||
| 	rq = hang_create_request(&h, i915->engine[RCS]); | ||||
| 	rq = hang_create_request(&h, i915->engine[RCS0]); | ||||
| 	if (IS_ERR(rq)) { | ||||
| 		err = PTR_ERR(rq); | ||||
| 		goto out_obj; | ||||
| @ -1358,7 +1358,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, | ||||
| 
 | ||||
| out_reset: | ||||
| 	igt_global_reset_lock(i915); | ||||
| 	fake_hangcheck(rq->i915, intel_engine_flag(rq->engine)); | ||||
| 	fake_hangcheck(rq->i915, rq->engine->mask); | ||||
| 	igt_global_reset_unlock(i915); | ||||
| 
 | ||||
| 	if (tsk) { | ||||
| @ -1537,7 +1537,7 @@ static int igt_reset_queue(void *arg) | ||||
| 				goto fini; | ||||
| 			} | ||||
| 
 | ||||
| 			reset_count = fake_hangcheck(i915, ENGINE_MASK(id)); | ||||
| 			reset_count = fake_hangcheck(i915, BIT(id)); | ||||
| 
 | ||||
| 			if (prev->fence.error != -EIO) { | ||||
| 				pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", | ||||
| @ -1596,7 +1596,7 @@ unlock: | ||||
| static int igt_handle_error(void *arg) | ||||
| { | ||||
| 	struct drm_i915_private *i915 = arg; | ||||
| 	struct intel_engine_cs *engine = i915->engine[RCS]; | ||||
| 	struct intel_engine_cs *engine = i915->engine[RCS0]; | ||||
| 	struct hang h; | ||||
| 	struct i915_request *rq; | ||||
| 	struct i915_gpu_state *error; | ||||
| @ -1643,7 +1643,7 @@ static int igt_handle_error(void *arg) | ||||
| 	/* Temporarily disable error capture */ | ||||
| 	error = xchg(&i915->gpu_error.first_error, (void *)-1); | ||||
| 
 | ||||
| 	i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL); | ||||
| 	i915_handle_error(i915, engine->mask, 0, NULL); | ||||
| 
 | ||||
| 	xchg(&i915->gpu_error.first_error, error); | ||||
| 
 | ||||
|  | ||||
| @ -929,7 +929,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) | ||||
| 
 | ||||
| 	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", | ||||
| 		count, flags, | ||||
| 		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext); | ||||
| 		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| @ -957,7 +957,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) | ||||
| 
 | ||||
| 	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", | ||||
| 		count, flags, | ||||
| 		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext); | ||||
| 		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -222,7 +222,7 @@ out_put: | ||||
| 
 | ||||
| static int do_device_reset(struct intel_engine_cs *engine) | ||||
| { | ||||
| 	i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds"); | ||||
| 	i915_reset(engine->i915, engine->mask, "live_workarounds"); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| @ -709,7 +709,7 @@ out_rpm: | ||||
| static int live_reset_whitelist(void *arg) | ||||
| { | ||||
| 	struct drm_i915_private *i915 = arg; | ||||
| 	struct intel_engine_cs *engine = i915->engine[RCS]; | ||||
| 	struct intel_engine_cs *engine = i915->engine[RCS0]; | ||||
| 	int err = 0; | ||||
| 
 | ||||
| 	/* If we reset the gpu, we should not lose the RING_NONPRIV */ | ||||
|  | ||||
| @ -223,6 +223,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, | ||||
| 	engine->base.i915 = i915; | ||||
| 	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); | ||||
| 	engine->base.id = id; | ||||
| 	engine->base.mask = BIT(id); | ||||
| 	engine->base.status_page.addr = (void *)(engine + 1); | ||||
| 
 | ||||
| 	engine->base.context_pin = mock_context_pin; | ||||
|  | ||||
| @ -206,13 +206,13 @@ struct drm_i915_private *mock_gem_device(void) | ||||
| 
 | ||||
| 	mock_init_ggtt(i915, &i915->ggtt); | ||||
| 
 | ||||
| 	mkwrite_device_info(i915)->ring_mask = BIT(0); | ||||
| 	mkwrite_device_info(i915)->engine_mask = BIT(0); | ||||
| 	i915->kernel_context = mock_context(i915, NULL); | ||||
| 	if (!i915->kernel_context) | ||||
| 		goto err_unlock; | ||||
| 
 | ||||
| 	i915->engine[RCS] = mock_engine(i915, "mock", RCS); | ||||
| 	if (!i915->engine[RCS]) | ||||
| 	i915->engine[RCS0] = mock_engine(i915, "mock", RCS0); | ||||
| 	if (!i915->engine[RCS0]) | ||||
| 		goto err_context; | ||||
| 
 | ||||
| 	mutex_unlock(&i915->drm.struct_mutex); | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user