drm/i915: take a reference to uncore in the engine and use it
A few advantages:

- Prepares us for the planned split of display uncore from GT uncore.
- Improves our engine-centric view of the world in the engine code and allows us to avoid jumping back to dev_priv.
- Allows us to wrap accesses to engine registers in nice macros that automatically pick the right mmio base.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-10-daniele.ceraolospurio@intel.com
commit baba6e572b (parent 97a04e0d07)
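Reading aid (not part of the patch): the ENGINE_READ/ENGINE_WRITE wrappers this series converts callers to are added in the intel_ringbuffer.h hunk near the end of this diff. Condensed into a non-variadic form, the pattern looks like the sketch below; REG_FOO and its offset are hypothetical stand-ins for any register define that takes an mmio base.

/* every engine register define takes the engine's mmio base, e.g.: */
#define REG_FOO(base)	_MMIO((base) + 0x64)	/* hypothetical offset */

/* the ENGINE_* wrappers splice in both the engine's uncore and its mmio base */
#define __ENGINE_REG_OP(op__, engine__, ...) \
	intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define ENGINE_READ(engine__, reg__) \
	__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base))
#define ENGINE_WRITE(engine__, reg__, val__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

/* so a caller can simply write: */
u32 foo = ENGINE_READ(engine, REG_FOO);
/* which is intel_uncore_read(engine->uncore, REG_FOO(engine->mmio_base)) */

The real macros in the patch are variadic; the point is only that the engine carries its own uncore pointer and mmio base, so callers no longer reach back through dev_priv.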
@@ -1848,7 +1848,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
 MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
-MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
 MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
 MMIO_D(GEN7_CXT_SIZE, D_ALL);
@@ -880,7 +880,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 for_each_engine(engine, dev_priv, id) {
 seq_printf(m,
 "Graphics Interrupt mask (%s): %08x\n",
-engine->name, I915_READ_IMR(engine));
+engine->name, ENGINE_READ(engine, RING_IMR));
 }
 }
@@ -1136,7 +1136,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 struct drm_i915_private *dev_priv = engine->i915;
 
 if (INTEL_GEN(dev_priv) >= 6) {
-ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
 if (INTEL_GEN(dev_priv) >= 8)
 ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
 else
@@ -1144,32 +1144,32 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 }
 
 if (INTEL_GEN(dev_priv) >= 4) {
-ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
-ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
-ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
-ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
+ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
+ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
+ee->instps = ENGINE_READ(engine, RING_INSTPS);
+ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
 if (INTEL_GEN(dev_priv) >= 8) {
-ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
-ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
+ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
+ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
 }
-ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
+ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
 } else {
-ee->faddr = I915_READ(DMA_FADD_I8XX);
-ee->ipeir = I915_READ(IPEIR);
-ee->ipehr = I915_READ(IPEHR);
+ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
+ee->ipeir = ENGINE_READ(engine, IPEIR);
+ee->ipehr = ENGINE_READ(engine, IPEHR);
 }
 
 intel_engine_get_instdone(engine, &ee->instdone);
 
-ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+ee->instpm = ENGINE_READ(engine, RING_INSTPM);
 ee->acthd = intel_engine_get_active_head(engine);
-ee->start = I915_READ_START(engine);
-ee->head = I915_READ_HEAD(engine);
-ee->tail = I915_READ_TAIL(engine);
-ee->ctl = I915_READ_CTL(engine);
+ee->start = ENGINE_READ(engine, RING_START);
+ee->head = ENGINE_READ(engine, RING_HEAD);
+ee->tail = ENGINE_READ(engine, RING_TAIL);
+ee->ctl = ENGINE_READ(engine, RING_CTL);
 if (INTEL_GEN(dev_priv) > 2)
-ee->mode = I915_READ_MODE(engine);
+ee->mode = ENGINE_READ(engine, RING_MI_MODE);
 
 if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
 i915_reg_t mmio;
@@ -1214,10 +1214,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
 if (IS_GEN(dev_priv, 6))
 ee->vm_info.pp_dir_base =
-I915_READ(RING_PP_DIR_BASE_READ(engine));
+ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
 else if (IS_GEN(dev_priv, 7))
 ee->vm_info.pp_dir_base =
-I915_READ(RING_PP_DIR_BASE(engine));
+ENGINE_READ(engine, RING_PP_DIR_BASE);
 else if (INTEL_GEN(dev_priv) >= 8)
 for (i = 0; i < 4; i++) {
 ee->vm_info.pdp[i] =
@@ -1601,7 +1601,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
 }
 
 if (INTEL_GEN(dev_priv) >= 5)
-error->ccid = I915_READ(CCID);
+error->ccid = I915_READ(CCID(RENDER_RING_BASE));
 
 /* 3: Feature specific registers */
 if (IS_GEN_RANGE(dev_priv, 6, 7)) {
@@ -434,9 +434,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
 #define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
 
-#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
-#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
-#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
+#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
+#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
+#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
 #define PP_DIR_DCLV_2G 0xffffffff
 
 #define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
@@ -2568,12 +2568,12 @@ enum i915_power_well_id {
 #define HWS_START_ADDRESS_SHIFT 4
 #define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
 #define PWRCTX_EN (1 << 0)
-#define IPEIR _MMIO(0x2088)
-#define IPEHR _MMIO(0x208c)
+#define IPEIR(base) _MMIO((base) + 0x88)
+#define IPEHR(base) _MMIO((base) + 0x8c)
 #define GEN2_INSTDONE _MMIO(0x2090)
 #define NOPID _MMIO(0x2094)
 #define HWSTAM _MMIO(0x2098)
-#define DMA_FADD_I8XX _MMIO(0x20d0)
+#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0)
 #define RING_BBSTATE(base) _MMIO((base) + 0x110)
 #define RING_BB_PPGTT (1 << 5)
 #define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
@@ -2747,7 +2747,7 @@ enum i915_power_well_id {
 #define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
 #define INSTPM_TLB_INVALIDATE (1 << 9)
 #define INSTPM_SYNC_FLUSH (1 << 5)
-#define ACTHD _MMIO(0x20c8)
+#define ACTHD(base) _MMIO((base) + 0xc8)
 #define MEM_MODE _MMIO(0x20cc)
 #define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
 #define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
@@ -3947,7 +3947,7 @@ enum i915_power_well_id {
 /*
  * Logical Context regs
  */
-#define CCID _MMIO(0x2180)
+#define CCID(base) _MMIO((base) + 0x180)
 #define CCID_EN BIT(0)
 #define CCID_EXTENDED_STATE_RESTORE BIT(2)
 #define CCID_EXTENDED_STATE_SAVE BIT(3)
@@ -1173,19 +1173,24 @@ static void i915_reset_device(struct drm_i915_private *i915,
 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
 }
 
+static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg)
+{
+I915_WRITE(reg, I915_READ(reg));
+}
+
 void i915_clear_error_registers(struct drm_i915_private *dev_priv)
 {
 u32 eir;
 
 if (!IS_GEN(dev_priv, 2))
-I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+clear_register(dev_priv, PGTBL_ER);
 
 if (INTEL_GEN(dev_priv) < 4)
-I915_WRITE(IPEIR, I915_READ(IPEIR));
+clear_register(dev_priv, IPEIR(RENDER_RING_BASE));
 else
-I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+clear_register(dev_priv, IPEIR_I965);
 
-I915_WRITE(EIR, I915_READ(EIR));
+clear_register(dev_priv, EIR);
 eir = I915_READ(EIR);
 if (eir) {
 /*
@@ -255,21 +255,17 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
 
 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
 {
-struct drm_i915_private *dev_priv = engine->i915;
-i915_reg_t hwstam;
-
 /*
  * Though they added more rings on g4x/ilk, they did not add
  * per-engine HWSTAM until gen6.
  */
-if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
 return;
 
-hwstam = RING_HWSTAM(engine->mmio_base);
-if (INTEL_GEN(dev_priv) >= 3)
-I915_WRITE(hwstam, mask);
+if (INTEL_GEN(engine->i915) >= 3)
+ENGINE_WRITE(engine, RING_HWSTAM, mask);
 else
-I915_WRITE16(hwstam, mask);
+ENGINE_WRITE16(engine, RING_HWSTAM, mask);
 }
 
 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
@@ -309,6 +305,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 engine->id = id;
 engine->mask = BIT(id);
 engine->i915 = dev_priv;
+engine->uncore = &dev_priv->uncore;
 __sprint_engine_name(engine->name, info);
 engine->hw_id = engine->guc_id = info->hw_id;
 engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
@@ -787,37 +784,35 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
+struct drm_i915_private *i915 = engine->i915;
+
 u64 acthd;
 
-if (INTEL_GEN(dev_priv) >= 8)
-acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
-RING_ACTHD_UDW(engine->mmio_base));
-else if (INTEL_GEN(dev_priv) >= 4)
-acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+if (INTEL_GEN(i915) >= 8)
+acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
+else if (INTEL_GEN(i915) >= 4)
+acthd = ENGINE_READ(engine, RING_ACTHD);
 else
-acthd = I915_READ(ACTHD);
+acthd = ENGINE_READ(engine, ACTHD);
 
 return acthd;
 }
 
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
 u64 bbaddr;
 
-if (INTEL_GEN(dev_priv) >= 8)
-bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
-RING_BBADDR_UDW(engine->mmio_base));
+if (INTEL_GEN(engine->i915) >= 8)
+bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
 else
-bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+bbaddr = ENGINE_READ(engine, RING_BBADDR);
 
 return bbaddr;
 }
 
 int intel_engine_stop_cs(struct intel_engine_cs *engine)
 {
-struct intel_uncore *uncore = &engine->i915->uncore;
+struct intel_uncore *uncore = engine->uncore;
 const u32 base = engine->mmio_base;
 const i915_reg_t mode = RING_MI_MODE(base);
 int err;
@@ -846,12 +841,9 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 
 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
-
 GEM_TRACE("%s\n", engine->name);
 
-I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
-_MASKED_BIT_DISABLE(STOP_RING));
+ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
@@ -946,6 +938,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 struct intel_instdone *instdone)
 {
 struct drm_i915_private *dev_priv = engine->i915;
+struct intel_uncore *uncore = engine->uncore;
 u32 mmio_base = engine->mmio_base;
 int slice;
 int subslice;
@@ -954,12 +947,14 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 
 switch (INTEL_GEN(dev_priv)) {
 default:
-instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+instdone->instdone =
+intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
 
 if (engine->id != RCS0)
 break;
 
-instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+instdone->slice_common =
+intel_uncore_read(uncore, GEN7_SC_INSTDONE);
 for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
 instdone->sampler[slice][subslice] =
 read_subslice_reg(dev_priv, slice, subslice,
@@ -970,28 +965,33 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 }
 break;
 case 7:
-instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+instdone->instdone =
+intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
 
 if (engine->id != RCS0)
 break;
 
-instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
-instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
-instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+instdone->slice_common =
+intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+instdone->sampler[0][0] =
+intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
+instdone->row[0][0] =
+intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
 
 break;
 case 6:
 case 5:
 case 4:
-instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
-
+instdone->instdone =
+intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
 if (engine->id == RCS0)
 /* HACK: Using the wrong struct member */
-instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+instdone->slice_common =
+intel_uncore_read(uncore, GEN4_INSTDONE1);
 break;
 case 3:
 case 2:
-instdone->instdone = I915_READ(GEN2_INSTDONE);
+instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
 break;
 }
 }
@@ -1011,12 +1011,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 return true;
 
 /* First check that no commands are left in the ring */
-if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
-(I915_READ_TAIL(engine) & TAIL_ADDR))
+if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
+(ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
 idle = false;
 
 /* No bit for gen2, so assume the CS parser is idle */
-if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+if (INTEL_GEN(dev_priv) > 2 &&
+!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
 idle = false;
 
 intel_runtime_pm_put(dev_priv, wakeref);
@@ -1332,24 +1333,25 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 u64 addr;
 
 if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
-drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
+drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
 drm_printf(m, "\tRING_START: 0x%08x\n",
-I915_READ(RING_START(engine->mmio_base)));
+ENGINE_READ(engine, RING_START));
 drm_printf(m, "\tRING_HEAD: 0x%08x\n",
-I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
 drm_printf(m, "\tRING_TAIL: 0x%08x\n",
-I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
+ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
 drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
-I915_READ(RING_CTL(engine->mmio_base)),
-I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+ENGINE_READ(engine, RING_CTL),
+ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
 if (INTEL_GEN(engine->i915) > 2) {
 drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
-I915_READ(RING_MI_MODE(engine->mmio_base)),
-I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
+ENGINE_READ(engine, RING_MI_MODE),
+ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
 }
 
 if (INTEL_GEN(dev_priv) >= 6) {
-drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+drm_printf(m, "\tRING_IMR: %08x\n",
+ENGINE_READ(engine, RING_IMR));
 }
 
 addr = intel_engine_get_active_head(engine);
@@ -1359,22 +1361,21 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
 upper_32_bits(addr), lower_32_bits(addr));
 if (INTEL_GEN(dev_priv) >= 8)
-addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
-RING_DMA_FADD_UDW(engine->mmio_base));
+addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
 else if (INTEL_GEN(dev_priv) >= 4)
-addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+addr = ENGINE_READ(engine, RING_DMA_FADD);
 else
-addr = I915_READ(DMA_FADD_I8XX);
+addr = ENGINE_READ(engine, DMA_FADD_I8XX);
 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
 upper_32_bits(addr), lower_32_bits(addr));
 if (INTEL_GEN(dev_priv) >= 4) {
 drm_printf(m, "\tIPEIR: 0x%08x\n",
-I915_READ(RING_IPEIR(engine->mmio_base)));
+ENGINE_READ(engine, RING_IPEIR));
 drm_printf(m, "\tIPEHR: 0x%08x\n",
-I915_READ(RING_IPEHR(engine->mmio_base)));
+ENGINE_READ(engine, RING_IPEHR));
 } else {
-drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
-drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
+drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
 }
 
 if (HAS_EXECLISTS(dev_priv)) {
@@ -1384,15 +1385,15 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 u8 read, write;
 
 drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
-I915_READ(RING_EXECLIST_STATUS_LO(engine)),
-I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
 
 read = execlists->csb_head;
 write = READ_ONCE(*execlists->csb_write);
 
 drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
 read, write,
-GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
+GEN8_CSB_WRITE_PTR(ENGINE_READ(engine, RING_CONTEXT_STATUS_PTR)),
 yesno(test_bit(TASKLET_STATE_SCHED,
 &engine->execlists.tasklet.state)),
 enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@@ -1407,9 +1408,13 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
 idx,
 hws[idx * 2],
-I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+ENGINE_READ_IDX(engine,
+RING_CONTEXT_STATUS_BUF_LO,
+idx),
 hws[idx * 2 + 1],
-I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+ENGINE_READ_IDX(engine,
+RING_CONTEXT_STATUS_BUF_HI,
+idx));
 }
 
 rcu_read_lock();
@@ -1436,11 +1441,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 rcu_read_unlock();
 } else if (INTEL_GEN(dev_priv) > 6) {
 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
-I915_READ(RING_PP_DIR_BASE(engine)));
+ENGINE_READ(engine, RING_PP_DIR_BASE));
 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
-I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
-I915_READ(RING_PP_DIR_DCLV(engine)));
+ENGINE_READ(engine, RING_PP_DIR_DCLV));
 }
 }
@@ -1687,8 +1692,7 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
 
 static bool match_ring(struct i915_request *rq)
 {
-struct drm_i915_private *dev_priv = rq->i915;
-u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
+u32 ring = ENGINE_READ(rq->engine, RING_START);
 
 return ring == i915_ggtt_offset(rq->ring->vma);
 }
@@ -29,6 +29,7 @@ struct drm_i915_reg_table;
 struct i915_gem_context;
 struct i915_request;
 struct i915_sched_attr;
+struct intel_uncore;
 
 struct intel_hw_status_page {
 struct i915_vma *vma;
@@ -250,6 +251,7 @@ struct intel_engine_execlists {
 
 struct intel_engine_cs {
 struct drm_i915_private *i915;
+struct intel_uncore *uncore;
 char name[INTEL_ENGINE_CS_MAX_NAME];
 
 enum intel_engine_id id;
@@ -118,11 +118,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
  * and break the hang. This should work on
  * all but the second generation chipsets.
  */
-tmp = I915_READ_CTL(engine);
+tmp = ENGINE_READ(engine, RING_CTL);
 if (tmp & RING_WAIT) {
 i915_handle_error(dev_priv, engine->mask, 0,
 "stuck wait on %s", engine->name);
-I915_WRITE_CTL(engine, tmp);
+ENGINE_WRITE(engine, RING_CTL, tmp);
 return ENGINE_WAIT_KICK;
 }
@@ -2074,16 +2074,14 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 
 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
-I915_WRITE_IMR(engine,
-~(engine->irq_enable_mask | engine->irq_keep_mask));
-POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ENGINE_WRITE(engine, RING_IMR,
+~(engine->irq_enable_mask | engine->irq_keep_mask));
+ENGINE_POSTING_READ(engine, RING_IMR);
 }
 
 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
-I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
 }
 
 static int gen8_emit_flush(struct i915_request *request, u32 mode)
@@ -2288,7 +2286,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 dev_priv = engine->i915;
 
 if (engine->buffer) {
-WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 }
 
 if (engine->cleanup)
@@ -2400,6 +2398,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *i915 = engine->i915;
 struct intel_engine_execlists * const execlists = &engine->execlists;
+u32 base = engine->mmio_base;
 int ret;
 
 ret = intel_engine_init_common(engine);
@@ -2410,12 +2409,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 
 if (HAS_LOGICAL_RING_ELSQ(i915)) {
 execlists->submit_reg = i915->uncore.regs +
-i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
+i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
 execlists->ctrl_reg = i915->uncore.regs +
-i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
+i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
 } else {
 execlists->submit_reg = i915->uncore.regs +
-i915_mmio_reg_offset(RING_ELSP(engine));
+i915_mmio_reg_offset(RING_ELSP(base));
 }
 
 execlists->preempt_complete_status = ~0u;
@@ -2658,7 +2657,7 @@ static void execlists_init_reg_state(u32 *regs,
 regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
 MI_LRI_FORCE_POSTED;
 
-CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
+CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
 _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
 if (INTEL_GEN(engine->i915) < 11) {
@@ -28,20 +28,20 @@
 #include "i915_gem_context.h"
 
 /* Execlists regs */
-#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
-#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234)
-#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244)
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
 #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
 #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
-#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
+#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
 #define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
-#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510)
-#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550)
+#define RING_CONTEXT_STATUS_BUF_BASE(base) _MMIO((base) + 0x370)
+#define RING_CONTEXT_STATUS_BUF_LO(base, i) _MMIO((base) + 0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(base, i) _MMIO((base) + 0x370 + (i) * 8 + 4)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
 #define EL_CTRL_LOAD (1 << 0)
 
 /* The docs specify that the write pointer wraps around after 5h, "After status
@@ -575,19 +575,19 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 static void flush_cs_tlb(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *dev_priv = engine->i915;
-i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
 
 if (!IS_GEN_RANGE(dev_priv, 6, 7))
 return;
 
 /* ring should be idle before issuing a sync flush*/
-WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 
-I915_WRITE(instpm,
-_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
-INSTPM_SYNC_FLUSH));
-if (intel_wait_for_register(&dev_priv->uncore,
-instpm, INSTPM_SYNC_FLUSH, 0,
+ENGINE_WRITE(engine, RING_INSTPM,
+_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+INSTPM_SYNC_FLUSH));
+if (intel_wait_for_register(engine->uncore,
+RING_INSTPM(engine->mmio_base),
+INSTPM_SYNC_FLUSH, 0,
 1000))
 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
 engine->name);
@@ -606,32 +606,36 @@ static bool stop_ring(struct intel_engine_cs *engine)
 struct drm_i915_private *dev_priv = engine->i915;
 
 if (INTEL_GEN(dev_priv) > 2) {
-I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-if (intel_wait_for_register(&dev_priv->uncore,
+ENGINE_WRITE(engine,
+RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
+if (intel_wait_for_register(engine->uncore,
 RING_MI_MODE(engine->mmio_base),
 MODE_IDLE,
 MODE_IDLE,
 1000)) {
 DRM_ERROR("%s : timed out trying to stop ring\n",
 engine->name);
-/* Sometimes we observe that the idle flag is not
+
+/*
+ * Sometimes we observe that the idle flag is not
  * set even though the ring is empty. So double
  * check before giving up.
  */
-if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
+if (ENGINE_READ(engine, RING_HEAD) !=
+ENGINE_READ(engine, RING_TAIL))
 return false;
 }
 }
 
-I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
+ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
 
-I915_WRITE_HEAD(engine, 0);
-I915_WRITE_TAIL(engine, 0);
+ENGINE_WRITE(engine, RING_HEAD, 0);
+ENGINE_WRITE(engine, RING_TAIL, 0);
 
 /* The ring must be empty before it is disabled */
-I915_WRITE_CTL(engine, 0);
+ENGINE_WRITE(engine, RING_CTL, 0);
 
-return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
+return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
 }
 
 static int init_ring_common(struct intel_engine_cs *engine)
@@ -640,26 +644,26 @@ static int init_ring_common(struct intel_engine_cs *engine)
 struct intel_ring *ring = engine->buffer;
 int ret = 0;
 
-intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
 
 if (!stop_ring(engine)) {
 /* G45 ring initialization often fails to reset head to zero */
 DRM_DEBUG_DRIVER("%s head not reset to zero "
 "ctl %08x head %08x tail %08x start %08x\n",
 engine->name,
-I915_READ_CTL(engine),
-I915_READ_HEAD(engine),
-I915_READ_TAIL(engine),
-I915_READ_START(engine));
+ENGINE_READ(engine, RING_CTL),
+ENGINE_READ(engine, RING_HEAD),
+ENGINE_READ(engine, RING_TAIL),
+ENGINE_READ(engine, RING_START));
 
 if (!stop_ring(engine)) {
 DRM_ERROR("failed to set %s head to zero "
 "ctl %08x head %08x tail %08x start %08x\n",
 engine->name,
-I915_READ_CTL(engine),
-I915_READ_HEAD(engine),
-I915_READ_TAIL(engine),
-I915_READ_START(engine));
+ENGINE_READ(engine, RING_CTL),
+ENGINE_READ(engine, RING_HEAD),
+ENGINE_READ(engine, RING_TAIL),
+ENGINE_READ(engine, RING_START));
 ret = -EIO;
 goto out;
 }
@@ -673,18 +677,18 @@ static int init_ring_common(struct intel_engine_cs *engine)
 intel_engine_reset_breadcrumbs(engine);
 
 /* Enforce ordering by reading HEAD register back */
-I915_READ_HEAD(engine);
+ENGINE_READ(engine, RING_HEAD);
 
 /* Initialize the ring. This must happen _after_ we've cleared the ring
  * registers with the above sequence (the readback of the HEAD registers
  * also enforces ordering), otherwise the hw might lose the new ring
  * register values. */
-I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
+ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
 
 /* WaClearRingBufHeadRegAtInit:ctg,elk */
-if (I915_READ_HEAD(engine))
+if (ENGINE_READ(engine, RING_HEAD))
 DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
-engine->name, I915_READ_HEAD(engine));
+engine->name, ENGINE_READ(engine, RING_HEAD));
 
 /* Check that the ring offsets point within the ring! */
 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -692,43 +696,44 @@ static int init_ring_common(struct intel_engine_cs *engine)
 intel_ring_update_space(ring);
 
 /* First wake the ring up to an empty/idle ring */
-I915_WRITE_HEAD(engine, ring->head);
-I915_WRITE_TAIL(engine, ring->head);
-(void)I915_READ_TAIL(engine);
+ENGINE_WRITE(engine, RING_HEAD, ring->head);
+ENGINE_WRITE(engine, RING_TAIL, ring->head);
+ENGINE_POSTING_READ(engine, RING_TAIL);
 
-I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
+ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
 
 /* If the head is still not zero, the ring is dead */
-if (intel_wait_for_register(&dev_priv->uncore,
+if (intel_wait_for_register(engine->uncore,
 RING_CTL(engine->mmio_base),
 RING_VALID, RING_VALID,
 50)) {
 DRM_ERROR("%s initialization failed "
 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
 engine->name,
-I915_READ_CTL(engine),
-I915_READ_CTL(engine) & RING_VALID,
-I915_READ_HEAD(engine), ring->head,
-I915_READ_TAIL(engine), ring->tail,
-I915_READ_START(engine),
+ENGINE_READ(engine, RING_CTL),
+ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ENGINE_READ(engine, RING_HEAD), ring->head,
+ENGINE_READ(engine, RING_TAIL), ring->tail,
+ENGINE_READ(engine, RING_START),
 i915_ggtt_offset(ring->vma));
 ret = -EIO;
 goto out;
 }
 
 if (INTEL_GEN(dev_priv) > 2)
-I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ENGINE_WRITE(engine,
+RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 
 /* Now awake, let it get started */
 if (ring->tail != ring->head) {
-I915_WRITE_TAIL(engine, ring->tail);
-(void)I915_READ_TAIL(engine);
+ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+ENGINE_POSTING_READ(engine, RING_TAIL);
 }
 
 /* Papering over lost _interrupts_ immediately following the restart */
 intel_engine_queue_breadcrumbs(engine);
 out:
-intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
 
 return ret;
 }
@@ -869,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 if (INTEL_GEN(dev_priv) >= 6)
-I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
 
 return 0;
 }
@@ -896,12 +901,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
 
 static void i9xx_submit_request(struct i915_request *request)
 {
-struct drm_i915_private *dev_priv = request->i915;
-
 i915_request_submit(request);
 
-I915_WRITE_TAIL(request->engine,
-intel_ring_set_tail(request->ring, request->tail));
+ENGINE_WRITE(request->engine, RING_TAIL,
+intel_ring_set_tail(request->ring, request->tail));
 }
 
 static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -973,20 +976,20 @@ gen5_irq_disable(struct intel_engine_cs *engine)
 static void
 i9xx_irq_enable(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
 GEM_BUG_ON(engine->id != RCS0);
 
-dev_priv->irq_mask &= ~engine->irq_enable_mask;
-I915_WRITE(IMR, dev_priv->irq_mask);
-POSTING_READ_FW(RING_IMR(engine->mmio_base));
+engine->i915->irq_mask &= ~engine->irq_enable_mask;
+ENGINE_WRITE(engine, RING_IMR, engine->i915->irq_mask);
+ENGINE_POSTING_READ(engine, RING_IMR);
 }
 
 static void
 i9xx_irq_disable(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
 GEM_BUG_ON(engine->id != RCS0);
 
-dev_priv->irq_mask |= engine->irq_enable_mask;
-I915_WRITE(IMR, dev_priv->irq_mask);
+engine->i915->irq_mask |= engine->irq_enable_mask;
+ENGINE_WRITE(engine, RING_IMR, engine->i915->irq_mask);
 }
 
 static void
@@ -1026,47 +1029,38 @@ bsd_ring_flush(struct i915_request *rq, u32 mode)
 static void
 gen6_irq_enable(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
-
-I915_WRITE_IMR(engine,
-~(engine->irq_enable_mask |
-engine->irq_keep_mask));
+ENGINE_WRITE(engine, RING_IMR,
+~(engine->irq_enable_mask | engine->irq_keep_mask));
 
 /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
-POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ENGINE_POSTING_READ(engine, RING_IMR);
 
-gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
+gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
 gen6_irq_disable(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
-
-I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
+ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
-
-I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
 
 /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
-POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ENGINE_POSTING_READ(engine, RING_IMR);
 
-gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
+gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 {
-struct drm_i915_private *dev_priv = engine->i915;
-
-I915_WRITE_IMR(engine, ~0);
-gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
+ENGINE_WRITE(engine, RING_IMR, ~0);
+gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static int
@@ -1577,7 +1571,7 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 struct drm_i915_private *dev_priv = engine->i915;
 
 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
-(I915_READ_MODE(engine) & MODE_IDLE) == 0);
+(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 
 intel_ring_unpin(engine->buffer);
 intel_ring_put(engine->buffer);
@@ -1612,11 +1606,11 @@ static int load_pd_dir(struct i915_request *rq,
 return PTR_ERR(cs);
 
 *cs++ = MI_LOAD_REGISTER_IMM(1);
-*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
 *cs++ = PP_DIR_DCLV_2G;
 
 *cs++ = MI_LOAD_REGISTER_IMM(1);
-*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
 *cs++ = ppgtt->pd.base.ggtt_offset << 10;
 
 intel_ring_advance(rq, cs);
@@ -1635,7 +1629,7 @@ static int flush_pd_dir(struct i915_request *rq)
 
 /* Stall until the page table load is complete */
 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
-*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
 *cs++ = i915_scratch_offset(rq->i915);
 *cs++ = MI_NOOP;
@@ -2052,7 +2046,7 @@ int intel_ring_cacheline_align(struct i915_request *rq)
 
 static void gen6_bsd_submit_request(struct i915_request *request)
 {
-struct intel_uncore *uncore = &request->i915->uncore;
+struct intel_uncore *uncore = request->engine->uncore;
 
 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
@@ -29,23 +29,44 @@ struct drm_printer;
 #define CACHELINE_BYTES 64
 #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
 
-#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
-#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
+/*
+ * The register defines to be used with the following macros need to accept a
+ * base param, e.g:
+ *
+ * REG_FOO(base) _MMIO((base) + <relative offset>)
+ * ENGINE_READ(engine, REG_FOO);
+ *
+ * register arrays are to be defined and accessed as follows:
+ *
+ * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
+ * ENGINE_READ_IDX(engine, REG_BAR, i)
+ */
 
-#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
-#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
+#define __ENGINE_REG_OP(op__, engine__, ...) \
+intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)
 
-#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
-#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
+#define __ENGINE_READ_OP(op__, engine__, reg__) \
+__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))
 
-#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
-#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
+#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
+#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
+#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
 
-#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
-#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
+#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
+__ENGINE_REG_OP(read64_2x32, (engine__), \
+lower_reg__((engine__)->mmio_base), \
+upper_reg__((engine__)->mmio_base))
 
-#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
-#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
+#define ENGINE_READ_IDX(engine__, reg__, idx__) \
+__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))
+
+#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
+__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))
+
+#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__)
+#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
+#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
 
 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
@@ -1814,7 +1814,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
 
 /**
  * __intel_wait_for_register - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
  * @reg: the register to read
  * @mask: mask to apply to register value
 * @value: expected value