mirror of
https://github.com/torvalds/linux.git
synced 2024-11-11 22:51:42 +00:00
Merge tag 'gvt-fixes-2018-09-04' of https://github.com/intel/gvt-linux into drm-intel-fixes
gvt-fixes-2018-09-04 - two BXT virtual display emulation fixes (Colin) - gen9 dbuf guest warning fix (Xiaolin) - vgpu close pm warning fix (Hang) - dmabuf format_mod fix (Zhenyu) - multiple VM guest failure fix for scheduling (Zhenyu) Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com> From: Zhenyu Wang <zhenyuw@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20180904025437.GE20737@zhen-hp.sh.intel.com
This commit is contained in:
commit
f518cd94ec
@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
|
||||
unsigned int tiling_mode = 0;
|
||||
unsigned int stride = 0;
|
||||
|
||||
switch (info->drm_format_mod << 10) {
|
||||
case PLANE_CTL_TILED_LINEAR:
|
||||
switch (info->drm_format_mod) {
|
||||
case DRM_FORMAT_MOD_LINEAR:
|
||||
tiling_mode = I915_TILING_NONE;
|
||||
break;
|
||||
case PLANE_CTL_TILED_X:
|
||||
case I915_FORMAT_MOD_X_TILED:
|
||||
tiling_mode = I915_TILING_X;
|
||||
stride = info->stride;
|
||||
break;
|
||||
case PLANE_CTL_TILED_Y:
|
||||
case I915_FORMAT_MOD_Y_TILED:
|
||||
case I915_FORMAT_MOD_Yf_TILED:
|
||||
tiling_mode = I915_TILING_Y;
|
||||
stride = info->stride;
|
||||
break;
|
||||
default:
|
||||
gvt_dbg_core("not supported tiling mode\n");
|
||||
gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
|
||||
info->drm_format_mod);
|
||||
}
|
||||
obj->tiling_and_stride = tiling_mode | stride;
|
||||
} else {
|
||||
@ -222,7 +224,24 @@ static int vgpu_get_plane_info(struct drm_device *dev,
|
||||
info->height = p.height;
|
||||
info->stride = p.stride;
|
||||
info->drm_format = p.drm_format;
|
||||
info->drm_format_mod = p.tiled;
|
||||
|
||||
switch (p.tiled) {
|
||||
case PLANE_CTL_TILED_LINEAR:
|
||||
info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
|
||||
break;
|
||||
case PLANE_CTL_TILED_X:
|
||||
info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
|
||||
break;
|
||||
case PLANE_CTL_TILED_Y:
|
||||
info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
|
||||
break;
|
||||
case PLANE_CTL_TILED_YF:
|
||||
info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
|
||||
break;
|
||||
default:
|
||||
gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
|
||||
}
|
||||
|
||||
info->size = (((p.stride * p.height * p.bpp) / 8) +
|
||||
(PAGE_SIZE - 1)) >> PAGE_SHIFT;
|
||||
} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
|
||||
|
@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
|
||||
if (IS_SKYLAKE(dev_priv)
|
||||
|| IS_KABYLAKE(dev_priv)
|
||||
|| IS_BROXTON(dev_priv)) {
|
||||
plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
|
||||
_PLANE_CTL_TILED_SHIFT;
|
||||
plane->tiled = val & PLANE_CTL_TILED_MASK;
|
||||
fmt = skl_format_to_drm(
|
||||
val & PLANE_CTL_FORMAT_MASK,
|
||||
val & PLANE_CTL_ORDER_RGBX,
|
||||
@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
|
||||
plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
|
||||
(IS_SKYLAKE(dev_priv)
|
||||
|| IS_KABYLAKE(dev_priv)
|
||||
|| IS_BROXTON(dev_priv)) ?
|
||||
|
@ -101,7 +101,7 @@ struct intel_gvt;
|
||||
/* color space conversion and gamma correction are not included */
|
||||
struct intel_vgpu_primary_plane_format {
|
||||
u8 enabled; /* plane is enabled */
|
||||
u8 tiled; /* X-tiled */
|
||||
u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */
|
||||
u8 bpp; /* bits per pixel */
|
||||
u32 hw_format; /* format field in the PRI_CTL register */
|
||||
u32 drm_format; /* format in DRM definition */
|
||||
|
@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
|
||||
unsigned int offset, void *p_data, unsigned int bytes)
|
||||
{
|
||||
write_vreg(vgpu, offset, p_data, bytes);
|
||||
|
||||
if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
|
||||
vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
|
||||
else
|
||||
vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
|
||||
unsigned int offset, void *p_data, unsigned int bytes)
|
||||
{
|
||||
@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
|
||||
u32 v = *(u32 *)p_data;
|
||||
u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
|
||||
|
||||
switch (offset) {
|
||||
case _PHY_CTL_FAMILY_EDP:
|
||||
vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
|
||||
break;
|
||||
case _PHY_CTL_FAMILY_DDI:
|
||||
vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
|
||||
vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
|
||||
break;
|
||||
}
|
||||
|
||||
vgpu_vreg(vgpu, offset) = v;
|
||||
|
||||
@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
|
||||
skl_power_well_ctl_write);
|
||||
|
||||
MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
|
||||
|
||||
MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
|
||||
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
|
||||
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
|
||||
@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||
NULL, gen9_trtte_write);
|
||||
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
|
||||
|
||||
MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
|
||||
|
||||
MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
|
||||
|
||||
MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
|
||||
@ -3026,6 +3045,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
|
||||
NULL, NULL);
|
||||
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
|
||||
NULL, NULL);
|
||||
|
||||
MMIO_D(_MMIO(0x4ab8), D_KBL);
|
||||
MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
|
||||
|
@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
|
||||
* performance for batch mmio read/write, so we need
|
||||
* handle forcewake manually.
|
||||
*/
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
switch_mmio(pre, next, ring_id);
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
|
||||
return false;
|
||||
}
|
||||
|
||||
/* We give 2 seconds higher prio for vGPU during start */
|
||||
#define GVT_SCHED_VGPU_PRI_TIME 2
|
||||
|
||||
struct vgpu_sched_data {
|
||||
struct list_head lru_list;
|
||||
struct intel_vgpu *vgpu;
|
||||
bool active;
|
||||
|
||||
bool pri_sched;
|
||||
ktime_t pri_time;
|
||||
ktime_t sched_in_time;
|
||||
ktime_t sched_time;
|
||||
ktime_t left_ts;
|
||||
@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
|
||||
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
|
||||
continue;
|
||||
|
||||
if (vgpu_data->pri_sched) {
|
||||
if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
|
||||
vgpu = vgpu_data->vgpu;
|
||||
break;
|
||||
} else
|
||||
vgpu_data->pri_sched = false;
|
||||
}
|
||||
|
||||
/* Return the vGPU only if it has time slice left */
|
||||
if (vgpu_data->left_ts > 0) {
|
||||
vgpu = vgpu_data->vgpu;
|
||||
@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
struct vgpu_sched_data *vgpu_data;
|
||||
struct intel_vgpu *vgpu = NULL;
|
||||
|
||||
/* no active vgpu or has already had a target */
|
||||
if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
|
||||
goto out;
|
||||
@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
|
||||
vgpu = find_busy_vgpu(sched_data);
|
||||
if (vgpu) {
|
||||
scheduler->next_vgpu = vgpu;
|
||||
|
||||
/* Move the last used vGPU to the tail of lru_list */
|
||||
vgpu_data = vgpu->sched_data;
|
||||
if (!vgpu_data->pri_sched) {
|
||||
/* Move the last used vGPU to the tail of lru_list */
|
||||
list_del_init(&vgpu_data->lru_list);
|
||||
list_add_tail(&vgpu_data->lru_list,
|
||||
&sched_data->lru_runq_head);
|
||||
}
|
||||
} else {
|
||||
scheduler->next_vgpu = gvt->idle_vgpu;
|
||||
}
|
||||
@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
|
||||
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
|
||||
ktime_t now;
|
||||
|
||||
if (!list_empty(&vgpu_data->lru_list))
|
||||
return;
|
||||
|
||||
list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
|
||||
now = ktime_get();
|
||||
vgpu_data->pri_time = ktime_add(now,
|
||||
ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
|
||||
vgpu_data->pri_sched = true;
|
||||
|
||||
list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
|
||||
|
||||
if (!hrtimer_active(&sched_data->timer))
|
||||
hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
|
||||
@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
|
||||
&vgpu->gvt->scheduler;
|
||||
int ring_id;
|
||||
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
if (!vgpu_data->active)
|
||||
return;
|
||||
@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
|
||||
scheduler->current_vgpu = NULL;
|
||||
}
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
spin_lock_bh(&scheduler->mmio_context_lock);
|
||||
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
|
||||
if (scheduler->engine_owner[ring_id] == vgpu) {
|
||||
@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&scheduler->mmio_context_lock);
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
mutex_unlock(&vgpu->gvt->sched_lock);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user