Merge tag 'drm-intel-next-2019-01-24' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Track all runtime-PM wakerefs and other rpm improvements (Chris)
- Fix ILK-IVB primary plane enable delays (Juha-Pekka)
- Differentiate between gtt->mutex and ppgtt->mutex (Chris)
- Prevent concurrent GGTT update and use on Braswell (Chris)
- Fix CNL macros for DDI vswing (Aditya)
- Fix static code analysis warning (RK)
- Only dump GPU state on set-wedged if interesting (Chris)
- Port F detection improvements (Imre)
- userptr mutex lock fixes (Chris)
- Fix MST allocation by propagating the error value at compute_config (Lyude)
- Serialise concurrent calls to set_wedge (Chris)
- Unify reset functionality into i915_reset.c (Chris)
- Switch to kernel fixed size types (Jani)
- Limit the for_each_set_bit to the valid range (Chris)
- Fix wakeref cookie handling (Tvrtko)
- IRQ handling improvements (Chris)
- Selftests improvements (Chris)
- Remove superfluous PANEL_POWER_OFF macro (Jani)
- Global seqno fix (Chris)
- DSI fixes (Hans)
- Refactor out intel_context_init() (Chris)
- Show all active engines on hangcheck (Chris)
- PSR2 fixes and improvements (Jose)
- Do a posting read after irq install on Ice Lake (Daniele)
- Add a few more device IDs for Ice Lake (Rodrigo)
- Mark up priority boost on preemption (Chris)
- Add color management LUT validation helper (Matt)
- Split out intel_crt_present to platform specific setup (Jani)
- LVDS and TV clean up and improvements (Jani)
- Simplify CRT VBT check for per-VLV/DDI (Jani)
- De-inline intel_context_init() (Chris)
- Backlight fixes (Maarten)
- Enable fastset for non-boot modesets (Maarten)
- Make HW readout mark CRTC scaler as in use (Maarten)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190128181000.GA5284@intel.com
commit fb27a3cb9c
Dave Airlie, 2019-02-01 09:34:49 +10:00
121 changed files with 5030 additions and 4092 deletions


@ -462,3 +462,47 @@ int drm_plane_create_color_properties(struct drm_plane *plane,
return 0;
}
EXPORT_SYMBOL(drm_plane_create_color_properties);
/**
* drm_color_lut_check - check validity of lookup table
* @lut: property blob containing LUT to check
* @tests: bitmask of tests to run
*
* Helper to check whether a userspace-provided lookup table is valid and
* satisfies hardware requirements. Drivers pass a bitmask indicating which of
* the tests in &drm_color_lut_tests should be performed.
*
* Returns 0 on success, -EINVAL on failure.
*/
int drm_color_lut_check(struct drm_property_blob *lut,
uint32_t tests)
{
struct drm_color_lut *entry;
int i;
if (!lut || !tests)
return 0;
entry = lut->data;
for (i = 0; i < drm_color_lut_size(lut); i++) {
if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) {
if (entry[i].red != entry[i].blue ||
entry[i].red != entry[i].green) {
DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n");
return -EINVAL;
}
}
if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) {
if (entry[i].red < entry[i - 1].red ||
entry[i].green < entry[i - 1].green ||
entry[i].blue < entry[i - 1].blue) {
DRM_DEBUG_KMS("LUT entries must never decrease.\n");
return -EINVAL;
}
}
}
return 0;
}
EXPORT_SYMBOL(drm_color_lut_check);
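
As a usage illustration (not part of this patch): a driver that can only program monotonic, single-channel gamma ramps might call the new helper from its atomic check hook. The callback name and the hardware constraint here are assumptions for the sketch; only drm_color_lut_check() and the two test flags come from the patch above.

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *state)
{
	/* Reject gamma LUTs the (hypothetical) hardware cannot load:
	 * entries must never decrease and must use one value for all
	 * three channels.
	 */
	return drm_color_lut_check(state->gamma_lut,
				   DRM_COLOR_LUT_EQUAL_CHANNELS |
				   DRM_COLOR_LUT_NON_DECREASING);
}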


@ -21,11 +21,11 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
select STACKDEPOT
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
select DRM_DEBUG_SELFTEST
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
@ -173,6 +173,7 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
select STACKDEPOT
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during


@ -40,9 +40,10 @@ i915-y := i915_drv.o \
i915_mm.o \
i915_params.o \
i915_pci.o \
i915_reset.o \
i915_suspend.o \
i915_syncmap.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_sysfs.o \
intel_csr.o \
intel_device_info.o \
@ -166,6 +167,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_flush_test.o \
selftests/igt_live_test.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
@ -198,3 +200,4 @@ endif
i915-y += intel_lpe_audio.o
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o


@ -7,4 +7,3 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o


@ -180,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
@ -206,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
@ -219,7 +219,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
return -ENOSPC;
}
@ -317,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
}
/**


@ -55,10 +55,10 @@ struct sub_op_bits {
int low;
};
struct decode_info {
char *name;
const char *name;
int op_len;
int nr_sub_op;
struct sub_op_bits *sub_op;
const struct sub_op_bits *sub_op;
};
#define MAX_CMD_BUDGET 0x7fffffff
@ -375,7 +375,7 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
struct cmd_info {
char *name;
const char *name;
u32 opcode;
#define F_LEN_MASK (1U<<0)
@ -399,10 +399,10 @@ struct cmd_info {
#define R_VECS (1 << VECS)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
uint16_t rings;
u16 rings;
/* devices that support this cmd: SNB/IVB/HSW/... */
uint16_t devices;
u16 devices;
/* which DWords are address that need fix up.
* bit 0 means a 32-bit non address operand in command
@ -412,20 +412,20 @@ struct cmd_info {
* No matter the address length, each address only takes
* one bit in the bitmap.
*/
uint16_t addr_bitmap;
u16 addr_bitmap;
/* flag == F_LEN_CONST : command length
* flag == F_LEN_VAR : length bias bits
* Note: length is in DWord
*/
uint8_t len;
u8 len;
parser_cmd_handler handler;
};
struct cmd_entry {
struct hlist_node hlist;
struct cmd_info *info;
const struct cmd_info *info;
};
enum {
@ -474,7 +474,7 @@ struct parser_exec_state {
int saved_buf_addr_type;
bool is_ctx_wa;
struct cmd_info *info;
const struct cmd_info *info;
struct intel_vgpu_workload *workload;
};
@ -485,12 +485,12 @@ struct parser_exec_state {
static unsigned long bypass_scan_mask = 0;
/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
static const struct sub_op_bits sub_op_mi[] = {
{31, 29},
{28, 23},
};
static struct decode_info decode_info_mi = {
static const struct decode_info decode_info_mi = {
"MI",
OP_LEN_MI,
ARRAY_SIZE(sub_op_mi),
@ -498,12 +498,12 @@ static struct decode_info decode_info_mi = {
};
/* ring RCS, command type 2 */
static struct sub_op_bits sub_op_2d[] = {
static const struct sub_op_bits sub_op_2d[] = {
{31, 29},
{28, 22},
};
static struct decode_info decode_info_2d = {
static const struct decode_info decode_info_2d = {
"2D",
OP_LEN_2D,
ARRAY_SIZE(sub_op_2d),
@ -511,14 +511,14 @@ static struct decode_info decode_info_2d = {
};
/* ring RCS, command type 3 */
static struct sub_op_bits sub_op_3d_media[] = {
static const struct sub_op_bits sub_op_3d_media[] = {
{31, 29},
{28, 27},
{26, 24},
{23, 16},
};
static struct decode_info decode_info_3d_media = {
static const struct decode_info decode_info_3d_media = {
"3D_Media",
OP_LEN_3D_MEDIA,
ARRAY_SIZE(sub_op_3d_media),
@ -526,7 +526,7 @@ static struct decode_info decode_info_3d_media = {
};
/* ring VCS, command type 3 */
static struct sub_op_bits sub_op_mfx_vc[] = {
static const struct sub_op_bits sub_op_mfx_vc[] = {
{31, 29},
{28, 27},
{26, 24},
@ -534,7 +534,7 @@ static struct sub_op_bits sub_op_mfx_vc[] = {
{20, 16},
};
static struct decode_info decode_info_mfx_vc = {
static const struct decode_info decode_info_mfx_vc = {
"MFX_VC",
OP_LEN_MFX_VC,
ARRAY_SIZE(sub_op_mfx_vc),
@ -542,7 +542,7 @@ static struct decode_info decode_info_mfx_vc = {
};
/* ring VECS, command type 3 */
static struct sub_op_bits sub_op_vebox[] = {
static const struct sub_op_bits sub_op_vebox[] = {
{31, 29},
{28, 27},
{26, 24},
@ -550,14 +550,14 @@ static struct sub_op_bits sub_op_vebox[] = {
{20, 16},
};
static struct decode_info decode_info_vebox = {
static const struct decode_info decode_info_vebox = {
"VEBOX",
OP_LEN_VEBOX,
ARRAY_SIZE(sub_op_vebox),
sub_op_vebox,
};
static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
[RCS] = {
&decode_info_mi,
NULL,
@ -616,7 +616,7 @@ static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
static inline u32 get_opcode(u32 cmd, int ring_id)
{
struct decode_info *d_info;
const struct decode_info *d_info;
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
@ -625,7 +625,7 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
return cmd >> (32 - d_info->op_len);
}
static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
unsigned int opcode, int ring_id)
{
struct cmd_entry *e;
@ -638,7 +638,7 @@ static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
return NULL;
}
static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
u32 cmd, int ring_id)
{
u32 opcode;
@ -657,7 +657,7 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
static inline void print_opcode(u32 cmd, int ring_id)
{
struct decode_info *d_info;
const struct decode_info *d_info;
int i;
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
@ -776,7 +776,7 @@ static inline int ip_gma_advance(struct parser_exec_state *s,
return 0;
}
static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
{
if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
return info->len;
@ -901,7 +901,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* It's good enough to support initializing mmio by lri command in
* vgpu inhibit context on KBL.
*/
if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
|| IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
intel_gvt_mmio_is_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
@ -1280,9 +1281,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@ -1310,9 +1309,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 9) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@ -1336,9 +1333,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv))
if (INTEL_GEN(dev_priv) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@ -1643,8 +1638,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
{
unsigned long gma = 0;
struct cmd_info *info;
uint32_t cmd_len = 0;
const struct cmd_info *info;
u32 cmd_len = 0;
bool bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
@ -1842,7 +1837,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
static int mi_noop_index;
static struct cmd_info cmd_info[] = {
static const struct cmd_info cmd_info[] = {
{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
@ -2521,7 +2516,7 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
static int cmd_parser_exec(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
struct cmd_info *info;
const struct cmd_info *info;
u32 cmd;
int ret = 0;
@ -2683,7 +2678,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
I915_GTT_PAGE_SIZE)))
return -EINVAL;
ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
PAGE_SIZE);
gma_head = wa_ctx->indirect_ctx.guest_gma;
@ -2850,7 +2845,7 @@ put_obj:
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
u32 per_ctx_start[CACHELINE_DWORDS] = {0};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)
@ -2895,10 +2890,10 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
unsigned int opcode, unsigned long rings)
{
struct cmd_info *info = NULL;
const struct cmd_info *info = NULL;
unsigned int ring;
for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
@ -2913,7 +2908,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
{
int i;
struct cmd_entry *e;
struct cmd_info *info;
const struct cmd_info *info;
unsigned int gen_type;
gen_type = intel_gvt_get_device_type(gvt);


@ -198,7 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv)) {
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@ -273,7 +274,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
@ -453,7 +455,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
@ -476,7 +479,8 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
intel_vgpu_init_i2c_edid(vgpu);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
resolution);
else


@ -29,7 +29,6 @@
*/
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <linux/vfio.h>
#include "i915_drv.h"
@ -164,9 +163,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 9) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;


@ -77,16 +77,32 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
return chr;
}
static inline int cnp_get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
if (port_select == GMBUS_PIN_1_BXT)
port = PORT_B;
else if (port_select == GMBUS_PIN_2_BXT)
port = PORT_C;
else if (port_select == GMBUS_PIN_3_BXT)
port = PORT_D;
else if (port_select == GMBUS_PIN_4_CNP)
port = PORT_E;
return port;
}
static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
if (port_select == 1)
if (port_select == GMBUS_PIN_1_BXT)
port = PORT_B;
else if (port_select == 2)
else if (port_select == GMBUS_PIN_2_BXT)
port = PORT_C;
else if (port_select == 3)
else if (port_select == GMBUS_PIN_3_BXT)
port = PORT_D;
return port;
}
@ -96,13 +112,13 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
if (port_select == 2)
if (port_select == GMBUS_PIN_VGADDC)
port = PORT_E;
else if (port_select == 4)
else if (port_select == GMBUS_PIN_DPC)
port = PORT_C;
else if (port_select == 5)
else if (port_select == GMBUS_PIN_DPB)
port = PORT_B;
else if (port_select == 6)
else if (port_select == GMBUS_PIN_DPD)
port = PORT_D;
return port;
}
@ -133,6 +149,8 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (IS_BROXTON(dev_priv))
port = bxt_get_port_from_gmbus0(pin_select);
else if (IS_COFFEELAKE(dev_priv))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))


@ -151,9 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 9) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@ -217,9 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 9) {
plane->tiled = val & PLANE_CTL_TILED_MASK;
fmt = skl_format_to_drm(
val & PLANE_CTL_FORMAT_MASK,
@ -260,9 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
(IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) ?
(INTEL_GEN(dev_priv) >= 9) ?
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);


@ -187,52 +187,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.write_protect_handler = intel_vgpu_page_track_handler,
};
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
*
* This function is called at the driver loading stage. If failed to find a
* loadable MPT module or detect currently we're running in a VM, then GVT-g
* will be disabled
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_gvt_init_host(void)
{
if (intel_gvt_host.initialized)
return 0;
/* Xen DOM U */
if (xen_domain() && !xen_initial_domain())
return -ENODEV;
/* Try to load MPT modules for hypervisors */
if (xen_initial_domain()) {
/* In Xen dom0 */
intel_gvt_host.mpt = try_then_request_module(
symbol_get(xengt_mpt), "xengt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
} else {
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
/* not in Xen. Try KVMGT */
intel_gvt_host.mpt = try_then_request_module(
symbol_get(kvmgt_mpt), "kvmgt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
#endif
}
/* Fail to load MPT modules - bail out */
if (!intel_gvt_host.mpt)
return -EINVAL;
gvt_dbg_core("Running with hypervisor %s in host mode\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
intel_gvt_host.initialized = true;
return 0;
}
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
@ -316,7 +270,6 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
@ -352,13 +305,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
struct intel_vgpu *vgpu;
int ret;
/*
* Cannot initialize GVT device without intel_gvt_host gets
* initialized first.
*/
if (WARN_ON(!intel_gvt_host.initialized))
return -EINVAL;
if (WARN_ON(dev_priv->gvt))
return -EEXIST;
@ -420,13 +366,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
goto out_clean_types;
}
ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
&intel_gvt_ops);
if (ret) {
gvt_err("failed to register gvt-g host device: %d\n", ret);
goto out_clean_types;
}
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
@ -441,6 +380,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
intel_gvt_host.initialized = true;
return 0;
out_clean_types:
@ -467,6 +408,45 @@ out_clean_idr:
return ret;
}
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
MODULE_SOFTDEP("pre: kvmgt");
#endif
int
intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
{
int ret;
void *gvt;
if (!intel_gvt_host.initialized)
return -ENODEV;
if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
m->type != INTEL_GVT_HYPERVISOR_XEN)
return -EINVAL;
/* Get a reference for device model module */
if (!try_module_get(THIS_MODULE))
return -ENODEV;
intel_gvt_host.mpt = m;
intel_gvt_host.hypervisor_type = m->type;
gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
&intel_gvt_ops);
if (ret < 0) {
gvt_err("Failed to init %s hypervisor module\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
module_put(THIS_MODULE);
return -ENODEV;
}
gvt_dbg_core("Running with hypervisor %s in host mode\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
void
intel_gvt_unregister_hypervisor(void)
{
intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
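
For reference, a minimal sketch of how a hypervisor MPT module would use the new entry points; the module and struct names are hypothetical, and the in-tree kvmgt changes later in this commit follow the same pattern in kvmgt_init()/kvmgt_exit():

static struct intel_gvt_mpt example_mpt = {
	.type = INTEL_GVT_HYPERVISOR_KVM,
	/* .host_init, .host_exit and the other MPT callbacks
	 * would be filled in here.
	 */
};

static int __init example_init(void)
{
	/* Returns -ENODEV until the GVT-g device has initialized. */
	return intel_gvt_register_hypervisor(&example_mpt);
}

static void __exit example_exit(void)
{
	intel_gvt_unregister_hypervisor();
}

module_init(example_init);
module_exit(example_exit);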


@ -52,12 +52,8 @@
#define GVT_MAX_VGPU 8
enum {
INTEL_GVT_HYPERVISOR_XEN = 0,
INTEL_GVT_HYPERVISOR_KVM,
};
struct intel_gvt_host {
struct device *dev;
bool initialized;
int hypervisor_type;
struct intel_gvt_mpt *mpt;
@ -597,7 +593,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
}
/**


@ -57,6 +57,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_KBL;
else if (IS_BROXTON(gvt->dev_priv))
return D_BXT;
else if (IS_COFFEELAKE(gvt->dev_priv))
return D_CFL;
return 0;
}
@ -276,14 +278,12 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 old, new;
uint32_t ack_reg_offset;
u32 ack_reg_offset;
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|| IS_KABYLAKE(vgpu->gvt->dev_priv)
|| IS_BROXTON(vgpu->gvt->dev_priv)) {
if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@ -833,7 +833,7 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
}
static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
uint8_t t)
u8 t)
{
if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
/* training pattern 1 for CR */
@ -889,9 +889,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
|| IS_KABYLAKE(vgpu->gvt->dev_priv)
|| IS_BROXTON(vgpu->gvt->dev_priv))
if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@ -919,7 +917,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
if (op == GVT_AUX_NATIVE_WRITE) {
int t;
uint8_t buf[16];
u8 buf[16];
if ((addr + len + 1) >= DPCD_SIZE) {
/*
@ -1407,7 +1405,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
|| IS_KABYLAKE(vgpu->gvt->dev_priv)
|| IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@ -1431,7 +1430,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
break;
case SKL_PCODE_CDCLK_CONTROL:
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|| IS_KABYLAKE(vgpu->gvt->dev_priv))
|| IS_KABYLAKE(vgpu->gvt->dev_priv)
|| IS_COFFEELAKE(vgpu->gvt->dev_priv))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@ -3041,8 +3041,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_D(_MMIO(0x4ab8), D_KBL);
MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
return 0;
}
@ -3302,7 +3302,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret)
goto err;
} else if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)) {
|| IS_KABYLAKE(dev_priv)
|| IS_COFFEELAKE(dev_priv)) {
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;


@ -33,13 +33,19 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
enum hypervisor_type {
INTEL_GVT_HYPERVISOR_XEN = 0,
INTEL_GVT_HYPERVISOR_KVM,
};
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
void (*host_exit)(struct device *dev);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
@ -67,6 +73,5 @@ struct intel_gvt_mpt {
};
extern struct intel_gvt_mpt xengt_mpt;
extern struct intel_gvt_mpt kvmgt_mpt;
#endif /* _GVT_HYPERCALL_H_ */

View File

@ -581,9 +581,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
} else if (IS_SKYLAKE(gvt->dev_priv)
|| IS_KABYLAKE(gvt->dev_priv)
|| IS_BROXTON(gvt->dev_priv)) {
} else if (INTEL_GEN(gvt->dev_priv) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);


@ -627,6 +627,12 @@ static int intel_vgpu_open(struct mdev_device *mdev)
goto undo_iommu;
}
/* Take a module reference as mdev core doesn't take
* a reference for vendor driver.
*/
if (!try_module_get(THIS_MODULE))
goto undo_group;
ret = kvmgt_guest_init(mdev);
if (ret)
goto undo_group;
@ -679,6 +685,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
&vgpu->vdev.group_notifier);
WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
/* dereference module reference taken at open */
module_put(THIS_MODULE);
info = (struct kvmgt_guest_info *)vgpu->handle;
kvmgt_guest_exit(info);
@ -703,7 +712,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
__intel_vgpu_release(vgpu);
}
static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
u32 start_lo, start_hi;
u32 mem_type;
@ -730,10 +739,10 @@ static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
return ((u64)start_hi << 32) | start_lo;
}
static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
void *buf, unsigned int count, bool is_write)
{
uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
int ret;
if (is_write)
@ -745,13 +754,13 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
return ret;
}
static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
return off >= vgpu_aperture_offset(vgpu) &&
off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}
static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
void *buf, unsigned long count, bool is_write)
{
void *aperture_va;
@ -783,7 +792,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
@ -1029,7 +1038,7 @@ static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start,
unsigned int count, uint32_t flags,
unsigned int count, u32 flags,
void *data)
{
return 0;
@ -1037,21 +1046,21 @@ static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start,
unsigned int count, uint32_t flags, void *data)
unsigned int count, u32 flags, void *data)
{
return 0;
}
static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start, unsigned int count,
uint32_t flags, void *data)
u32 flags, void *data)
{
return 0;
}
static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start, unsigned int count,
uint32_t flags, void *data)
u32 flags, void *data)
{
struct eventfd_ctx *trigger;
@ -1070,12 +1079,12 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
return 0;
}
static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
unsigned int index, unsigned int start, unsigned int count,
void *data)
{
int (*func)(struct intel_vgpu *vgpu, unsigned int index,
unsigned int start, unsigned int count, uint32_t flags,
unsigned int start, unsigned int count, u32 flags,
void *data) = NULL;
switch (index) {
@ -1467,7 +1476,7 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
return mdev_register_device(dev, &intel_vgpu_ops);
}
static void kvmgt_host_exit(struct device *dev, void *gvt)
static void kvmgt_host_exit(struct device *dev)
{
mdev_unregister_device(dev);
}
@ -1849,7 +1858,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
return ret;
}
struct intel_gvt_mpt kvmgt_mpt = {
static struct intel_gvt_mpt kvmgt_mpt = {
.type = INTEL_GVT_HYPERVISOR_KVM,
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
.attach_vgpu = kvmgt_attach_vgpu,
@ -1868,15 +1878,17 @@ struct intel_gvt_mpt kvmgt_mpt = {
.put_vfio_device = kvmgt_put_vfio_device,
.is_valid_gfn = kvmgt_is_valid_gfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
static int __init kvmgt_init(void)
{
if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
return -ENODEV;
return 0;
}
static void __exit kvmgt_exit(void)
{
intel_gvt_unregister_hypervisor();
}
module_init(kvmgt_init);

View File

@ -57,7 +57,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes, bool read)
{
struct intel_gvt *gvt = NULL;
@ -99,7 +99,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
@ -171,7 +171,7 @@ out:
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;


@ -43,15 +43,16 @@ struct intel_vgpu;
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_BXT (1 << 3)
#define D_CFL (1 << 4)
#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT)
#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT | D_CFL)
#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT | D_CFL)
#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
#define D_PRE_SKL (D_BDW)
#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT)
#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
unsigned int);


@ -353,8 +353,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@ -391,7 +390,8 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
|| IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
return;
if (!pre && !gen9_render_mocs.initialized)
@ -457,9 +457,7 @@ static void switch_mmio(struct intel_vgpu *pre,
u32 old_v, new_v;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv))
if (INTEL_GEN(dev_priv) >= 9)
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@ -471,8 +469,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* state image on kabylake, it's initialized by lri command and
* save or restore with context together.
*/
if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
&& mmio->in_context)
if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
|| IS_COFFEELAKE(dev_priv)) && mmio->in_context)
continue;
// save
@ -565,9 +563,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
if (IS_SKYLAKE(gvt->dev_priv) ||
IS_KABYLAKE(gvt->dev_priv) ||
IS_BROXTON(gvt->dev_priv))
if (INTEL_GEN(gvt->dev_priv) >= 9)
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;


@ -50,11 +50,10 @@
* Zero on success, negative error code if failed
*/
static inline int intel_gvt_hypervisor_host_init(struct device *dev,
void *gvt, const void *ops)
void *gvt, const void *ops)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_init)
return 0;
return -ENODEV;
return intel_gvt_host.mpt->host_init(dev, gvt, ops);
}
@ -62,14 +61,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
void *gvt)
static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
intel_gvt_host.mpt->host_exit(dev, gvt);
intel_gvt_host.mpt->host_exit(dev);
}
/**
@ -362,4 +360,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn(
return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
}
int intel_gvt_register_hypervisor(struct intel_gvt_mpt *);
void intel_gvt_unregister_hypervisor(void);
#endif /* _GVT_MPT_H_ */


@ -94,7 +94,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
struct vgpu_sched_data *vgpu_data;
struct list_head *pos;
static uint64_t stage_check;
static u64 stage_check;
int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
/* The timeslice accumulation reset at stage 0, which is
@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&vgpu->gvt->sched_lock);
}


@ -299,7 +299,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
void *shadow_ring_buffer_va;
u32 *cs;
if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
|| IS_COFFEELAKE(req->i915))
&& is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
@ -939,9 +940,7 @@ static int workload_thread(void *priv)
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
|| IS_KABYLAKE(gvt->dev_priv)
|| IS_BROXTON(gvt->dev_priv);
bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
@ -997,7 +996,7 @@ complete:
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
intel_runtime_pm_put_unchecked(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@ -1451,7 +1450,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put_unchecked(dev_priv);
}
if (ret && (vgpu_is_vm_unhealthy(ret))) {


@ -61,7 +61,7 @@ struct shadow_indirect_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
void *shadow_va;
uint32_t size;
u32 size;
};
#define PER_CTX_ADDR_MASK 0xfffff000


@ -228,7 +228,7 @@ TRACE_EVENT(oos_sync,
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
u32 cmd_len, u32 buf_type, u32 buf_addr_type,
void *workload, char *cmd_name),
void *workload, const char *cmd_name),
TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
buf_addr_type, workload, cmd_name),


@ -32,6 +32,8 @@
#include "intel_drv.h"
#include "intel_guc_submission.h"
#include "i915_reset.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
return to_i915(node->minor->dev);
@ -626,10 +628,12 @@ static void gen8_display_interrupt_info(struct seq_file *m)
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv,
power_domain)) {
wakeref = intel_display_power_get_if_enabled(dev_priv,
power_domain);
if (!wakeref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@ -644,7 +648,7 @@ static void gen8_display_interrupt_info(struct seq_file *m)
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IER(pipe)));
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@ -674,11 +678,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int i, pipe;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
if (IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t pref;
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@ -694,8 +701,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
enum intel_display_power_domain power_domain;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv,
power_domain)) {
pref = intel_display_power_get_if_enabled(dev_priv,
power_domain);
if (!pref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@ -705,17 +713,17 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, pref);
}
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@ -778,10 +786,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
intel_wakeref_t pref;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv,
power_domain)) {
pref = intel_display_power_get_if_enabled(dev_priv,
power_domain);
if (!pref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@ -790,7 +800,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, pref);
}
seq_printf(m, "Master IER:\t%08x\n",
@ -877,7 +887,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
}
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
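
The conversion above recurs throughout this file: intel_runtime_pm_get() now returns an intel_wakeref_t cookie that must be handed back to intel_runtime_pm_put(), so every outstanding wakeref can be tracked to its owner, and the scoped with_intel_runtime_pm() helper covers the common bracketed case. A condensed sketch of the two forms (the function name is illustrative):

static void example_rpm_user(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* Explicit form: keep the cookie and return it on put. */
	wakeref = intel_runtime_pm_get(i915);
	/* ... access hardware ... */
	intel_runtime_pm_put(i915, wakeref);

	/* Scoped form: the helper manages the cookie. */
	with_intel_runtime_pm(i915, wakeref) {
		/* ... access hardware ... */
	}
}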
@ -950,10 +960,11 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
struct i915_gpu_state *gpu;
intel_wakeref_t wakeref;
intel_runtime_pm_get(i915);
gpu = i915_capture_gpu_state(i915);
intel_runtime_pm_put(i915);
gpu = NULL;
with_intel_runtime_pm(i915, wakeref)
gpu = i915_capture_gpu_state(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@ -1012,9 +1023,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
intel_wakeref_t wakeref;
int ret = 0;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
if (IS_GEN(dev_priv, 5)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
@ -1226,7 +1238,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@ -1265,6 +1277,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
struct intel_instdone instdone;
intel_wakeref_t wakeref;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
@ -1283,17 +1296,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
intel_runtime_pm_get(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref) {
for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
}
intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
intel_runtime_pm_put(dev_priv);
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
seq_printf(m, "Hangcheck active, timer fires in %dms\n",
jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
@ -1568,18 +1579,17 @@ static int gen6_drpc_info(struct seq_file *m)
static int i915_drpc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
int err;
intel_wakeref_t wakeref;
int err = -ENODEV;
intel_runtime_pm_get(dev_priv);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
err = vlv_drpc_info(m);
else if (INTEL_GEN(dev_priv) >= 6)
err = gen6_drpc_info(m);
else
err = ironlake_drpc_info(m);
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
err = vlv_drpc_info(m);
else if (INTEL_GEN(dev_priv) >= 6)
err = gen6_drpc_info(m);
else
err = ironlake_drpc_info(m);
}
return err;
}
@ -1601,11 +1611,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_fbc *fbc = &dev_priv->fbc;
intel_wakeref_t wakeref;
if (!HAS_FBC(dev_priv))
return -ENODEV;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
@ -1632,7 +1643,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@ -1677,11 +1688,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
if (!HAS_IPS(dev_priv))
return -ENODEV;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915_modparams.enable_ips));
@ -1695,7 +1707,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@ -1703,10 +1715,10 @@ static int i915_ips_status(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (INTEL_GEN(dev_priv) >= 9)
/* no global SR status; inspect per-plane WM */;
@ -1722,8 +1734,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
@ -1732,31 +1743,24 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_emon_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
unsigned long temp, chipset, gfx;
int ret;
struct drm_i915_private *i915 = node_to_i915(m->private);
intel_wakeref_t wakeref;
if (!IS_GEN(dev_priv, 5))
if (!IS_GEN(i915, 5))
return -ENODEV;
intel_runtime_pm_get(dev_priv);
with_intel_runtime_pm(i915, wakeref) {
unsigned long temp, chipset, gfx;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
temp = i915_mch_val(i915);
chipset = i915_chipset_val(i915);
gfx = i915_gfx_val(i915);
temp = i915_mch_val(dev_priv);
chipset = i915_chipset_val(dev_priv);
gfx = i915_gfx_val(dev_priv);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "GMCH temp: %ld\n", temp);
seq_printf(m, "Chipset power: %ld\n", chipset);
seq_printf(m, "GFX power: %ld\n", gfx);
seq_printf(m, "Total power: %ld\n", chipset + gfx);
intel_runtime_pm_put(dev_priv);
seq_printf(m, "GMCH temp: %ld\n", temp);
seq_printf(m, "Chipset power: %ld\n", chipset);
seq_printf(m, "GFX power: %ld\n", gfx);
seq_printf(m, "Total power: %ld\n", chipset + gfx);
}
return 0;
}
@ -1766,13 +1770,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
int ret;
if (!HAS_LLC(dev_priv))
return -ENODEV;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
@ -1805,7 +1810,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
mutex_unlock(&dev_priv->pcu_lock);
out:
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@ -1978,8 +1983,9 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@ -2017,7 +2023,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@ -2054,9 +2060,10 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
struct drm_file *file;
if (intel_runtime_pm_get_if_in_use(dev_priv)) {
with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
act_freq = vlv_punit_read(dev_priv,
@ -2067,7 +2074,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
act_freq = intel_get_cagf(dev_priv,
I915_READ(GEN6_RPSTAT1));
}
intel_runtime_pm_put(dev_priv);
}
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
@ -2150,6 +2156,7 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
struct drm_printer p;
if (!HAS_HUC(dev_priv))
@ -2158,9 +2165,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
intel_runtime_pm_get(dev_priv);
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref)
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
return 0;
}
@ -2168,8 +2174,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
struct drm_printer p;
u32 tmp, i;
if (!HAS_GUC(dev_priv))
return -ENODEV;
@ -2177,22 +2183,23 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
intel_runtime_pm_get(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref) {
u32 tmp = I915_READ(GUC_STATUS);
u32 i;
tmp = I915_READ(GUC_STATUS);
seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
seq_printf(m, "\tBootrom status = 0x%x\n",
(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
seq_printf(m, "\tuKernel status = 0x%x\n",
(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
seq_printf(m, "\tMIA Core status = 0x%x\n",
(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
seq_puts(m, "\nScratch registers:\n");
for (i = 0; i < 16; i++)
seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
intel_runtime_pm_put(dev_priv);
seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
seq_printf(m, "\tBootrom status = 0x%x\n",
(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
seq_printf(m, "\tuKernel status = 0x%x\n",
(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
seq_printf(m, "\tMIA Core status = 0x%x\n",
(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
seq_puts(m, "\nScratch registers:\n");
for (i = 0; i < 16; i++) {
seq_printf(m, "\t%2d: \t0x%x\n",
i, I915_READ(SOFT_SCRATCH(i)));
}
}
return 0;
}
@ -2244,7 +2251,7 @@ static void i915_guc_client_info(struct seq_file *m,
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
uint64_t tot = 0;
u64 tot = 0;
seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
client->priority, client->stage_id, client->proc_desc_offset);
@ -2499,7 +2506,8 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
u32 val, psr_status;
u32 val, status_val;
const char *status = "unknown";
if (dev_priv->psr.psr2_enabled) {
static const char * const live_status[] = {
@ -2515,14 +2523,11 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
psr_status = I915_READ(EDP_PSR2_STATUS);
val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (val < ARRAY_SIZE(live_status)) {
seq_printf(m, "Source PSR status: 0x%x [%s]\n",
psr_status, live_status[val]);
return;
}
val = I915_READ(EDP_PSR2_STATUS);
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
} else {
static const char * const live_status[] = {
"IDLE",
@ -2534,74 +2539,102 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
psr_status = I915_READ(EDP_PSR_STATUS);
val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (val < ARRAY_SIZE(live_status)) {
seq_printf(m, "Source PSR status: 0x%x [%s]\n",
psr_status, live_status[val]);
return;
}
val = I915_READ(EDP_PSR_STATUS);
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
}
seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
bool enabled = false;
bool sink_support;
struct i915_psr *psr = &dev_priv->psr;
intel_wakeref_t wakeref;
const char *status;
bool enabled;
u32 val;
if (!HAS_PSR(dev_priv))
return -ENODEV;
sink_support = dev_priv->psr.sink_support;
seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
if (!sink_support)
seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
if (psr->dp)
seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
seq_puts(m, "\n");
if (!psr->sink_support)
return 0;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&psr->lock);
mutex_lock(&dev_priv->psr.lock);
seq_printf(m, "PSR mode: %s\n",
dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
dev_priv->psr.busy_frontbuffer_bits);
if (dev_priv->psr.psr2_enabled)
enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
if (psr->enabled)
status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
else
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
status = "disabled";
seq_printf(m, "PSR mode: %s\n", status);
seq_printf(m, "Main link in standby mode: %s\n",
yesno(dev_priv->psr.link_standby));
if (!psr->enabled)
goto unlock;
seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
if (psr->psr2_enabled) {
val = I915_READ(EDP_PSR2_CTL);
enabled = val & EDP_PSR2_ENABLE;
} else {
val = I915_READ(EDP_PSR_CTL);
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
enableddisabled(enabled), val);
psr_source_status(dev_priv, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
psr->busy_frontbuffer_bits);
/*
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT) &
EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf);
val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
psr_source_status(dev_priv, m);
mutex_unlock(&dev_priv->psr.lock);
if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
if (psr->debug & I915_PSR_DEBUG_IRQ) {
seq_printf(m, "Last attempted entry at: %lld\n",
dev_priv->psr.last_entry_attempt);
seq_printf(m, "Last exit at: %lld\n",
dev_priv->psr.last_exit);
psr->last_entry_attempt);
seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
}
intel_runtime_pm_put(dev_priv);
if (psr->psr2_enabled) {
u32 su_frames_val[3];
int frame;
/*
* Reading all 3 registers beforehand to minimize crossing a
* frame boundary between register reads
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
u32 su_blocks;
su_blocks = su_frames_val[frame / 3] &
PSR2_SU_STATUS_MASK(frame);
su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
seq_printf(m, "%d\t%d\n", frame, su_blocks);
}
}
unlock:
mutex_unlock(&psr->lock);
intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
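/*
 * Sketch of the register layout the loop above relies on: each
 * 32-bit PSR2_SU_STATUS register is assumed to pack three 10-bit
 * per-frame counts, so frame N lives in register N / 3 at bit
 * offset (N % 3) * 10. A hypothetical extraction helper:
 */
static inline u32 psr2_su_blocks(const u32 *su_frames_val, int frame)
{
	return (su_frames_val[frame / 3] >> ((frame % 3) * 10)) & 0x3ff;
}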
@ -2610,6 +2643,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_modeset_acquire_ctx ctx;
intel_wakeref_t wakeref;
int ret;
if (!CAN_PSR(dev_priv))
@ -2617,7 +2651,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
@ -2632,7 +2666,7 @@ retry:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@ -2657,24 +2691,20 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
unsigned long long power;
intel_wakeref_t wakeref;
u32 units;
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
intel_runtime_pm_get(dev_priv);
if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
intel_runtime_pm_put(dev_priv);
if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
return -ENODEV;
}
units = (power & 0x1f00) >> 8;
power = I915_READ(MCH_SECP_NRG_STTS);
with_intel_runtime_pm(dev_priv, wakeref)
power = I915_READ(MCH_SECP_NRG_STTS);
power = (1000000 * power) >> units; /* convert to uJ */
intel_runtime_pm_put(dev_priv);
seq_printf(m, "%llu", power);
return 0;
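/*
 * Worked example for the conversion above: the RAPL energy-status
 * unit is 1 / 2^units Joules per count, so with units == 14 each
 * count is (1000000 >> 14) ~= 61 uJ, and a raw MCH_SECP_NRG_STTS
 * reading of 4096 yields (1000000 * 4096) >> 14 == 250000 uJ.
 */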
@ -2688,6 +2718,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
enableddisabled(!dev_priv->power_domains.wakeref));
seq_printf(m, "GPU idle: %s (epoch %u)\n",
yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "IRQs disabled: %s\n",
@ -2702,6 +2735,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
pci_power_name(pdev->current_state),
pdev->current_state);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
struct drm_printer p = drm_seq_file_printer(m);
print_intel_runtime_pm_wakeref(dev_priv, &p);
}
return 0;
}
@ -2736,6 +2775,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
static int i915_dmc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
struct intel_csr *csr;
if (!HAS_CSR(dev_priv))
@ -2743,7 +2783,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
csr = &dev_priv->csr;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
seq_printf(m, "path: %s\n", csr->fw_path);
@ -2769,7 +2809,7 @@ out:
seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@ -3052,8 +3092,10 @@ static int i915_display_info(struct seq_file *m, void *unused)
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(dev_priv);
intel_runtime_pm_get(dev_priv);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
for_each_intel_crtc(dev, crtc) {
@ -3101,7 +3143,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@ -3110,10 +3152,11 @@ static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
enum intel_engine_id id;
struct drm_printer p;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "GT awake? %s (epoch %u)\n",
yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
@ -3126,7 +3169,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
for_each_engine(engine, dev_priv, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@ -3239,20 +3282,21 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
int ret;
intel_wakeref_t wakeref;
bool enable;
int ret;
ret = kstrtobool_from_user(ubuf, len, &enable);
if (ret < 0)
return ret;
intel_runtime_pm_get(dev_priv);
if (!dev_priv->ipc_enabled && enable)
DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
dev_priv->wm.distrust_bios_wm = true;
dev_priv->ipc_enabled = enable;
intel_enable_ipc(dev_priv);
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref) {
if (!dev_priv->ipc_enabled && enable)
DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
dev_priv->wm.distrust_bios_wm = true;
dev_priv->ipc_enabled = enable;
intel_enable_ipc(dev_priv);
}
return len;
}
@ -3620,7 +3664,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
struct drm_i915_private *dev_priv = m->private;
struct drm_device *dev = &dev_priv->drm;
@ -3663,7 +3707,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@ -3678,7 +3722,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@ -3693,7 +3737,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@ -3736,12 +3780,12 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp, uint16_t wm[8])
size_t len, loff_t *offp, u16 wm[8])
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
struct drm_device *dev = &dev_priv->drm;
uint16_t new[8] = { 0 };
u16 new[8] = { 0 };
int num_levels;
int level;
int ret;
@ -3786,7 +3830,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@ -3801,7 +3845,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@ -3816,7 +3860,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@ -4018,11 +4062,12 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
intel_wakeref_t wakeref;
int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
i915_gem_set_wedged(i915);
@ -4077,7 +4122,7 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_drain_freed_objects(i915);
out:
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
return ret;
}
@ -4090,16 +4135,14 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
u32 snpcr;
intel_wakeref_t wakeref;
u32 snpcr = 0;
if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref)
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@ -4110,7 +4153,7 @@ static int
i915_cache_sharing_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
u32 snpcr;
intel_wakeref_t wakeref;
if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
@ -4118,16 +4161,17 @@ i915_cache_sharing_set(void *data, u64 val)
if (val > 3)
return -EINVAL;
intel_runtime_pm_get(dev_priv);
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
with_intel_runtime_pm(dev_priv, wakeref) {
u32 snpcr;
/* Update the cache sharing policy here as well */
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
/* Update the cache sharing policy here as well */
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
}
intel_runtime_pm_put(dev_priv);
return 0;
}
@ -4349,6 +4393,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct sseu_dev_info sseu;
intel_wakeref_t wakeref;
if (INTEL_GEN(dev_priv) < 8)
return -ENODEV;
@ -4363,20 +4408,17 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
sseu.max_eus_per_subslice =
RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
intel_runtime_pm_get(dev_priv);
if (IS_CHERRYVIEW(dev_priv)) {
cherryview_sseu_device_status(dev_priv, &sseu);
} else if (IS_BROADWELL(dev_priv)) {
broadwell_sseu_device_status(dev_priv, &sseu);
} else if (IS_GEN(dev_priv, 9)) {
gen9_sseu_device_status(dev_priv, &sseu);
} else if (INTEL_GEN(dev_priv) >= 10) {
gen10_sseu_device_status(dev_priv, &sseu);
with_intel_runtime_pm(dev_priv, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_device_status(dev_priv, &sseu);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_device_status(dev_priv, &sseu);
else if (IS_GEN(dev_priv, 9))
gen9_sseu_device_status(dev_priv, &sseu);
else if (INTEL_GEN(dev_priv) >= 10)
gen10_sseu_device_status(dev_priv, &sseu);
}
intel_runtime_pm_put(dev_priv);
i915_print_sseu_info(m, false, &sseu);
return 0;
@ -4389,7 +4431,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
intel_runtime_pm_get(i915);
file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
intel_uncore_forcewake_user_get(i915);
return 0;
@ -4403,7 +4445,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
return 0;
intel_uncore_forcewake_user_put(i915);
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915,
(intel_wakeref_t)(uintptr_t)file->private_data);
return 0;
}
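/*
 * Sketch of the cookie round-trip used by the forcewake fops above:
 * intel_wakeref_t is an integer-sized handle, so it survives being
 * stashed in the file's private_data pointer between open and
 * release. Helper names here are illustrative, not from the patch:
 */
static inline void *wakeref_to_ptr(intel_wakeref_t wf)
{
	return (void *)(uintptr_t)wf;
}

static inline intel_wakeref_t ptr_to_wakeref(const void *p)
{
	return (intel_wakeref_t)(uintptr_t)p;
}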
@ -4836,7 +4879,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
enc_to_intel_dp(&intel_attached_encoder(connector)->base);
uint8_t buf[16];
u8 buf[16];
ssize_t err;
int i;
@ -4951,9 +4994,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "DSC_Enabled: %s\n",
yesno(crtc_state->dsc_params.compression_enable));
if (intel_dp->dsc_dpcd)
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
if (!intel_dp_is_edp(intel_dp))
seq_printf(m, "FEC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c

@ -48,6 +48,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_reset.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
@ -905,6 +906,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->pps_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(dev_priv);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
@ -1779,6 +1781,9 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_unregister(dev_priv);
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
@ -1807,8 +1812,7 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_cleanup_mmio(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
intel_runtime_pm_cleanup(dev_priv);
}
static void i915_driver_release(struct drm_device *dev)
@ -2010,6 +2014,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
out:
enable_rpm_wakeref_asserts(dev_priv);
if (!dev_priv->uncore.user_forcewake.count)
intel_runtime_pm_cleanup(dev_priv);
return ret;
}
@ -2200,211 +2206,6 @@ static int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
/**
* i915_reset - reset chip after a hang
* @i915: #drm_i915_private to reset
* @stalled_mask: mask of the stalled engines with the guilty requests
* @reason: user error message for why we are resetting
*
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
*
* Caller must hold the struct_mutex.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
* - re-init hardware status page
* - re-init ring buffer
* - re-init interrupt state
* - re-init display
*/
void i915_reset(struct drm_i915_private *i915,
unsigned int stalled_mask,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
int ret;
int i;
GEM_TRACE("flags=%lx\n", error->flags);
might_sleep();
lockdep_assert_held(&i915->drm.struct_mutex);
assert_rpm_wakelock_held(i915);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
if (!test_bit(I915_RESET_HANDOFF, &error->flags))
return;
/* Clear any previous failed attempts at recovery. Time to try again. */
if (!i915_gem_unset_wedged(i915))
goto wakeup;
if (reason)
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
error->reset_count++;
ret = i915_gem_reset_prepare(i915);
if (ret) {
dev_err(i915->drm.dev, "GPU recovery failed\n");
goto taint;
}
if (!intel_has_gpu_reset(i915)) {
if (i915_modparams.reset)
dev_err(i915->drm.dev, "GPU reset not supported\n");
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
for (i = 0; i < 3; i++) {
ret = intel_gpu_reset(i915, ALL_ENGINES);
if (ret == 0)
break;
msleep(100);
}
if (ret) {
dev_err(i915->drm.dev, "Failed to reset chip\n");
goto taint;
}
/* Ok, now get things going again... */
/*
* Everything depends on having the GTT running, so we need to start
* there.
*/
ret = i915_ggtt_enable_hw(i915);
if (ret) {
DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
ret);
goto error;
}
i915_gem_reset(i915, stalled_mask);
intel_overlay_reset(i915);
/*
* Next we need to restore the context, but we don't use those
* yet either...
*
* Ring buffer needs to be re-initialized in the KMS case, or if X
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
ret = i915_gem_init_hw(i915);
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
goto error;
}
i915_queue_hangcheck(i915);
finish:
i915_gem_reset_finish(i915);
wakeup:
clear_bit(I915_RESET_HANDOFF, &error->flags);
wake_up_bit(&error->flags, I915_RESET_HANDOFF);
return;
taint:
/*
* History tells us that if we cannot reset the GPU now, we
* never will. This then impacts everything that is run
* subsequently. On failing the reset, we mark the driver
* as wedged, preventing further execution on the GPU.
* We also want to go one step further and add a taint to the
* kernel so that any subsequent faults can be traced back to
* this failure. This is important for CI, where if the
* GPU/driver fails we would like to reboot and restart testing
* rather than continue on into oblivion. For everyone else,
* the system should still plod along, but they have been warned!
*/
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
i915_gem_set_wedged(i915);
i915_retire_requests(i915);
goto finish;
}
static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
}
/**
* i915_reset_engine - reset GPU engine to recover from a hang
* @engine: engine to reset
* @msg: reason for GPU reset; or NULL for no dev_notice()
*
* Reset a specific GPU engine. Useful if a hang is detected.
* Returns zero on successful reset or otherwise an error code.
*
* Procedure is:
* - identify the request that caused the hang and drop it
* - reset engine (which will force the engine to idle)
* - re-init/configure engine
*/
int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
{
struct i915_gpu_error *error = &engine->i915->gpu_error;
struct i915_request *active_request;
int ret;
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
active_request = i915_gem_reset_prepare_engine(engine);
if (IS_ERR_OR_NULL(active_request)) {
/* Either the previous reset failed, or we pardon the reset. */
ret = PTR_ERR(active_request);
goto out;
}
if (msg)
dev_notice(engine->i915->drm.dev,
"Resetting %s for %s\n", engine->name, msg);
error->reset_engine_count[engine->id]++;
if (!engine->i915->guc.execbuf_client)
ret = intel_gt_reset_engine(engine->i915, engine);
else
ret = intel_guc_reset_engine(&engine->i915->guc, engine);
if (ret) {
/* If we fail here, we expect to fall back to a global reset */
DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
engine->i915->guc.execbuf_client ? "GuC " : "",
engine->name, ret);
goto out;
}
/*
* The request that caused the hang is stuck on elsp, we know the
* active request and can drop it, adjust head to skip the offending
* request to resume executing remaining requests in the queue.
*/
i915_gem_reset_engine(engine, active_request, true);
/*
* The engine and its registers (and workarounds in case of render)
* have been reset to their default values. Follow the init_ring
* process to program RING_MODE, HWSP and re-enable submission.
*/
ret = engine->init_hw(engine);
if (ret)
goto out;
out:
intel_engine_cancel_stop_cs(engine);
i915_gem_reset_finish_engine(engine);
return ret;
}
static int i915_pm_prepare(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@ -2965,7 +2766,7 @@ static int intel_runtime_suspend(struct device *kdev)
}
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
intel_runtime_pm_cleanup(dev_priv);
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h

@ -45,6 +45,7 @@
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
@ -90,8 +91,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20190110"
#define DRIVER_TIMESTAMP 1547162337
#define DRIVER_DATE "20190124"
#define DRIVER_TIMESTAMP 1548370857
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@ -130,6 +131,8 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
typedef depot_stack_handle_t intel_wakeref_t;
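/*
 * The cookie doubles as a stack-depot handle: in
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM builds it records the acquirer's
 * backtrace, and it is assumed to be non-zero for any held wakeref,
 * which is what lets the return value of
 * intel_runtime_pm_get_if_in_use() be tested directly for success.
 */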
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@ -331,16 +334,17 @@ struct drm_i915_display_funcs {
struct intel_csr {
struct work_struct work;
const char *fw_path;
uint32_t required_version;
uint32_t max_fw_size; /* bytes */
uint32_t *dmc_payload;
uint32_t dmc_fw_size; /* dwords */
uint32_t version;
uint32_t mmio_count;
u32 required_version;
u32 max_fw_size; /* bytes */
u32 *dmc_payload;
u32 dmc_fw_size; /* dwords */
u32 version;
u32 mmio_count;
i915_reg_t mmioaddr[8];
uint32_t mmiodata[8];
uint32_t dc_state;
uint32_t allowed_dc_mask;
u32 mmiodata[8];
u32 dc_state;
u32 allowed_dc_mask;
intel_wakeref_t wakeref;
};
enum i915_cache_level {
@ -396,7 +400,7 @@ struct intel_fbc {
struct {
unsigned int mode_flags;
uint32_t hsw_bdw_pixel_rate;
u32 hsw_bdw_pixel_rate;
} crtc;
struct {
@ -415,7 +419,7 @@ struct intel_fbc {
int y;
uint16_t pixel_blend_mode;
u16 pixel_blend_mode;
} plane;
struct {
@ -555,7 +559,7 @@ struct i915_suspend_saved_registers {
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF3[3];
uint64_t saveFENCE[I915_MAX_NUM_FENCES];
u64 saveFENCE[I915_MAX_NUM_FENCES];
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@ -818,6 +822,8 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
intel_wakeref_t wakeref;
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
struct i915_power_well *power_wells;
@ -900,9 +906,9 @@ struct i915_gem_mm {
atomic_t bsd_engine_dispatch_index;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
u32 bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;
u32 bit_6_swizzle_y;
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
@ -929,20 +935,20 @@ struct ddi_vbt_port_info {
* populate this field.
*/
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
uint8_t hdmi_level_shift;
u8 hdmi_level_shift;
uint8_t supports_dvi:1;
uint8_t supports_hdmi:1;
uint8_t supports_dp:1;
uint8_t supports_edp:1;
uint8_t supports_typec_usb:1;
uint8_t supports_tbt:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
u8 supports_dp:1;
u8 supports_edp:1;
u8 supports_typec_usb:1;
u8 supports_tbt:1;
uint8_t alternate_aux_channel;
uint8_t alternate_ddc_pin;
u8 alternate_aux_channel;
u8 alternate_ddc_pin;
uint8_t dp_boost_level;
uint8_t hdmi_boost_level;
u8 dp_boost_level;
u8 hdmi_boost_level;
int dp_max_link_rate; /* 0 for not limited by VBT */
};
@ -1033,41 +1039,41 @@ enum intel_ddb_partitioning {
struct intel_wm_level {
bool enable;
uint32_t pri_val;
uint32_t spr_val;
uint32_t cur_val;
uint32_t fbc_val;
u32 pri_val;
u32 spr_val;
u32 cur_val;
u32 fbc_val;
};
struct ilk_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
uint32_t wm_lp_spr[3];
uint32_t wm_linetime[3];
u32 wm_pipe[3];
u32 wm_lp[3];
u32 wm_lp_spr[3];
u32 wm_linetime[3];
bool enable_fbc_wm;
enum intel_ddb_partitioning partitioning;
};
struct g4x_pipe_wm {
uint16_t plane[I915_MAX_PLANES];
uint16_t fbc;
u16 plane[I915_MAX_PLANES];
u16 fbc;
};
struct g4x_sr_wm {
uint16_t plane;
uint16_t cursor;
uint16_t fbc;
u16 plane;
u16 cursor;
u16 fbc;
};
struct vlv_wm_ddl_values {
uint8_t plane[I915_MAX_PLANES];
u8 plane[I915_MAX_PLANES];
};
struct vlv_wm_values {
struct g4x_pipe_wm pipe[3];
struct g4x_sr_wm sr;
struct vlv_wm_ddl_values ddl[3];
uint8_t level;
u8 level;
bool cxsr;
};
@ -1081,10 +1087,10 @@ struct g4x_wm_values {
};
struct skl_ddb_entry {
uint16_t start, end; /* in number of blocks, 'end' is exclusive */
u16 start, end; /* in number of blocks, 'end' is exclusive */
};
static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
return entry->end - entry->start;
}
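/*
 * Since 'end' is exclusive, an entry of { .start = 0, .end = 160 }
 * spans 160 blocks, and two entries tile the DDB contiguously
 * exactly when a->end == b->start.
 */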
@ -1108,8 +1114,8 @@ struct skl_ddb_values {
};
struct skl_wm_level {
uint16_t plane_res_b;
uint8_t plane_res_l;
u16 plane_res_b;
u8 plane_res_l;
bool plane_en;
};
@ -1118,15 +1124,15 @@ struct skl_wm_params {
bool x_tiled, y_tiled;
bool rc_surface;
bool is_planar;
uint32_t width;
uint8_t cpp;
uint32_t plane_pixel_rate;
uint32_t y_min_scanlines;
uint32_t plane_bytes_per_line;
u32 width;
u8 cpp;
u32 plane_pixel_rate;
u32 y_min_scanlines;
u32 plane_bytes_per_line;
uint_fixed_16_16_t plane_blocks_per_line;
uint_fixed_16_16_t y_tile_minimum;
uint32_t linetime_us;
uint32_t dbuf_block_size;
u32 linetime_us;
u32 dbuf_block_size;
};
/*
@ -1156,6 +1162,25 @@ struct i915_runtime_pm {
atomic_t wakeref_count;
bool suspended;
bool irqs_enabled;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
* To aid detection of wakeref leaks and general misuse, we
* track all wakeref holders. With manual markup (i.e. returning
* a cookie to each rpm_get caller which they then supply to their
* paired rpm_put) we can remove corresponding pairs of entries and
* keep the array trimmed to active wakerefs.
*/
struct intel_runtime_pm_debug {
spinlock_t lock;
depot_stack_handle_t last_acquire;
depot_stack_handle_t last_release;
depot_stack_handle_t *owners;
unsigned long count;
} debug;
#endif
};
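/*
 * Sketch of the bookkeeping this enables: each rpm_get saves the
 * caller's backtrace in the stack depot and appends the returned
 * handle to @owners under @lock; the paired rpm_put removes one
 * matching handle. Whatever remains in @owners at suspend or unload
 * is a leaked wakeref with a ready-made backtrace for
 * print_intel_runtime_pm_wakeref() to report.
 */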
enum intel_pipe_crc_source {
@ -1311,6 +1336,12 @@ struct i915_perf_stream {
*/
struct list_head link;
/**
* @wakeref: As we keep the device awake while the perf stream is
* active, we track our runtime pm reference for later release.
*/
intel_wakeref_t wakeref;
/**
* @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
* properties given when opening a stream, representing the contents
@ -1484,14 +1515,14 @@ struct drm_i915_private {
* Base address of where the gmbus and gpio blocks are located (either
* on PCH or on SoC for platforms without PCH).
*/
uint32_t gpio_mmio_base;
u32 gpio_mmio_base;
/* MMIO base address for MIPI regs */
uint32_t mipi_mmio_base;
u32 mipi_mmio_base;
uint32_t psr_mmio_base;
u32 psr_mmio_base;
uint32_t pps_mmio_base;
u32 pps_mmio_base;
wait_queue_head_t gmbus_wait_queue;
@ -1746,17 +1777,17 @@ struct drm_i915_private {
* in 0.5us units for WM1+.
*/
/* primary */
uint16_t pri_latency[5];
u16 pri_latency[5];
/* sprite */
uint16_t spr_latency[5];
u16 spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
u16 cur_latency[5];
/*
* Raw watermark memory latency values
* for SKL for all 8 levels
* in 1us units.
*/
uint16_t skl_latency[8];
u16 skl_latency[8];
/* current hardware state */
union {
@ -1766,7 +1797,7 @@ struct drm_i915_private {
struct g4x_wm_values g4x;
};
uint8_t max_level;
u8 max_level;
/*
* Should be held around atomic WM register writing; also
@ -1957,7 +1988,7 @@ struct drm_i915_private {
* In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
bool awake;
intel_wakeref_t awake;
/**
* The number of times we have woken up.
@ -2584,19 +2615,7 @@ extern const struct dev_pm_ops i915_pm_ops;
extern int i915_driver_load(struct pci_dev *pdev,
const struct pci_device_id *ent);
extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern void i915_reset(struct drm_i915_private *i915,
unsigned int stalled_mask,
const char *reason);
extern int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_reset_guc(struct drm_i915_private *dev_priv);
extern int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@ -2639,20 +2658,11 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
&dev_priv->gpu_error.hangcheck_work, delay);
}
__printf(4, 5)
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
unsigned long flags,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
void i915_clear_error_registers(struct drm_i915_private *dev_priv);
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
@ -2676,45 +2686,45 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits);
u32 mask,
u32 bits);
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
u32 interrupt_mask,
u32 enabled_irq_mask);
static inline void
ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
{
ilk_update_display_irq(dev_priv, bits, bits);
}
static inline void
ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
{
ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
u32 interrupt_mask,
u32 enabled_irq_mask);
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe, uint32_t bits)
enum pipe pipe, u32 bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe, uint32_t bits)
enum pipe pipe, u32 bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
u32 interrupt_mask,
u32 enabled_irq_mask);
static inline void
ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
{
ibx_display_interrupt_update(dev_priv, bits, bits);
}
static inline void
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
{
ibx_display_interrupt_update(dev_priv, bits, 0);
}
@ -2904,8 +2914,8 @@ enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass);
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
enum i915_map_type {
@ -2974,7 +2984,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
u32 handle, u64 *offset);
int i915_gem_mmap_gtt_version(void);
void i915_gem_track_fb(struct drm_i915_gem_object *old,
@ -3017,18 +3027,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return READ_ONCE(error->reset_engine_count[engine->id]);
}
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv,
unsigned int stalled_mask);
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_reset_engine(struct intel_engine_cs *engine,
struct i915_request *request,
bool stalled);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@ -3125,7 +3125,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
uint32_t *reg_state);
u32 *reg_state);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@ -3377,10 +3377,10 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
u8 lane_lat_optim_mask);
u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c

@ -27,15 +27,6 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
@ -46,6 +37,18 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@ -138,6 +141,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
GEM_TRACE("\n");
lockdep_assert_held(&i915->drm.struct_mutex);
@ -168,14 +173,13 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
i915_pmu_gt_parked(i915);
i915_vma_parked(i915);
i915->gt.awake = false;
wakeref = fetch_and_zero(&i915->gt.awake);
GEM_BUG_ON(!wakeref);
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
intel_runtime_pm_put(i915);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return i915->gt.epoch;
}
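/*
 * For reference, fetch_and_zero() above is assumed to be the usual
 * i915 helper that returns the old value and writes zero back:
 *
 *	#define fetch_and_zero(ptr) ({			\
 *		typeof(*ptr) __T = *(ptr);		\
 *		*(ptr) = (typeof(*ptr))0;		\
 *		__T;					\
 *	})
 *
 * which is what lets gt.awake serve both as the "GT awake" flag and
 * as the wakeref cookie that must be released exactly once.
 */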
@ -200,12 +204,11 @@ void i915_gem_unpark(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
assert_rpm_wakelock_held(i915);
if (i915->gt.awake)
return;
intel_runtime_pm_get_noresume(i915);
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@ -217,9 +220,9 @@ void i915_gem_unpark(struct drm_i915_private *i915)
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!i915->gt.awake);
i915->gt.awake = true;
if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
i915->gt.epoch = 1;
@ -710,8 +713,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
uint64_t size,
uint32_t *handle_p)
u64 size,
u32 *handle_p)
{
struct drm_i915_gem_object *obj;
int ret;
@ -782,6 +785,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref;
/*
* No actual flushing is required for the GTT write domain for reads
* from the GTT domain. Writes to it "immediately" go to main memory
@ -808,13 +813,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
i915_gem_chipset_flush(dev_priv);
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
with_intel_runtime_pm(dev_priv, wakeref) {
spin_lock_irq(&dev_priv->uncore.lock);
POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
spin_unlock_irq(&dev_priv->uncore.lock);
intel_runtime_pm_put(dev_priv);
spin_unlock_irq(&dev_priv->uncore.lock);
}
}
static void
@ -1066,6 +1071,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
void __user *user_data;
@ -1076,7 +1082,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
if (ret)
return ret;
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONFAULT |
@ -1149,7 +1155,7 @@ out_unpin:
i915_vma_unpin(vma);
}
out_unlock:
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
@ -1250,6 +1256,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
u64 remain, offset;
@ -1268,13 +1275,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
if (!intel_runtime_pm_get_if_in_use(i915)) {
wakeref = intel_runtime_pm_get_if_in_use(i915);
if (!wakeref) {
ret = -EFAULT;
goto out_unlock;
}
} else {
/* No backing pages, no fallback, we must force GGTT access */
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@ -1356,7 +1364,7 @@ out_unpin:
i915_vma_unpin(vma);
}
out_rpm:
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
@ -1565,8 +1573,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_set_domain *args = data;
struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
u32 read_domains = args->read_domains;
u32 write_domain = args->write_domain;
int err;
/* Only handle setting domains to types used by the CPU. */
@ -1748,7 +1756,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (IS_ERR((void *)addr))
return addr;
args->addr_ptr = (uint64_t) addr;
args->addr_ptr = (u64)addr;
return 0;
}
@ -1861,6 +1869,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool write = area->vm_flags & VM_WRITE;
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
int ret;
@ -1890,7 +1899,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@ -1968,7 +1977,7 @@ err_unpin:
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
@ -2041,6 +2050,7 @@ void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
@ -2051,7 +2061,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
* wakeref.
*/
lockdep_assert_held(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
if (!obj->userfault_count)
goto out;
@ -2068,7 +2078,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
wmb();
out:
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
}
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
@ -2148,8 +2158,8 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
uint32_t handle,
uint64_t *offset)
u32 handle,
u64 *offset)
{
struct drm_i915_gem_object *obj;
int ret;
@ -2296,8 +2306,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
if (!pages)
return NULL;
if (IS_ERR_OR_NULL(pages))
return pages;
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
@ -2321,22 +2331,23 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
return pages;
}
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass)
{
struct sg_table *pages;
int ret;
if (i915_gem_object_has_pinned_pages(obj))
return;
return -EBUSY;
GEM_BUG_ON(obj->bind_count);
if (!i915_gem_object_has_pages(obj))
return;
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock_nested(&obj->mm.lock, subclass);
if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
ret = -EBUSY;
goto unlock;
}
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
@ -2344,11 +2355,24 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
* lists early.
*/
pages = __i915_gem_object_unset_pages(obj);
/*
* XXX Temporary hijinx to avoid updating all backends to handle
* NULL pages. In the future, when we have more asynchronous
* get_pages backends we should be better able to handle the
* cancellation of the async task in a more uniform manner.
*/
if (!pages && !i915_gem_object_needs_async_cancel(obj))
pages = ERR_PTR(-EINVAL);
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
ret = 0;
unlock:
mutex_unlock(&obj->mm.lock);
return ret;
}
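/*
 * With the int return, callers can now distinguish "pages released"
 * (0) from "still pinned" (-EBUSY) rather than the old void
 * interface failing silently; an illustrative caller:
 *
 *	if (__i915_gem_object_put_pages(obj, I915_MM_NORMAL))
 *		return false;
 */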
bool i915_sg_trim(struct sg_table *orig_st)
@ -2852,61 +2876,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
const struct i915_gem_context *ctx)
{
unsigned int score;
unsigned long prev_hang;
if (i915_gem_context_is_banned(ctx))
score = I915_CLIENT_SCORE_CONTEXT_BAN;
else
score = 0;
prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
score += I915_CLIENT_SCORE_HANG_FAST;
if (score) {
atomic_add(score, &file_priv->ban_score);
DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
ctx->name, score,
atomic_read(&file_priv->ban_score));
}
}
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
unsigned int score;
bool banned, bannable;
atomic_inc(&ctx->guilty_count);
bannable = i915_gem_context_is_bannable(ctx);
score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
/* Cool contexts don't accumulate client ban score */
if (!bannable)
return;
if (banned) {
DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
ctx->name, atomic_read(&ctx->guilty_count),
score);
i915_gem_context_set_banned(ctx);
}
if (!IS_ERR_OR_NULL(ctx->file_priv))
i915_gem_client_mark_guilty(ctx->file_priv, ctx);
}
static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
atomic_inc(&ctx->active_count);
}
struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
@ -2937,361 +2906,6 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
return active;
}
/*
* Ensure irq handler finishes, and not run again.
* Also return the active request so that we only search for it once.
*/
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
struct i915_request *request;
/*
* During the reset sequence, we must prevent the engine from
* entering RC6. As the context state is undefined until we restart
* the engine, if it does enter RC6 during the reset, the state
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
request = engine->reset.prepare(engine);
if (request && request->fence.error == -EIO)
request = ERR_PTR(-EIO); /* Previous reset failed! */
return request;
}
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
struct i915_request *request;
enum intel_engine_id id;
int err = 0;
for_each_engine(engine, dev_priv, id) {
request = i915_gem_reset_prepare_engine(engine);
if (IS_ERR(request)) {
err = PTR_ERR(request);
continue;
}
engine->hangcheck.active_request = request;
}
i915_gem_revoke_fences(dev_priv);
intel_uc_sanitize(dev_priv);
return err;
}
static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct i915_gem_context *hung_ctx = request->gem_context;
struct i915_timeline *timeline = request->timeline;
unsigned long flags;
GEM_BUG_ON(timeline == &engine->timeline);
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock(&timeline->lock);
list_for_each_entry_continue(request, &engine->timeline.requests, link)
if (request->gem_context == hung_ctx)
i915_request_skip(request, -EIO);
list_for_each_entry(request, &timeline->requests, link)
i915_request_skip(request, -EIO);
spin_unlock(&timeline->lock);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
/* Returns the request if it was guilty of the hang */
static struct i915_request *
i915_gem_reset_request(struct intel_engine_cs *engine,
struct i915_request *request,
bool stalled)
{
/* The guilty request will get skipped on a hung engine.
*
* Users of client default contexts do not rely on logical
* state preserved between batches so it is safe to execute
* queued requests following the hang. Non default contexts
* rely on preserved state, so skipping a batch loses the
* evolution of the state and it needs to be considered corrupted.
* Executing more queued batches on top of corrupted state is
* risky. But we take the risk by trying to advance through
* the queued requests in order to make the client behaviour
* more predictable around resets, by not throwing away a random
* amount of batches it has prepared for execution. Sophisticated
* clients can use gem_reset_stats_ioctl and dma fence status
* (exported via sync_file info ioctl on explicit fences) to observe
* when it loses the context state and should rebuild accordingly.
*
* The context ban, and ultimately the client ban, mechanism are safety
* valves if client submission ends up resulting in nothing more than
* subsequent hangs.
*/
if (i915_request_completed(request)) {
GEM_TRACE("%s pardoned global=%d (fence %llx:%lld), current %d\n",
engine->name, request->global_seqno,
request->fence.context, request->fence.seqno,
intel_engine_get_seqno(engine));
stalled = false;
}
if (stalled) {
i915_gem_context_mark_guilty(request->gem_context);
i915_request_skip(request, -EIO);
/* If this context is now banned, skip all pending requests. */
if (i915_gem_context_is_banned(request->gem_context))
engine_skip_context(request);
} else {
/*
* Since this is not the hung engine, it may have advanced
* since the hang declaration. Double-check by re-finding
* the active request at the time of the reset.
*/
request = i915_gem_find_active_request(engine);
if (request) {
unsigned long flags;
i915_gem_context_mark_innocent(request->gem_context);
dma_fence_set_error(&request->fence, -EAGAIN);
/* Rewind the engine to replay the incomplete rq */
spin_lock_irqsave(&engine->timeline.lock, flags);
request = list_prev_entry(request, link);
if (&request->link == &engine->timeline.requests)
request = NULL;
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
}
return request;
}
void i915_gem_reset_engine(struct intel_engine_cs *engine,
struct i915_request *request,
bool stalled)
{
if (request)
request = i915_gem_reset_request(engine, request, stalled);
/* Setup the CS to resume from the breadcrumb of the hung request */
engine->reset.reset(engine, request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv,
unsigned int stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce;
i915_gem_reset_engine(engine,
engine->hangcheck.active_request,
stalled_mask & ENGINE_MASK(id));
ce = fetch_and_zero(&engine->last_retired_context);
if (ce)
intel_context_unpin(ce);
/*
* Ostensibly, we always want a context loaded for powersaving,
* so if the engine is idle after the reset, send a request
* to load our scratch kernel_context.
*
* More mysteriously, if we leave the engine idle after a reset,
* the next userspace batch may hang, with what appears to be
* an incoherent read by the CS (presumably stale TLB). An
* empty request appears sufficient to paper over the glitch.
*/
if (intel_engine_is_idle(engine)) {
struct i915_request *rq;
rq = i915_request_alloc(engine,
dev_priv->kernel_context);
if (!IS_ERR(rq))
i915_request_add(rq);
}
}
i915_gem_restore_fences(dev_priv);
}
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv, id) {
engine->hangcheck.active_request = NULL;
i915_gem_reset_finish_engine(engine);
}
}
static void nop_submit_request(struct i915_request *request)
{
unsigned long flags;
GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
request->engine->name,
request->fence.context, request->fence.seqno);
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&request->engine->timeline.lock, flags);
__i915_request_submit(request);
intel_engine_write_global_seqno(request->engine, request->global_seqno);
spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
}
void i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("start\n");
if (GEM_SHOW_DEBUG()) {
struct drm_printer p = drm_debug_printer(__func__);
for_each_engine(engine, i915, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags))
goto out;
/*
* First, stop submission to hw, but do not yet complete requests by
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
for_each_engine(engine, i915, id)
i915_gem_reset_prepare_engine(engine);
/* Even if the GPU reset fails, it should still stop the engines */
if (INTEL_GEN(i915) >= 5)
intel_gpu_reset(i915, ALL_ENGINES);
for_each_engine(engine, i915, id) {
engine->submit_request = nop_submit_request;
engine->schedule = NULL;
}
i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_write_global_seqno, or the one
* in nop_submit_request.
*/
synchronize_rcu();
/* Mark all executing requests as skipped */
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
for_each_engine(engine, i915, id) {
i915_gem_reset_finish_engine(engine);
intel_engine_wakeup(engine);
}
out:
GEM_TRACE("end\n");
wake_up_all(&i915->gpu_error.reset_queue);
}
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
struct i915_timeline *tl;
lockdep_assert_held(&i915->drm.struct_mutex);
if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
return true;
if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
return false;
GEM_TRACE("start\n");
/*
* Before unwedging, make sure that all pending operations
* are flushed and errored out - we may have requests waiting upon
* third party fences. We marked all inflight requests as EIO, and
* every execbuf since returned EIO, for consistency we want all
* the currently pending requests to also be marked as EIO, which
* is done inside our nop_submit_request - and so we must wait.
*
* No more can be submitted until we reset the wedged bit.
*/
list_for_each_entry(tl, &i915->gt.timelines, link) {
struct i915_request *rq;
rq = i915_gem_active_peek(&tl->last_request,
&i915->drm.struct_mutex);
if (!rq)
continue;
/*
* We can't use our normal waiter as we want to
* avoid recursively trying to handle the current
* reset. The basic dma_fence_default_wait() installs
* a callback for dma_fence_signal(), which is
* triggered by our nop handler (indirectly, the
* callback enables the signaler thread which is
* woken by the nop_submit_request() advancing the seqno
* and when the seqno passes the fence, the signaler
* then signals the fence waking us up).
*/
if (dma_fence_default_wait(&rq->fence, true,
MAX_SCHEDULE_TIMEOUT) < 0)
return false;
}
i915_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests);
intel_engines_sanitize(i915, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
* being queued (by disallowing execbuf whilst wedged) so having
* waited for all active requests above, we know the system is idle
* and do not have to worry about a thread being inside
* engine->submit_request() as we swap over. So unlike installing
* the nop_submit_request on reset, we can do this from normal
* context and do not require stop_machine().
*/
intel_engines_reset_default_submission(i915);
i915_gem_contexts_lost(i915);
GEM_TRACE("end\n");
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
return true;
}
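For orientation, once the wedged bit is set every submission path refuses new work; a sketch of the check callers make, using the i915_terminally_wedged() helper as it appears in the i915_handle_error() code shown later in this diff (the surrounding call site is illustrative only):

	/* Sketch: submission paths bail out once the device is wedged. */
	if (i915_terminally_wedged(&i915->gpu_error))
		return -EIO;	/* no new work until i915_gem_unset_wedged() succeeds */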
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
@ -4703,8 +4317,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
intel_wakeref_t wakeref;
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn;
@ -4765,7 +4380,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (on)
cond_resched();
}
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
}
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
@ -4874,11 +4489,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
GEM_TRACE("\n");
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
/*
@ -4901,7 +4518,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
intel_engines_sanitize(i915, false);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
@ -4909,11 +4526,12 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
int i915_gem_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
int ret;
GEM_TRACE("\n");
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
intel_suspend_gt_powersave(i915);
mutex_lock(&i915->drm.struct_mutex);
@ -4965,12 +4583,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
if (WARN_ON(!intel_engines_are_idle(i915)))
i915_gem_set_wedged(i915); /* no hope, discard everything */
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
return 0;
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
return ret;
}
@ -5681,6 +5299,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
mutex_init(&dev_priv->gpu_error.wedge_mutex);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
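Taken together, the hunks above convert the runtime-PM API from a void get/put pair to a cookie-tracked one, so every wakeref can be accounted for. A minimal sketch of the two usage patterns this diff relies on (signatures as shown in the hunks; the wrapper function name and body are hypothetical):

	static void example_hw_access(struct drm_i915_private *i915)
	{
		intel_wakeref_t wakeref;

		/* Explicit form: the cookie from get() must be handed back to put(). */
		wakeref = intel_runtime_pm_get(i915);
		/* ... touch hardware ... */
		intel_runtime_pm_put(i915, wakeref);

		/* Scoped form, used by several hunks below. */
		with_intel_runtime_pm(i915, wakeref) {
			/* ... touch hardware ... */
		}
	}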

@ -321,6 +321,14 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
return desc;
}
void
intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
ce->gem_context = ctx;
}
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
@ -338,11 +346,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
ce->gem_context = ctx;
}
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);

@ -364,4 +364,8 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
#endif /* !__I915_GEM_CONTEXT_H__ */

@ -2202,6 +2202,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct sync_file *out_fence = NULL;
intel_wakeref_t wakeref;
int out_fence_fd = -1;
int err;
@ -2272,7 +2273,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
intel_runtime_pm_get(eb.i915);
wakeref = intel_runtime_pm_get(eb.i915);
err = i915_mutex_lock_interruptible(dev);
if (err)
@ -2424,7 +2425,7 @@ err_vma:
eb_release_vmas(&eb);
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(eb.i915);
intel_runtime_pm_put(eb.i915, wakeref);
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);

@ -209,6 +209,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
int ret;
if (vma) {
@ -256,9 +257,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
*/
if (intel_runtime_pm_get_if_in_use(fence->i915)) {
wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
if (wakeref) {
fence_write(fence, vma);
intel_runtime_pm_put(fence->i915);
intel_runtime_pm_put(fence->i915, wakeref);
}
if (vma) {
@ -553,8 +555,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
void
i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
@ -577,7 +579,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
uint32_t dimm_c0, dimm_c1;
u32 dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
@ -609,7 +611,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev_priv) ||
IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
uint32_t dcc;
u32 dcc;
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU

@ -37,6 +37,7 @@
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@ -473,8 +474,7 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
spin_unlock(&vm->free_pages.lock);
}
static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv)
static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@ -482,6 +482,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
* attempt holding the lock is immediately reported by lockdep.
*/
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
GEM_BUG_ON(!vm->total);
@ -1547,7 +1548,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
/* From bdw, there is support for read-only pages in the PPGTT. */
ppgtt->vm.has_read_only = true;
i915_address_space_init(&ppgtt->vm, i915);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
@ -1996,7 +1997,7 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
i915_address_space_init(&ppgtt->base.vm, i915);
i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
@ -2527,6 +2528,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
{
struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
intel_wakeref_t wakeref;
u32 pte_flags;
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@ -2534,9 +2536,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
with_intel_runtime_pm(i915, wakeref)
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@ -2553,10 +2554,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
intel_wakeref_t wakeref;
intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
intel_runtime_pm_put(i915);
with_intel_runtime_pm(i915, wakeref)
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
@ -2588,9 +2589,12 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
}
if (flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
intel_wakeref_t wakeref;
with_intel_runtime_pm(i915, wakeref) {
vma->vm->insert_entries(vma->vm, vma,
cache_level, pte_flags);
}
}
return 0;
@ -2601,9 +2605,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
if (vma->flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
intel_runtime_pm_put(i915);
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref;
with_intel_runtime_pm(i915, wakeref)
vm->clear_range(vm, vma->node.start, vma->size);
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
@ -3227,7 +3233,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
@ -3428,7 +3435,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* and beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
i915_address_space_init(&ggtt->vm, dev_priv);
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
ggtt->vm.is_ggtt = true;

@ -288,6 +288,8 @@ struct i915_address_space {
bool closed;
struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
u64 scratch_pte;
struct i915_page_dma scratch_page;
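The two VM_CLASS_* subclasses exist so lockdep can tell a GGTT vm->mutex apart from a ppGTT vm->mutex. A hypothetical illustration (not code from this series; the ggtt/ppgtt pointers are stand-ins) of the nesting that distinct subclasses keep free of false-positive recursive-locking reports:

	mutex_lock(&ggtt->vm.mutex);	/* lockdep subclass VM_CLASS_GGTT */
	mutex_lock(&ppgtt->vm.mutex);	/* lockdep subclass VM_CLASS_PPGTT */
	/* ... operate on both address spaces ... */
	mutex_unlock(&ppgtt->vm.mutex);
	mutex_unlock(&ggtt->vm.mutex);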

@ -57,6 +57,7 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@ -387,6 +388,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{

@ -153,6 +153,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
{ &i915->mm.bound_list, I915_SHRINK_BOUND },
{ NULL, 0 },
}, *phase;
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
bool unlock;
@ -182,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
* device just to recover a little memory. If absolutely necessary,
* we will force the wake during oom-notifier.
*/
if ((flags & I915_SHRINK_BOUND) &&
!intel_runtime_pm_get_if_in_use(i915))
flags &= ~I915_SHRINK_BOUND;
if (flags & I915_SHRINK_BOUND) {
wakeref = intel_runtime_pm_get_if_in_use(i915);
if (!wakeref)
flags &= ~I915_SHRINK_BOUND;
}
/*
* As we may completely rewrite the (un)bound list whilst unbinding
@ -265,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
}
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
i915_retire_requests(i915);
@ -292,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
*/
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
unsigned long freed;
intel_wakeref_t wakeref;
unsigned long freed = 0;
intel_runtime_pm_get(i915);
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
intel_runtime_pm_put(i915);
with_intel_runtime_pm(i915, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
}
return freed;
}
@ -370,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_runtime_pm_get(i915);
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
intel_runtime_pm_put(i915);
intel_wakeref_t wakeref;
with_intel_runtime_pm(i915, wakeref) {
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
}
}
shrinker_unlock(i915, unlock);
@ -392,12 +398,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
intel_wakeref_t wakeref;
intel_runtime_pm_get(i915);
freed_pages = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
intel_runtime_pm_put(i915);
freed_pages = 0;
with_intel_runtime_pm(i915, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
@ -435,6 +442,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
intel_wakeref_t wakeref;
bool unlock;
if (!shrinker_lock(i915, 0, &unlock))
@ -446,12 +454,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
MAX_SCHEDULE_TIMEOUT))
goto out;
intel_runtime_pm_get(i915);
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_VMAPS);
intel_runtime_pm_put(i915);
with_intel_runtime_pm(i915, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_VMAPS);
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,

@ -49,77 +49,67 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root_cached objects;
struct workqueue_struct *wq;
struct i915_mm_struct *mm;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mn;
struct drm_i915_gem_object *obj;
struct interval_tree_node it;
struct list_head link;
struct work_struct work;
bool attached;
};
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
struct work_struct *active;
/* Cancel any active worker and force us to re-evaluate gup */
mutex_lock(&obj->mm.lock);
active = fetch_and_zero(&obj->userptr.work);
mutex_unlock(&obj->mm.lock);
if (active)
goto out;
i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
mutex_lock(&obj->base.dev->struct_mutex);
/* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
WARN_ONCE(i915_gem_object_has_pages(obj),
"Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
obj->bind_count,
atomic_read(&obj->mm.pages_pin_count),
obj->pin_global);
mutex_unlock(&obj->base.dev->struct_mutex);
out:
i915_gem_object_put(obj);
}
static void add_object(struct i915_mmu_object *mo)
{
if (mo->attached)
return;
GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
interval_tree_insert(&mo->it, &mo->mn->objects);
mo->attached = true;
}
static void del_object(struct i915_mmu_object *mo)
{
if (!mo->attached)
if (RB_EMPTY_NODE(&mo->it.rb))
return;
interval_tree_remove(&mo->it, &mo->mn->objects);
mo->attached = false;
RB_CLEAR_NODE(&mo->it.rb);
}
static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
const struct mmu_notifier_range *range)
static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
struct i915_mmu_object *mo = obj->userptr.mmu_object;
/*
* During mm_invalidate_range we need to cancel any userptr that
* overlaps the range being invalidated. Doing so requires the
* struct_mutex, and that risks recursion. In order to cause
* recursion, the user must alias the userptr address space with
* a GTT mmapping (possible with a MAP_FIXED) - then when we have
* to invalidate that mmapping, mm_invalidate_range is called with
* the userptr address *and* the struct_mutex held. To prevent that
* we set a flag under the i915_mmu_notifier spinlock to indicate
* whether this object is valid.
*/
if (!mo)
return;
spin_lock(&mo->mn->lock);
if (value)
add_object(mo);
else
del_object(mo);
spin_unlock(&mo->mn->lock);
}
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
const struct mmu_notifier_range *range)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct i915_mmu_object *mo;
struct interval_tree_node *it;
LIST_HEAD(cancelled);
struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
return 0;
@ -130,11 +120,15 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects, range->start, end);
while (it) {
struct drm_i915_gem_object *obj;
if (!range->blockable) {
spin_unlock(&mn->lock);
return -EAGAIN;
ret = -EAGAIN;
break;
}
/* The mmu_object is released late when destroying the
/*
* The mmu_object is released late when destroying the
* GEM object so it is entirely possible to gain a
* reference on an object in the process of being freed
* since our serialisation is via the spinlock and not
@ -143,29 +137,65 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
* use-after-free we only acquire a reference on the
* object if it is not in the process of being destroyed.
*/
mo = container_of(it, struct i915_mmu_object, it);
if (kref_get_unless_zero(&mo->obj->base.refcount))
queue_work(mn->wq, &mo->work);
obj = container_of(it, struct i915_mmu_object, it)->obj;
if (!kref_get_unless_zero(&obj->base.refcount)) {
it = interval_tree_iter_next(it, range->start, end);
continue;
}
spin_unlock(&mn->lock);
list_add(&mo->link, &cancelled);
it = interval_tree_iter_next(it, range->start, end);
if (!unlock) {
unlock = &mn->mm->i915->drm.struct_mutex;
switch (mutex_trylock_recursive(unlock)) {
default:
case MUTEX_TRYLOCK_FAILED:
if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
i915_gem_object_put(obj);
return -EINTR;
}
/* fall through */
case MUTEX_TRYLOCK_SUCCESS:
break;
case MUTEX_TRYLOCK_RECURSIVE:
unlock = ERR_PTR(-EEXIST);
break;
}
}
ret = i915_gem_object_unbind(obj);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
if (ret)
goto unlock;
spin_lock(&mn->lock);
/*
* As we do not (yet) protect the mmu from concurrent insertion
* over this range, there is no guarantee that this search will
* terminate given a pathological workload.
*/
it = interval_tree_iter_first(&mn->objects, range->start, end);
}
list_for_each_entry(mo, &cancelled, link)
del_object(mo);
spin_unlock(&mn->lock);
if (!list_empty(&cancelled))
flush_workqueue(mn->wq);
unlock:
if (!IS_ERR_OR_NULL(unlock))
mutex_unlock(unlock);
return ret;
return 0;
}
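Reduced to its essentials, the struct_mutex handling above follows the trylock-recursive pattern (a sketch with error paths elided; `m` stands in for the struct_mutex pointer), which lets the notifier run whether or not the current task already holds the lock:

	struct mutex *unlock = NULL;

	switch (mutex_trylock_recursive(m)) {
	case MUTEX_TRYLOCK_FAILED:		/* contended: sleep for it */
		mutex_lock_killable_nested(m, I915_MM_SHRINKER);
		/* fall through */
	case MUTEX_TRYLOCK_SUCCESS:
		unlock = m;			/* we took it, so we drop it */
		break;
	case MUTEX_TRYLOCK_RECURSIVE:
		unlock = ERR_PTR(-EEXIST);	/* already held further up our stack */
		break;
	}
	/* ... do the work ... */
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);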
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
.invalidate_range_start = userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
struct i915_mmu_notifier *mn;
@ -176,13 +206,7 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT_CACHED;
mn->wq = alloc_workqueue("i915-userptr-release",
WQ_UNBOUND | WQ_MEM_RECLAIM,
0);
if (mn->wq == NULL) {
kfree(mn);
return ERR_PTR(-ENOMEM);
}
mn->mm = mm;
return mn;
}
@ -192,16 +216,14 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
struct i915_mmu_object *mo;
mo = obj->userptr.mmu_object;
if (mo == NULL)
mo = fetch_and_zero(&obj->userptr.mmu_object);
if (!mo)
return;
spin_lock(&mo->mn->lock);
del_object(mo);
spin_unlock(&mo->mn->lock);
kfree(mo);
obj->userptr.mmu_object = NULL;
}
static struct i915_mmu_notifier *
@ -214,7 +236,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
if (mn)
return mn;
mn = i915_mmu_notifier_create(mm->mm);
mn = i915_mmu_notifier_create(mm);
if (IS_ERR(mn))
err = PTR_ERR(mn);
@ -237,10 +259,8 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
if (mn && !IS_ERR(mn)) {
destroy_workqueue(mn->wq);
if (mn && !IS_ERR(mn))
kfree(mn);
}
return err ? ERR_PTR(err) : mm->mn;
}
@ -263,14 +283,14 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
return PTR_ERR(mn);
mo = kzalloc(sizeof(*mo), GFP_KERNEL);
if (mo == NULL)
if (!mo)
return -ENOMEM;
mo->mn = mn;
mo->obj = obj;
mo->it.start = obj->userptr.ptr;
mo->it.last = obj->userptr.ptr + obj->base.size - 1;
INIT_WORK(&mo->work, cancel_userptr);
RB_CLEAR_NODE(&mo->it.rb);
obj->userptr.mmu_object = mo;
return 0;
@ -284,12 +304,16 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
return;
mmu_notifier_unregister(&mn->mn, mm);
destroy_workqueue(mn->wq);
kfree(mn);
}
#else
static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
@ -458,42 +482,6 @@ alloc_table:
return st;
}
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
bool value)
{
int ret = 0;
/* During mm_invalidate_range we need to cancel any userptr that
* overlaps the range being invalidated. Doing so requires the
* struct_mutex, and that risks recursion. In order to cause
* recursion, the user must alias the userptr address space with
* a GTT mmapping (possible with a MAP_FIXED) - then when we have
* to invalidate that mmapping, mm_invalidate_range is called with
* the userptr address *and* the struct_mutex held. To prevent that
* we set a flag under the i915_mmu_notifier spinlock to indicate
* whether this object is valid.
*/
#if defined(CONFIG_MMU_NOTIFIER)
if (obj->userptr.mmu_object == NULL)
return 0;
spin_lock(&obj->userptr.mmu_object->mn->lock);
/* In order to serialise get_pages with an outstanding
* cancel_userptr, we must drop the struct_mutex and try again.
*/
if (!value)
del_object(obj->userptr.mmu_object);
else if (!work_pending(&obj->userptr.mmu_object->work))
add_object(obj->userptr.mmu_object);
else
ret = -EAGAIN;
spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif
return ret;
}
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
@ -679,8 +667,11 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
BUG_ON(obj->userptr.work != NULL);
/* Cancel any inflight work and force them to restart their gup */
obj->userptr.work = NULL;
__i915_gem_userptr_set_active(obj, false);
if (!pages)
return;
if (obj->mm.madv != I915_MADV_WILLNEED)
obj->mm.dirty = false;
@ -718,7 +709,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,

@ -1082,7 +1082,7 @@ i915_error_object_create(struct drm_i915_private *i915,
/* The error capture is special as it tries to run underneath the normal
* locking rules - so we use the raw version of the i915_gem_active lookup.
*/
static inline uint32_t
static inline u32
__active_get_seqno(struct i915_gem_active *active)
{
struct i915_request *request;
@ -1153,11 +1153,11 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
*
* It's only a small step better than a random number in its current form.
*/
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error,
int *engine_id)
static u32 i915_error_generate_code(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error,
int *engine_id)
{
uint32_t error_code = 0;
u32 error_code = 0;
int i;
/* IPEHR would be an ideal way to detect errors, as it's the gross

@ -271,8 +271,8 @@ struct i915_gpu_error {
#define I915_RESET_BACKOFF 0
#define I915_RESET_HANDOFF 1
#define I915_RESET_MODESET 2
#define I915_RESET_ENGINE 3
#define I915_WEDGED (BITS_PER_LONG - 1)
#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
/** Number of times an engine has been reset */
u32 reset_engine_count[I915_NUM_ENGINES];
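The reshuffled layout packs the per-engine reset bits directly below the wedged bit at the top of the flags word. Illustratively (assuming BITS_PER_LONG == 64 and, say, I915_NUM_ENGINES == 8), I915_WEDGED is bit 63, I915_RESET_ENGINE is bit 55, and engine N guards bit 55 + N:

	/* Per-engine reset guard, as used by i915_handle_error() elsewhere in this diff. */
	set_bit(I915_RESET_ENGINE + engine->id, &error->flags);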
@ -283,6 +283,8 @@ struct i915_gpu_error {
/** Reason for the current *global* reset */
const char *reason;
struct mutex wedge_mutex; /* serialises wedging/unwedging */
/**
* Waitqueue to signal when a hang is detected. Used for waiters
* to release the struct_mutex for the reset to proceed.

@ -223,10 +223,10 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits)
u32 mask,
u32 bits)
{
uint32_t val;
u32 val;
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(bits & ~mask);
@ -250,8 +250,8 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* version is also available.
*/
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits)
u32 mask,
u32 bits)
{
spin_lock_irq(&dev_priv->irq_lock);
i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
@ -300,10 +300,10 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
u32 interrupt_mask,
u32 enabled_irq_mask)
{
uint32_t new_val;
u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@ -330,8 +330,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
u32 interrupt_mask,
u32 enabled_irq_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@ -345,13 +345,13 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
POSTING_READ_FW(GTIMR);
}
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
@ -390,10 +390,10 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
u32 interrupt_mask,
u32 enabled_irq_mask)
{
uint32_t new_val;
u32 new_val;
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@ -577,11 +577,11 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
u32 interrupt_mask,
u32 enabled_irq_mask)
{
uint32_t new_val;
uint32_t old_val;
u32 new_val;
u32 old_val;
lockdep_assert_held(&dev_priv->irq_lock);
@ -611,10 +611,10 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
*/
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
u32 interrupt_mask,
u32 enabled_irq_mask)
{
uint32_t new_val;
u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@ -641,10 +641,10 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
u32 interrupt_mask,
u32 enabled_irq_mask)
{
uint32_t sdeimr = I915_READ(SDEIMR);
u32 sdeimr = I915_READ(SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@ -1182,8 +1182,7 @@ static void notify_ring(struct intel_engine_cs *engine)
struct i915_request *waiter = wait->request;
if (waiter &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&waiter->fence.flags) &&
!i915_request_signaled(waiter) &&
intel_wait_check_request(wait, waiter))
rq = i915_request_get(waiter);
@ -1368,8 +1367,8 @@ static void ivybridge_parity_work(struct work_struct *work)
container_of(work, typeof(*dev_priv), l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[6];
uint32_t misccpctl;
uint8_t slice = 0;
u32 misccpctl;
u8 slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
@ -1730,13 +1729,13 @@ static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4)
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
uint32_t crcs[5];
u32 crcs[5];
spin_lock(&pipe_crc->lock);
/*
@ -1768,9 +1767,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4) {}
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4) {}
#endif
@ -1796,7 +1795,7 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
uint32_t res1, res2;
u32 res1, res2;
if (INTEL_GEN(dev_priv) >= 3)
res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
@ -2930,46 +2929,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
struct wedge_me {
struct delayed_work work;
struct drm_i915_private *i915;
const char *name;
};
static void wedge_me(struct work_struct *work)
{
struct wedge_me *w = container_of(work, typeof(*w), work.work);
dev_err(w->i915->drm.dev,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
i915_gem_set_wedged(w->i915);
}
static void __init_wedge(struct wedge_me *w,
struct drm_i915_private *i915,
long timeout,
const char *name)
{
w->i915 = i915;
w->name = name;
INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
schedule_delayed_work(&w->work, timeout);
}
static void __fini_wedge(struct wedge_me *w)
{
cancel_delayed_work_sync(&w->work);
destroy_delayed_work_on_stack(&w->work);
w->i915 = NULL;
}
#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
(W)->i915; \
__fini_wedge((W)))
static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
@ -3180,203 +3139,6 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
static void i915_reset_device(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *reason)
{
struct i915_gpu_error *error = &dev_priv->gpu_error;
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
struct wedge_me w;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
intel_prepare_reset(dev_priv);
error->reason = reason;
error->stalled_mask = engine_mask;
/* Signal that locked waiters should reset the GPU */
smp_mb__before_atomic();
set_bit(I915_RESET_HANDOFF, &error->flags);
wake_up_all(&error->wait_queue);
/* Wait for anyone holding the lock to wake up, without
* blocking indefinitely on struct_mutex.
*/
do {
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
i915_reset(dev_priv, engine_mask, reason);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
} while (wait_on_bit_timeout(&error->flags,
I915_RESET_HANDOFF,
TASK_UNINTERRUPTIBLE,
1));
error->stalled_mask = 0;
error->reason = NULL;
intel_finish_reset(dev_priv);
}
if (!test_bit(I915_WEDGED, &error->flags))
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
u32 eir;
if (!IS_GEN(dev_priv, 2))
I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
if (INTEL_GEN(dev_priv) < 4)
I915_WRITE(IPEIR, I915_READ(IPEIR));
else
I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
I915_WRITE(EIR, I915_READ(EIR));
eir = I915_READ(EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
}
if (INTEL_GEN(dev_priv) >= 8) {
I915_WRITE(GEN8_RING_FAULT_REG,
I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
POSTING_READ(GEN8_RING_FAULT_REG);
} else if (INTEL_GEN(dev_priv) >= 6) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
I915_WRITE(RING_FAULT_REG(engine),
I915_READ(RING_FAULT_REG(engine)) &
~RING_FAULT_VALID);
}
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
}
/**
* i915_handle_error - handle a gpu error
* @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
* @flags: control flags
* @fmt: Error message format string
*
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
unsigned long flags,
const char *fmt, ...)
{
struct intel_engine_cs *engine;
unsigned int tmp;
char error_msg[80];
char *msg = NULL;
if (fmt) {
va_list args;
va_start(args, fmt);
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
msg = error_msg;
}
/*
* In most cases it's guaranteed that we get here with an RPM
* reference held, for example because there is a pending GPU
* request that won't finish until the reset is done. This
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
intel_runtime_pm_get(dev_priv);
engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(dev_priv, engine_mask, msg);
i915_clear_error_registers(dev_priv);
}
/*
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
if (intel_has_reset_engine(dev_priv) &&
!i915_terminally_wedged(&dev_priv->gpu_error)) {
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&dev_priv->gpu_error.flags))
continue;
if (i915_reset_engine(engine, msg) == 0)
engine_mask &= ~intel_engine_flag(engine);
clear_bit(I915_RESET_ENGINE + engine->id,
&dev_priv->gpu_error.flags);
wake_up_bit(&dev_priv->gpu_error.flags,
I915_RESET_ENGINE + engine->id);
}
}
if (!engine_mask)
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
wait_event(dev_priv->gpu_error.reset_queue,
!test_bit(I915_RESET_BACKOFF,
&dev_priv->gpu_error.flags));
goto out;
}
/* Prevent any other reset-engine attempt. */
for_each_engine(engine, dev_priv, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&dev_priv->gpu_error.flags))
wait_on_bit(&dev_priv->gpu_error.flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
i915_reset_device(dev_priv, engine_mask, msg);
for_each_engine(engine, dev_priv, tmp) {
clear_bit(I915_RESET_ENGINE + engine->id,
&dev_priv->gpu_error.flags);
}
clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
wake_up_all(&dev_priv->gpu_error.reset_queue);
out:
intel_runtime_pm_put(dev_priv);
}
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@ -3409,7 +3171,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@ -3471,7 +3233,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@ -3689,7 +3451,7 @@ static void gen11_irq_reset(struct drm_device *dev)
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@ -4158,7 +3920,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
/* These are interrupts we'll toggle with the ring mask register */
uint32_t gt_interrupts[] = {
u32 gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
@ -4186,8 +3948,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
uint32_t de_pipe_enables;
u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
@ -4327,6 +4089,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
gen11_master_intr_enable(dev_priv->regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
}

@ -1365,7 +1365,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, stream->wakeref);
if (stream->ctx)
oa_put_render_ctx_id(stream);
@ -2087,7 +2087,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
intel_runtime_pm_get(dev_priv);
stream->wakeref = intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
@ -2123,7 +2123,7 @@ err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, stream->wakeref);
err_config:
if (stream->ctx)
@ -3021,7 +3021,7 @@ static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
(addr >= 0x182300 && addr <= 0x1823A4);
}
static uint32_t mask_reg_value(u32 reg, u32 val)
static u32 mask_reg_value(u32 reg, u32 val)
{
/* HALF_SLICE_CHICKEN2 is programmed with the
* WaDisableSTUnitPowerOptimization workaround. Make sure the value

@ -167,6 +167,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
bool fw = false;
if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
@ -175,7 +176,8 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
if (!dev_priv->gt.awake)
return;
if (!intel_runtime_pm_get_if_in_use(dev_priv))
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return;
for_each_engine(engine, dev_priv, id) {
@ -210,7 +212,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
if (fw)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
}
static void
@ -227,11 +229,12 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
u32 val;
val = dev_priv->gt_pm.rps.cur_freq;
if (dev_priv->gt.awake &&
intel_runtime_pm_get_if_in_use(dev_priv)) {
val = intel_get_cagf(dev_priv,
I915_READ_NOTRACE(GEN6_RPSTAT1));
intel_runtime_pm_put(dev_priv);
if (dev_priv->gt.awake) {
intel_wakeref_t wakeref;
with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
val = intel_get_cagf(dev_priv,
I915_READ_NOTRACE(GEN6_RPSTAT1));
}
add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
@ -443,12 +446,14 @@ static u64 __get_rc6(struct drm_i915_private *i915)
static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
intel_wakeref_t wakeref;
unsigned long flags;
u64 val;
if (intel_runtime_pm_get_if_in_use(i915)) {
wakeref = intel_runtime_pm_get_if_in_use(i915);
if (wakeref) {
val = __get_rc6(i915);
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
/*
* If we are coming back from being runtime suspended we must

@ -117,14 +117,14 @@
*/
typedef struct {
uint32_t reg;
u32 reg;
} i915_reg_t;
#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
#define INVALID_MMIO_REG _MMIO(0)
static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
return reg.reg;
}
@ -1814,7 +1814,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40
#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40
#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840
#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \
#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_B_GRP_OFFSET, \
_CNL_PORT_TX_B_GRP_OFFSET, \
@ -1822,7 +1822,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_F_GRP_OFFSET) + \
4 * (dw))
#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \
#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
@ -1858,9 +1858,9 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW4_LN0_AE 0x162450
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@ -1888,8 +1888,8 @@ enum i915_power_well_id {
#define RTERM_SELECT(x) ((x) << 3)
#define RTERM_SELECT_MASK (0x7 << 3)
#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
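The argument swap above makes _CNL_PORT_TX_DW_GRP()/_CNL_PORT_TX_DW_LN0() take the dword index first, matching the ICL macros alongside them. As a check against the definitions in this hunk, CNL_PORT_TX_DW4_GRP(PORT_B) now expands to _CNL_PORT_TX_DW_GRP(4, PORT_B), i.e. _PICK(PORT_B, ...) + 4 * 4: the port still picks the base offset and the dword still supplies the 4-byte stride, so only the call convention changes, not the computed register.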
@ -4272,6 +4272,15 @@ enum {
#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
#define _PSR2_SU_STATUS_0 0x6F914
#define _PSR2_SU_STATUS_1 0x6F918
#define _PSR2_SU_STATUS_2 0x6F91C
#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1))
#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
#define PSR2_SU_STATUS_FRAMES 8
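A worked example of the new PSR2 selective-update status helpers, following directly from the defines above: for frame 5, PSR2_SU_STATUS(5) selects _PSR2_SU_STATUS(5 / 3) = _PSR2_SU_STATUS(1), i.e. register 0x6F918; PSR2_SU_STATUS_SHIFT(5) = (5 % 3) * 10 = 20; and PSR2_SU_STATUS_MASK(5) = 0x3ff << 20. Each of the three registers thus packs three 10-bit frame counters, covering the eight frames of PSR2_SU_STATUS_FRAMES.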
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
@ -4687,7 +4696,6 @@ enum {
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define _PP_ON_DELAYS 0x61208

@ -29,6 +29,7 @@
#include <linux/sched/signal.h>
#include "i915_drv.h"
#include "i915_reset.h"
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
@ -197,7 +198,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
spin_unlock(&engine->timeline.lock);
spin_lock(&rq->lock);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
if (!i915_request_signaled(rq))
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
intel_engine_cancel_signaling(rq);
@ -342,6 +343,13 @@ static void move_to_timeline(struct i915_request *request,
spin_unlock(&request->timeline->lock);
}
static u32 next_global_seqno(struct i915_timeline *tl)
{
if (!++tl->seqno)
++tl->seqno;
return tl->seqno;
}
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@ -358,7 +366,7 @@ void __i915_request_submit(struct i915_request *request)
GEM_BUG_ON(request->global_seqno);
seqno = timeline_get_seqno(&engine->timeline);
seqno = next_global_seqno(&engine->timeline);
GEM_BUG_ON(!seqno);
GEM_BUG_ON(intel_engine_signaled(engine, seqno));
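Note that next_global_seqno() deliberately skips zero on wrap, since a global_seqno of 0 means "not yet submitted" (see the GEM_BUG_ON(request->global_seqno) before assignment and GEM_BUG_ON(!seqno) after): if tl->seqno is 0xffffffff, the first increment wraps to 0, the if fires, and the second increment hands out 1.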

@ -280,6 +280,11 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
static inline bool i915_request_signaled(const struct i915_request *rq)
{
return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}
static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
u32 seqno);
static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,

(diff suppressed by the web viewer: file too large)

@ -0,0 +1,56 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2008-2018 Intel Corporation
*/
#ifndef I915_RESET_H
#define I915_RESET_H
#include <linux/compiler.h>
#include <linux/types.h>
struct drm_i915_private;
struct intel_engine_cs;
struct intel_guc;
__printf(4, 5)
void i915_handle_error(struct drm_i915_private *i915,
u32 engine_mask,
unsigned long flags,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
void i915_clear_error_registers(struct drm_i915_private *i915);
void i915_reset(struct drm_i915_private *i915,
unsigned int stalled_mask,
const char *reason);
int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
bool intel_has_gpu_reset(struct drm_i915_private *i915);
bool intel_has_reset_engine(struct drm_i915_private *i915);
int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
int intel_reset_guc(struct drm_i915_private *i915);
struct i915_wedge_me {
struct delayed_work work;
struct drm_i915_private *i915;
const char *name;
};
void __i915_init_wedge(struct i915_wedge_me *w,
struct drm_i915_private *i915,
long timeout,
const char *name);
void __i915_fini_wedge(struct i915_wedge_me *w);
#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \
(W)->i915; \
__i915_fini_wedge((W)))
#endif /* I915_RESET_H */
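For reference, the watchdog declared here is used as a scoped guard; the call site removed from i915_irq.c earlier in this diff gives the pattern (sketch):

	struct i915_wedge_me w;

	i915_wedge_on_timeout(&w, i915, 5 * HZ) {
		/* reset work runs here; if the block does not complete within
		 * 5 seconds, the delayed work fires and wedges the GPU. */
	}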

@ -42,11 +42,11 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
u64 res;
intel_wakeref_t wakeref;
u64 res = 0;
intel_runtime_pm_get(dev_priv);
res = intel_rc6_residency_us(dev_priv, reg);
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref)
res = intel_rc6_residency_us(dev_priv, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@ -258,9 +258,10 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
intel_wakeref_t wakeref;
int ret;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@ -274,7 +275,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
}
mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
@ -354,6 +355,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
intel_wakeref_t wakeref;
u32 val;
ssize_t ret;
@ -361,7 +363,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
@ -371,7 +373,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return -EINVAL;
}
@ -392,7 +394,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return ret ?: count;
}
@ -412,6 +414,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
intel_wakeref_t wakeref;
u32 val;
ssize_t ret;
@ -419,7 +422,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
@ -429,7 +432,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return -EINVAL;
}
@ -446,7 +449,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return ret ?: count;
}

@ -337,9 +337,11 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
intel_display_power_get(dev_priv, port == PORT_A ?
POWER_DOMAIN_PORT_DDI_A_IO :
POWER_DOMAIN_PORT_DDI_B_IO);
intel_dsi->io_wakeref[port] =
intel_display_power_get(dev_priv,
port == PORT_A ?
POWER_DOMAIN_PORT_DDI_A_IO :
POWER_DOMAIN_PORT_DDI_B_IO);
}
}
@ -1125,10 +1127,18 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
enum port port;
u32 tmp;
intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
for_each_dsi_port(port, intel_dsi->ports) {
intel_wakeref_t wakeref;
if (intel_dsi->dual_link)
intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
if (wakeref) {
intel_display_power_put(dev_priv,
port == PORT_A ?
POWER_DOMAIN_PORT_DDI_A_IO :
POWER_DOMAIN_PORT_DDI_B_IO,
wakeref);
}
}
/* set mode to DDI */
for_each_dsi_port(port, intel_dsi->ports) {
@ -1229,13 +1239,15 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
enum transcoder dsi_trans;
intel_wakeref_t wakeref;
enum port port;
bool ret = false;
u32 tmp;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
if (!wakeref)
return false;
for_each_dsi_port(port, intel_dsi->ports) {
@ -1260,7 +1272,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
ret = tmp & PIPECONF_ENABLE;
}
out:
intel_display_power_put(dev_priv, encoder->power_domain);
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@ -1378,6 +1390,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->disable = gen11_dsi_disable;
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
encoder->update_pipe = intel_panel_update_backlight;
encoder->compute_config = gen11_dsi_compute_config;
encoder->get_hw_state = gen11_dsi_get_hw_state;
encoder->type = INTEL_OUTPUT_DSI;

@ -46,7 +46,7 @@
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val)
u64 *val)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -78,7 +78,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val)
u64 val)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);

@ -311,7 +311,7 @@ int
intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val)
u64 *val)
{
DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
@ -334,7 +334,7 @@ int
intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val)
u64 val)
{
DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);


@ -748,7 +748,8 @@ static void i915_audio_component_get_power(struct device *kdev)
static void i915_audio_component_put_power(struct device *kdev)
{
intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
intel_display_power_put_unchecked(kdev_to_i915(kdev),
POWER_DOMAIN_AUDIO);
}
static void i915_audio_component_codec_wake_override(struct device *kdev,


@ -1946,6 +1946,15 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
};
int i;
if (HAS_DDI(dev_priv)) {
const struct ddi_vbt_port_info *port_info =
&dev_priv->vbt.ddi_port_info[port];
return port_info->supports_dp ||
port_info->supports_dvi ||
port_info->supports_hdmi;
}
/* FIXME maybe deal with port A as well? */
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;


@ -158,30 +158,24 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
static void irq_enable(struct intel_engine_cs *engine)
{
/*
* FIXME: Ideally we want this on the API boundary, but for the
* sake of testing with mock breadcrumbs (no HW so unable to
* enable irqs) we place it deep within the bowels, at the point
* of no return.
*/
GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
if (!engine->irq_enable)
return;
/* Caller disables interrupts */
if (engine->irq_enable) {
spin_lock(&engine->i915->irq_lock);
engine->irq_enable(engine);
spin_unlock(&engine->i915->irq_lock);
}
spin_lock(&engine->i915->irq_lock);
engine->irq_enable(engine);
spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
if (!engine->irq_disable)
return;
/* Caller disables interrupts */
if (engine->irq_disable) {
spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
spin_unlock(&engine->i915->irq_lock);
}
spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
spin_unlock(&engine->i915->irq_lock);
}
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
@ -293,25 +287,16 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
if (b->irq_armed)
return false;
/* The breadcrumb irq will be disarmed on the interrupt after the
/*
* The breadcrumb irq will be disarmed on the interrupt after the
* waiters are signaled. This gives us a single interrupt window in
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
b->irq_armed = true;
if (I915_SELFTEST_ONLY(b->mock)) {
/* For our mock objects we want to avoid interaction
* with the real hardware (which is not set up). So
* we simply pretend we have enabled the powerwell
* and the irq, and leave it up to the mock
* implementation to call intel_engine_wakeup()
* itself when it wants to simulate a user interrupt,
*/
return true;
}
/* Since we are waiting on a request, the GPU should be busy
/*
* Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference. This is tracked
* by i915->gt.awake, we can forgo holding our own wakref
* for the interrupt as before i915->gt.awake is released (when
@ -646,8 +631,7 @@ static int intel_breadcrumbs_signaler(void *arg)
rq->signaling.wait.seqno = 0;
__list_del_entry(&rq->signaling.link);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&rq->fence.flags)) {
if (!i915_request_signaled(rq)) {
list_add_tail(&rq->signaling.link,
&list);
i915_request_get(rq);
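
The last hunk swaps an open-coded test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, ...)
for an i915_request_signaled() predicate. A sketch of that kind of wrapper
(struct layout simplified for illustration, not the driver's):

#include <stdbool.h>

#define FENCE_FLAG_SIGNALED_BIT 0

struct fence { unsigned long flags; };
struct request { struct fence fence; };

/* one named place that encodes "has this request already signaled?" */
static inline bool request_signaled(const struct request *rq)
{
	return rq->fence.flags & (1UL << FENCE_FLAG_SIGNALED_BIT);
}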


@ -218,7 +218,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
};
const unsigned int *vco_table;
unsigned int vco;
uint8_t tmp = 0;
u8 tmp = 0;
/* FIXME other chipsets? */
if (IS_GM45(dev_priv))
@ -249,13 +249,13 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
const uint8_t *div_table;
static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
const u8 *div_table;
unsigned int cdclk_sel;
uint16_t tmp = 0;
u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
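
Each div_* table above maps the decoded CDCLK select field to a post-divider
for one HPLL VCO frequency. A simplified model of the lookup (the real
register decode and rounding are more involved; divider values quoted from
the 3200 MHz table above):

#include <stdio.h>

static const unsigned char div_3200[] = { 12, 10, 8, 7, 5, 16 };

int main(void)
{
	unsigned int vco = 3200000;	/* HPLL VCO in kHz */
	unsigned int cdclk_sel = 1;	/* field decoded from CLKCFG */

	if (cdclk_sel < sizeof(div_3200))	/* bounds-check the strap */
		printf("cdclk = %u kHz\n", vco / div_3200[cdclk_sel]);
	return 0;
}
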
@ -330,12 +330,12 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 16, 10, 8 };
static const uint8_t div_4000[] = { 20, 12, 10 };
static const uint8_t div_5333[] = { 24, 16, 14 };
const uint8_t *div_table;
static const u8 div_3200[] = { 16, 10, 8 };
static const u8 div_4000[] = { 20, 12, 10 };
static const u8 div_5333[] = { 24, 16, 14 };
const u8 *div_table;
unsigned int cdclk_sel;
uint16_t tmp = 0;
u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
@ -375,7 +375,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
{
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int cdclk_sel;
uint16_t tmp = 0;
u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
@ -403,8 +403,8 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
u32 lcpll = I915_READ(LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
cdclk_state->cdclk = 800000;
@ -520,6 +520,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
intel_wakeref_t wakeref;
switch (cdclk) {
case 400000:
@ -539,7 +540,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
* a system suspend. So grab the PIPE-A domain, which covers
* the HW blocks needed for the following programming.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
@ -593,7 +594,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@ -601,6 +602,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
intel_wakeref_t wakeref;
switch (cdclk) {
case 333333:
@ -619,7 +621,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
* a system suspend. So grab the PIPE-A domain, which covers
* the HW blocks needed for the following programming.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
@ -637,7 +639,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@ -670,8 +672,8 @@ static u8 bdw_calc_voltage_level(int cdclk)
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
u32 lcpll = I915_READ(LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
cdclk_state->cdclk = 800000;
@ -698,7 +700,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
uint32_t val;
u32 val;
int ret;
if (WARN((I915_READ(LCPLL_CTL) &
@ -1081,7 +1083,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
uint32_t cdctl, expected;
u32 cdctl, expected;
/*
* check if the pre-os initialized the display
@ -2688,7 +2690,7 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv)
static int g4x_hrawclk(struct drm_i915_private *dev_priv)
{
uint32_t clkcfg;
u32 clkcfg;
/* hrawclock is 1/4 the FSB frequency */
clkcfg = I915_READ(CLKCFG);


@ -142,7 +142,7 @@ static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int i, pipe = crtc->pipe;
uint16_t coeffs[9] = { 0, };
u16 coeffs[9] = { 0, };
bool limited_color_range = false;
/*
@ -171,7 +171,7 @@ static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
* hardware.
*/
for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i];
u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
/*
* Clamp input value to min/max supported by
@ -233,7 +233,7 @@ static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
if (INTEL_GEN(dev_priv) > 6) {
uint16_t postoff = 0;
u16 postoff = 0;
if (limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
@ -244,7 +244,7 @@ static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
u32 mode = CSC_MODE_YUV_TO_RGB;
if (limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
@ -261,15 +261,15 @@ static void cherryview_load_csc_matrix(struct intel_crtc_state *crtc_state)
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
uint32_t mode;
u32 mode;
if (crtc_state->base.ctm) {
struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
uint16_t coeffs[9] = { 0, };
u16 coeffs[9] = { 0, };
int i;
for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
uint64_t abs_coeff =
u64 abs_coeff =
((1ULL << 63) - 1) & ctm->matrix[i];
/* Round coefficient. */
@ -331,7 +331,7 @@ static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state,
if (blob) {
struct drm_color_lut *lut = blob->data;
for (i = 0; i < 256; i++) {
uint32_t word =
u32 word =
(drm_color_lut_extract(lut[i].red, 8) << 16) |
(drm_color_lut_extract(lut[i].green, 8) << 8) |
drm_color_lut_extract(lut[i].blue, 8);
@ -343,7 +343,7 @@ static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state,
}
} else {
for (i = 0; i < 256; i++) {
uint32_t word = (i << 16) | (i << 8) | i;
u32 word = (i << 16) | (i << 8) | i;
if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
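
drm_color_lut_extract() reduces a 16-bit LUT channel to the precision the
hardware stores, and the legacy palette packs 8 bits per channel into one
register word as above. A sketch of the packing (lut_extract below is a
simplified stand-in for the DRM helper, whose rounding differs in detail):

#include <stdint.h>
#include <stdio.h>

/* scale a 0..0xffff channel down to `bits` bits of precision */
static uint32_t lut_extract(uint16_t v, unsigned int bits)
{
	uint32_t max = (1u << bits) - 1;

	return ((uint32_t)v * max + 0x7fff) / 0xffff;
}

int main(void)
{
	uint16_t red = 0xffff, green = 0x8000, blue = 0x0000;
	uint32_t word = (lut_extract(red, 8) << 16) |
			(lut_extract(green, 8) << 8) |
			lut_extract(blue, 8);

	printf("palette word = 0x%06x\n", word);	/* 0xff8000 */
	return 0;
}
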
@ -388,7 +388,7 @@ static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
I915_WRITE(PREC_PAL_INDEX(pipe),
PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
@ -397,7 +397,7 @@ static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state)
struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
for (i = 0; i < lut_size; i++) {
uint32_t word =
u32 word =
drm_color_lut_extract(lut[i].red, 10) << 20 |
drm_color_lut_extract(lut[i].green, 10) << 10 |
drm_color_lut_extract(lut[i].blue, 10);
@ -406,7 +406,7 @@ static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state)
}
} else {
for (i = 0; i < lut_size; i++) {
uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
I915_WRITE(PREC_PAL_DATA(pipe),
(v << 20) | (v << 10) | v);
@ -418,7 +418,7 @@ static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
@ -431,7 +431,7 @@ static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset)
struct drm_color_lut *lut = crtc_state->base.gamma_lut->data;
for (i = 0; i < lut_size; i++) {
uint32_t word =
u32 word =
(drm_color_lut_extract(lut[i].red, 10) << 20) |
(drm_color_lut_extract(lut[i].green, 10) << 10) |
drm_color_lut_extract(lut[i].blue, 10);
@ -449,7 +449,7 @@ static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset)
drm_color_lut_extract(lut[i].blue, 16));
} else {
for (i = 0; i < lut_size; i++) {
uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
I915_WRITE(PREC_PAL_DATA(pipe),
(v << 20) | (v << 10) | v);
@ -491,8 +491,8 @@ static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
const uint32_t lut_size = 33;
uint32_t i;
const u32 lut_size = 33;
u32 i;
/*
* When setting the auto-increment bit, the hardware seems to
@ -507,7 +507,7 @@ static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state)
* different values per channel, so this just loads a linear table.
*/
for (i = 0; i < lut_size; i++) {
uint32_t v = (i * (1 << 16)) / (lut_size - 1);
u32 v = (i * (1 << 16)) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
@ -544,8 +544,8 @@ static void cherryview_load_luts(struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
uint32_t word0, word1;
u32 i, lut_size;
u32 word0, word1;
if (crtc_state_is_legacy_gamma(crtc_state)) {
/* Turn off degamma/gamma on CGM block. */
@ -609,10 +609,26 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
size_t gamma_length, degamma_length;
uint32_t tests = DRM_COLOR_LUT_NON_DECREASING;
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
/*
* All of our platforms mandate that the degamma curve be
* non-decreasing. Additionally, GLK and gen11 only accept a single
* value for red, green, and blue in the degamma table. Make sure
* userspace didn't try to pass us something we can't handle.
*
* We don't have any extra hardware constraints on the gamma table,
* so no need to explicitly check it.
*/
if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
tests |= DRM_COLOR_LUT_EQUAL_CHANNELS;
if (drm_color_lut_check(crtc_state->base.degamma_lut, tests) != 0)
return -EINVAL;
/*
* We allow both degamma & gamma luts at the right size or
* NULL.
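
The check above builds a test mask - DRM_COLOR_LUT_NON_DECREASING on every
platform, plus DRM_COLOR_LUT_EQUAL_CHANNELS on GLK and gen10+ - and hands the
degamma blob to the new drm_color_lut_check() helper. A userspace model of
what such a check does (flag values and the struct are simplified):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define LUT_NON_DECREASING (1 << 0)
#define LUT_EQUAL_CHANNELS (1 << 1)

struct color_lut { uint16_t red, green, blue; };

static bool lut_check(const struct color_lut *lut, size_t len, uint32_t tests)
{
	size_t i;

	for (i = 0; i < len; i++) {
		/* GLK/gen11 degamma: one value shared by all three channels */
		if ((tests & LUT_EQUAL_CHANNELS) &&
		    (lut[i].red != lut[i].green || lut[i].green != lut[i].blue))
			return false;
		/* the curve must never decrease */
		if ((tests & LUT_NON_DECREASING) && i > 0 &&
		    (lut[i].red < lut[i - 1].red ||
		     lut[i].green < lut[i - 1].green ||
		     lut[i].blue < lut[i - 1].blue))
			return false;
	}
	return true;
}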


@ -83,15 +83,17 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
intel_wakeref_t wakeref;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
if (!wakeref)
return false;
ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
intel_display_power_put(dev_priv, encoder->power_domain);
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
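
intel_display_power_get_if_enabled() only hands back a wakeref when the power
well is already up, which lets all of these get_hw_state() paths bail out
instead of waking the hardware. The converted shape, as a compact model
(userspace stand-ins, not the i915 functions):

#include <stdio.h>

typedef unsigned long wakeref_t;

static int power_well_on;			/* pretend hardware state */

static wakeref_t power_get_if_enabled(void)
{
	return power_well_on ? 0x1 : 0;		/* cookie only if already on */
}

static void power_put(wakeref_t w)
{
	(void)w;				/* drop the tracked reference */
}

static int get_hw_state(void)
{
	wakeref_t wakeref = power_get_if_enabled();
	int state;

	if (!wakeref)
		return 0;		/* powered off: nothing is enabled */

	state = 1;			/* register reads would happen here */

	power_put(wakeref);
	return state;
}

int main(void)
{
	printf("state: %d\n", get_hw_state());	/* 0 while the well is off */
	return 0;
}
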
@ -629,19 +631,19 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
uint32_t vsample;
uint32_t vblank, vblank_start, vblank_end;
uint32_t dsl;
u32 save_bclrpat;
u32 save_vtotal;
u32 vtotal, vactive;
u32 vsample;
u32 vblank, vblank_start, vblank_end;
u32 dsl;
i915_reg_t bclrpat_reg, vtotal_reg,
vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
uint8_t st00;
u8 st00;
enum drm_connector_status status;
DRM_DEBUG_KMS("starting load-detect on CRT\n");
@ -667,7 +669,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
I915_WRITE(bclrpat_reg, 0x500050);
if (!IS_GEN(dev_priv, 2)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
u32 pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
@ -688,8 +690,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
uint32_t vsync = I915_READ(vsync_reg);
uint32_t vsync_start = (vsync & 0xffff) + 1;
u32 vsync = I915_READ(vsync_reg);
u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
I915_WRITE(vblank_reg,
@ -777,6 +779,7 @@ intel_crt_detect(struct drm_connector *connector,
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
intel_wakeref_t wakeref;
int status, ret;
struct intel_load_detect_pipe tmp;
@ -785,7 +788,8 @@ intel_crt_detect(struct drm_connector *connector,
force);
if (i915_modparams.load_detect_test) {
intel_display_power_get(dev_priv, intel_encoder->power_domain);
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
goto load_detect;
}
@ -793,7 +797,8 @@ intel_crt_detect(struct drm_connector *connector,
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
intel_display_power_get(dev_priv, intel_encoder->power_domain);
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
if (I915_HAS_HOTPLUG(dev_priv)) {
/* We can not rely on the HPD pin always being correctly wired
@ -848,7 +853,7 @@ load_detect:
}
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return status;
}
@ -858,10 +863,12 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
int ret;
intel_wakeref_t wakeref;
struct i2c_adapter *i2c;
int ret;
intel_display_power_get(dev_priv, intel_encoder->power_domain);
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
@ -873,7 +880,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, i2c);
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}


@ -70,50 +70,50 @@ MODULE_FIRMWARE(BXT_CSR_PATH);
struct intel_css_header {
/* 0x09 for DMC */
uint32_t module_type;
u32 module_type;
/* Includes the DMC specific header in dwords */
uint32_t header_len;
u32 header_len;
/* always value would be 0x10000 */
uint32_t header_ver;
u32 header_ver;
/* Not used */
uint32_t module_id;
u32 module_id;
/* Not used */
uint32_t module_vendor;
u32 module_vendor;
/* in YYYYMMDD format */
uint32_t date;
u32 date;
/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
uint32_t size;
u32 size;
/* Not used */
uint32_t key_size;
u32 key_size;
/* Not used */
uint32_t modulus_size;
u32 modulus_size;
/* Not used */
uint32_t exponent_size;
u32 exponent_size;
/* Not used */
uint32_t reserved1[12];
u32 reserved1[12];
/* Major Minor */
uint32_t version;
u32 version;
/* Not used */
uint32_t reserved2[8];
u32 reserved2[8];
/* Not used */
uint32_t kernel_header_info;
u32 kernel_header_info;
} __packed;
struct intel_fw_info {
uint16_t reserved1;
u16 reserved1;
/* Stepping (A, B, C, ..., *). * is a wildcard */
char stepping;
@ -121,8 +121,8 @@ struct intel_fw_info {
/* Sub-stepping (0, 1, ..., *). * is a wildcard */
char substepping;
uint32_t offset;
uint32_t reserved2;
u32 offset;
u32 reserved2;
} __packed;
struct intel_package_header {
@ -135,14 +135,14 @@ struct intel_package_header {
unsigned char reserved[10];
/* Number of valid entries in the FWInfo array below */
uint32_t num_entries;
u32 num_entries;
struct intel_fw_info fw_info[20];
} __packed;
struct intel_dmc_header {
/* always value would be 0x40403E3E */
uint32_t signature;
u32 signature;
/* DMC binary header length */
unsigned char header_len;
@ -151,30 +151,30 @@ struct intel_dmc_header {
unsigned char header_ver;
/* Reserved */
uint16_t dmcc_ver;
u16 dmcc_ver;
/* Major, Minor */
uint32_t project;
u32 project;
/* Firmware program size (excluding header) in dwords */
uint32_t fw_size;
u32 fw_size;
/* Major Minor version */
uint32_t fw_version;
u32 fw_version;
/* Number of valid MMIO cycles present. */
uint32_t mmio_count;
u32 mmio_count;
/* MMIO address */
uint32_t mmioaddr[8];
u32 mmioaddr[8];
/* MMIO data */
uint32_t mmiodata[8];
u32 mmiodata[8];
/* FW filename */
unsigned char dfile[32];
uint32_t reserved1[2];
u32 reserved1[2];
} __packed;
struct stepping_info {
@ -230,7 +230,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
uint32_t val, mask;
u32 val, mask;
mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
@ -257,7 +257,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
u32 i, fw_size;
if (!HAS_CSR(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
@ -289,17 +289,17 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
gen9_set_dc_state_debugmask(dev_priv);
}
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
const struct firmware *fw)
static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
const struct firmware *fw)
{
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header *dmc_header;
struct intel_csr *csr = &dev_priv->csr;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
u32 i;
u32 *dmc_payload;
if (!fw)
return NULL;
@ -409,6 +409,21 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}
static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
{
WARN_ON(dev_priv->csr.wakeref);
dev_priv->csr.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&dev_priv->csr.wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
static void csr_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
@ -424,8 +439,7 @@ static void csr_load_work_fn(struct work_struct *work)
if (dev_priv->csr.dmc_payload) {
intel_csr_load_program(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_csr_runtime_pm_put(dev_priv);
DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
dev_priv->csr.fw_path,
@ -467,7 +481,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
* suspend as runtime suspend *requires* a working CSR for whatever
* reason.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_csr_runtime_pm_get(dev_priv);
if (INTEL_GEN(dev_priv) >= 12) {
/* Allow to load fw via parameter using the last known size */
@ -538,7 +552,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
/* Drop the reference held in case DMC isn't loaded. */
if (!dev_priv->csr.dmc_payload)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_csr_runtime_pm_put(dev_priv);
}
/**
@ -558,7 +572,7 @@ void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
* loaded.
*/
if (!dev_priv->csr.dmc_payload)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_csr_runtime_pm_get(dev_priv);
}
/**
@ -574,6 +588,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_csr_ucode_suspend(dev_priv);
WARN_ON(dev_priv->csr.wakeref);
kfree(dev_priv->csr.dmc_payload);
}
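
Routing every CSR power reference through intel_csr_runtime_pm_get/put() and
parking the cookie in dev_priv->csr.wakeref is what makes the WARN_ON in
intel_csr_ucode_fini() possible: a leaked reference now shows up as a
non-zero cookie at teardown. The bookkeeping in miniature (userspace model,
not the driver structs):

#include <assert.h>

typedef unsigned long wakeref_t;

struct csr { wakeref_t wakeref; };

static wakeref_t power_get(void) { return 0x1; }
static void power_put(wakeref_t w) { assert(w != 0); }

#define fetch_and_zero(ptr) \
	({ __typeof__(*(ptr)) _v = *(ptr); *(ptr) = 0; _v; })

static void csr_pm_get(struct csr *csr)
{
	assert(!csr->wakeref);		/* WARN_ON in the kernel: no double get */
	csr->wakeref = power_get();
}

static void csr_pm_put(struct csr *csr)
{
	power_put(fetch_and_zero(&csr->wakeref));
}

int main(void)
{
	struct csr csr = { 0 };

	csr_pm_get(&csr);
	csr_pm_put(&csr);
	assert(!csr.wakeref);		/* the fini-time leak check passes */
	return 0;
}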


@ -974,7 +974,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
}
static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
{
switch (pll->info->id) {
case DPLL_ID_WRPLL1:
@ -995,8 +995,8 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
}
}
static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
static u32 icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
int clock = crtc_state->port_clock;
@ -1243,8 +1243,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
{
i915_reg_t cfgcr1_reg, cfgcr2_reg;
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;
u32 cfgcr1_val, cfgcr2_val;
u32 p0, p1, p2, dco_freq;
cfgcr1_reg = DPLL_CFGCR1(pll_id);
cfgcr2_reg = DPLL_CFGCR2(pll_id);
@ -1305,8 +1305,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
u32 cfgcr0, cfgcr1;
u32 p0, p1, p2, dco_freq, ref_clock;
if (INTEL_GEN(dev_priv) >= 11) {
cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
@ -1471,7 +1471,7 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int link_clock = 0;
uint32_t pll_id;
u32 pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (intel_port_is_combophy(dev_priv, port)) {
@ -1496,7 +1496,7 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
uint32_t cfgcr0;
u32 cfgcr0;
enum intel_dpll_id pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@ -1550,7 +1550,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
uint32_t dpll_ctl1;
u32 dpll_ctl1;
enum intel_dpll_id pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@ -1739,7 +1739,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
uint32_t temp;
u32 temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@ -1757,7 +1757,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
uint32_t temp;
u32 temp;
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
@ -1841,7 +1841,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
u32 val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
@ -1860,12 +1860,14 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
enum pipe pipe = 0;
int ret = 0;
uint32_t tmp;
u32 tmp;
if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain)))
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
if (WARN_ON(!wakeref))
return -ENXIO;
if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
@ -1880,7 +1882,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@ -1891,13 +1893,15 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
struct intel_encoder *encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
uint32_t tmp;
intel_wakeref_t wakeref;
enum pipe pipe = 0;
u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
if (!wakeref)
return false;
if (!encoder->get_hw_state(encoder, &pipe)) {
@ -1939,7 +1943,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
}
out:
intel_display_power_put(dev_priv, encoder->power_domain);
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@ -1950,6 +1954,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
u32 tmp;
u8 mst_pipe_mask;
@ -1957,8 +1962,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*pipe_mask = 0;
*is_dp_mst = false;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
if (!wakeref)
return;
tmp = I915_READ(DDI_BUF_CTL(port));
@ -2029,7 +2035,7 @@ out:
"(PHY_CTL %08x)\n", port_name(port), tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain);
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@ -2126,7 +2132,7 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
enum port port, uint8_t iboost)
enum port port, u8 iboost)
{
u32 tmp;
@ -2145,7 +2151,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
uint8_t iboost;
u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
@ -2659,7 +2665,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
static uint32_t translate_signal_level(int signal_levels)
static u32 translate_signal_level(int signal_levels)
{
int i;
@ -2674,9 +2680,9 @@ static uint32_t translate_signal_level(int signal_levels)
return 0;
}
static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp)
static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
{
uint8_t train_set = intel_dp->train_set[0];
u8 train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@ -2701,7 +2707,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
return 0;
}
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
u32 ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@ -2715,8 +2721,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
}
static inline
uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
enum port port)
u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
enum port port)
{
if (intel_port_is_combophy(dev_priv, port)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
@ -2851,7 +2857,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
uint32_t val;
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(!pll))
@ -3286,7 +3292,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_display_power_put_unchecked(dev_priv,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
}
@ -3306,7 +3313,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_disable_ddi_buf(encoder, old_crtc_state);
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_display_power_put_unchecked(dev_priv,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
@ -3348,7 +3356,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t val;
u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@ -3548,6 +3556,8 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
intel_psr_enable(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
intel_panel_update_backlight(encoder, crtc_state, conn_state);
}
static void intel_ddi_update_pipe(struct intel_encoder *encoder,
@ -3626,8 +3636,8 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
intel_display_power_put(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
intel_display_power_put_unchecked(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@ -3636,7 +3646,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
uint32_t val;
u32 val;
bool wait = false;
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {


@ -31,13 +31,7 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
@ -48,8 +42,17 @@
#include <linux/intel-iommu.h>
#include <linux/reservation.h>
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_frontbuffer.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_reset.h"
#include "i915_trace.h"
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
@ -57,7 +60,7 @@ static const uint32_t i8xx_primary_formats[] = {
};
/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
static const u32 i965_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@ -66,18 +69,18 @@ static const uint32_t i965_primary_formats[] = {
DRM_FORMAT_XBGR2101010,
};
static const uint64_t i9xx_format_modifiers[] = {
static const u64 i9xx_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
static const uint64_t cursor_format_modifiers[] = {
static const u64 cursor_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
@ -493,7 +496,7 @@ static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
@ -528,8 +531,8 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0;
clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
clock->n << 22);
clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
return clock->dot / 5;
@ -891,7 +894,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
struct dpll clock;
uint64_t m2;
u64 m2;
int found = false;
memset(best_clock, 0, sizeof(*best_clock));
@ -913,7 +916,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
clock.p = clock.p1 * clock.p2;
m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
clock.n) << 22, refclk * clock.m1);
if (m2 > INT_MAX/clock.m1)
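
The (u64) cast in the m2 computation is load-bearing: target * p * n shifted
left by 22 overflows 32 bits for any realistic link rate, so the product must
be widened before the shift. A standalone illustration (values are plausible
CHV-style inputs, chosen only to show the wrap):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t target = 162000, p = 14, n = 1, refclk = 19200, m1 = 2;

	uint32_t narrow = (target * p * n) << 22;	/* wraps: garbage */
	uint64_t wide = ((uint64_t)target * p * n) << 22;
	uint64_t div = (uint64_t)refclk * m1;
	uint64_t m2 = (wide + div / 2) / div;	/* DIV_ROUND_CLOSEST_ULL */

	printf("32-bit: %u, 64-bit m2: %llu\n",
	       narrow, (unsigned long long)m2);
	return 0;
}
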
@ -1197,17 +1200,19 @@ void assert_pipe(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
state = true;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
} else {
cur_state = false;
}
@ -1608,7 +1613,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
uint32_t val, pipeconf_val;
u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
@ -1696,7 +1701,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
i915_reg_t reg;
uint32_t val;
u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@ -2023,6 +2028,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
intel_wakeref_t wakeref;
struct i915_vma *vma;
unsigned int pinctl;
u32 alignment;
@ -2046,7 +2052,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
@ -2101,7 +2107,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return vma;
}
@ -2372,7 +2378,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
return 0;
}
static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
switch (fb_modifier) {
case I915_FORMAT_MOD_X_TILED:
@ -3411,6 +3417,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
intel_wakeref_t wakeref;
bool ret;
u32 val;
@ -3420,7 +3427,8 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
val = I915_READ(DSPCNTR(i9xx_plane));
@ -3433,7 +3441,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@ -3502,7 +3510,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}
static u32 skl_plane_ctl_format(uint32_t pixel_format)
static u32 skl_plane_ctl_format(u32 pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_C8:
@ -3572,7 +3580,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state
}
}
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
switch (fb_modifier) {
case DRM_FORMAT_MOD_LINEAR:
@ -4592,7 +4600,7 @@ static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *c
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
uint32_t temp;
u32 temp;
temp = I915_READ(SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
@ -5715,7 +5723,7 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
uint32_t val;
u32 val;
val = MBUS_DBOX_A_CREDIT(2);
val |= MBUS_DBOX_BW_CREDIT(1);
@ -6106,7 +6114,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain;
for_each_power_domain(domain, domains)
intel_display_power_put(dev_priv, domain);
intel_display_power_put_unchecked(dev_priv, domain);
}
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
@ -6353,7 +6361,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
domains = intel_crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
intel_display_power_put(dev_priv, domain);
intel_display_power_put_unchecked(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
@ -6619,9 +6627,9 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
uint32_t pixel_rate;
u32 pixel_rate;
pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
@ -6631,8 +6639,8 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
*/
if (pipe_config->pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
uint32_t pfit_size = pipe_config->pch_pfit.size;
u64 pipe_w, pipe_h, pfit_w, pfit_h;
u32 pfit_size = pipe_config->pch_pfit.size;
pipe_w = pipe_config->pipe_src_w;
pipe_h = pipe_config->pipe_src_h;
@ -6647,7 +6655,7 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;
pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
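
ilk_pipe_pixel_rate() scales the CRTC clock by the ratio of pipe source area
to panel-fitter window area, and the intermediate product needs 64 bits.
A simplified worked example (the driver also clamps the pipe size against the
fitter window first, omitted here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pixel_rate = 148500;		/* kHz, illustrative mode */
	uint64_t pipe_w = 1920, pipe_h = 1080;	/* pipe source size */
	uint64_t pfit_w = 1280, pfit_h = 720;	/* fitter output window */

	/* 1920 * 1080 * 148500 exceeds 32 bits; div_u64 handles this */
	pixel_rate = (uint64_t)pixel_rate * pipe_w * pipe_h /
		     (pfit_w * pfit_h);

	printf("effective pixel rate: %u kHz\n", pixel_rate);	/* 334125 */
	return 0;
}
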
@ -6743,7 +6751,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
while (*num > DATA_LINK_M_N_MASK ||
*den > DATA_LINK_M_N_MASK) {
@ -6753,7 +6761,7 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
}
static void compute_m_n(unsigned int m, unsigned int n,
uint32_t *ret_m, uint32_t *ret_n,
u32 *ret_m, u32 *ret_n,
bool constant_n)
{
/*
@ -6768,7 +6776,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
*ret_m = div_u64((uint64_t) m * *ret_n, n);
*ret_m = div_u64((u64)m * *ret_n, n);
intel_reduce_m_n_ratio(ret_m, ret_n);
}
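
compute_m_n() turns a clock ratio into hardware M/N register values: pick N
(a fixed value in the constant_n case), derive M = m * N / n in 64 bits, then
halve both until they fit under the register mask. A runnable condensation
(the 0x8000 constant and 24-bit mask below are illustrative stand-ins for the
driver's values):

#include <stdint.h>
#include <stdio.h>

#define DATA_LINK_M_N_MASK 0xffffff

static void reduce_m_n_ratio(uint32_t *m, uint32_t *n)
{
	while (*m > DATA_LINK_M_N_MASK || *n > DATA_LINK_M_N_MASK) {
		*m >>= 1;
		*n >>= 1;
	}
}

static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = 0x8000;			/* constant-N case */
	*ret_m = (uint64_t)m * *ret_n / n;	/* div_u64() in the kernel */
	reduce_m_n_ratio(ret_m, ret_n);
}

int main(void)
{
	uint32_t m, n;

	/* e.g. a 148.5 MHz stream clock against a 270 MHz link clock */
	compute_m_n(148500, 270000, &m, &n);
	printf("M=%u N=%u (ratio %.4f)\n", m, n, (double)m / n);
	return 0;
}
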
@ -6798,12 +6806,12 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
static u32 pnv_dpll_compute_fp(struct dpll *dpll)
{
return (1 << dpll->n) << 16 | dpll->m2;
}
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
{
return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
@ -7359,7 +7367,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
u32 crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
/* We need to be careful not to changed the adjusted mode, for otherwise
@ -7434,7 +7442,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
uint32_t tmp;
u32 tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
@ -7505,7 +7513,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t pipeconf;
u32 pipeconf;
pipeconf = 0;
@ -7750,7 +7758,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t tmp;
u32 tmp;
if (INTEL_GEN(dev_priv) <= 3 &&
(IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
@ -7965,11 +7973,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
intel_wakeref_t wakeref;
u32 tmp;
bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@ -8070,7 +8080,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@ -8244,7 +8254,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
uint32_t tmp;
u32 tmp;
tmp = I915_READ(SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
@ -8266,7 +8276,7 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
uint32_t tmp;
u32 tmp;
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
@ -8347,7 +8357,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
bool with_spread, bool with_fdi)
{
uint32_t reg, tmp;
u32 reg, tmp;
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
@ -8386,7 +8396,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
uint32_t reg, tmp;
u32 reg, tmp;
mutex_lock(&dev_priv->sb_lock);
@ -8411,7 +8421,7 @@ static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
#define BEND_IDX(steps) ((50 + (steps)) / 5)
static const uint16_t sscdivintphase[] = {
static const u16 sscdivintphase[] = {
[BEND_IDX( 50)] = 0x3B23,
[BEND_IDX( 45)] = 0x3B23,
[BEND_IDX( 40)] = 0x3C23,
@ -8443,7 +8453,7 @@ static const uint16_t sscdivintphase[] = {
*/
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
uint32_t tmp;
u32 tmp;
int idx = BEND_IDX(steps);
if (WARN_ON(steps % 5 != 0))
@ -8509,7 +8519,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
uint32_t val;
u32 val;
val = 0;
@ -8856,7 +8866,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
uint32_t ps_ctrl = 0;
u32 ps_ctrl = 0;
int id = -1;
int i;
@ -8868,6 +8878,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
scaler_state->scalers[i].in_use = true;
break;
}
}
@ -9012,7 +9023,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
u32 tmp;
tmp = I915_READ(PF_CTL(crtc->pipe));
@ -9037,11 +9048,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
intel_wakeref_t wakeref;
u32 tmp;
bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@ -9124,7 +9137,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@ -9164,7 +9177,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv))
return I915_READ(D_COMP_HSW);
@ -9172,7 +9185,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
return I915_READ(D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
if (IS_HASWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
@ -9197,7 +9210,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down)
{
uint32_t val;
u32 val;
assert_can_disable_lcpll(dev_priv);
@ -9244,7 +9257,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
*/
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
u32 val;
val = I915_READ(LCPLL_CTL);
@ -9319,7 +9332,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
uint32_t val;
u32 val;
DRM_DEBUG_KMS("Enabling package C8+\n");
@ -9335,7 +9348,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
uint32_t val;
u32 val;
DRM_DEBUG_KMS("Disabling package C8+\n");
@ -9457,7 +9470,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
switch (ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
@ -9514,7 +9527,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
for_each_set_bit(panel_transcoder,
&panel_transcoder_mask,
ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
enum pipe trans_pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
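
The change above bounds the bitmask walk by the size of the transcoder
offsets array instead of a hard-coded 32, so a stray high bit in
panel_transcoder_mask can never index past the table. The guard in miniature:

#include <stdio.h>

#define NUM_TRANSCODERS 4	/* stands in for ARRAY_SIZE(trans_offsets) */

int main(void)
{
	unsigned long mask = (1UL << 1) | (1UL << 3) | (1UL << 17);
	unsigned int bit;

	/* walk the set bits, but only inside the valid index range */
	for (bit = 0; bit < NUM_TRANSCODERS; bit++)
		if (mask & (1UL << bit))
			printf("transcoder %u\n", bit);	/* bit 17 skipped */
	return 0;
}
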
@ -9560,6 +9575,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
*power_domain_mask |= BIT_ULL(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@ -9587,6 +9604,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
*power_domain_mask |= BIT_ULL(power_domain);
/*
@ -9621,7 +9640,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
u32 tmp;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
@ -9703,7 +9722,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
WARN_ON(power_domain_mask & BIT_ULL(power_domain));
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
@ -9733,7 +9754,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put_unchecked(dev_priv, power_domain);
return active;
}
@ -9983,17 +10004,19 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(PIPE_A);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
*pipe = PIPE_A;
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@ -10216,6 +10239,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
u32 val;
@ -10225,7 +10249,8 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
val = I915_READ(CURCNTR(plane->pipe));
@ -10238,7 +10263,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
MCURSOR_PIPE_SELECT_SHIFT;
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@ -10832,8 +10857,11 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* Despite the w/a only being listed for IVB we assume that
* the ILK/SNB note has similar ramifications, hence we apply
* the w/a on all three platforms.
*
* Experimental results suggest this is needed also for the primary
* plane, not only the sprite plane.
*/
if (plane->id == PLANE_SPRITE0 &&
if (plane->id != PLANE_CURSOR &&
(IS_GEN_RANGE(dev_priv, 5, 6) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
@ -11673,6 +11701,11 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
if (fixup_inherited && !i915_modparams.fastboot) {
DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
ret = false;
}
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
@ -12696,8 +12729,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
if (i915_modparams.fastboot &&
intel_pipe_config_compare(dev_priv,
if (intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(old_crtc_state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@ -12956,6 +12988,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
u64 put_domains[I915_MAX_PIPES] = {};
intel_wakeref_t wakeref = 0;
int i;
intel_atomic_commit_fence_wait(intel_state);
@ -12963,7 +12996,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
@ -13100,7 +13133,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
/*
@ -13728,8 +13761,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
u32 src_x, u32 src_y,
u32 src_w, u32 src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@ -14203,7 +14236,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
return index_mask;
}
static bool has_edp_a(struct drm_i915_private *dev_priv)
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
if (!IS_MOBILE(dev_priv))
return false;
@ -14217,7 +14250,7 @@ static bool has_edp_a(struct drm_i915_private *dev_priv)
return true;
}
static bool intel_crt_present(struct drm_i915_private *dev_priv)
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) >= 9)
return false;
@ -14225,15 +14258,12 @@ static bool intel_crt_present(struct drm_i915_private *dev_priv)
if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
return false;
if (IS_CHERRYVIEW(dev_priv))
return false;
if (HAS_PCH_LPT_H(dev_priv) &&
I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@ -14288,23 +14318,19 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
/*
* intel_edp_init_connector() depends on this completing first, to
* prevent the registeration of both eDP and LVDS and the incorrect
* sharing of the PPS.
*/
intel_lvds_init(dev_priv);
if (intel_crt_present(dev_priv))
intel_crt_init(dev_priv);
if (IS_ICELAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
intel_ddi_init(dev_priv, PORT_F);
/*
* On some ICL SKUs port F is not present. No strap bits for
* this, so rely on VBT.
*/
if (intel_bios_is_port_present(dev_priv, PORT_F))
intel_ddi_init(dev_priv, PORT_F);
icl_dsi_init(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
/*
@ -14320,6 +14346,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (HAS_DDI(dev_priv)) {
int found;
if (intel_ddi_crt_present(dev_priv))
intel_crt_init(dev_priv);
/*
* Haswell uses DDI functions to detect digital outputs.
* On SKL pre-D0 the strap isn't connected, so we assume
@ -14346,16 +14375,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
if (IS_GEN9_BC(dev_priv) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
intel_bios_is_port_present(dev_priv, PORT_E))
intel_ddi_init(dev_priv, PORT_E);
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
/*
* intel_edp_init_connector() depends on this completing first,
* to prevent the registration of both eDP and LVDS and the
* incorrect sharing of the PPS.
*/
intel_lvds_init(dev_priv);
intel_crt_init(dev_priv);
dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
if (ilk_has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
@ -14381,6 +14417,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
intel_crt_init(dev_priv);
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@ -14423,9 +14462,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
}
vlv_dsi_init(dev_priv);
} else if (!IS_GEN(dev_priv, 2) && !IS_PINEVIEW(dev_priv)) {
} else if (IS_PINEVIEW(dev_priv)) {
intel_lvds_init(dev_priv);
intel_crt_init(dev_priv);
} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
bool found = false;
if (IS_MOBILE(dev_priv))
intel_lvds_init(dev_priv);
intel_crt_init(dev_priv);
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
@ -14457,11 +14504,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev_priv, DP_D, PORT_D);
} else if (IS_GEN(dev_priv, 2))
intel_dvo_init(dev_priv);
if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev_priv);
if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev_priv);
} else if (IS_GEN(dev_priv, 2)) {
if (IS_I85X(dev_priv))
intel_lvds_init(dev_priv);
intel_crt_init(dev_priv);
intel_dvo_init(dev_priv);
}
intel_psr_init(dev_priv);
@ -15502,19 +15554,25 @@ void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
/* This function can be called both from intel_modeset_setup_hw_state or
intel_wakeref_t wakeref;
/*
* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
* paranoid "someone might have enabled VGA while we were not looking"
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
* the rest of the driver uses.
*/
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_VGA);
if (!wakeref)
return;
i915_redisable_vga_power_on(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
}
/* FIXME read out full plane state for all planes */
@ -15814,12 +15872,13 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
int i;
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
@ -15889,7 +15948,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
modeset_put_power_domains(dev_priv, put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
intel_fbc_init_pipe_state(dev_priv);
}


@ -429,7 +429,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
}
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
uint8_t lane_count)
u8 lane_count)
{
/*
* FIXME: we need to synchronize the current link parameters with
@ -449,7 +449,7 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int link_rate,
uint8_t lane_count)
u8 lane_count)
{
const struct drm_display_mode *fixed_mode =
intel_dp->attached_connector->panel.fixed_mode;
@ -464,7 +464,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
}
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count)
int link_rate, u8 lane_count)
{
int index;
@ -572,19 +572,19 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
int i;
uint32_t v = 0;
int i;
u32 v = 0;
if (src_bytes > 4)
src_bytes = 4;
for (i = 0; i < src_bytes; i++)
v |= ((uint32_t) src[i]) << ((3-i) * 8);
v |= ((u32)src[i]) << ((3 - i) * 8);
return v;
}
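
As context for the type changes here: the helper packs up to four message bytes MSB-first into a single 32-bit AUX data register word. The same arithmetic as a standalone sketch (userspace stdint types standing in for the kernel's u8/u32):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t pack_aux(const uint8_t *src, int src_bytes)
	{
		uint32_t v = 0;
		int i;

		if (src_bytes > 4)
			src_bytes = 4;
		for (i = 0; i < src_bytes; i++)
			v |= ((uint32_t)src[i]) << ((3 - i) * 8); /* MSB first */
		return v;
	}

	int main(void)
	{
		const uint8_t msg[2] = { 0x12, 0x34 };

		printf("0x%08x\n", pack_aux(msg, 2)); /* prints 0x12340000 */
		return 0;
	}
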
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
@ -601,30 +601,39 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
static void
intel_dp_pps_init(struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
intel_wakeref_t wakeref;
/*
* See intel_power_sequencer_reset() why we need
* a power domain reference here.
*/
intel_display_power_get(dev_priv,
intel_aux_power_domain(dp_to_dig_port(intel_dp)));
wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dp_to_dig_port(intel_dp)));
mutex_lock(&dev_priv->pps_mutex);
return wakeref;
}
static void pps_unlock(struct intel_dp *intel_dp)
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
mutex_unlock(&dev_priv->pps_mutex);
intel_display_power_put(dev_priv,
intel_aux_power_domain(dp_to_dig_port(intel_dp)));
intel_aux_power_domain(dp_to_dig_port(intel_dp)),
wakeref);
return 0;
}
#define with_pps_lock(dp, wf) \
for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
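
The new with_pps_lock() helper is the common for-loop scoped-guard idiom: pps_lock() returns a nonzero wakeref, so the body executes, and pps_unlock() deliberately returns 0, which ends the loop after a single pass; the body must not break out, or the unlock step would be skipped. A self-contained model of the mechanism (fake lock functions, same control flow):

	#include <stdio.h>

	typedef unsigned long wakeref_t; /* nonzero while the lock is held */

	static wakeref_t fake_lock(void)
	{
		puts("lock + power reference taken");
		return 1;
	}

	static wakeref_t fake_unlock(wakeref_t wf)
	{
		(void)wf;
		puts("unlock + power reference dropped");
		return 0;	/* 0 terminates the for loop after one pass */
	}

	#define with_fake_lock(wf) \
		for ((wf) = fake_lock(); (wf); (wf) = fake_unlock(wf))

	int main(void)
	{
		wakeref_t wf;

		with_fake_lock(wf)
			puts("critical section"); /* body runs exactly once */
		return 0;
	}
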
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
@ -634,7 +643,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
uint32_t DP;
u32 DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power sequencer kick due to port %c being active\n",
@ -973,30 +982,29 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
pps_lock(intel_dp);
with_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
pp_ctrl_reg = PP_CONTROL(pipe);
pp_div_reg = PP_DIVISOR(pipe);
pp_div = I915_READ(pp_div_reg);
pp_div &= PP_REFERENCE_DIVIDER_MASK;
pp_ctrl_reg = PP_CONTROL(pipe);
pp_div_reg = PP_DIVISOR(pipe);
pp_div = I915_READ(pp_div_reg);
pp_div &= PP_REFERENCE_DIVIDER_MASK;
/* 0x1F write to PP_DIV_REG sets max cycle delay */
I915_WRITE(pp_div_reg, pp_div | 0x1F);
I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
msleep(intel_dp->panel_power_cycle_delay);
/* 0x1F write to PP_DIV_REG sets max cycle delay */
I915_WRITE(pp_div_reg, pp_div | 0x1F);
I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
msleep(intel_dp->panel_power_cycle_delay);
}
}
pps_unlock(intel_dp);
return 0;
}
@ -1042,12 +1050,12 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
}
static uint32_t
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
uint32_t status;
u32 status;
bool done;
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
@ -1060,7 +1068,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
return status;
}
static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@ -1074,7 +1082,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@ -1093,7 +1101,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@ -1110,7 +1118,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return ilk_get_aux_clock_divider(intel_dp, index);
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
/*
* SKL doesn't need us to program the AUX clock divider (Hardware will
@ -1120,14 +1128,14 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 1;
}
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
uint32_t aux_clock_divider)
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
uint32_t precharge, timeout;
u32 precharge, timeout;
if (IS_GEN(dev_priv, 6))
precharge = 3;
@ -1150,12 +1158,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
uint32_t unused)
static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 unused)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t ret;
u32 ret;
ret = DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
@ -1175,25 +1183,26 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
const uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size,
const u8 *send, int send_bytes,
u8 *recv, int recv_size,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl, ch_data[5];
uint32_t aux_clock_divider;
u32 aux_clock_divider;
intel_wakeref_t wakeref;
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
u32 status;
bool vdd;
ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
pps_lock(intel_dp);
wakeref = pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@ -1337,7 +1346,7 @@ out:
if (vdd)
edp_panel_vdd_off(intel_dp, false);
pps_unlock(intel_dp);
pps_unlock(intel_dp, wakeref);
return ret;
}
@ -1359,7 +1368,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
uint8_t txbuf[20], rxbuf[20];
u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
int ret;
@ -1692,7 +1701,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
}
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select)
u8 *link_bw, u8 *rate_select)
{
/* eDP 1.4 rate select method. */
if (intel_dp->use_rate_select) {
@ -2207,7 +2216,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count,
int link_rate, u8 lane_count,
bool link_mst)
{
intel_dp->link_trained = false;
@ -2469,15 +2478,15 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
*/
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
bool vdd;
if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
vdd = false;
with_pps_lock(intel_dp, wakeref)
vdd = edp_panel_vdd_on(intel_dp);
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->base.port));
}
@ -2516,19 +2525,21 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
intel_display_power_put(dev_priv,
intel_aux_power_domain(intel_dig_port));
intel_display_power_put_unchecked(dev_priv,
intel_aux_power_domain(intel_dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
struct intel_dp *intel_dp =
container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
intel_wakeref_t wakeref;
pps_lock(intel_dp);
if (!intel_dp->want_panel_vdd)
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
with_pps_lock(intel_dp, wakeref) {
if (!intel_dp->want_panel_vdd)
edp_panel_vdd_off_sync(intel_dp);
}
}
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
@ -2618,12 +2629,13 @@ static void edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
edp_panel_on(intel_dp);
pps_unlock(intel_dp);
with_pps_lock(intel_dp, wakeref)
edp_panel_on(intel_dp);
}
@ -2662,25 +2674,25 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
edp_panel_off(intel_dp);
pps_unlock(intel_dp);
with_pps_lock(intel_dp, wakeref)
edp_panel_off(intel_dp);
}
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
intel_wakeref_t wakeref;
/*
* If we enable the backlight right away following a panel power
@ -2690,17 +2702,16 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
*/
wait_backlight_on(intel_dp);
pps_lock(intel_dp);
with_pps_lock(intel_dp, wakeref) {
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
pps_unlock(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}
}
/* Enable backlight PWM and backlight PP control. */
@ -2722,23 +2733,21 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
with_pps_lock(intel_dp, wakeref) {
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
pps_unlock(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}
intel_dp->last_backlight_off = jiffies;
edp_wait_backlight_off(intel_dp);
@ -2766,12 +2775,12 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
intel_wakeref_t wakeref;
bool is_enabled;
pps_lock(intel_dp);
is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
pps_unlock(intel_dp);
is_enabled = false;
with_pps_lock(intel_dp, wakeref)
is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
if (is_enabled == enable)
return;
@ -2988,16 +2997,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_wakeref_t wakeref;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
if (!wakeref)
return false;
ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
encoder->port, pipe);
intel_display_power_put(dev_priv, encoder->power_domain);
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@ -3165,20 +3176,20 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t *DP,
uint8_t dp_train_pat)
u32 *DP,
u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
if (dp_train_pat & train_pat_mask)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
u32 temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@ -3277,24 +3288,23 @@ static void intel_enable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
u32 dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
pps_lock(intel_dp);
with_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_init_panel_power_sequencer(encoder, pipe_config);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_init_panel_power_sequencer(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
pps_unlock(intel_dp);
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
unsigned int lane_mask = 0x0;
@ -3497,14 +3507,14 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
* link status information
*/
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
/* These are source-specific values. */
uint8_t
u8
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@ -3523,8 +3533,8 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@ -3569,12 +3579,12 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
}
}
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
static u32 vlv_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
u8 train_set = intel_dp->train_set[0];
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@ -3655,12 +3665,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
static u32 chv_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u32 deemph_reg_value, margin_reg_value;
bool uniq_trans_scale = false;
uint8_t train_set = intel_dp->train_set[0];
u8 train_set = intel_dp->train_set[0];
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@ -3738,10 +3748,10 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
static uint32_t
g4x_signal_levels(uint8_t train_set)
static u32
g4x_signal_levels(u8 train_set)
{
uint32_t signal_levels = 0;
u32 signal_levels = 0;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@ -3777,8 +3787,8 @@ g4x_signal_levels(uint8_t train_set)
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
snb_cpu_edp_signal_levels(uint8_t train_set)
static u32
snb_cpu_edp_signal_levels(u8 train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@ -3805,8 +3815,8 @@ snb_cpu_edp_signal_levels(uint8_t train_set)
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
ivb_cpu_edp_signal_levels(uint8_t train_set)
static u32
ivb_cpu_edp_signal_levels(u8 train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@ -3841,8 +3851,8 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
u32 signal_levels, mask = 0;
u8 train_set = intel_dp->train_set[0];
if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
signal_levels = bxt_signal_levels(intel_dp);
@ -3881,7 +3891,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
u8 dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
@ -3898,7 +3908,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
uint32_t val;
u32 val;
if (!HAS_DDI(dev_priv))
return;
@ -3933,7 +3943,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum port port = encoder->port;
uint32_t DP = intel_dp->DP;
u32 DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev_priv)))
return;
@ -3992,9 +4002,10 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP = DP;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
pps_lock(intel_dp);
intel_dp->active_pipe = INVALID_PIPE;
pps_unlock(intel_dp);
intel_wakeref_t wakeref;
with_pps_lock(intel_dp, wakeref)
intel_dp->active_pipe = INVALID_PIPE;
}
}
@ -4273,7 +4284,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
int mode_clock, int mode_hdisplay)
{
u16 bits_per_pixel, max_bpp_small_joiner_ram;
@ -4340,7 +4351,7 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
/* Also take into account max slice width */
min_slice_count = min_t(uint8_t, min_slice_count,
min_slice_count = min_t(u8, min_slice_count,
DIV_ROUND_UP(mode_hdisplay,
max_slice_width));
@ -4358,11 +4369,11 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
int test_link_rate;
uint8_t test_lane_count, test_link_bw;
u8 test_lane_count, test_link_bw;
/* (DP CTS 1.2)
* 4.3.1.11
*/
@ -4395,10 +4406,10 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
return DP_TEST_ACK;
}
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
uint8_t test_pattern;
uint8_t test_misc;
u8 test_pattern;
u8 test_misc;
__be16 h_width, v_height;
int status = 0;
@ -4456,9 +4467,9 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
return DP_TEST_ACK;
}
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_ACK;
u8 test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@ -4500,16 +4511,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
return test_result;
}
static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_NAK;
u8 test_result = DP_TEST_NAK;
return test_result;
}
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
uint8_t response = DP_TEST_NAK;
uint8_t request = 0;
u8 response = DP_TEST_NAK;
u8 request = 0;
int status;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
@ -4835,8 +4846,8 @@ static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
uint8_t *dpcd = intel_dp->dpcd;
uint8_t type;
u8 *dpcd = intel_dp->dpcd;
u8 type;
if (lspcon->active)
lspcon_resume(lspcon);
@ -5370,12 +5381,13 @@ intel_dp_detect(struct drm_connector *connector,
enum drm_connector_status status;
enum intel_display_power_domain aux_domain =
intel_aux_power_domain(dig_port);
intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
intel_display_power_get(dev_priv, aux_domain);
wakeref = intel_display_power_get(dev_priv, aux_domain);
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
@ -5441,7 +5453,7 @@ intel_dp_detect(struct drm_connector *connector,
ret = intel_dp_retrain_link(encoder, ctx);
if (ret) {
intel_display_power_put(dev_priv, aux_domain);
intel_display_power_put(dev_priv, aux_domain, wakeref);
return ret;
}
}
@ -5465,7 +5477,7 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
intel_display_power_put(dev_priv, aux_domain);
intel_display_power_put(dev_priv, aux_domain, wakeref);
return status;
}
@ -5478,6 +5490,7 @@ intel_dp_force(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
enum intel_display_power_domain aux_domain =
intel_aux_power_domain(dig_port);
intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@ -5486,11 +5499,11 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
intel_display_power_get(dev_priv, aux_domain);
wakeref = intel_display_power_get(dev_priv, aux_domain);
intel_dp_set_edid(intel_dp);
intel_display_power_put(dev_priv, aux_domain);
intel_display_power_put(dev_priv, aux_domain, wakeref);
}
static int intel_dp_get_modes(struct drm_connector *connector)
@ -5562,14 +5575,15 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (intel_dp_is_edp(intel_dp)) {
intel_wakeref_t wakeref;
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
with_pps_lock(intel_dp, wakeref)
edp_panel_vdd_off_sync(intel_dp);
if (intel_dp->edp_notifier.notifier_call) {
unregister_reboot_notifier(&intel_dp->edp_notifier);
@ -5591,6 +5605,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
@ -5600,9 +5615,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
* Make sure vdd is actually turned off here.
*/
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
with_pps_lock(intel_dp, wakeref)
edp_panel_vdd_off_sync(intel_dp);
}
static
@ -5615,7 +5629,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
.address = DP_AUX_HDCP_AKSV,
.size = DRM_HDCP_KSV_LEN,
};
uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
ssize_t dpcd_ret;
int ret;
@ -5883,6 +5897,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
@ -5892,18 +5907,19 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_dp->reset_link_params = true;
pps_lock(intel_dp);
with_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
if (intel_dp_is_edp(intel_dp)) {
/* Reinit the power sequencer, in case BIOS did something with it. */
intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
if (intel_dp_is_edp(intel_dp)) {
/*
* Reinit the power sequencer, in case BIOS did
* something nasty with it.
*/
intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
}
}
pps_unlock(intel_dp);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@ -5936,6 +5952,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum irqreturn ret = IRQ_NONE;
intel_wakeref_t wakeref;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
@ -5958,8 +5975,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
return IRQ_NONE;
}
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@ -5989,7 +6006,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
put_power:
intel_display_power_put(dev_priv,
intel_aux_power_domain(intel_dig_port));
intel_aux_power_domain(intel_dig_port),
wakeref);
return ret;
}
@ -6697,8 +6715,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
intel_wakeref_t wakeref;
struct edid *edid;
if (!intel_dp_is_edp(intel_dp))
return true;
@ -6718,13 +6737,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
pps_lock(intel_dp);
intel_dp_init_panel_power_timestamps(intel_dp);
intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
pps_unlock(intel_dp);
with_pps_lock(intel_dp, wakeref) {
intel_dp_init_panel_power_timestamps(intel_dp);
intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
}
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@ -6809,9 +6826,8 @@ out_vdd_off:
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
with_pps_lock(intel_dp, wakeref)
edp_panel_vdd_off_sync(intel_dp);
return false;
}
@ -6985,6 +7001,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->update_pipe = intel_panel_update_backlight;
intel_encoder->suspend = intel_dp_encoder_suspend;
if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;


@ -24,7 +24,7 @@
#include "intel_drv.h"
static void
intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
{
DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
@ -34,17 +34,17 @@ intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
const u8 link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
u8 v = 0;
u8 p = 0;
int lane;
uint8_t voltage_max;
uint8_t preemph_max;
u8 voltage_max;
u8 preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
@ -66,9 +66,9 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
u8 dp_train_pat)
{
uint8_t buf[sizeof(intel_dp->train_set) + 1];
u8 buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
@ -92,7 +92,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
@ -128,11 +128,11 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
uint8_t voltage;
u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
u8 link_config[2];
u8 link_bw, rate_select;
if (intel_dp->prepare_link_retrain)
intel_dp->prepare_link_retrain(intel_dp);
@ -186,7 +186,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
u8 link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
@ -282,7 +282,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
int tries;
u32 training_pattern;
uint8_t link_status[DP_LINK_STATUS_SIZE];
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp);


@ -247,7 +247,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
uint32_t temp;
u32 temp;
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.


@ -413,7 +413,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
}
if (phy_info->rcomp_phy != -1) {
uint32_t grc_code;
u32 grc_code;
bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
@ -445,7 +445,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
uint32_t val;
u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
@ -515,7 +515,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
uint32_t mask;
u32 mask;
bool ok;
phy_info = bxt_get_phy_info(dev_priv, phy);
@ -567,8 +567,8 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
#undef _CHK
}
uint8_t
bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
u8
bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
switch (lane_count) {
case 1:
@ -585,7 +585,7 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
}
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask)
u8 lane_lat_optim_mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@ -610,7 +610,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
}
}
uint8_t
u8
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -618,7 +618,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
uint8_t mask;
u8 mask;
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
@ -739,7 +739,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
u32 val;
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
if (reset)


@ -345,9 +345,12 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
uint32_t val;
intel_wakeref_t wakeref;
u32 val;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_PLLS);
if (!wakeref)
return false;
val = I915_READ(PCH_DPLL(id));
@ -355,7 +358,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp0 = I915_READ(PCH_FP0(id));
hw_state->fp1 = I915_READ(PCH_FP1(id));
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & DPLL_VCO_ENABLE;
}
@ -487,7 +490,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
uint32_t val;
u32 val;
val = I915_READ(WRPLL_CTL(id));
I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
@ -497,7 +500,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
u32 val;
val = I915_READ(SPLL_CTL);
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
@ -509,15 +512,18 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
uint32_t val;
intel_wakeref_t wakeref;
u32 val;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_PLLS);
if (!wakeref)
return false;
val = I915_READ(WRPLL_CTL(id));
hw_state->wrpll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & WRPLL_PLL_ENABLE;
}
@ -526,15 +532,18 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
intel_wakeref_t wakeref;
u32 val;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_PLLS);
if (!wakeref)
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & SPLL_PLL_ENABLE;
}
@ -630,11 +639,12 @@ static unsigned hsw_wrpll_get_budget_for_freq(int clock)
return budget;
}
static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
unsigned r2, unsigned n2, unsigned p,
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
unsigned int r2, unsigned int n2,
unsigned int p,
struct hsw_wrpll_rnp *best)
{
uint64_t a, b, c, d, diff, diff_best;
u64 a, b, c, d, diff, diff_best;
/* No best (r,n,p) yet */
if (best->p == 0) {
@ -693,7 +703,7 @@ static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
uint64_t freq2k;
u64 freq2k;
unsigned p, n2, r2;
struct hsw_wrpll_rnp best = { 0, 0, 0 };
unsigned budget;
@ -759,7 +769,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
struct intel_crtc_state *crtc_state)
{
struct intel_shared_dpll *pll;
uint32_t val;
u32 val;
unsigned int p, n2, r2;
hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
@ -921,7 +931,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
uint32_t val;
u32 val;
val = I915_READ(DPLL_CTRL1);
@ -986,12 +996,15 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
u32 val;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_PLLS);
if (!wakeref)
return false;
ret = false;
@ -1011,7 +1024,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@ -1020,12 +1033,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
u32 val;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_PLLS);
if (!wakeref)
return false;
ret = false;
@ -1041,15 +1057,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
struct skl_wrpll_context {
uint64_t min_deviation; /* current minimal deviation */
uint64_t central_freq; /* chosen central freq */
uint64_t dco_freq; /* chosen dco freq */
u64 min_deviation; /* current minimal deviation */
u64 central_freq; /* chosen central freq */
u64 dco_freq; /* chosen dco freq */
unsigned int p; /* chosen divider */
};
@ -1065,11 +1081,11 @@ static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
#define SKL_DCO_MAX_NDEVIATION 600
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
uint64_t central_freq,
uint64_t dco_freq,
u64 central_freq,
u64 dco_freq,
unsigned int divider)
{
uint64_t deviation;
u64 deviation;
deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
central_freq);
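
skl_wrpll_try_divider() scores each candidate divider by its DCO frequency's relative distance from the central frequency, scaled by 10000 so the deviation is expressed in 0.01% units and can be compared against integer ceilings such as SKL_DCO_MAX_NDEVIATION. A standalone sketch of that scoring with hypothetical frequencies:

	#include <stdint.h>
	#include <stdio.h>

	/* Relative deviation scaled by 10000, i.e. measured in 0.01% steps. */
	static uint64_t deviation(uint64_t dco_freq, uint64_t central_freq)
	{
		uint64_t diff = dco_freq > central_freq ?
				dco_freq - central_freq : central_freq - dco_freq;

		return 10000 * diff / central_freq;
	}

	int main(void)
	{
		/* 8.6 GHz candidate DCO against the 9.0 GHz central frequency */
		printf("%llu\n", (unsigned long long)
		       deviation(8600000000ULL, 9000000000ULL));
		/* prints 444, under the SKL_DCO_MAX_NDEVIATION ceiling of 600 */
		return 0;
	}
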
@ -1143,21 +1159,21 @@ static void skl_wrpll_get_multipliers(unsigned int p,
}
struct skl_wrpll_params {
uint32_t dco_fraction;
uint32_t dco_integer;
uint32_t qdiv_ratio;
uint32_t qdiv_mode;
uint32_t kdiv;
uint32_t pdiv;
uint32_t central_freq;
u32 dco_fraction;
u32 dco_integer;
u32 qdiv_ratio;
u32 qdiv_mode;
u32 kdiv;
u32 pdiv;
u32 central_freq;
};
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
uint64_t afe_clock,
uint64_t central_freq,
uint32_t p0, uint32_t p1, uint32_t p2)
u64 afe_clock,
u64 central_freq,
u32 p0, u32 p1, u32 p2)
{
uint64_t dco_freq;
u64 dco_freq;
switch (central_freq) {
case 9600000000ULL:
@ -1223,10 +1239,10 @@ static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
struct skl_wrpll_params *wrpll_params)
{
uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
uint64_t dco_central_freq[3] = {8400000000ULL,
9000000000ULL,
9600000000ULL};
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
u64 dco_central_freq[3] = { 8400000000ULL,
9000000000ULL,
9600000000ULL };
static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
24, 28, 30, 32, 36, 40, 42, 44,
48, 52, 54, 56, 60, 64, 66, 68,
@ -1250,7 +1266,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
for (i = 0; i < dividers[d].n_dividers; i++) {
unsigned int p = dividers[d].list[i];
uint64_t dco_freq = p * afe_clock;
u64 dco_freq = p * afe_clock;
skl_wrpll_try_divider(&ctx,
dco_central_freq[dco],
@ -1296,7 +1312,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
int clock)
{
uint32_t ctrl1, cfgcr1, cfgcr2;
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
/*
@ -1333,7 +1349,7 @@ static bool
skl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
uint32_t ctrl1;
u32 ctrl1;
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
@ -1435,7 +1451,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t temp;
u32 temp;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
@ -1556,7 +1572,7 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
uint32_t temp;
u32 temp;
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_ENABLE;
@ -1579,14 +1595,17 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
uint32_t val;
bool ret;
intel_wakeref_t wakeref;
enum dpio_phy phy;
enum dpio_channel ch;
u32 val;
bool ret;
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_PLLS);
if (!wakeref)
return false;
ret = false;
@ -1643,7 +1662,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@ -1651,12 +1670,12 @@ out:
/* bxt clock parameters */
struct bxt_clk_div {
int clock;
uint32_t p1;
uint32_t p2;
uint32_t m2_int;
uint32_t m2_frac;
u32 p1;
u32 p2;
u32 m2_int;
u32 m2_frac;
bool m2_frac_en;
uint32_t n;
u32 n;
int vco;
};
@ -1723,8 +1742,8 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
int vco = clk_div->vco;
uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
uint32_t lanestagger;
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
u32 lanestagger;
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
@ -1873,7 +1892,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_GEN(dev_priv) < 9) {
uint32_t val = I915_READ(LCPLL_CTL);
u32 val = I915_READ(LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@ -1959,7 +1978,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
uint32_t val;
u32 val;
/* 1. Enable DPLL power in DPLL_ENABLE. */
val = I915_READ(CNL_DPLL_ENABLE(id));
@ -2034,7 +2053,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
uint32_t val;
u32 val;
/*
* 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
@ -2091,10 +2110,13 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
uint32_t val;
intel_wakeref_t wakeref;
u32 val;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_PLLS);
if (!wakeref)
return false;
ret = false;
@ -2113,7 +2135,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@ -2225,7 +2247,7 @@ cnl_ddi_calculate_wrpll(int clock,
struct skl_wrpll_params *wrpll_params)
{
u32 afe_clock = clock * 5;
uint32_t ref_clock;
u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@ -2271,7 +2293,7 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
int clock)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t cfgcr0, cfgcr1;
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params wrpll_params = { 0, };
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
@ -2300,7 +2322,7 @@ static bool
cnl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
uint32_t cfgcr0;
u32 cfgcr0;
cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
@ -2517,7 +2539,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t cfgcr0, cfgcr1;
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
@ -2547,10 +2569,10 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
}
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id)
u32 pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
u32 cfgcr0, cfgcr1;
u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
const struct skl_wrpll_params *params;
int index, n_entries, link_clock;
@ -2633,10 +2655,10 @@ bool intel_dpll_is_combophy(enum intel_dpll_id id)
}
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
uint32_t *target_dco_khz,
u32 *target_dco_khz,
struct intel_dpll_hw_state *state)
{
uint32_t dco_min_freq, dco_max_freq;
u32 dco_min_freq, dco_max_freq;
int div1_vals[] = {7, 5, 3, 2};
unsigned int i;
int div2;
@ -2712,12 +2734,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int refclk_khz = dev_priv->cdclk.hw.ref;
uint32_t dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
uint32_t iref_ndiv, iref_trim, iref_pulse_w;
uint32_t prop_coeff, int_coeff;
uint32_t tdc_targetcnt, feedfwgain;
uint64_t ssc_stepsize, ssc_steplen, ssc_steplog;
uint64_t tmp;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
u32 prop_coeff, int_coeff;
u32 tdc_targetcnt, feedfwgain;
u64 ssc_stepsize, ssc_steplen, ssc_steplog;
u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
@ -2740,7 +2762,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
m2div_rem = dco_khz % (refclk_khz * m1div);
tmp = (uint64_t)m2div_rem * (1 << 22);
tmp = (u64)m2div_rem * (1 << 22);
do_div(tmp, refclk_khz * m1div);
m2div_frac = tmp;
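The (u64) cast is load-bearing here: m2div_rem gets shifted up by 22 bits to form a binary fraction, and that product overflows 32-bit arithmetic for realistic remainders. A stand-alone illustration with made-up but plausible numbers (38.4 MHz reference clock, m1div of 2):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative values; m2div_rem < refclk_khz * m1div always
         * holds because it is the remainder of that very divisor. */
        uint32_t m2div_rem = 24000, refclk_khz = 38400, m1div = 2;

        uint32_t bad  = m2div_rem * (1u << 22);           /* wraps */
        uint64_t good = (uint64_t)m2div_rem * (1 << 22);  /* exact */

        printf("32-bit product: %u\n", (unsigned)bad);
        printf("64-bit product: %llu -> m2div_frac = %llu\n",
               (unsigned long long)good,
               (unsigned long long)(good / (refclk_khz * m1div)));
        return 0;
}

In the kernel, do_div() performs the 64-by-32 division in place; plain division works in this userspace sketch.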
@ -2799,11 +2821,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
if (use_ssc) {
tmp = (uint64_t)dco_khz * 47 * 32;
tmp = (u64)dco_khz * 47 * 32;
do_div(tmp, refclk_khz * m1div * 10000);
ssc_stepsize = tmp;
tmp = (uint64_t)dco_khz * 1000;
tmp = (u64)dco_khz * 1000;
ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
} else {
ssc_stepsize = 0;
@ -2950,11 +2972,14 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
uint32_t val;
enum port port;
intel_wakeref_t wakeref;
bool ret = false;
enum port port;
u32 val;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_PLLS);
if (!wakeref)
return false;
val = I915_READ(icl_pll_id_to_enable_reg(id));
@ -3007,7 +3032,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@ -3077,7 +3102,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
uint32_t val;
u32 val;
val = I915_READ(enable_reg);
val |= PLL_POWER_ENABLE;
@ -3118,7 +3143,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
uint32_t val;
u32 val;
/* The first steps are done by intel_ddi_post_disable(). */


@ -138,14 +138,14 @@ enum intel_dpll_id {
struct intel_dpll_hw_state {
/* i9xx, pch plls */
uint32_t dpll;
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
u32 dpll;
u32 dpll_md;
u32 fp0;
u32 fp1;
/* hsw, bdw */
uint32_t wrpll;
uint32_t spll;
u32 wrpll;
u32 spll;
/* skl */
/*
@ -154,34 +154,33 @@ struct intel_dpll_hw_state {
* the register. This allows us to easily compare the state to share
* the DPLL.
*/
uint32_t ctrl1;
u32 ctrl1;
/* HDMI only, 0 when used for DP */
uint32_t cfgcr1, cfgcr2;
u32 cfgcr1, cfgcr2;
/* cnl */
uint32_t cfgcr0;
u32 cfgcr0;
/* CNL also uses cfgcr1 */
/* bxt */
uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
pcsdw12;
u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
/*
* ICL uses the following, already defined:
* uint32_t cfgcr0, cfgcr1;
* u32 cfgcr0, cfgcr1;
*/
uint32_t mg_refclkin_ctl;
uint32_t mg_clktop2_coreclkctl1;
uint32_t mg_clktop2_hsclkctl;
uint32_t mg_pll_div0;
uint32_t mg_pll_div1;
uint32_t mg_pll_lf;
uint32_t mg_pll_frac_lock;
uint32_t mg_pll_ssc;
uint32_t mg_pll_bias;
uint32_t mg_pll_tdc_coldst_bias;
uint32_t mg_pll_bias_mask;
uint32_t mg_pll_tdc_coldst_bias_mask;
u32 mg_refclkin_ctl;
u32 mg_clktop2_coreclkctl1;
u32 mg_clktop2_hsclkctl;
u32 mg_pll_div0;
u32 mg_pll_div1;
u32 mg_pll_lf;
u32 mg_pll_frac_lock;
u32 mg_pll_ssc;
u32 mg_pll_bias;
u32 mg_pll_tdc_coldst_bias;
u32 mg_pll_bias_mask;
u32 mg_pll_tdc_coldst_bias_mask;
};
/**
@ -280,7 +279,7 @@ struct dpll_info {
* Inform the state checker that the DPLL is kept enabled even if
* not in use by any CRTC.
*/
uint32_t flags;
u32 flags;
};
/**
@ -343,7 +342,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id);
u32 pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);


@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <linux/sched/clock.h>
#include <linux/stackdepot.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
@ -41,6 +42,8 @@
#include <drm/drm_atomic.h>
#include <media/cec-notifier.h>
struct drm_printer;
/**
* __wait_for - magic wait macro
*
@ -297,13 +300,12 @@ struct intel_panel {
/* Connector and platform specific backlight functions */
int (*setup)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get)(struct intel_connector *connector);
void (*set)(const struct drm_connector_state *conn_state, uint32_t level);
u32 (*get)(struct intel_connector *connector);
void (*set)(const struct drm_connector_state *conn_state, u32 level);
void (*disable)(const struct drm_connector_state *conn_state);
void (*enable)(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
uint32_t (*hz_to_pwm)(struct intel_connector *connector,
uint32_t hz);
u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
void (*power)(struct intel_connector *, bool enable);
} backlight;
};
@ -595,7 +597,7 @@ struct intel_initial_plane_config {
struct intel_scaler {
int in_use;
uint32_t mode;
u32 mode;
};
struct intel_crtc_scaler_state {
@ -633,7 +635,7 @@ struct intel_crtc_scaler_state {
struct intel_pipe_wm {
struct intel_wm_level wm[5];
uint32_t linetime;
u32 linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
@ -649,7 +651,7 @@ struct skl_plane_wm {
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
uint32_t linetime;
u32 linetime;
};
enum vlv_wm_level {
@ -662,7 +664,7 @@ enum vlv_wm_level {
struct vlv_wm_state {
struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS];
struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS];
uint8_t num_levels;
u8 num_levels;
bool cxsr;
};
@ -875,13 +877,13 @@ struct intel_crtc_state {
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
uint8_t lane_count;
u8 lane_count;
/*
* Used by platforms having DP/HDMI PHY with programmable lane
* latency optimization.
*/
uint8_t lane_lat_optim_mask;
u8 lane_lat_optim_mask;
/* minimum acceptable voltage level */
u8 min_voltage_level;
@ -925,7 +927,7 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
/* Gamma mode programmed on the pipe */
uint32_t gamma_mode;
u32 gamma_mode;
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
@ -1011,7 +1013,7 @@ struct intel_plane {
enum pipe pipe;
bool has_fbc;
bool has_ccs;
uint32_t frontbuffer_bit;
u32 frontbuffer_bit;
struct {
u32 base, cntl, size;
@ -1106,9 +1108,9 @@ enum link_m_n_set {
struct intel_dp_compliance_data {
unsigned long edid;
uint8_t video_pattern;
uint16_t hdisplay, vdisplay;
uint8_t bpc;
u8 video_pattern;
u16 hdisplay, vdisplay;
u8 bpc;
};
struct intel_dp_compliance {
@ -1121,18 +1123,18 @@ struct intel_dp_compliance {
struct intel_dp {
i915_reg_t output_reg;
uint32_t DP;
u32 DP;
int link_rate;
uint8_t lane_count;
uint8_t sink_count;
u8 lane_count;
u8 sink_count;
bool link_mst;
bool link_trained;
bool has_audio;
bool reset_link_params;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
u8 fec_capable;
/* source rates */
@ -1152,7 +1154,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
uint8_t train_set[4];
u8 train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
@ -1194,14 +1196,13 @@ struct intel_dp {
struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mst_mgr;
uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
* This function returns the value we have to program the AUX_CTL
* register with to kick off an AUX transaction.
*/
uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
int send_bytes,
uint32_t aux_clock_divider);
u32 (*get_aux_send_ctl)(struct intel_dp *dp, int send_bytes,
u32 aux_clock_divider);
i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
@ -1235,7 +1236,7 @@ struct intel_digital_port {
struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
u8 max_lanes;
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
@ -1470,8 +1471,8 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@ -1534,7 +1535,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u32 ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
@ -1674,11 +1675,11 @@ void intel_cleanup_plane_fb(struct drm_plane *plane,
int intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val);
u64 *val);
int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
u64 val);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct drm_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
@ -1798,10 +1799,10 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count,
int link_rate, u8 lane_count,
bool link_mst);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count);
int link_rate, u8 lane_count);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
@ -1833,7 +1834,7 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
@ -1846,24 +1847,24 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
uint8_t dp_train_pat);
u8 dp_train_pat);
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
uint8_t
u8
intel_dp_voltage_max(struct intel_dp *intel_dp);
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select);
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
int mode_clock, int mode_hdisplay);
uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
int mode_hdisplay);
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]);
u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
int mode_clock, int mode_hdisplay);
u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
int mode_hdisplay);
/* intel_vdsc.c */
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
@ -2022,6 +2023,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
enum pipe pipe);
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_update_backlight(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
@ -2083,6 +2087,7 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
void intel_init_quirks(struct drm_i915_private *dev_priv);
/* intel_runtime_pm.c */
void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
@ -2105,6 +2110,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
@ -2112,33 +2118,42 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
enum intel_display_power_domain domain,
intel_wakeref_t wakeref);
#else
#define intel_display_power_put(i915, domain, wakeref) \
intel_display_power_put_unchecked(i915, domain)
#endif
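The #if/#else pair keeps the cookie free in production builds: with CONFIG_DRM_I915_DEBUG_RUNTIME_PM the put validates the tracked wakeref, and without it the macro drops the third argument at preprocessing time and falls through to the unchecked variant. Because the wakeref argument vanishes entirely in the non-debug expansion, it must not carry side effects. A sketch of the two expansions of one call site:

/* As written at the call site (both configurations): */
intel_display_power_put(i915, POWER_DOMAIN_PLLS, wakeref);

/* Debug build: the tracked put runs and checks the cookie.
 * Non-debug build: the preprocessor rewrites the line to */
intel_display_power_put_unchecked(i915, POWER_DOMAIN_PLLS);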
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
static inline void
assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
assert_rpm_device_not_suspended(struct drm_i915_private *i915)
{
WARN_ONCE(dev_priv->runtime_pm.suspended,
WARN_ONCE(i915->runtime_pm.suspended,
"Device suspended during HW access\n");
}
static inline void
assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
assert_rpm_wakelock_held(struct drm_i915_private *i915)
{
assert_rpm_device_not_suspended(dev_priv);
WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count),
assert_rpm_device_not_suspended(i915);
WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
"RPM wakelock ref not held during HW access");
}
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* This function disables asserts that check if we hold an RPM wakelock
* reference, while keeping the device-not-suspended checks still enabled.
@ -2155,14 +2170,14 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
* enable_rpm_wakeref_asserts().
*/
static inline void
disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
{
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
atomic_inc(&i915->runtime_pm.wakeref_count);
}
/**
* enable_rpm_wakeref_asserts - re-enable the RPM assert checks
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* This function re-enables the RPM assert checks after disabling them with
* disable_rpm_wakeref_asserts. It's meant to be used only in special
@ -2172,15 +2187,39 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
* disable_rpm_wakeref_asserts().
*/
static inline void
enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
{
atomic_dec(&dev_priv->runtime_pm.wakeref_count);
atomic_dec(&i915->runtime_pm.wakeref_count);
}
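The assert toggles work by inflating runtime_pm.wakeref_count, the same counter assert_rpm_wakelock_held() tests, so the WARN cannot fire inside the bracket; the enable side decrements it again. A sketch of the intended bracket (runtime-suspend handlers are the canonical user):

disable_rpm_wakeref_asserts(i915);

/* HW access that legitimately runs without a tracked wakeref,
 * e.g. within the runtime-suspend path itself. */

enable_rpm_wakeref_asserts(i915);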
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
#define with_intel_runtime_pm(i915, wf) \
for ((wf) = intel_runtime_pm_get(i915); (wf); \
intel_runtime_pm_put((i915), (wf)), (wf) = 0)
#define with_intel_runtime_pm_if_in_use(i915, wf) \
for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
intel_runtime_pm_put((i915), (wf)), (wf) = 0)
void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
#else
#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915)
#endif
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
struct drm_printer *p);
#else
static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
struct drm_printer *p)
{
}
#endif
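with_intel_runtime_pm() is a one-trip for loop: the init expression takes the wakeref, the condition enters the body only while the cookie is non-zero, and the increment expression releases the wakeref and zeroes wf so the loop terminates. Later hunks in this pull (GuC log flushing, HuC status, backlight brightness) collapse their get/put pairs into this scope. A sketch of a call site; read_fuse() and EXAMPLE_REG are stand-ins, not real driver symbols:

/* Hypothetical helper; EXAMPLE_REG is not a real register. */
static u32 read_fuse(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;
        u32 val = 0;

        with_intel_runtime_pm(i915, wakeref)
                val = I915_READ(EXAMPLE_REG);

        return val;
}

One caveat of the for-loop idiom: a bare break or return inside the body would skip the releasing increment expression and leak the wakeref, so paths that need an early exit should stay with the explicit get/put form.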
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
@ -2286,11 +2325,11 @@ void intel_tv_init(struct drm_i915_private *dev_priv);
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
u64 *val);
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
u64 val);
int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_connector_state *new_state);
struct drm_connector_state *


@ -39,6 +39,7 @@ struct intel_dsi {
struct intel_encoder base;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
intel_wakeref_t io_wakeref[I915_MAX_PORTS];
/* GPIO Desc for CRC based Panel control */
struct gpio_desc *gpio_panel;
@ -172,7 +173,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void vlv_dsi_pll_disable(struct intel_encoder *encoder);
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
@ -182,7 +183,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void bxt_dsi_pll_disable(struct intel_encoder *encoder);
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);


@ -25,6 +25,7 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
#include "i915_reset.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
@ -799,15 +800,15 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
return mcr_s_ss_select;
}
static inline uint32_t
static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
uint32_t mcr_slice_subslice_mask;
uint32_t mcr_slice_subslice_select;
uint32_t default_mcr_s_ss_select;
uint32_t mcr;
uint32_t ret;
u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select;
u32 mcr;
u32 ret;
enum forcewake_domains fw_domains;
if (INTEL_GEN(dev_priv) >= 11) {
@ -913,10 +914,15 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
static bool ring_is_idle(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
intel_wakeref_t wakeref;
bool idle = true;
if (I915_SELFTEST_ONLY(!engine->mmio_base))
return true;
/* If the whole device is asleep, the engine must be idle */
if (!intel_runtime_pm_get_if_in_use(dev_priv))
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return true;
/* First check that no commands are left in the ring */
@ -928,7 +934,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
idle = false;
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
return idle;
}
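intel_runtime_pm_get_if_in_use() never resumes the device; it only takes a reference when one is already outstanding. That is why a failed grab reads as "the engine must be idle" above rather than as an error. The scoped variant expresses the same thing; a sketch, with example_check_hw_idle() as a hypothetical stand-in for the register checks:

/* Sketch: report idle when the device is suspended, otherwise check
 * the hardware while a wakeref is guaranteed to be held. */
static bool example_engine_idle(struct drm_i915_private *dev_priv)
{
        intel_wakeref_t wakeref;
        bool idle = true;

        with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
                idle = example_check_hw_idle(dev_priv); /* hypothetical */

        return idle;
}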
@ -952,9 +958,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
return false;
if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
return true;
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
@ -980,10 +983,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return false;
/* Ring stopped? */
if (!ring_is_idle(engine))
return false;
return true;
return ring_is_idle(engine);
}
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
@ -1420,14 +1420,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
{
const int MAX_REQUESTS_TO_SHOW = 8;
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq, *last;
struct i915_request *rq;
intel_wakeref_t wakeref;
unsigned long flags;
struct rb_node *rb;
int count;
if (header) {
va_list ap;
@ -1483,59 +1481,17 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rcu_read_unlock();
if (intel_runtime_pm_get_if_in_use(engine->i915)) {
wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
if (wakeref) {
intel_engine_print_registers(engine, m);
intel_runtime_pm_put(engine->i915);
intel_runtime_pm_put(engine->i915, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
local_irq_save(flags);
spin_lock(&engine->timeline.lock);
intel_execlists_show_requests(engine, m, print_request, 8);
last = NULL;
count = 0;
list_for_each_entry(rq, &engine->timeline.requests, link) {
if (count++ < MAX_REQUESTS_TO_SHOW - 1)
print_request(m, rq, "\t\tE ");
else
last = rq;
}
if (last) {
if (count > MAX_REQUESTS_TO_SHOW) {
drm_printf(m,
"\t\t...skipping %d executing requests...\n",
count - MAX_REQUESTS_TO_SHOW);
}
print_request(m, last, "\t\tE ");
}
last = NULL;
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
int i;
priolist_for_each_request(rq, p, i) {
if (count++ < MAX_REQUESTS_TO_SHOW - 1)
print_request(m, rq, "\t\tQ ");
else
last = rq;
}
}
if (last) {
if (count > MAX_REQUESTS_TO_SHOW) {
drm_printf(m,
"\t\t...skipping %d queued requests...\n",
count - MAX_REQUESTS_TO_SHOW);
}
print_request(m, last, "\t\tQ ");
}
spin_unlock(&engine->timeline.lock);
spin_lock(&b->rb_lock);
spin_lock_irqsave(&b->rb_lock, flags);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
@ -1544,8 +1500,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
task_state_to_char(w->tsk),
w->seqno);
}
spin_unlock(&b->rb_lock);
local_irq_restore(flags);
spin_unlock_irqrestore(&b->rb_lock, flags);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.page_addr, PAGE_SIZE);


@ -594,7 +594,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
}
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
uint32_t pixel_format)
u32 pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_XRGB8888:


@ -177,8 +177,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
struct fb_info *info;
struct drm_framebuffer *fb;
intel_wakeref_t wakeref;
struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
@ -209,7 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@ -276,7 +277,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@ -284,7 +285,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev->struct_mutex);
return ret;
}


@ -127,8 +127,8 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
DE_PIPEB_FIFO_UNDERRUN;
u32 bit = (pipe == PIPE_A) ?
DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
ilk_enable_display_irq(dev_priv, bit);
@ -140,7 +140,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
uint32_t err_int = I915_READ(GEN7_ERR_INT);
u32 err_int = I915_READ(GEN7_ERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
@ -193,8 +193,8 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
u32 bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
ibx_enable_display_interrupt(dev_priv, bit);
@ -206,7 +206,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
uint32_t serr_int = I915_READ(SERR_INT);
u32 serr_int = I915_READ(SERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);


@ -192,4 +192,7 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
spin_unlock_irq(&guc->irq_lock);
}
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine);
#endif


@ -436,6 +436,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
intel_wakeref_t wakeref;
guc_read_update_log_buffer(log);
@ -443,9 +444,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
* Generally the device is expected to be active only at this
* time, so get/put should be really quick.
*/
intel_runtime_pm_get(dev_priv);
guc_action_flush_log_complete(guc);
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref)
guc_action_flush_log_complete(guc);
}
int intel_guc_log_create(struct intel_guc_log *log)
@ -505,7 +505,8 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
intel_wakeref_t wakeref;
int ret = 0;
BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
GEM_BUG_ON(!log->vma);
@ -519,16 +520,14 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
mutex_lock(&dev_priv->drm.struct_mutex);
if (log->level == level) {
ret = 0;
if (log->level == level)
goto out_unlock;
}
intel_runtime_pm_get(dev_priv);
ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
GUC_LOG_LEVEL_TO_VERBOSITY(level));
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
GUC_LOG_LEVEL_TO_VERBOSITY(level));
if (ret) {
DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
goto out_unlock;
@ -601,6 +600,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
/*
* Before initiating the forceful flush, wait for any pending/ongoing
@ -608,9 +608,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
*/
flush_work(&log->relay.flush_work);
intel_runtime_pm_get(i915);
guc_action_flush_log(guc);
intel_runtime_pm_put(i915);
with_intel_runtime_pm(i915, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);


@ -49,6 +49,9 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_BROXTON(dev_priv))
return true;
if (IS_COFFEELAKE(dev_priv))
return true;
return false;
}
@ -105,15 +108,6 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return -EIO;
}
/*
* We're not in host or fail to find a MPT module, disable GVT-g
*/
ret = intel_gvt_init_host();
if (ret) {
DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
goto bail;
}
ret = intel_gvt_init_device(dev_priv);
if (ret) {
DRM_DEBUG_DRIVER("Fail to init GVT device\n");


@ -23,6 +23,7 @@
*/
#include "i915_drv.h"
#include "i915_reset.h"
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
@ -194,10 +195,6 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
break;
case ENGINE_DEAD:
if (GEM_SHOW_DEBUG()) {
struct drm_printer p = drm_debug_printer("hangcheck");
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
break;
default:
@ -284,6 +281,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
wedged |= intel_engine_flag(engine);
}
if (GEM_SHOW_DEBUG() && (hung | stuck)) {
struct drm_printer p = drm_debug_printer("hangcheck");
for_each_engine(engine, dev_priv, id) {
if (intel_engine_is_idle(engine))
continue;
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
}
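/*
 * Annotation, not part of the patch: the dump moved from the
 * per-engine ENGINE_DEAD case above to this single block, so one
 * hang produces one coordinated dump of every still-active engine;
 * drm_debug_printer() routes the output through DRM_DEBUG, so it
 * only appears when drm.debug logging is enabled.
 */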
if (wedged) {
dev_err(dev_priv->drm.dev,
"GPU recovery timed out,"


@ -838,8 +838,8 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state)
{
uint64_t old_cp = old_state->content_protection;
uint64_t new_cp = new_state->content_protection;
u64 old_cp = old_state->content_protection;
u64 new_cp = new_state->content_protection;
struct drm_crtc_state *crtc_state;
if (!new_state->crtc) {


@ -1186,15 +1186,17 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
intel_wakeref_t wakeref;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
if (!wakeref)
return false;
ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
intel_display_power_put(dev_priv, encoder->power_domain);
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@ -1890,11 +1892,12 @@ intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
intel_wakeref_t wakeref;
struct edid *edid;
bool connected = false;
struct i2c_adapter *i2c;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
@ -1909,7 +1912,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@ -1931,11 +1934,12 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (IS_ICELAKE(dev_priv) &&
!intel_digital_port_connected(encoder))
@ -1947,7 +1951,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
if (status != connector_status_connected)
cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);


@ -226,9 +226,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
intel_wakeref_t wakeref;
enum hpd_pin pin;
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
for_each_hpd_pin(pin) {
@ -261,7 +262,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
}
bool intel_encoder_hotplug(struct intel_encoder *encoder,


@ -115,14 +115,14 @@ fail:
int intel_huc_check_status(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
bool status;
intel_wakeref_t wakeref;
bool status = false;
if (!HAS_HUC(dev_priv))
return -ENODEV;
intel_runtime_pm_get(dev_priv);
status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref)
status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
return status;
}


@ -697,12 +697,13 @@ out:
static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
adapter);
struct intel_gmbus *bus =
container_of(adapter, struct intel_gmbus, adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
intel_wakeref_t wakeref;
int ret;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@ -714,17 +715,16 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
adapter);
struct intel_gmbus *bus =
container_of(adapter, struct intel_gmbus, adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int ret;
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
struct i2c_msg msgs[] = {
@ -741,8 +741,10 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
.buf = buf,
}
};
intel_wakeref_t wakeref;
int ret;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
/*
@ -753,7 +755,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
mutex_unlock(&dev_priv->gmbus_mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}


@ -302,6 +302,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
*/
if (!(prio & I915_PRIORITY_NEWCLIENT)) {
prio |= I915_PRIORITY_NEWCLIENT;
active->sched.attr.priority = prio;
list_move_tail(&active->sched.link,
i915_sched_lookup_priolist(engine, prio));
}
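/*
 * Annotation, not part of the patch: the added store writes the
 * boosted priority back onto the request itself, so its stored
 * priority matches the priolist it was just moved to; without it a
 * later unwind would see the stale priority, take this branch again
 * and re-queue the request.
 */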
@ -625,6 +626,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
GEM_BUG_ON(last &&
need_preempt(engine, last, rq_prio(rq)));
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@ -816,7 +820,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
if (i915_request_signaled(rq))
continue;
dma_fence_set_error(&rq->fence, -EIO);
@ -1045,7 +1049,7 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_TRACE("%s awake?=%d, active=%x\n",
engine->name,
engine->i915->gt.awake,
!!engine->i915->gt.awake,
engine->execlists.active);
spin_lock_irqsave(&engine->timeline.lock, flags);
@ -2608,7 +2612,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
{
struct drm_i915_gem_object *ctx_obj;
struct i915_vma *vma;
uint32_t context_size;
u32 context_size;
struct intel_ring *ring;
struct i915_timeline *timeline;
int ret;
@ -2702,6 +2706,64 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
}
}
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
struct i915_request *rq,
const char *prefix),
unsigned int max)
{
const struct intel_engine_execlists *execlists = &engine->execlists;
struct i915_request *rq, *last;
unsigned long flags;
unsigned int count;
struct rb_node *rb;
spin_lock_irqsave(&engine->timeline.lock, flags);
last = NULL;
count = 0;
list_for_each_entry(rq, &engine->timeline.requests, link) {
if (count++ < max - 1)
show_request(m, rq, "\t\tE ");
else
last = rq;
}
if (last) {
if (count > max) {
drm_printf(m,
"\t\t...skipping %d executing requests...\n",
count - max);
}
show_request(m, last, "\t\tE ");
}
last = NULL;
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
int i;
priolist_for_each_request(rq, p, i) {
if (count++ < max - 1)
show_request(m, rq, "\t\tQ ");
else
last = rq;
}
}
if (last) {
if (count > max) {
drm_printf(m,
"\t\t...skipping %d queued requests...\n",
count - max);
}
show_request(m, last, "\t\tQ ");
}
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
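The helper caps output at max requests per list with a first-(max-1)-plus-last scheme: early entries print as they are walked, the newest is held back in last, and a skip line accounts for the middle. The same pattern in isolation, runnable stand-alone (items[] plays the role of the request list; max is assumed to be at least 1 and values non-negative):

#include <stdio.h>

static void show_items(const int *items, int n, unsigned int max)
{
        int last = -1;
        unsigned int count = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (count++ < max - 1)
                        printf("\t\tE item %d\n", items[i]);
                else
                        last = items[i];
        }

        if (last != -1) {
                if (count > max)
                        printf("\t\t...skipping %u items...\n",
                               count - max);
                printf("\t\tE item %d\n", last);
        }
}

int main(void)
{
        int items[] = { 10, 11, 12, 13, 14, 15, 16 };

        show_items(items, 7, 4);    /* prints 10..12, a skip line, 16 */
        return 0;
}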
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_lrc.c"
#endif


@ -97,11 +97,19 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
*/
#define LRC_HEADER_PAGES LRC_PPHWSP_PN
struct drm_printer;
struct drm_i915_private;
struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
struct i915_request *rq,
const char *prefix),
unsigned int max);
#endif /* _INTEL_LRC_H_ */


@ -288,12 +288,12 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
}
static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
uint8_t *avi_buf)
u8 *avi_buf)
{
u8 avi_if_ctrl;
u8 block_count = 0;
u8 *data;
uint16_t reg;
u16 reg;
ssize_t ret;
while (block_count < 4) {
@ -335,10 +335,10 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
}
static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
const uint8_t *frame,
const u8 *frame,
ssize_t len)
{
uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
u8 avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
/*
* Parade's frames contain 32 bytes of data, divided
@ -367,13 +367,13 @@ static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
}
static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
const uint8_t *buffer, ssize_t len)
const u8 *buffer, ssize_t len)
{
int ret;
uint32_t val = 0;
uint32_t retry;
uint16_t reg;
const uint8_t *data = buffer;
u32 val = 0;
u32 retry;
u16 reg;
const u8 *data = buffer;
reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
while (val < len) {
@ -459,7 +459,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
{
ssize_t ret;
union hdmi_infoframe frame;
uint8_t buf[VIDEO_DIP_DATA_SIZE];
u8 buf[VIDEO_DIP_DATA_SIZE];
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_lspcon *lspcon = &dig_port->lspcon;
const struct drm_display_mode *adjusted_mode =


@ -94,15 +94,17 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
intel_wakeref_t wakeref;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
if (!wakeref)
return false;
ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
intel_display_power_put(dev_priv, encoder->power_domain);
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@ -796,26 +798,6 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
{
/*
* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it.
*/
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
return true;
/*
* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm
*/
if (INTEL_GEN(dev_priv) <= 4 &&
IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
return true;
return false;
}
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev_priv: i915 device
@ -840,9 +822,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
u8 pin;
u32 allowed_scalers;
if (!intel_lvds_supported(dev_priv))
return;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
WARN(!dev_priv->vbt.int_lvds_support,
@ -908,6 +887,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_encoder->update_pipe = intel_panel_update_backlight;
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);


@ -1087,20 +1087,11 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
}
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (!panel->backlight.present)
return;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@ -1117,6 +1108,24 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
panel->backlight.enabled = true;
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
}
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (!panel->backlight.present)
return;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
mutex_lock(&dev_priv->backlight_lock);
__intel_panel_enable_backlight(crtc_state, conn_state);
mutex_unlock(&dev_priv->backlight_lock);
}
@ -1203,17 +1212,20 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 hw_level;
int ret;
intel_wakeref_t wakeref;
int ret = 0;
intel_runtime_pm_get(dev_priv);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
with_intel_runtime_pm(dev_priv, wakeref) {
u32 hw_level;
hw_level = intel_panel_get_backlight(connector);
ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
intel_runtime_pm_put(dev_priv);
hw_level = intel_panel_get_backlight(connector);
ret = scale_hw_to_user(connector,
hw_level, bd->props.max_brightness);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
return ret;
}
@ -1484,8 +1496,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, val;
bool alt;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
if (HAS_PCH_LPT(dev_priv))
alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
@ -1499,6 +1511,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@ -1507,12 +1521,28 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.min = get_backlight_min_vbt(connector);
val = lpt_get_backlight(connector);
panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
if (cpu_mode)
val = pch_get_backlight(connector);
else
val = lpt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
if (cpu_mode) {
DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, panel->backlight.level);
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
}
return 0;
}
@ -1773,6 +1803,24 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
void intel_panel_update_backlight(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
return;
mutex_lock(&dev_priv->backlight_lock);
if (!panel->backlight.enabled)
__intel_panel_enable_backlight(crtc_state, conn_state);
mutex_unlock(&dev_priv->backlight_lock);
}
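intel_panel_update_backlight() backs the new encoder->update_pipe hook: on a fastset the full modeset enable sequence is skipped, so this lightweight callback makes sure a backlight that is present but not yet enabled (say, left off by the BIOS) still gets lit. The enable logic was split out as __intel_panel_enable_backlight() precisely so this path and the regular enable can share it under one backlight_lock acquisition. Wiring it up is one assignment in the encoder init, as the LVDS hunk earlier does:

/* In an encoder's init path (sketch): */
intel_encoder->update_pipe = intel_panel_update_backlight;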
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);


@ -44,7 +44,7 @@ static const char * const pipe_crc_sources[] = {
};
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
uint32_t *val)
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@ -120,7 +120,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val)
u32 *val)
{
bool need_stable_symbols = false;
@ -165,7 +165,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
u32 tmp = I915_READ(PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@ -190,7 +190,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val)
u32 *val)
{
bool need_stable_symbols = false;
@ -244,7 +244,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
u32 tmp = I915_READ(PORT_DFT2_G4X);
WARN_ON(!IS_G4X(dev_priv));
@ -265,7 +265,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
u32 tmp = I915_READ(PORT_DFT2_G4X);
switch (pipe) {
case PIPE_A:
@ -289,7 +289,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
u32 tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
tmp &= ~PIPE_A_SCRAMBLE_RESET;
@ -304,7 +304,7 @@ static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
uint32_t *val)
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@ -392,7 +392,7 @@ unlock:
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val,
u32 *val,
bool set_wa)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@ -589,6 +589,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
intel_wakeref_t wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
@ -598,7 +599,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
}
power_domain = POWER_DOMAIN_PIPE(crtc->index);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref) {
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
return -EIO;
}
@ -624,7 +626,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
pipe_crc->skipped = 0;
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}


@ -480,7 +480,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
int sprite0_start, sprite1_start;
switch (pipe) {
uint32_t dsparb, dsparb2, dsparb3;
u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
@ -513,7 +513,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
@ -529,7 +529,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x1ff;
@ -546,7 +546,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
@ -667,9 +667,9 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
unsigned int cpp,
unsigned int latency)
{
uint64_t ret;
u64 ret;
ret = (uint64_t) pixel_rate * cpp * latency;
ret = (u64)pixel_rate * cpp * latency;
ret = DIV_ROUND_UP_ULL(ret, 10000);
return ret;
@ -1089,9 +1089,9 @@ static int g4x_fbc_fifo_size(int level)
}
}
static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@ -1188,9 +1188,9 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
return dirty;
}
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t pri_val);
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@ -1598,9 +1598,9 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@ -1968,7 +1968,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
spin_lock(&dev_priv->uncore.lock);
switch (crtc->pipe) {
uint32_t dsparb, dsparb2, dsparb3;
u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ_FW(DSPARB);
dsparb2 = I915_READ_FW(DSPARB2);
@ -2262,8 +2262,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
u32 fwater_lo;
u32 fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
@ -2406,7 +2406,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
u32 fwater_lo;
int planea_wm;
crtc = single_enabled_crtc(dev_priv);
@ -2455,8 +2455,7 @@ static unsigned int ilk_wm_method2(unsigned int pixel_rate,
return ret;
}
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
uint8_t cpp)
static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
{
/*
* Neither of these should be possible since this function shouldn't be
@ -2473,22 +2472,21 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
}
struct ilk_wm_maximums {
uint16_t pri;
uint16_t spr;
uint16_t cur;
uint16_t fbc;
u16 pri;
u16 spr;
u16 cur;
u16 fbc;
};
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value,
bool is_lp)
static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
u32 mem_value, bool is_lp)
{
uint32_t method1, method2;
u32 method1, method2;
int cpp;
if (mem_value == 0)
@ -2516,11 +2514,11 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
u32 mem_value)
{
uint32_t method1, method2;
u32 method1, method2;
int cpp;
if (mem_value == 0)
@ -2543,9 +2541,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
u32 mem_value)
{
int cpp;
@ -2563,9 +2561,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t pri_val)
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
u32 pri_val)
{
int cpp;
@ -2731,9 +2729,9 @@ static bool ilk_validate_wm_level(int level,
DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
result->pri_val = min_t(u32, result->pri_val, max->pri);
result->spr_val = min_t(u32, result->spr_val, max->spr);
result->cur_val = min_t(u32, result->cur_val, max->cur);
result->enable = true;
}
@ -2749,9 +2747,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
uint16_t spr_latency = dev_priv->wm.spr_latency[level];
uint16_t cur_latency = dev_priv->wm.cur_latency[level];
u16 pri_latency = dev_priv->wm.pri_latency[level];
u16 spr_latency = dev_priv->wm.spr_latency[level];
u16 cur_latency = dev_priv->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@ -2775,7 +2773,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
static uint32_t
static u32
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
const struct intel_atomic_state *intel_state =
@ -2804,10 +2802,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[8])
u16 wm[8])
{
if (INTEL_GEN(dev_priv) >= 9) {
uint32_t val;
u32 val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev_priv);
@ -2891,7 +2889,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
u64 sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
if (wm[0] == 0)
@ -2901,14 +2899,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
} else if (INTEL_GEN(dev_priv) >= 6) {
uint32_t sskpd = I915_READ(MCH_SSKPD);
u32 sskpd = I915_READ(MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
} else if (INTEL_GEN(dev_priv) >= 5) {
uint32_t mltr = I915_READ(MLTR_ILK);
u32 mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
@ -2920,7 +2918,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
}
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5])
u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
if (IS_GEN(dev_priv, 5))
@ -2928,7 +2926,7 @@ static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
}
static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5])
u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
if (IS_GEN(dev_priv, 5))
@ -2950,7 +2948,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
const uint16_t wm[8])
const u16 wm[8])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@ -2979,7 +2977,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5], uint16_t min)
u16 wm[5], u16 min)
{
int level, max_level = ilk_wm_max_level(dev_priv);
@ -2988,7 +2986,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
wm[0] = max(wm[0], min);
for (level = 1; level <= max_level; level++)
wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
@ -3535,7 +3533,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
{
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
u32 val;
dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
if (!dirty)
@ -3989,10 +3987,12 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
enum plane_id plane_id;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return;
for_each_plane_id_on_crtc(crtc, plane_id)
@ -4001,7 +4001,7 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
&ddb_y[plane_id],
&ddb_uv[plane_id]);
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
@ -4031,7 +4031,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
struct intel_plane *plane = to_intel_plane(pstate->base.plane);
uint32_t src_w, src_h, dst_w, dst_h;
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@ -4077,8 +4077,8 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
return pipe_downscale;
if (crtc_state->pch_pfit.enabled) {
uint32_t src_w, src_h, dst_w, dst_h;
uint32_t pfit_size = crtc_state->pch_pfit.size;
u32 src_w, src_h, dst_w, dst_h;
u32 pfit_size = crtc_state->pch_pfit.size;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@ -4111,7 +4111,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
const struct drm_plane_state *pstate;
struct intel_plane_state *intel_pstate;
int crtc_clock, dotclk;
uint32_t pipe_max_pixel_rate;
u32 pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
@ -4167,8 +4167,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
{
struct intel_plane *intel_plane =
to_intel_plane(intel_pstate->base.plane);
uint32_t data_rate;
uint32_t width = 0, height = 0;
u32 data_rate;
u32 width = 0, height = 0;
struct drm_framebuffer *fb;
u32 format;
uint_fixed_16_16_t down_scale_amount;
@ -4311,15 +4311,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
struct skl_plane_wm *wm;
uint16_t alloc_size, start = 0;
uint16_t total[I915_MAX_PLANES] = {};
uint16_t uv_total[I915_MAX_PLANES] = {};
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
u64 total_data_rate;
enum plane_id plane_id;
int num_active;
u64 plane_data_rate[I915_MAX_PLANES] = {};
u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
uint16_t blocks = 0;
u16 blocks = 0;
int level;
/* Clear the partitioning for disabled planes. */
@ -4491,10 +4491,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size)
skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
u8 cpp, u32 latency, u32 dbuf_block_size)
{
uint32_t wm_intermediate_val;
u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
if (latency == 0)
@ -4509,12 +4509,11 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
return ret;
}
static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
uint32_t pipe_htotal,
uint32_t latency,
uint_fixed_16_16_t plane_blocks_per_line)
static uint_fixed_16_16_t
skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
uint_fixed_16_16_t plane_blocks_per_line)
{
uint32_t wm_intermediate_val;
u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
if (latency == 0)
@ -4530,8 +4529,8 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *cstate)
{
uint32_t pixel_rate;
uint32_t crtc_htotal;
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
if (!cstate->base.active)
@ -4548,11 +4547,11 @@ intel_get_linetime_us(const struct intel_crtc_state *cstate)
return linetime_us;
}
static uint32_t
static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
uint64_t adjusted_pixel_rate;
u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
@ -4579,7 +4578,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
uint32_t interm_pbpl;
u32 interm_pbpl;
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
@ -4684,10 +4683,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
{
struct drm_i915_private *dev_priv =
to_i915(intel_pstate->base.plane->dev);
uint32_t latency = dev_priv->wm.skl_latency[level];
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
uint32_t res_blocks, res_lines;
u32 res_blocks, res_lines;
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
@ -4793,13 +4792,13 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
}
}
static uint32_t
static u32
skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
uint32_t linetime_wm;
u32 linetime_wm;
linetime_us = intel_get_linetime_us(cstate);
@ -4822,9 +4821,9 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t trans_min, trans_y_tile_min;
const uint16_t trans_amount = 10; /* This is configurable amount */
uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
u16 trans_min, trans_y_tile_min;
const u16 trans_amount = 10; /* This is a configurable amount */
u16 wm0_sel_res_b, trans_offset_b, res_blocks;
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
@ -4853,8 +4852,8 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
if (wp->y_tiled) {
trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
wp->y_tile_minimum);
trans_y_tile_min =
(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
trans_offset_b;
} else {
@ -5028,7 +5027,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const struct skl_wm_level *level)
{
uint32_t val = 0;
u32 val = 0;
if (level->plane_en) {
val |= PLANE_WM_EN;
@ -5159,12 +5158,12 @@ static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
return 0;
}
static uint32_t
static u32
pipes_modified(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_crtc_state *cstate;
uint32_t i, ret = 0;
u32 i, ret = 0;
for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
ret |= drm_crtc_mask(&crtc->base);
@ -5265,7 +5264,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
const struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
uint32_t realloc_pipes = pipes_modified(state);
u32 realloc_pipes = pipes_modified(state);
int ret, i;
/*
@ -5564,7 +5563,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
static inline void skl_wm_level_from_reg_val(uint32_t val,
static inline void skl_wm_level_from_reg_val(u32 val,
struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
@ -5580,7 +5579,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
int level, max_level;
enum plane_id plane_id;
uint32_t val;
u32 val;
max_level = ilk_wm_max_level(dev_priv);
@ -5692,7 +5691,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
uint32_t tmp;
u32 tmp;
tmp = I915_READ(DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
@ -5719,7 +5718,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
enum pipe pipe;
uint32_t tmp;
u32 tmp;
for_each_pipe(dev_priv, pipe) {
tmp = I915_READ(VLV_DDL(pipe));
@ -6201,10 +6200,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
*/
DEFINE_SPINLOCK(mchdev_lock);
/* Global for IPS driver to get at the current i915 device. Protected by
* mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
u16 rgvswctl;
@ -6667,7 +6662,7 @@ void gen6_rps_boost(struct i915_request *rq,
if (!rps->enabled)
return;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
if (i915_request_signaled(rq))
return;
/* Serializes with i915_request_retire() */
@ -7847,16 +7842,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
unsigned long val;
intel_wakeref_t wakeref;
unsigned long val = 0;
if (!IS_GEN(dev_priv, 5))
return 0;
spin_lock_irq(&mchdev_lock);
val = __i915_chipset_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
with_intel_runtime_pm(dev_priv, wakeref) {
spin_lock_irq(&mchdev_lock);
val = __i915_chipset_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
}
return val;
}
@ -7933,14 +7929,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref;
if (!IS_GEN(dev_priv, 5))
return;
spin_lock_irq(&mchdev_lock);
__i915_update_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
with_intel_runtime_pm(dev_priv, wakeref) {
spin_lock_irq(&mchdev_lock);
__i915_update_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
}
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
@ -7982,20 +7980,36 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long val;
intel_wakeref_t wakeref;
unsigned long val = 0;
if (!IS_GEN(dev_priv, 5))
return 0;
spin_lock_irq(&mchdev_lock);
val = __i915_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
with_intel_runtime_pm(dev_priv, wakeref) {
spin_lock_irq(&mchdev_lock);
val = __i915_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
}
return val;
}
static struct drm_i915_private __rcu *i915_mch_dev;
static struct drm_i915_private *mchdev_get(void)
{
struct drm_i915_private *i915;
rcu_read_lock();
i915 = rcu_dereference(i915_mch_dev);
if (i915 && !kref_get_unless_zero(&i915->drm.ref))
i915 = NULL;
rcu_read_unlock();
return i915;
}
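
mchdev_get() is the standard RCU lookup idiom: dereference the published pointer under rcu_read_lock() and keep it only if kref_get_unless_zero() succeeds, since that call fails once the refcount has already dropped to zero. A generic, self-contained sketch of the same idiom with invented names:

#include <linux/kref.h>
#include <linux/rcupdate.h>

struct obj {
	struct kref ref;
};

static struct obj __rcu *published; /* stores use rcu_assign_pointer() */

static struct obj *obj_get(void)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(published);
	/* Refuse an object whose last reference is already gone. */
	if (o && !kref_get_unless_zero(&o->ref))
		o = NULL;
	rcu_read_unlock();

	return o; /* caller balances with kref_put() */
}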
/**
* i915_read_mch_val - return value for IPS use
*
@ -8004,23 +8018,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
*/
unsigned long i915_read_mch_val(void)
{
struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
struct drm_i915_private *i915;
unsigned long chipset_val = 0;
unsigned long graphics_val = 0;
intel_wakeref_t wakeref;
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
i915 = mchdev_get();
if (!i915)
return 0;
chipset_val = __i915_chipset_val(dev_priv);
graphics_val = __i915_gfx_val(dev_priv);
with_intel_runtime_pm(i915, wakeref) {
spin_lock_irq(&mchdev_lock);
chipset_val = __i915_chipset_val(i915);
graphics_val = __i915_gfx_val(i915);
spin_unlock_irq(&mchdev_lock);
}
ret = chipset_val + graphics_val;
out_unlock:
spin_unlock_irq(&mchdev_lock);
return ret;
drm_dev_put(&i915->drm);
return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
@ -8031,23 +8046,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
*/
bool i915_gpu_raise(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
struct drm_i915_private *i915;
i915 = mchdev_get();
if (!i915)
return false;
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
dev_priv->ips.max_delay--;
out_unlock:
if (i915->ips.max_delay > i915->ips.fmax)
i915->ips.max_delay--;
spin_unlock_irq(&mchdev_lock);
return ret;
drm_dev_put(&i915->drm);
return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
@ -8059,23 +8070,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
*/
bool i915_gpu_lower(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
struct drm_i915_private *i915;
i915 = mchdev_get();
if (!i915)
return false;
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
dev_priv->ips.max_delay++;
out_unlock:
if (i915->ips.max_delay < i915->ips.min_delay)
i915->ips.max_delay++;
spin_unlock_irq(&mchdev_lock);
return ret;
drm_dev_put(&i915->drm);
return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
@ -8086,13 +8093,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
*/
bool i915_gpu_busy(void)
{
bool ret = false;
struct drm_i915_private *i915;
bool ret;
spin_lock_irq(&mchdev_lock);
if (i915_mch_dev)
ret = i915_mch_dev->gt.awake;
spin_unlock_irq(&mchdev_lock);
i915 = mchdev_get();
if (!i915)
return false;
ret = i915->gt.awake;
drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
@ -8105,24 +8115,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
*/
bool i915_gpu_turbo_disable(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
struct drm_i915_private *i915;
bool ret;
i915 = mchdev_get();
if (!i915)
return false;
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
dev_priv->ips.max_delay = dev_priv->ips.fstart;
if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
ret = false;
out_unlock:
i915->ips.max_delay = i915->ips.fstart;
ret = ironlake_set_drps(i915, i915->ips.fstart);
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
@ -8151,18 +8156,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
/* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values. */
spin_lock_irq(&mchdev_lock);
i915_mch_dev = dev_priv;
spin_unlock_irq(&mchdev_lock);
rcu_assign_pointer(i915_mch_dev, dev_priv);
ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
spin_lock_irq(&mchdev_lock);
i915_mch_dev = NULL;
spin_unlock_irq(&mchdev_lock);
rcu_assign_pointer(i915_mch_dev, NULL);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
@ -8501,7 +8502,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
* Required for FBC
@ -8573,7 +8574,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
int pipe;
uint32_t val;
u32 val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@ -8608,7 +8609,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
uint32_t tmp;
u32 tmp;
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
@ -8618,7 +8619,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@ -8712,7 +8713,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
/*
* WaVSThreadDispatchOverride:ivb,vlv
@ -8748,7 +8749,7 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_LPT_LP(dev_priv)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@ -8986,7 +8987,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t snpcr;
u32 snpcr;
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@ -9195,7 +9196,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t dspclk_gate;
u32 dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@ -9445,7 +9446,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
uint32_t flags =
u32 flags =
I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
switch (flags) {
@ -9468,7 +9469,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
uint32_t flags =
u32 flags =
I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
switch (flags) {


@ -70,17 +70,17 @@ static bool psr_global_enabled(u32 debug)
static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
/* Disable PSR2 by default for all platforms */
if (i915_modparams.enable_psr == -1)
return false;
/* Cannot enable DSC and PSR2 simultaneously */
WARN_ON(crtc_state->dsc_params.compression_enable &&
crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
case I915_PSR_DEBUG_DEFAULT:
if (i915_modparams.enable_psr <= 0)
return false;
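/* fall through */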
default:
return crtc_state->has_psr2;
}
@ -230,7 +230,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
uint8_t dprx = 0;
u8 dprx = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
&dprx) != 1)
@ -240,7 +240,7 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
uint8_t alpm_caps = 0;
u8 alpm_caps = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
&alpm_caps) != 1)
@ -384,7 +384,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 aux_clock_divider, aux_ctl;
int i;
static const uint8_t aux_msg[] = {
static const u8 aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
[2] = DP_SET_POWER & 0xff,


@ -836,8 +836,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&request->fence.flags))
if (i915_request_signaled(request))
continue;
dma_fence_set_error(&request->fence, -EIO);


@ -28,7 +28,7 @@ struct i915_sched_attr;
* workarounds!
*/
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
struct intel_hw_status_page {
struct i915_vma *vma;
@ -398,7 +398,6 @@ struct intel_engine_cs {
unsigned int irq_count;
bool irq_armed : 1;
I915_SELFTEST_DECLARE(bool mock : 1);
} breadcrumbs;
struct {


@ -29,6 +29,8 @@
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <drm/drm_print.h>
#include "i915_drv.h"
#include "intel_drv.h"
@ -49,6 +51,268 @@
* present for a given platform.
*/
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
#include <linux/sort.h>
#define STACKDEPTH 8
static noinline depot_stack_handle_t __save_depot_stack(void)
{
unsigned long entries[STACKDEPTH];
struct stack_trace trace = {
.entries = entries,
.max_entries = ARRAY_SIZE(entries),
.skip = 1,
};
save_stack_trace(&trace);
if (trace.nr_entries &&
trace.entries[trace.nr_entries - 1] == ULONG_MAX)
trace.nr_entries--;
return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
}
static void __print_depot_stack(depot_stack_handle_t stack,
char *buf, int sz, int indent)
{
unsigned long entries[STACKDEPTH];
struct stack_trace trace = {
.entries = entries,
.max_entries = ARRAY_SIZE(entries),
};
depot_fetch_stack(stack, &trace);
snprint_stack_trace(buf, sz, &trace, indent);
}
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
spin_lock_init(&rpm->debug.lock);
}
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
depot_stack_handle_t stack, *stacks;
unsigned long flags;
atomic_inc(&rpm->wakeref_count);
assert_rpm_wakelock_held(i915);
if (!HAS_RUNTIME_PM(i915))
return -1;
stack = __save_depot_stack();
if (!stack)
return -1;
spin_lock_irqsave(&rpm->debug.lock, flags);
if (!rpm->debug.count)
rpm->debug.last_acquire = stack;
stacks = krealloc(rpm->debug.owners,
(rpm->debug.count + 1) * sizeof(*stacks),
GFP_NOWAIT | __GFP_NOWARN);
if (stacks) {
stacks[rpm->debug.count++] = stack;
rpm->debug.owners = stacks;
} else {
stack = -1;
}
spin_unlock_irqrestore(&rpm->debug.lock, flags);
return stack;
}
static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
depot_stack_handle_t stack)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
unsigned long flags, n;
bool found = false;
if (unlikely(stack == -1))
return;
spin_lock_irqsave(&rpm->debug.lock, flags);
for (n = rpm->debug.count; n--; ) {
if (rpm->debug.owners[n] == stack) {
memmove(rpm->debug.owners + n,
rpm->debug.owners + n + 1,
(--rpm->debug.count - n) * sizeof(stack));
found = true;
break;
}
}
spin_unlock_irqrestore(&rpm->debug.lock, flags);
if (WARN(!found,
"Unmatched wakeref (tracking %lu), count %u\n",
rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return;
__print_depot_stack(stack, buf, PAGE_SIZE, 2);
DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
stack = READ_ONCE(rpm->debug.last_release);
if (stack) {
__print_depot_stack(stack, buf, PAGE_SIZE, 2);
DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
}
kfree(buf);
}
}
static int cmphandle(const void *_a, const void *_b)
{
const depot_stack_handle_t * const a = _a, * const b = _b;
if (*a < *b)
return -1;
else if (*a > *b)
return 1;
else
return 0;
}
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
const struct intel_runtime_pm_debug *dbg)
{
unsigned long i;
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return;
if (dbg->last_acquire) {
__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref last acquired:\n%s", buf);
}
if (dbg->last_release) {
__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref last released:\n%s", buf);
}
drm_printf(p, "Wakeref count: %lu\n", dbg->count);
sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
for (i = 0; i < dbg->count; i++) {
depot_stack_handle_t stack = dbg->owners[i];
unsigned long rep;
rep = 1;
while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
rep++, i++;
__print_depot_stack(stack, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
}
kfree(buf);
}
static noinline void
untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
struct intel_runtime_pm_debug dbg = {};
struct drm_printer p;
unsigned long flags;
assert_rpm_wakelock_held(i915);
if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
&rpm->debug.lock,
flags)) {
dbg = rpm->debug;
rpm->debug.owners = NULL;
rpm->debug.count = 0;
rpm->debug.last_release = __save_depot_stack();
spin_unlock_irqrestore(&rpm->debug.lock, flags);
}
if (!dbg.count)
return;
p = drm_debug_printer("i915");
__print_intel_runtime_pm_wakeref(&p, &dbg);
kfree(dbg.owners);
}
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
struct drm_printer *p)
{
struct intel_runtime_pm_debug dbg = {};
do {
struct i915_runtime_pm *rpm = &i915->runtime_pm;
unsigned long alloc = dbg.count;
depot_stack_handle_t *s;
spin_lock_irq(&rpm->debug.lock);
dbg.count = rpm->debug.count;
if (dbg.count <= alloc) {
memcpy(dbg.owners,
rpm->debug.owners,
dbg.count * sizeof(*s));
}
dbg.last_acquire = rpm->debug.last_acquire;
dbg.last_release = rpm->debug.last_release;
spin_unlock_irq(&rpm->debug.lock);
if (dbg.count <= alloc)
break;
s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
dbg.owners = s;
} while (1);
__print_intel_runtime_pm_wakeref(p, &dbg);
out:
kfree(dbg.owners);
}
#else
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
}
static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
atomic_inc(&i915->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(i915);
return -1;
}
static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
assert_rpm_wakelock_held(i915);
atomic_dec(&i915->runtime_pm.wakeref_count);
}
#endif
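
When CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled, the cookie is the depot_stack_handle_t minted by track_intel_runtime_pm_wakeref(), so the release side can cancel the exact acquisition record; with the config off, the helpers above collapse to bare refcounting and the cookie is a dummy. The intended pairing, sketched with the hardware access elided:

intel_wakeref_t wakeref;

wakeref = intel_runtime_pm_get(i915);	/* debug build: records the call stack */
/* ... touch hardware ... */
intel_runtime_pm_put(i915, wakeref);	/* debug build: cancels that record */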
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@ -639,10 +903,10 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
* back on and register state is restored. This is guaranteed by the MMIO write
* to DC_STATE_EN blocking until the state is restored.
*/
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
uint32_t val;
uint32_t mask;
u32 val;
u32 mask;
if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;
@ -1274,7 +1538,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
enum pipe pipe;
uint32_t tmp;
u32 tmp;
WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
@ -1591,18 +1855,19 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
intel_runtime_pm_get(dev_priv);
intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(dev_priv, domain);
mutex_unlock(&power_domains->lock);
return wakeref;
}
/**
@ -1617,13 +1882,16 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
intel_wakeref_t wakeref;
bool is_enabled;
if (!intel_runtime_pm_get_if_in_use(dev_priv))
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return 0;
mutex_lock(&power_domains->lock);
@ -1637,23 +1905,16 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
if (!is_enabled)
intel_runtime_pm_put(dev_priv);
if (!is_enabled) {
intel_runtime_pm_put(dev_priv, wakeref);
wakeref = 0;
}
return is_enabled;
return wakeref;
}
/**
* intel_display_power_put - release a power domain reference
* @dev_priv: i915 device instance
* @domain: power domain to reference
*
* This function drops the power domain reference obtained by
* intel_display_power_get() and might power down the corresponding hardware
* block right away if this is the last reference.
*/
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
@ -1671,10 +1932,34 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
intel_runtime_pm_put(dev_priv);
}
/**
* intel_display_power_put - release a power domain reference
* @dev_priv: i915 device instance
* @domain: power domain to reference
*
* This function drops the power domain reference obtained by
* intel_display_power_get() and might power down the corresponding hardware
* block right away if this is the last reference.
*/
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
__intel_display_power_put(dev_priv, domain);
intel_runtime_pm_put_unchecked(dev_priv);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
__intel_display_power_put(dev_priv, domain);
intel_runtime_pm_put(dev_priv, wakeref);
}
#endif
#define I830_PIPES_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
@ -3043,10 +3328,10 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
return 1;
}
static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int enable_dc)
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int enable_dc)
{
uint32_t mask;
u32 mask;
int requested_dc;
int max_dc;
@ -3311,7 +3596,7 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
uint32_t val;
u32 val;
val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
MBUS_ABOX_BT_CREDIT_POOL2(16) |
@ -3622,7 +3907,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* current lane status.
*/
if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
uint32_t status = I915_READ(DPLL(PIPE_A));
u32 status = I915_READ(DPLL(PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
@ -3653,7 +3938,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
}
if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
uint32_t status = I915_READ(DPIO_PHY_STATUS);
u32 status = I915_READ(DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@ -3712,7 +3997,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
/**
* intel_power_domains_init_hw - initialize hardware power domain state
* @dev_priv: i915 device instance
* @i915: i915 device instance
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
@ -3726,30 +4011,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_fini_hw().
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_domains *power_domains = &i915->power_domains;
power_domains->initializing = true;
if (IS_ICELAKE(dev_priv)) {
icl_display_core_init(dev_priv, resume);
} else if (IS_CANNONLAKE(dev_priv)) {
cnl_display_core_init(dev_priv, resume);
} else if (IS_GEN9_BC(dev_priv)) {
skl_display_core_init(dev_priv, resume);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev_priv)) {
if (IS_ICELAKE(i915)) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
cnl_display_core_init(i915, resume);
} else if (IS_GEN9_BC(i915)) {
skl_display_core_init(i915, resume);
} else if (IS_GEN9_LP(i915)) {
bxt_display_core_init(i915, resume);
} else if (IS_CHERRYVIEW(i915)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
chv_phy_control_init(i915);
mutex_unlock(&power_domains->lock);
} else if (IS_VALLEYVIEW(dev_priv)) {
} else if (IS_VALLEYVIEW(i915)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
vlv_cmnlane_wa(i915);
mutex_unlock(&power_domains->lock);
} else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
} else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
}
/*
* Keep all power wells enabled for any dependent HW access during
@ -3757,18 +4043,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
if (!i915_modparams.disable_power_well)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(dev_priv);
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(i915);
power_domains->initializing = false;
}
/**
* intel_power_domains_fini_hw - deinitialize hw power domain state
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* De-initializes the display power domain HW state. It also ensures that the
* device stays powered up so that the driver can be reloaded.
@ -3777,21 +4065,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
void intel_power_domains_fini_hw(struct drm_i915_private *i915)
{
/* Keep the power well enabled, but cancel its rpm wakeref. */
intel_runtime_pm_put(dev_priv);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&i915->power_domains.wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915_modparams.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(dev_priv);
intel_power_domains_verify_state(i915);
/* Keep the power well enabled, but cancel its rpm wakeref. */
intel_runtime_pm_put(i915, wakeref);
}
/**
* intel_power_domains_enable - enable toggling of display power wells
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* Enable the on-demand enabling/disabling of the display power wells. Note that
* power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
@ -3801,30 +4092,36 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
* of display HW readout (which will acquire the power references reflecting
* the current HW state).
*/
void intel_power_domains_enable(struct drm_i915_private *dev_priv)
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&i915->power_domains.wakeref);
intel_power_domains_verify_state(dev_priv);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
}
/**
* intel_power_domains_disable - disable toggling of display power wells
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* Disable the on-demand enabling/disabling of the display power wells. See
* intel_power_domains_enable() for which power wells this call controls.
*/
void intel_power_domains_disable(struct drm_i915_private *dev_priv)
void intel_power_domains_disable(struct drm_i915_private *i915)
{
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
struct i915_power_domains *power_domains = &i915->power_domains;
intel_power_domains_verify_state(dev_priv);
WARN_ON(power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(i915);
}
/**
* intel_power_domains_suspend - suspend power domain state
* @dev_priv: i915 device instance
* @i915: i915 device instance
* @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
*
* This function prepares the hardware power domain state before entering
@ -3833,12 +4130,14 @@ void intel_power_domains_disable(struct drm_i915_private *dev_priv)
* It must be called with power domains already disabled (after a call to
* intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/
void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
void intel_power_domains_suspend(struct drm_i915_private *i915,
enum i915_drm_suspend_mode suspend_mode)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_domains *power_domains = &i915->power_domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
/*
* In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
@ -3847,10 +4146,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
dev_priv->csr.dmc_payload != NULL) {
intel_power_domains_verify_state(dev_priv);
i915->csr.dmc_payload) {
intel_power_domains_verify_state(i915);
return;
}
@ -3859,25 +4158,25 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915_modparams.disable_power_well) {
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(dev_priv);
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(i915);
}
if (IS_ICELAKE(dev_priv))
icl_display_core_uninit(dev_priv);
else if (IS_CANNONLAKE(dev_priv))
cnl_display_core_uninit(dev_priv);
else if (IS_GEN9_BC(dev_priv))
skl_display_core_uninit(dev_priv);
else if (IS_GEN9_LP(dev_priv))
bxt_display_core_uninit(dev_priv);
if (IS_ICELAKE(i915))
icl_display_core_uninit(i915);
else if (IS_CANNONLAKE(i915))
cnl_display_core_uninit(i915);
else if (IS_GEN9_BC(i915))
skl_display_core_uninit(i915);
else if (IS_GEN9_LP(i915))
bxt_display_core_uninit(i915);
power_domains->display_core_suspended = true;
}
/**
* intel_power_domains_resume - resume power domain state
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* This function resumes the hardware power domain state during system resume.
*
@ -3885,28 +4184,30 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_suspend().
*/
void intel_power_domains_resume(struct drm_i915_private *dev_priv)
void intel_power_domains_resume(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_domains *power_domains = &i915->power_domains;
if (power_domains->display_core_suspended) {
intel_power_domains_init_hw(dev_priv, true);
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
WARN_ON(power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
intel_power_domains_verify_state(dev_priv);
intel_power_domains_verify_state(i915);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_domains *power_domains = &i915->power_domains;
struct i915_power_well *power_well;
for_each_power_well(dev_priv, power_well) {
for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%-25s %d\n",
@ -3921,7 +4222,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_verify_state - verify the HW/SW state for all power wells
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* Verify if the reference count of each power well matches its HW enabled
* state and the total refcount of the domains it belongs to. This must be
@ -3929,22 +4230,21 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_domains *power_domains = &i915->power_domains;
struct i915_power_well *power_well;
bool dump_domain_info;
mutex_lock(&power_domains->lock);
dump_domain_info = false;
for_each_power_well(dev_priv, power_well) {
for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
int domains_count;
bool enabled;
enabled = power_well->desc->ops->is_enabled(dev_priv,
power_well);
enabled = power_well->desc->ops->is_enabled(i915, power_well);
if ((power_well->count || power_well->desc->always_on) !=
enabled)
DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
@ -3968,7 +4268,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
static bool dumped;
if (!dumped) {
intel_power_domains_dump_info(dev_priv);
intel_power_domains_dump_info(i915);
dumped = true;
}
}
@ -3978,7 +4278,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
#else
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
@ -3986,30 +4286,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
/**
* intel_runtime_pm_get - grab a runtime pm reference
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on) and ensures that it is powered up.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
int ret;
ret = pm_runtime_get_sync(kdev);
WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
return track_intel_runtime_pm_wakeref(i915);
}
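
Callers bracketing a short section can let with_intel_runtime_pm() (the helper used in the i915_chipset_val() conversion earlier in this series) manage the cookie for them. A minimal sketch, with SOME_REG standing in for a real register:

intel_wakeref_t wakeref;
u32 val = 0;

with_intel_runtime_pm(dev_priv, wakeref) {
	val = I915_READ(SOME_REG); /* SOME_REG is a placeholder */
}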
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up. It is illegal to try
@ -4018,12 +4319,13 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*
* Returns: True if the wakeref was acquired, or False otherwise.
* Returns: the wakeref cookie to pass to intel_runtime_pm_put(); it evaluates
* as true if the wakeref was acquired, or as false (0) otherwise.
*/
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
{
if (IS_ENABLED(CONFIG_PM)) {
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/*
@ -4033,18 +4335,15 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
* atm to the late/early system suspend/resume handlers.
*/
if (pm_runtime_get_if_in_use(kdev) <= 0)
return false;
return 0;
}
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
return true;
return track_intel_runtime_pm_wakeref(i915);
}
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on).
@ -4058,41 +4357,50 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
assert_rpm_wakelock_held(i915);
pm_runtime_get_noresume(kdev);
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
return track_intel_runtime_pm_wakeref(i915);
}
/**
* intel_runtime_pm_put - release a runtime pm reference
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
atomic_dec(&dev_priv->runtime_pm.wakeref_count);
untrack_intel_runtime_pm_wakeref(i915);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
{
cancel_intel_runtime_pm_wakeref(i915, wref);
intel_runtime_pm_put_unchecked(i915);
}
#endif
/**
* intel_runtime_pm_enable - enable runtime pm
* @dev_priv: i915 device instance
* @i915: i915 device instance
*
* This function enables runtime pm at the end of the driver load sequence.
*
@ -4100,9 +4408,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
* subordinate display power domains. That is done by
* intel_power_domains_enable().
*/
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable(struct drm_i915_private *i915)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/*
@ -4124,7 +4432,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
if (!HAS_RUNTIME_PM(dev_priv)) {
if (!HAS_RUNTIME_PM(i915)) {
int ret;
pm_runtime_dont_use_autosuspend(kdev);
@ -4142,17 +4450,35 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
pm_runtime_put_autosuspend(kdev);
}
void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
void intel_runtime_pm_disable(struct drm_i915_private *i915)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/* Transfer rpm ownership back to core */
WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
WARN(pm_runtime_get_sync(kdev) < 0,
"Failed to pass rpm ownership back to core\n");
pm_runtime_dont_use_autosuspend(kdev);
if (!HAS_RUNTIME_PM(dev_priv))
if (!HAS_RUNTIME_PM(i915))
pm_runtime_put(kdev);
}
void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
int count;
count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
WARN(count,
"i915->runtime_pm.wakeref_count=%d on cleanup\n",
count);
untrack_intel_runtime_pm_wakeref(i915);
}
void intel_runtime_pm_init_early(struct drm_i915_private *i915)
{
init_intel_runtime_pm_wakeref(i915);
}
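Read together, the entry points in this file imply the following ordering over the driver's life; a sketch inferred from the kernel-doc above, not a verbatim call site:

	/* driver probe */
	intel_runtime_pm_init_early(i915);	/* set up wakeref tracking */
	/* ... rest of driver load ... */
	intel_runtime_pm_enable(i915);		/* hand rpm over to the PM core */

	/* driver remove */
	intel_runtime_pm_disable(i915);		/* take rpm ownership back */
	/* ... rest of driver unload ... */
	intel_runtime_pm_cleanup(i915);		/* WARN about leaked wakerefs */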


@@ -76,7 +76,7 @@ struct intel_sdvo {
i915_reg_t sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
u16 controlled_output;
/*
* Capabilities of the SDVO device returned by
@@ -91,12 +91,12 @@ struct intel_sdvo {
* For a multi-function SDVO device, this tracks
* the currently attached outputs.
*/
uint16_t attached_output;
u16 attached_output;
/*
* Hotplug activation bits for this device
*/
uint16_t hotplug_active;
u16 hotplug_active;
enum port port;
@@ -104,19 +104,19 @@ struct intel_sdvo {
bool has_hdmi_audio;
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
u8 ddc_bus;
/*
* the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
*/
uint8_t dtd_sdvo_flags;
u8 dtd_sdvo_flags;
};
struct intel_sdvo_connector {
struct intel_connector base;
/* Mark the type of connector */
uint16_t output_flag;
u16 output_flag;
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
@@ -184,7 +184,7 @@ to_intel_sdvo_connector(struct drm_connector *connector)
container_of((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags);
static bool
intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
@@ -746,9 +746,9 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
uint16_t clock,
uint16_t width,
uint16_t height)
u16 clock,
u16 width,
u16 height)
{
struct intel_sdvo_preferred_input_timing_args args;
@@ -791,9 +791,9 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
const struct drm_display_mode *mode)
{
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
uint16_t h_sync_offset, v_sync_offset;
u16 width, height;
u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
u16 h_sync_offset, v_sync_offset;
int mode_clock;
memset(dtd, 0, sizeof(*dtd));
@@ -898,13 +898,13 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
}
static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
uint8_t mode)
u8 mode)
{
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
}
static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
uint8_t mode)
u8 mode)
{
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
@@ -913,11 +913,11 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
int i, j;
uint8_t set_buf_index[2];
uint8_t av_split;
uint8_t buf_size;
uint8_t buf[48];
uint8_t *pos;
u8 set_buf_index[2];
u8 av_split;
u8 buf_size;
u8 buf[48];
u8 *pos;
intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
@@ -940,11 +940,11 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
#endif
static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
unsigned if_index, uint8_t tx_rate,
const uint8_t *data, unsigned length)
unsigned int if_index, u8 tx_rate,
const u8 *data, unsigned int length)
{
uint8_t set_buf_index[2] = { if_index, 0 };
uint8_t hbuf_size, tmp[8];
u8 set_buf_index[2] = { if_index, 0 };
u8 hbuf_size, tmp[8];
int i;
if (!intel_sdvo_set_value(intel_sdvo,
@@ -984,7 +984,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
{
const struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe frame;
int ret;
ssize_t len;
@@ -1017,7 +1017,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
struct intel_sdvo_tv_format format;
uint32_t format_map;
u32 format_map;
format_map = 1 << conn_state->tv.mode;
memset(&format, 0, sizeof(format));
@@ -1208,7 +1208,7 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state = &sdvo_state->base.base;
struct intel_sdvo_connector *intel_sdvo_conn =
to_intel_sdvo_connector(conn_state->connector);
uint16_t val;
u16 val;
if (intel_sdvo_conn->left)
UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H);
@@ -1692,10 +1692,10 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
uint16_t hotplug;
u16 hotplug;
if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
@@ -1826,7 +1826,7 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
uint16_t response;
u16 response;
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
@@ -1977,7 +1977,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
const struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
u32 reply = 0, format_map = 0;
int i;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2062,7 +2062,7 @@ static int
intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val)
u64 *val)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
@@ -2121,7 +2121,7 @@ static int
intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val)
u64 val)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
@@ -2270,7 +2270,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
static void
intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
{
uint16_t mask = 0;
u16 mask = 0;
unsigned int num_bits;
/*
@@ -2671,7 +2671,7 @@ err:
}
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
{
/* SDVO requires that an XXX1 output function may only exist if the corresponding XXX0 output function also exists. */
@@ -2747,7 +2747,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
{
struct drm_device *dev = intel_sdvo->base.base.dev;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
u32 format_map, i;
if (!intel_sdvo_set_target_output(intel_sdvo, type))
return false;
@@ -2814,7 +2814,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
uint16_t response, data_value[2];
u16 response, data_value[2];
/* When horizontal overscan is supported, add the left/right properties */
if (enhancements.overscan_h) {
@@ -2925,7 +2925,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
{
struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
u16 response, data_value[2];
ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS);
@@ -2939,7 +2939,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
{
union {
struct intel_sdvo_enhancements_reply reply;
uint16_t response;
u16 response;
} enhancements;
BUILD_BUG_ON(sizeof(enhancements) != 2);


@@ -321,8 +321,8 @@ skl_program_scaler(struct intel_plane *plane,
&crtc_state->scaler_state.scalers[scaler_id];
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
u32 crtc_w = drm_rect_width(&plane_state->base.dst);
u32 crtc_h = drm_rect_height(&plane_state->base.dst);
u16 y_hphase, uv_rgb_hphase;
u16 y_vphase, uv_rgb_vphase;
int hscale, vscale;
@@ -477,10 +477,10 @@ skl_program_plane(struct intel_plane *plane,
u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t x = plane_state->color_plane[color_plane].x;
uint32_t y = plane_state->color_plane[color_plane].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
u32 x = plane_state->color_plane[color_plane].x;
u32 y = plane_state->color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
struct intel_plane *linked = plane_state->linked_plane;
const struct drm_framebuffer *fb = plane_state->base.fb;
u8 alpha = plane_state->base.alpha >> 8;
@@ -618,17 +618,19 @@ skl_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
*pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
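The display power domain interfaces adopt the same cookie discipline: the conditional get returns an intel_wakeref_t (0 when the power well is off) and the put consumes it. The caller pattern, repeated in the skl/vlv/ivb/g4x *_plane_get_hw_state() conversions in this file, is:

	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false; /* power well off, register not readable */

	/* ... read out the hardware state ... */

	intel_display_power_put(dev_priv, power_domain, wakeref);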
@@ -812,10 +814,10 @@ vlv_update_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
uint32_t x = plane_state->color_plane[0].x;
uint32_t y = plane_state->color_plane[0].y;
u32 crtc_w = drm_rect_width(&plane_state->base.dst);
u32 crtc_h = drm_rect_height(&plane_state->base.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
unsigned long irqflags;
/* Sizes are 0 based */
@@ -882,17 +884,19 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
*pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -972,12 +976,12 @@ ivb_update_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
uint32_t x = plane_state->color_plane[0].x;
uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
u32 crtc_w = drm_rect_width(&plane_state->base.dst);
u32 crtc_h = drm_rect_height(&plane_state->base.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
/* Sizes are 0 based */
@@ -1051,17 +1055,19 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
*pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -1146,12 +1152,12 @@ g4x_update_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
uint32_t x = plane_state->color_plane[0].x;
uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
u32 crtc_w = drm_rect_width(&plane_state->base.dst);
u32 crtc_h = drm_rect_height(&plane_state->base.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
/* Sizes are 0 based */
@@ -1217,17 +1223,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return false;
ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
*pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -1698,7 +1706,7 @@ out:
return ret;
}
static const uint32_t g4x_plane_formats[] = {
static const u32 g4x_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@@ -1706,13 +1714,13 @@ static const uint32_t g4x_plane_formats[] = {
DRM_FORMAT_VYUY,
};
static const uint64_t i9xx_plane_format_modifiers[] = {
static const u64 i9xx_plane_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static const uint32_t snb_plane_formats[] = {
static const u32 snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
@@ -1721,7 +1729,7 @@ static const uint32_t snb_plane_formats[] = {
DRM_FORMAT_VYUY,
};
static const uint32_t vlv_plane_formats[] = {
static const u32 vlv_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
@@ -1735,7 +1743,7 @@ static const uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
static const uint32_t skl_plane_formats[] = {
static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1750,7 +1758,7 @@ static const uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
static const uint32_t skl_planar_formats[] = {
static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1766,7 +1774,7 @@ static const uint32_t skl_planar_formats[] = {
DRM_FORMAT_NV12,
};
static const uint64_t skl_plane_format_modifiers_noccs[] = {
static const u64 skl_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
@@ -1774,7 +1782,7 @@ static const uint64_t skl_plane_format_modifiers_noccs[] = {
DRM_FORMAT_MOD_INVALID
};
static const uint64_t skl_plane_format_modifiers_ccs[] = {
static const u64 skl_plane_format_modifiers_ccs[] = {
I915_FORMAT_MOD_Yf_TILED_CCS,
I915_FORMAT_MOD_Y_TILED_CCS,
I915_FORMAT_MOD_Yf_TILED,


@@ -26,6 +26,7 @@
#include "intel_guc_submission.h"
#include "intel_guc.h"
#include "i915_drv.h"
#include "i915_reset.h"
static void guc_free_load_err_log(struct intel_guc *guc);


@@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reg_read *reg = data;
struct reg_whitelist const *entry;
intel_wakeref_t wakeref;
unsigned int flags;
int remain;
int ret = 0;
@@ -1695,388 +1696,22 @@ int i915_reg_read_ioctl(struct drm_device *dev,
flags = reg->offset & (entry->size - 1);
intel_runtime_pm_get(dev_priv);
if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
reg->val = I915_READ64_2x32(entry->offset_ldw,
entry->offset_udw);
else if (entry->size == 8 && flags == 0)
reg->val = I915_READ64(entry->offset_ldw);
else if (entry->size == 4 && flags == 0)
reg->val = I915_READ(entry->offset_ldw);
else if (entry->size == 2 && flags == 0)
reg->val = I915_READ16(entry->offset_ldw);
else if (entry->size == 1 && flags == 0)
reg->val = I915_READ8(entry->offset_ldw);
else
ret = -EINVAL;
intel_runtime_pm_put(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref) {
if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
reg->val = I915_READ64_2x32(entry->offset_ldw,
entry->offset_udw);
else if (entry->size == 8 && flags == 0)
reg->val = I915_READ64(entry->offset_ldw);
else if (entry->size == 4 && flags == 0)
reg->val = I915_READ(entry->offset_ldw);
else if (entry->size == 2 && flags == 0)
reg->val = I915_READ16(entry->offset_ldw);
else if (entry->size == 1 && flags == 0)
reg->val = I915_READ8(entry->offset_ldw);
else
ret = -EINVAL;
}
return ret;
}
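The with_intel_runtime_pm() helper used above scopes a wakeref over a block by (ab)using a for-loop. Its definition is not part of this hunk, but it is roughly of this shape (a sketch):

	#define with_intel_runtime_pm(i915, wref) \
		for ((wref) = intel_runtime_pm_get(i915); (wref); \
		     intel_runtime_pm_put((i915), (wref)), (wref) = 0)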
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
const u32 base = engine->mmio_base;
if (intel_engine_stop_cs(engine))
DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
I915_WRITE_FW(RING_HEAD(base), 0);
I915_WRITE_FW(RING_TAIL(base), 0);
POSTING_READ_FW(RING_TAIL(base));
/* The ring must be empty before it is disabled */
I915_WRITE_FW(RING_CTL(base), 0);
/* Check acts as a post */
if (I915_READ_FW(RING_HEAD(base)) != 0)
DRM_DEBUG_DRIVER("%s: ring head not parked\n",
engine->name);
}
static void i915_stop_engines(struct drm_i915_private *dev_priv,
unsigned int engine_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
if (INTEL_GEN(dev_priv) < 3)
return;
for_each_engine_masked(engine, dev_priv, engine_mask, id)
gen3_stop_engine(engine);
}
static bool i915_in_reset(struct pci_dev *pdev)
{
u8 gdrst;
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return gdrst & GRDOM_RESET_STATUS;
}
static int i915_do_reset(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
int err;
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
usleep_range(50, 200);
err = wait_for(i915_in_reset(pdev), 500);
/* Clear the reset request. */
pci_write_config_byte(pdev, I915_GDRST, 0);
usleep_range(50, 200);
if (!err)
err = wait_for(!i915_in_reset(pdev), 500);
return err;
}
static bool g4x_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int g33_do_reset(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for(g4x_reset_complete(pdev), 500);
}
static int g4x_do_reset(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
I915_WRITE(VDECCLK_GATE_D,
I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for(g4x_reset_complete(pdev), 500);
if (ret) {
DRM_DEBUG_DRIVER("Wait for media reset failed\n");
goto out;
}
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(g4x_reset_complete(pdev), 500);
if (ret) {
DRM_DEBUG_DRIVER("Wait for render reset failed\n");
goto out;
}
out:
pci_write_config_byte(pdev, I915_GDRST, 0);
I915_WRITE(VDECCLK_GATE_D,
I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
return ret;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
unsigned int retry)
{
int ret;
I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
ret = intel_wait_for_register(dev_priv,
ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
500);
if (ret) {
DRM_DEBUG_DRIVER("Wait for render reset failed\n");
goto out;
}
I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
ret = intel_wait_for_register(dev_priv,
ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
500);
if (ret) {
DRM_DEBUG_DRIVER("Wait for media reset failed\n");
goto out;
}
out:
I915_WRITE(ILK_GDSR, 0);
POSTING_READ(ILK_GDSR);
return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
u32 hw_domain_mask)
{
int err;
/* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests */
err = __intel_wait_for_register_fw(dev_priv,
GEN6_GDRST, hw_domain_mask, 0,
500, 0,
NULL);
if (err)
DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
hw_domain_mask);
return err;
}
/**
* gen6_reset_engines - reset individual engines
* @dev_priv: i915 device
* @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
* @retry: the count of previous attempts to reset.
*
* This function will reset the individual engines that are set in engine_mask.
* If you provide ALL_ENGINES as the mask, a full global domain reset will be issued.
*
* Note: It is the responsibility of the caller to handle the difference between
* requesting a full domain reset and a reset of all available individual engines.
*
* Returns 0 on success, nonzero on error.
*/
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
[RCS] = GEN6_GRDOM_RENDER,
[BCS] = GEN6_GRDOM_BLT,
[VCS] = GEN6_GRDOM_MEDIA,
[VCS2] = GEN8_GRDOM_MEDIA2,
[VECS] = GEN6_GRDOM_VECS,
};
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN6_GRDOM_FULL;
} else {
unsigned int tmp;
hw_mask = 0;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
hw_mask |= hw_engine_mask[engine->id];
}
return gen6_hw_domain_reset(dev_priv, hw_mask);
}
static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
i915_reg_t sfc_usage;
u32 sfc_usage_bit;
u32 sfc_reset_bit;
switch (engine->class) {
case VIDEO_DECODE_CLASS:
if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
return 0;
sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
break;
case VIDEO_ENHANCEMENT_CLASS:
sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
sfc_usage = GEN11_VECS_SFC_USAGE(engine);
sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
break;
default:
return 0;
}
/*
* Tell the engine that a software reset is going to happen. The engine
* will then try to force lock the SFC (if currently locked, it will
* remain so until we tell the engine it is safe to unlock; if currently
* unlocked, it will ignore this and all new lock requests). If SFC
* ends up being locked to the engine we want to reset, we have to reset
* it as well (we will unlock it once the reset sequence is completed).
*/
I915_WRITE_FW(sfc_forced_lock,
I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
if (__intel_wait_for_register_fw(dev_priv,
sfc_forced_lock_ack,
sfc_forced_lock_ack_bit,
sfc_forced_lock_ack_bit,
1000, 0, NULL)) {
DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
return 0;
}
if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
return sfc_reset_bit;
return 0;
}
static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock;
u32 sfc_forced_lock_bit;
switch (engine->class) {
case VIDEO_DECODE_CLASS:
if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
return;
sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
break;
case VIDEO_ENHANCEMENT_CLASS:
sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
break;
default:
return;
}
I915_WRITE_FW(sfc_forced_lock,
I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
}
/**
* gen11_reset_engines - reset individual engines
* @dev_priv: i915 device
* @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
*
* This function will reset the individual engines that are set in engine_mask.
* If you provide ALL_ENGINES as the mask, a full global domain reset will be issued.
*
* Note: It is the responsibility of the caller to handle the difference between
* requesting a full domain reset and a reset of all available individual engines.
*
* Returns 0 on success, nonzero on error.
*/
static int gen11_reset_engines(struct drm_i915_private *dev_priv,
unsigned int engine_mask)
{
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
[RCS] = GEN11_GRDOM_RENDER,
[BCS] = GEN11_GRDOM_BLT,
[VCS] = GEN11_GRDOM_MEDIA,
[VCS2] = GEN11_GRDOM_MEDIA2,
[VCS3] = GEN11_GRDOM_MEDIA3,
[VCS4] = GEN11_GRDOM_MEDIA4,
[VECS] = GEN11_GRDOM_VECS,
[VECS2] = GEN11_GRDOM_VECS2,
};
struct intel_engine_cs *engine;
unsigned int tmp;
u32 hw_mask;
int ret;
BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
hw_mask |= hw_engine_mask[engine->id];
hw_mask |= gen11_lock_sfc(dev_priv, engine);
}
}
ret = gen6_hw_domain_reset(dev_priv, hw_mask);
if (engine_mask != ALL_ENGINES)
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
gen11_unlock_sfc(dev_priv, engine);
return ret;
}
@@ -2190,196 +1825,6 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
return ret;
}
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
ret = __intel_wait_for_register_fw(dev_priv,
RING_RESET_CTL(engine->mmio_base),
RESET_CTL_READY_TO_RESET,
RESET_CTL_READY_TO_RESET,
700, 0,
NULL);
if (ret)
DRM_ERROR("%s: reset request timeout\n", engine->name);
return ret;
}
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int reset_engines(struct drm_i915_private *i915,
unsigned int engine_mask,
unsigned int retry)
{
if (INTEL_GEN(i915) >= 11)
return gen11_reset_engines(i915, engine_mask);
else
return gen6_reset_engines(i915, engine_mask, retry);
}
static int gen8_reset_engines(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
const bool reset_non_ready = retry >= 1;
unsigned int tmp;
int ret;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
goto skip_reset;
/*
* If this is not the first failed attempt to prepare,
* we decide to proceed anyway.
*
* By doing so we risk context corruption and with
* some gens (kbl), possible system hang if reset
* happens during active bb execution.
*
* We would rather take context corruption than a
* failed reset with a wedged driver/gpu. The active
* bb execution case should be covered by the
* i915_stop_engines call we make before the reset.
*/
}
ret = reset_engines(dev_priv, engine_mask, retry);
skip_reset:
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
return ret;
}
typedef int (*reset_func)(struct drm_i915_private *,
unsigned int engine_mask, unsigned int retry);
static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
if (!i915_modparams.reset)
return NULL;
if (INTEL_GEN(dev_priv) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(dev_priv) >= 6)
return gen6_reset_engines;
else if (IS_GEN(dev_priv, 5))
return ironlake_do_reset;
else if (IS_G4X(dev_priv))
return g4x_do_reset;
else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
return g33_do_reset;
else if (INTEL_GEN(dev_priv) >= 3)
return i915_do_reset;
else
return NULL;
}
int intel_gpu_reset(struct drm_i915_private *dev_priv,
const unsigned int engine_mask)
{
reset_func reset = intel_get_gpu_reset(dev_priv);
unsigned int retry;
int ret;
GEM_BUG_ON(!engine_mask);
/*
* We want to perform per-engine reset from atomic context (e.g.
* softirq), which imposes the constraint that we cannot sleep.
* However, experience suggests that spending a bit of time waiting
* for a reset helps in various cases, so for a full-device reset
* we apply the opposite rule and wait if we want to. As we should
* always follow up a failed per-engine reset with a full device reset,
* being a little faster, stricter and more error prone for the
* atomic case seems an acceptable compromise.
*
* Unfortunately this leads to a bimodal routine, when the goal was
* to have a single reset function that worked for resetting any
* number of engines simultaneously.
*/
might_sleep_if(engine_mask == ALL_ENGINES);
/*
* If the power well sleeps during the reset, the reset
* request may be dropped and never complete (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
for (retry = 0; retry < 3; retry++) {
/*
* We stop the engines, otherwise we might get a failed reset
* and a dead gpu (on elk). Also, a gpu as modern as kbl can
* suffer a system hang if a batchbuffer is in progress when
* the reset is issued, regardless of the READY_TO_RESET ack.
* Thus assume it is best to stop the engines on all gens
* where we have a gpu reset.
*
* WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
*
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
*
* FIXME: Wa for more modern gens needs to be validated
*/
i915_stop_engines(dev_priv, engine_mask);
ret = -ENODEV;
if (reset) {
ret = reset(dev_priv, engine_mask, retry);
GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n",
engine_mask, ret, retry);
}
if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
break;
cond_resched();
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
return intel_get_gpu_reset(dev_priv) != NULL;
}
bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->has_reset_engine &&
i915_modparams.reset >= 2);
}
int intel_reset_guc(struct drm_i915_private *dev_priv)
{
u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC :
GEN9_GRDOM_GUC;
int ret;
GEM_BUG_ON(!HAS_GUC(dev_priv));
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = gen6_hw_domain_reset(dev_priv, guc_domain);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
return check_for_unclaimed_mmio(dev_priv);


@@ -1082,6 +1082,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
/* Disable Power wells for VDSC/joining */
intel_display_power_put(dev_priv,
intel_dsc_power_domain(old_crtc_state));
intel_display_power_put_unchecked(dev_priv,
intel_dsc_power_domain(old_crtc_state));
}
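Here the enable path stored no cookie for this power reference, so the put becomes intel_display_power_put_unchecked(): it releases the domain without a wakeref to cancel, whereas the checked intel_display_power_put() now requires the cookie returned by the matching get, as in the plane read-out hunks above.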


@@ -1449,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1756,6 +1756,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
};
struct drm_file *file;
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
int err;
if (!HAS_PPGTT(dev_priv)) {
@@ -1771,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
@@ -1785,7 +1786,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
err = i915_subtests(tests, ctx);
out_unlock:
intel_runtime_pm_put(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);


@@ -16,9 +16,10 @@ static int switch_to_context(struct drm_i915_private *i915,
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
@@ -32,7 +33,7 @@ static int switch_to_context(struct drm_i915_private *i915,
i915_request_add(rq);
}
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
return err;
}
@@ -65,7 +66,9 @@ static void trash_stolen(struct drm_i915_private *i915)
static void simulate_hibernate(struct drm_i915_private *i915)
{
intel_runtime_pm_get(i915);
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(i915);
/*
* As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -76,7 +79,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
*/
trash_stolen(i915);
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
}
static int pm_prepare(struct drm_i915_private *i915)
@@ -93,39 +96,39 @@ static int pm_prepare(struct drm_i915_private *i915)
static void pm_suspend(struct drm_i915_private *i915)
{
intel_runtime_pm_get(i915);
intel_wakeref_t wakeref;
i915_gem_suspend_gtt_mappings(i915);
i915_gem_suspend_late(i915);
intel_runtime_pm_put(i915);
with_intel_runtime_pm(i915, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_suspend_late(i915);
}
}
static void pm_hibernate(struct drm_i915_private *i915)
{
intel_runtime_pm_get(i915);
intel_wakeref_t wakeref;
i915_gem_suspend_gtt_mappings(i915);
with_intel_runtime_pm(i915, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
intel_runtime_pm_put(i915);
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
}
}
static void pm_resume(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
/*
* Both suspend and hibernate follow the same wakeup path and assume
* that runtime-pm just works.
*/
intel_runtime_pm_get(i915);
intel_engines_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
intel_runtime_pm_put(i915);
with_intel_runtime_pm(i915, wakeref) {
intel_engines_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
}
}
static int igt_gem_suspend(void *arg)


@@ -279,6 +279,7 @@ static int igt_gem_coherency(void *arg)
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
int err = 0;
@@ -298,7 +299,7 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@@ -376,7 +377,7 @@ static int igt_gem_coherency(void *arg)
}
}
unlock:
intel_runtime_pm_put(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
