i915, nouveau, sun4i, amd, ttm and core drm fixes

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJaPIcmAAoJEAx081l5xIa+kusP/RMXDXgMwSYT4pRdBnTPu5TD
 IqmKdeO5RF4+bBhYmWs/bEglnftLs8RdMCpmxR63ATF2hmDrdEUj9tjwYvaiVN1/
 qC4UoJaOPmuQlrz/Aiyax9cgsAtvHyqXf/h2j6HxEkmzTasS/o9akj0D5NBeDBZ2
 vDUpCYUv3bubE0y+8SlygWtU+O0Di7HZpMMCBq/V6Obqi8YNt7azN9OM/xOR2bhc
 KLtu7xVVzSzX1uuwQK6tuBMoJgS/6yd1EWiqsVTZhsYqYI6nM5uno0lKnD0ARSi+
 zyj/O69iYjqRny368zIG64mulCnAQQgajDNbjqJaQA29PPMbdK4JYfFv0tFM3EU1
 hC1TmWbNeO4ck7n6aVFUAzvJpr1EQkl5nRaA9x9Z0U+ZS/gtZRpFaeZ5SQYfsX+2
 r8i44jIPbWXFaaWaRcu2qJYpbhxpFiPhsZzuEOSD68Nn2YPN0rNY2tZRaSAuAfDj
 v1q0iKRXzEhAgbkOJcCzJdKY6uuGv31HwoVtOwdlXaLoLr2U3HsrjXW9t7sPyoj8
 qz/KplcyDCLxaB/wbCxald1L/hyWdrIWntHuTH0wkpiITufC1bA54iGKdD8Vws9y
 ljPBWyJh4nIqp3O7HEHyk7LoiHj9s/oX8wuFPPuA8yfPzTqrWi/7w9k0fqe67jeq
 atvNsJDUyWF6rpIwT8yo
 =yyuL
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.15-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "I've got most of two weeks worth of fixes here due to being on
  holidays last week.

  The main things are:

   - Core:
      * Syncobj fd reference count fix
      * Leasing ioctl misuse fix

   - nouveau regression fixes

   - further amdgpu DC fixes

   - sun4i regression fixes

  I'm not sure I'll see many fixes over the next couple of weeks; we'll
  see how we go"

* tag 'drm-fixes-for-v4.15-rc5' of git://people.freedesktop.org/~airlied/linux: (27 commits)
  drm/syncobj: Stop reusing the same struct file for all syncobj -> fd
  drm: move lease init after validation in drm_lease_create
  drm/plane: Make framebuffer refcounting the responsibility of setplane_internal callers
  drm/sun4i: hdmi: Move the mode_valid callback to the encoder
  drm/nouveau: fix obvious memory leak
  drm/i915: Protect DDI port to DPLL map from theoretical race.
  drm/i915/lpe: Remove double-encapsulation of info string
  drm/sun4i: Fix error path handling
  drm/nouveau: use alternate memory type for system-memory buffers with kind != 0
  drm/nouveau: avoid GPU page sizes > PAGE_SIZE for buffer objects in host memory
  drm/nouveau/mmu/gp10b: use correct implementation
  drm/nouveau/pci: do a msi rearm on init
  drm/nouveau/imem/nv50: fix refcount_t warning
  drm/nouveau/bios/dp: support DP Info Table 2.0
  drm/nouveau/fbcon: fix NULL pointer access in nouveau_fbcon_destroy
  drm/amd/display: Fix rehook MST display not light back on
  drm/amd/display: fix missing pixel clock adjustment for dongle
  drm/amd/display: set chroma taps to 1 when not scaling
  drm/amd/display: add pipe locking before front end programing
  drm/sun4i: validate modes for HDMI
  ...
Commit e7ae59cb4b by Linus Torvalds, 2017-12-22 11:51:01 -08:00. 31 changed files with 268 additions and 147 deletions.

@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));

@@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct dm_connector_state *dm_state)
{
struct drm_display_mode *preferred_mode = NULL;
const struct drm_connector *drm_connector;
struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
@@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!aconnector->dc_sink) {
/*
* Exclude MST from creating fake_sink
* TODO: need to enable MST into fake_sink feature
* Create dc_sink when necessary to MST
* Don't apply fake_sink to MST
*/
if (aconnector->mst_port)
goto stream_create_fail;
if (aconnector->mst_port) {
dm_dp_mst_dc_sink_create(drm_connector);
goto mst_dc_sink_create_done;
}
if (create_fake_sink(aconnector))
goto stream_create_fail;
@@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream_create_fail:
dm_state_null:
drm_connector_null:
mst_dc_sink_create_done:
return stream;
}

@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
return ret;
}
void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct edid *edid;
struct dc_sink *dc_sink;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
drm_mode_connector_update_edid_property(
&aconnector->base,
NULL);
return;
}
aconnector->edid = edid;
dc_sink = dc_link_add_remote_sink(
aconnector->dc_link,
(uint8_t *)aconnector->edid,
(aconnector->edid->extensions + 1) * EDID_LENGTH,
&init_params);
dc_sink->priv = aconnector;
aconnector->dc_sink = dc_sink;
amdgpu_dm_add_sink_to_freesync_module(
connector, aconnector->edid);
drm_mode_connector_update_edid_property(
&aconnector->base, aconnector->edid);
}
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
aconnector->mst_connected = true;
return &aconnector->base;
}
}
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
aconnector->mst_connected = true;
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_update_edid_property(
&aconnector->base,
NULL);
aconnector->mst_connected = false;
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
{
mutex_lock(&connector->dev->mode_config.mutex);
drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
}
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
drm_connector_register(connector);
if (aconnector->mst_connected)
dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {

@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif

@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
/*
* Spreadsheet doesn't handle taps_c is one properly,
* need to force Chroma to always be scaled to pass
* bandwidth validation.
*/
if (v->override_hta_pschroma[input_idx] == 1)
v->override_hta_pschroma[input_idx] = 2;
if (v->override_vta_pschroma[input_idx] == 1)
v->override_vta_pschroma[input_idx] = 2;
v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
}
if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
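
Note: this clamp pairs with the dpp hunk further down. The hardware may now
program a single chroma tap when chroma is not scaled, while the bandwidth
"spreadsheet" model, which cannot handle taps_c == 1, is fed a value of at
least 2 during validation only. A minimal stand-alone sketch of the clamp
(the helper name is illustrative, not from the driver):

    /* Validation-only clamp: the DCN bandwidth model misbehaves when
     * chroma taps == 1, so pretend unscaled chroma uses 2 taps. */
    static inline int clamp_chroma_taps_for_validation(int taps_c)
    {
            return taps_c == 1 ? 2 : taps_c;
    }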

@@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
link->link_enc->funcs->disable_output(link->link_enc, signal, link);
}
bool dp_active_dongle_validate_timing(
static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dc_dongle_caps *dongle_caps)
{
@@ -1833,6 +1833,8 @@ bool dp_active_dongle_validate_timing(
/* Check Color Depth and Pixel Clock */
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
required_pix_clk /= 2;
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
required_pix_clk = required_pix_clk * 2 / 3;
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
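
Note: the added 4:2:2 branch matters because the dongle limit is on TMDS
bandwidth, not the nominal pixel clock: 4:2:0 carries half the data of
4:4:4 per pixel, and 4:2:2 two thirds. A self-contained sketch of that
adjustment (the enum is a stand-in for dc's pixel-encoding type):

    enum px_enc { PX_RGB444, PX_YCBCR422, PX_YCBCR420 }; /* stand-ins */

    /* Effective clock the dongle must sustain for a given encoding. */
    static unsigned int effective_pix_clk_khz(unsigned int pix_clk_khz,
                                              enum px_enc enc)
    {
            if (enc == PX_YCBCR420)
                    return pix_clk_khz / 2;     /* half of 4:4:4 */
            if (enc == PX_YCBCR422)
                    return pix_clk_khz * 2 / 3; /* two thirds of 4:4:4 */
            return pix_clk_khz;                 /* RGB / 4:4:4 */
    }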

@@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
int num_planes,
struct dc_state *context)
{
int i, be_idx;
int i;
if (num_planes == 0)
return;
be_idx = -1;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (stream == context->res_ctx.pipe_ctx[i].stream) {
be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
break;
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (stream == pipe_ctx->stream) {
if (!pipe_ctx->top_pipe &&
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
}
}
@@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
context->stream_count);
dce110_program_front_end_for_pipe(dc, pipe_ctx);
dc->hwss.update_plane_addr(dc, pipe_ctx);
program_surface_visibility(dc, pipe_ctx);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if ((stream == pipe_ctx->stream) &&
(!pipe_ctx->top_pipe) &&
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
}
static void dce110_power_down_fe(struct dc *dc, int fe_idx)
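
Note: the shape of this change is lock, program, unlock across the whole
surface update: every top pipe serving the stream is locked before any
front end is programmed and released only afterwards, so scanout never
latches a half-programmed state. A structural sketch under stand-in types
(the real code additionally keys the locks off top_pipe and plane_state,
as above):

    struct pipe { int locked; };

    static void pipe_lock(struct pipe *p, int lock) { p->locked = lock; }
    static void program_front_end(struct pipe *p)   { (void)p; }

    static void apply_ctx_locked(struct pipe *pipes, int n)
    {
            for (int i = 0; i < n; i++)
                    pipe_lock(&pipes[i], 1);    /* lock all first      */
            for (int i = 0; i < n; i++)
                    program_front_end(&pipes[i]);
            for (int i = 0; i < n; i++)
                    pipe_lock(&pipes[i], 0);    /* release only at end */
    }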

@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
scl_data->taps.h_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert))
scl_data->taps.v_taps = 1;
/*
* Spreadsheet doesn't handle taps_c is one properly,
* need to force Chroma to always be scaled to pass
* bandwidth validation.
*/
if (IDENTITY_RATIO(scl_data->ratios.horz_c))
scl_data->taps.h_taps_c = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert_c))
scl_data->taps.v_taps_c = 1;
}
return true;

@@ -220,17 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
mutex_lock(&dev->mode_config.idr_mutex);
/* Insert the new lessee into the tree */
id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
if (id < 0) {
error = id;
goto out_lessee;
}
lessee->lessee_id = id;
lessee->lessor = drm_master_get(lessor);
list_add_tail(&lessee->lessee_list, &lessor->lessees);
idr_for_each_entry(leases, entry, object) {
error = 0;
if (!idr_find(&dev->mode_config.crtc_idr, object))
@@ -246,6 +235,17 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
}
}
/* Insert the new lessee into the tree */
id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
if (id < 0) {
error = id;
goto out_lessee;
}
lessee->lessee_id = id;
lessee->lessor = drm_master_get(lessor);
list_add_tail(&lessee->lessee_list, &lessor->lessees);
/* Move the leases over */
lessee->leases = *leases;
DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
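
Note: the reordering is the entire fix. Previously the lessee was inserted
into the lessee_idr and linked to its lessor before the requested objects
were validated, so a rejected lease left a half-built lessee visible to
others. Validating first means the error path never has to unpublish
anything. The generic shape, as a sketch with hypothetical names:

    struct lease { int dummy; };

    static int validate_objects(const struct lease *l) { (void)l; return 0; }
    static void publish(struct lease *l)               { (void)l; }

    /* Run every check that can fail before the object becomes reachable,
     * so failure requires no unwinding of shared state. */
    static int lease_create(struct lease *l)
    {
            int err = validate_objects(l);  /* nothing published yet */
            if (err)
                    return err;
            publish(l);                     /* reached only when valid */
            return 0;
    }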

@@ -558,11 +558,10 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
}
/*
* setplane_internal - setplane handler for internal callers
* __setplane_internal - setplane handler for internal callers
*
* Note that we assume an extra reference has already been taken on fb. If the
* update fails, this reference will be dropped before return; if it succeeds,
* the previous framebuffer (if any) will be unreferenced instead.
* This function will take a reference on the new fb for the plane
* on success.
*
* src_{x,y,w,h} are provided in 16.16 fixed point format
*/
@@ -630,14 +629,12 @@ static int __setplane_internal(struct drm_plane *plane,
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
fb = NULL;
drm_framebuffer_get(plane->fb);
} else {
plane->old_fb = NULL;
}
out:
if (fb)
drm_framebuffer_put(fb);
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
@@ -685,6 +682,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_plane *plane;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb = NULL;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -717,15 +715,16 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
}
/*
* setplane_internal will take care of deref'ing either the old or new
* framebuffer depending on success.
*/
return setplane_internal(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
ret = setplane_internal(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (fb)
drm_framebuffer_put(fb);
return ret;
}
static int drm_mode_cursor_universal(struct drm_crtc *crtc,
@@ -788,13 +787,12 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
src_h = fb->height << 16;
}
/*
* setplane_internal will take care of deref'ing either the old or new
* framebuffer depending on success.
*/
ret = __setplane_internal(crtc->cursor, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
0, 0, src_w, src_h, ctx);
crtc_x, crtc_y, crtc_w, crtc_h,
0, 0, src_w, src_h, ctx);
if (fb)
drm_framebuffer_put(fb);
/* Update successful; save new cursor position, if necessary */
if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {

@@ -369,40 +369,26 @@ static const struct file_operations drm_syncobj_file_fops = {
.release = drm_syncobj_file_release,
};
static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
{
struct file *file = anon_inode_getfile("syncobj_file",
&drm_syncobj_file_fops,
syncobj, 0);
if (IS_ERR(file))
return PTR_ERR(file);
drm_syncobj_get(syncobj);
if (cmpxchg(&syncobj->file, NULL, file)) {
/* lost the race */
fput(file);
}
return 0;
}
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
int ret;
struct file *file;
int fd;
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
if (!syncobj->file) {
ret = drm_syncobj_alloc_file(syncobj);
if (ret) {
put_unused_fd(fd);
return ret;
}
file = anon_inode_getfile("syncobj_file",
&drm_syncobj_file_fops,
syncobj, 0);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
fd_install(fd, syncobj->file);
drm_syncobj_get(syncobj);
fd_install(fd, file);
*p_fd = fd;
return 0;
}
@@ -422,31 +408,24 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
return ret;
}
static struct drm_syncobj *drm_syncobj_fdget(int fd)
{
struct file *file = fget(fd);
if (!file)
return NULL;
if (file->f_op != &drm_syncobj_file_fops)
goto err;
return file->private_data;
err:
fput(file);
return NULL;
};
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
int fd, u32 *handle)
{
struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
struct drm_syncobj *syncobj;
struct file *file;
int ret;
if (!syncobj)
file = fget(fd);
if (!file)
return -EINVAL;
if (file->f_op != &drm_syncobj_file_fops) {
fput(file);
return -EINVAL;
}
/* take a reference to put in the idr */
syncobj = file->private_data;
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
@@ -455,12 +434,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
spin_unlock(&file_private->syncobj_table_lock);
idr_preload_end();
if (ret < 0) {
fput(syncobj->file);
return ret;
}
*handle = ret;
return 0;
if (ret > 0) {
*handle = ret;
ret = 0;
} else
drm_syncobj_put(syncobj);
fput(file);
return ret;
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
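
Note: before this change the first export cached one struct file in
syncobj->file and every later handle-to-fd call installed that same file,
so file reference counts could not match fd lifetimes; now every export
allocates its own anon-inode file. A userspace sketch of the resulting
guarantee, using libdrm's syncobj wrappers (device path assumed, errors
abbreviated):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <xf86drm.h>    /* drmSyncobjCreate() and friends */

    int main(void)
    {
            int dev = open("/dev/dri/renderD128", O_RDWR); /* assumed node */
            uint32_t handle;
            int fd1 = -1, fd2 = -1;

            if (dev < 0 || drmSyncobjCreate(dev, 0, &handle))
                    return 1;

            /* Each export now yields an independent file description. */
            drmSyncobjHandleToFD(dev, handle, &fd1);
            drmSyncobjHandleToFD(dev, handle, &fd2);

            close(fd1);     /* must not invalidate fd2 */
            close(fd2);
            drmSyncobjDestroy(dev, handle);
            close(dev);
            return 0;
    }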

@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT,
NULL);
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
return ret;
i915_gem_retire_requests(to_i915(obj->base.dev));
while ((vma = list_first_entry_or_null(&obj->vma_list,
struct i915_vma,
obj_link))) {

@@ -367,6 +367,7 @@ struct i915_sw_dma_fence_cb {
struct dma_fence *dma;
struct timer_list timer;
struct irq_work work;
struct rcu_head rcu;
};
static void timer_i915_sw_fence_wake(struct timer_list *t)
@@ -406,7 +407,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk)
del_timer_sync(&cb->timer);
dma_fence_put(cb->dma);
kfree(cb);
kfree_rcu(cb, rcu);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
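
Note: the cb may still be dereferenced by a concurrent RCU reader after
the irq work runs, so an immediate kfree() risks a use-after-free;
kfree_rcu() uses the rcu_head added above to defer the free past a grace
period. The general pattern, as a kernel-C sketch:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct node {
            int payload;
            struct rcu_head rcu;    /* storage consumed by kfree_rcu() */
    };

    /* Readers traverse nodes under rcu_read_lock(); after unlinking, the
     * writer must defer the free until those readers are done. */
    static void node_release(struct node *n)
    {
            kfree_rcu(n, rcu);      /* call_rcu() + kfree() in one step */
    }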

@@ -186,7 +186,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
struct intel_wait *wait, *n, *first;
if (!b->irq_armed)
return;
goto wakeup_signaler;
/* We only disarm the irq when we are idle (all requests completed),
* so if the bottom-half remains asleep, it missed the request
@@ -208,6 +208,14 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
b->waiters = RB_ROOT;
spin_unlock_irq(&b->rb_lock);
/*
* The signaling thread may be asleep holding a reference to a request,
* that had its signaling cancelled prior to being preempted. We need
* to kick the signaler, just in case, to release any such reference.
*/
wakeup_signaler:
wake_up_process(b->signaler);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -651,23 +659,15 @@ static int intel_breadcrumbs_signaler(void *arg)
}
if (unlikely(do_schedule)) {
DEFINE_WAIT(exec);
if (kthread_should_park())
kthread_parkme();
if (kthread_should_stop()) {
GEM_BUG_ON(request);
if (unlikely(kthread_should_stop())) {
i915_gem_request_put(request);
break;
}
if (request)
add_wait_queue(&request->execute, &exec);
schedule();
if (request)
remove_wait_queue(&request->execute, &exec);
}
i915_gem_request_put(request);
} while (1);

@@ -2128,6 +2128,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (WARN_ON(!pll))
return;
mutex_lock(&dev_priv->dpll_lock);
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2157,6 +2159,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
} else if (INTEL_INFO(dev_priv)->gen < 9) {
I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
mutex_unlock(&dev_priv->dpll_lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)

@@ -9944,11 +9944,10 @@ found:
}
ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
drm_framebuffer_put(fb);
if (ret)
goto fail;
drm_framebuffer_put(fb);
ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
goto fail;

@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
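
Note: the bug is a doubled newline, not a format-string hole: the "%s\n"
format already ends the line and the argument string carried its own
'\n'. A trivial userspace reproduction with printf:

    #include <stdio.h>

    int main(void)
    {
            /* before: argument ends in '\n', format adds another */
            printf("%s\n", "HDaudio controller not detected\n");
            /* after: the message is the format itself, one newline */
            printf("HDaudio controller not detected\n");
            return 0;
    }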

@@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
/* Determine if we can get a cache-coherent map, forcing
* uncached mapping if we can't.
*/
if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
if (!nouveau_drm_use_coherent_gpu_mapping(drm))
nvbo->force_coherent = true;
}
@@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
(flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
continue;
if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host)
if ((flags & TTM_PL_FLAG_TT) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
/* Select this page size if it's the first that supports

@@ -157,8 +157,8 @@ struct nouveau_drm {
struct nvif_object copy;
int mtrr;
int type_vram;
int type_host;
int type_ncoh;
int type_host[2];
int type_ncoh[2];
} ttm;
/* GEM interface support */
@@ -217,6 +217,13 @@
return dev->dev_private;
}
static inline bool
nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
{
struct nvif_mmu *mmu = &drm->client.mmu;
return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
}
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
bool nouveau_pmops_runtime(void);

@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_fini(&fbcon->helper);
if (nouveau_fb->nvbo) {
if (nouveau_fb && nouveau_fb->nvbo) {
nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);

@@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
u8 type;
int ret;
if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
type = drm->ttm.type_ncoh;
if (!nouveau_drm_use_coherent_gpu_mapping(drm))
type = drm->ttm.type_ncoh[!!mem->kind];
else
type = drm->ttm.type_host;
type = drm->ttm.type_host[0];
if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
mem->comp = mem->kind = 0;

@@ -235,6 +235,27 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
drm->ttm.mem_global_ref.release = NULL;
}
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
struct nvif_mmu *mmu = &drm->client.mmu;
int typei;
typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
kind | NVIF_MEM_COHERENT);
if (typei < 0)
return -ENOSYS;
drm->ttm.type_host[!!kind] = typei;
typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
if (typei < 0)
return -ENOSYS;
drm->ttm.type_ncoh[!!kind] = typei;
return 0;
}
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
@@ -244,18 +265,16 @@ nouveau_ttm_init(struct nouveau_drm *drm)
struct drm_device *dev = drm->dev;
int typei, ret;
typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
NVIF_MEM_COHERENT);
if (typei < 0)
return -ENOSYS;
ret = nouveau_ttm_init_host(drm, 0);
if (ret)
return ret;
drm->ttm.type_host = typei;
typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
if (typei < 0)
return -ENOSYS;
drm->ttm.type_ncoh = typei;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
drm->client.device.info.chipset != 0x50) {
ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
if (ret)
return ret;
}
if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
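
Note: type_host and type_ncoh become two-entry arrays indexed by !!kind:
slot 0 for plain system memory, slot 1 for memory carrying a nonzero kind
(tiling/compression tag), each probed by nouveau_ttm_init_host() above.
The double negation collapses any nonzero kind byte to index 1; a
stand-alone illustration with made-up values:

    #include <assert.h>

    int main(void)
    {
            int type_host[2] = { 3, 7 };    /* illustrative type indices */
            unsigned char kind = 0xfe;      /* any nonzero kind byte */

            assert(type_host[!!kind] == 7); /* !!0xfe == 1 */
            assert(type_host[!!0]    == 3); /* !!0    == 0 */
            return 0;
    }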

@@ -67,8 +67,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
nvif_vmm_put(&vma->vmm->vmm, &tmp);
}
list_del(&vma->head);
*pvma = NULL;
kfree(*pvma);
*pvma = NULL;
}
}
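
Note: this is the "obvious memory leak" from the pull summary: the old
code cleared the caller's pointer before freeing it, so kfree() always
received NULL. The corrected ordering, sketched with libc free():

    #include <stdlib.h>

    struct obj { int payload; };

    /* Free first, then clear the caller's reference; the reverse order
     * frees NULL and leaks the allocation. */
    static void obj_del(struct obj **pobj)
    {
            free(*pobj);
            *pobj = NULL;
    }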

@@ -2369,7 +2369,7 @@ nv13b_chipset = {
.imem = gk20a_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp10b_mc_new,
.mmu = gf100_mmu_new,
.mmu = gp10b_mmu_new,
.secboot = gp10b_secboot_new,
.pmu = gm20b_pmu_new,
.timer = gk20a_timer_new,

@@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (data) {
*ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
case 0x20:
case 0x21:
case 0x30:
case 0x40:
@@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
if (data && idx < *cnt) {
u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
switch (*ver * !!outp) {
case 0x20:
case 0x21:
case 0x30:
*hdr = nvbios_rd08(bios, data + 0x04);
@@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
info->type = nvbios_rd16(bios, data + 0x00);
info->mask = nvbios_rd16(bios, data + 0x02);
switch (*ver) {
case 0x20:
info->mask |= 0x00c0; /* match any link */
/* fall-through */
case 0x21:
case 0x30:
info->flags = nvbios_rd08(bios, data + 0x05);
info->script[0] = nvbios_rd16(bios, data + 0x06);
info->script[1] = nvbios_rd16(bios, data + 0x08);
info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
if (*len >= 0x0c)
info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
if (*len >= 0x0f) {
info->script[2] = nvbios_rd16(bios, data + 0x0c);
info->script[3] = nvbios_rd16(bios, data + 0x0e);
@@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
memset(info, 0x00, sizeof(*info));
if (data) {
switch (*ver) {
case 0x20:
case 0x21:
info->dc = nvbios_rd08(bios, data + 0x02);
info->pe = nvbios_rd08(bios, data + 0x03);

@@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
refcount_inc(&iobj->maps);
refcount_set(&iobj->maps, 1);
}
mutex_unlock(&imem->subdev.mutex);
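
Note: refcount_t deliberately refuses to increment from zero (it WARNs,
treating that as a use-after-free), so re-acquiring the first mapping has
to use refcount_set() for the 0 -> 1 transition. A sketch of the rule
(assumes the caller serializes transitions, as the mutex does above):

    #include <linux/refcount.h>

    static void maps_get(refcount_t *maps)
    {
            if (refcount_read(maps) == 0)
                    refcount_set(maps, 1);  /* first user re-arms count */
            else
                    refcount_inc(maps);     /* would WARN if count were 0 */
    }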

@@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
return ret;
pci->irq = pdev->irq;
/* Ensure MSI interrupts are armed, for the case where there are
* already interrupts pending (for whatever reason) at load time.
*/
if (pci->msi)
pci->func->msi_rearm(pci);
return ret;
}

@@ -175,11 +175,31 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
}
static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
unsigned long rate = mode->clock * 1000;
unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
long rounded_rate;
/* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
if (rate > 165000000)
return MODE_CLOCK_HIGH;
rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
if (rounded_rate > 0 &&
max_t(unsigned long, rounded_rate, rate) -
min_t(unsigned long, rounded_rate, rate) < diff)
return MODE_OK;
return MODE_NOCLOCK;
}
static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
.atomic_check = sun4i_hdmi_atomic_check,
.disable = sun4i_hdmi_disable,
.enable = sun4i_hdmi_enable,
.mode_set = sun4i_hdmi_mode_set,
.mode_valid = sun4i_hdmi_mode_valid,
};
static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
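
Note: the mode_valid test accepts a mode only when the TMDS clock can be
rounded to within 0.5% of the request (diff = rate / 200). A stand-alone
sketch of the comparison; e.g. a 148.5 MHz request tolerates rates within
742.5 kHz of the target:

    #include <stdbool.h>

    static bool tmds_rate_ok(unsigned long rate, long rounded)
    {
            unsigned long diff = rate / 200;    /* 0.5% of the request */
            unsigned long hi, lo;

            if (rounded <= 0)
                    return false;
            hi = rate > (unsigned long)rounded ? rate : (unsigned long)rounded;
            lo = rate > (unsigned long)rounded ? (unsigned long)rounded : rate;
            return hi - lo < diff;
    }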

@@ -724,12 +724,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
if (IS_ERR(tcon->crtc)) {
dev_err(dev, "Couldn't create our CRTC\n");
ret = PTR_ERR(tcon->crtc);
goto err_free_clocks;
goto err_free_dotclock;
}
ret = sun4i_rgb_init(drm, tcon);
if (ret < 0)
goto err_free_clocks;
goto err_free_dotclock;
if (tcon->quirks->needs_de_be_mux) {
/*

@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
freed += (nr_free_pool - shrink_pages) << pool->order;
if (freed >= sc->nr_to_scan)
break;
shrink_pages <<= pool->order;
}
mutex_unlock(&lock);
return freed;
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int r = 0;
unsigned i, j, cpages;
unsigned npages = 1 << order;
unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
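
Note: both ttm_page_alloc fixes are unit conversions. Huge-page pools
count compound pages of 2^order base pages, while the shrinker quota
(sc->nr_to_scan) and NUM_PAGES_TO_ALLOC are in base pages, so any count
crossing that boundary must be shifted by the pool's order. A small
stand-alone illustration:

    #include <assert.h>

    /* Convert a compound-page count to base pages. */
    static unsigned int base_pages(unsigned int compound, unsigned int order)
    {
            return compound << order;
    }

    int main(void)
    {
            /* four order-9 huge pages cover 4 * 512 = 2048 base pages */
            assert(base_pages(4, 9) == 2048);
            return 0;
    }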