Mirror of https://github.com/torvalds/linux.git (synced 2024-11-20 02:51:44 +00:00)
drm/vmwgfx: Fix a destroy-while-held mutex problem.
When validating legacy surfaces, the backup bo might be destroyed at surface validate time. However, the kms resource validation code may have the bo reserved, so we would destroy a locked mutex. While there shouldn't be any other users of that mutex when it is destroyed, it causes a lock leak and thus throws a lockdep error.

Fix this by having the kms resource validation code hold a reference to the bo while we have it reserved. We do this by introducing a validation context which might come in handy when the kms code is extended to validate multiple resources or buffers.

Cc: <stable@vger.kernel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent 140bcaa23a
commit 73a88250b7
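For orientation, the sketch below shows the caller pattern this patch introduces, modeled on vmw_kms_sou_do_surface_dirty() and vmw_kms_stdu_surface_dirty() in the diff that follows. The function name example_surface_dirty() and the elided dirty-emission step are illustrative only; struct vmw_validation_ctx and the helper calls come from the patch itself.

    /* Illustrative sketch only; mirrors the prepare/finish pattern in the diff below. */
    static int example_surface_dirty(struct vmw_resource *srf,
                                     struct vmw_fence_obj **out_fence)
    {
            struct vmw_validation_ctx ctx;  /* keeps srf and a ref to its backup bo */
            int ret;

            /*
             * Reserves and validates the resource and, if it has a backup bo,
             * stores a reference to that bo in ctx->buf, so the bo cannot be
             * destroyed while the KMS code holds it reserved.
             */
            ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
            if (ret)
                    return ret;

            /* ... emit surface dirty commands here ... */

            /* On an error path, vmw_kms_helper_resource_revert(&ctx) undoes prepare. */
            vmw_kms_helper_resource_finish(&ctx, out_fence);
            return 0;
    }

Passing a small on-stack context instead of the bare resource keeps the reserved buffer and the reference taken on it together, and, as the commit message notes, leaves room for later validating multiple resources or buffers.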
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,7 +31,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_rect.h>
 
-
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
@@ -2517,9 +2516,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
  * Helper to be used if an error forces the caller to undo the actions of
  * vmw_kms_helper_resource_prepare.
  */
-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
 {
-	vmw_kms_helper_buffer_revert(res->backup);
+	struct vmw_resource *res = ctx->res;
+
+	vmw_kms_helper_buffer_revert(ctx->buf);
+	vmw_dmabuf_unreference(&ctx->buf);
 	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2536,10 +2538,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
  * interrupted by a signal.
  */
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible)
+				    bool interruptible,
+				    struct vmw_validation_ctx *ctx)
 {
 	int ret = 0;
 
+	ctx->buf = NULL;
+	ctx->res = res;
+
 	if (interruptible)
 		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
 	else
@@ -2558,6 +2564,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 						    res->dev_priv->has_mob);
 		if (ret)
 			goto out_unreserve;
+
+		ctx->buf = vmw_dmabuf_reference(res->backup);
 	}
 	ret = vmw_resource_validate(res);
 	if (ret)
@@ -2565,7 +2573,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 	return 0;
 
 out_revert:
-	vmw_kms_helper_buffer_revert(res->backup);
+	vmw_kms_helper_buffer_revert(ctx->buf);
 out_unreserve:
 	vmw_resource_unreserve(res, false, NULL, 0);
 out_unlock:
@@ -2581,11 +2589,13 @@ out_unlock:
  * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
  * ref-counted fence pointer is returned here.
  */
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
-			     struct vmw_fence_obj **out_fence)
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
+				    struct vmw_fence_obj **out_fence)
 {
-	if (res->backup || out_fence)
-		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+	struct vmw_resource *res = ctx->res;
+
+	if (ctx->buf || out_fence)
+		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
 					     out_fence, NULL);
 
 	vmw_resource_unreserve(res, false, NULL, 0);
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -240,6 +240,11 @@ struct vmw_display_unit {
 	int set_gui_y;
 };
 
+struct vmw_validation_ctx {
+	struct vmw_resource *res;
+	struct vmw_dma_buffer *buf;
+};
+
 #define vmw_crtc_to_du(x) \
 	container_of(x, struct vmw_display_unit, crtc)
 #define vmw_connector_to_du(x) \
@@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 				  struct drm_vmw_fence_rep __user *
 				  user_fence_rep);
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible);
-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+				    bool interruptible,
+				    struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
 				    struct vmw_fence_obj **out_fence);
 int vmw_kms_readback(struct vmw_private *dev_priv,
 		     struct drm_file *file_priv,
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_kms_sou_surface_dirty sdirty;
+	struct vmw_validation_ctx ctx;
 	int ret;
 
 	if (!srf)
 		srf = &vfbs->surface->res;
 
-	ret = vmw_kms_helper_resource_prepare(srf, true);
+	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
 	if (ret)
 		return ret;
 
@@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
-	vmw_kms_helper_resource_finish(srf, out_fence);
+	vmw_kms_helper_resource_finish(&ctx, out_fence);
 
 	return ret;
 }
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_stdu_dirty sdirty;
+	struct vmw_validation_ctx ctx;
 	int ret;
 
 	if (!srf)
 		srf = &vfbs->surface->res;
 
-	ret = vmw_kms_helper_resource_prepare(srf, true);
+	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
 	if (ret)
 		return ret;
 
@@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
 out_finish:
-	vmw_kms_helper_resource_finish(srf, out_fence);
+	vmw_kms_helper_resource_finish(&ctx, out_fence);
 
 	return ret;
 }