drm/vc4: Fix race between page flip completion event and clean-up
There was a small window where a userspace program could submit a pageflip after receiving a pageflip completion event yet still receive EBUSY.

Signed-off-by: Derek Foreman <derekf@osg.samsung.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Daniel Stone <daniels@collabora.com>
commit 26fc78f6fe
parent c778cc5df9
committed by Eric Anholt
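To make the window concrete, here is a minimal, hypothetical userspace sketch (not part of the patch) that queues the next flip from inside the libdrm page-flip event handler; drm_fd, crtc_id, fb_ids[] and frame are placeholders assumed to have been set up by earlier modeset code. Before this fix, the drmModePageFlip() call issued immediately on receipt of the completion event could race vc4's non-blocking commit clean-up and fail with EBUSY.

/*
 * Hypothetical reproducer sketch, not part of the patch: drm_fd, crtc_id
 * and fb_ids[] must be provided by earlier setup code (open the device,
 * create two framebuffers, enable the CRTC).
 */
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int drm_fd;
static uint32_t crtc_id;
static uint32_t fb_ids[2];
static unsigned int frame;

static void page_flip_handler(int fd, unsigned int sequence,
			      unsigned int tv_sec, unsigned int tv_usec,
			      void *user_data)
{
	/*
	 * The completion event for the previous flip has just been read.
	 * Queuing the next flip right here is the window the commit
	 * message describes: before this fix, vc4's non-blocking commit
	 * clean-up could still be pending, so this call could fail with
	 * EBUSY even though the previous flip had completed.
	 */
	int ret = drmModePageFlip(fd, crtc_id, fb_ids[++frame & 1],
				  DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret)
		fprintf(stderr, "drmModePageFlip failed: %d\n", ret);
}

static void flip_loop(void)
{
	drmEventContext evctx = {
		.version = 2,	/* version 2 already carries page_flip_handler */
		.page_flip_handler = page_flip_handler,
	};
	struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };

	/* Kick off the first flip, then keep flipping from the handler. */
	drmModePageFlip(drm_fd, crtc_id, fb_ids[0],
			DRM_MODE_PAGE_FLIP_EVENT, NULL);
	for (;;) {
		if (poll(&pfd, 1, -1) > 0)
			drmHandleEvent(drm_fd, &evctx);
	}
}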
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -669,6 +669,14 @@ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
 	CRTC_WRITE(PV_INTEN, 0);
 }
 
+/* Must be called with the event lock held */
+bool vc4_event_pending(struct drm_crtc *crtc)
+{
+	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+	return !!vc4_crtc->event;
+}
+
 static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
 {
 	struct drm_crtc *crtc = &vc4_crtc->base;
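The requirement that vc4_event_pending() run under the event lock mirrors the interrupt-side page-flip handler, which delivers and clears the same vc4_crtc->event pointer under dev->event_lock. Roughly, and only as a sketch for orientation rather than the verbatim vc4_crtc.c body:

/*
 * Sketch for orientation only; the actual body of
 * vc4_crtc_handle_page_flip() in vc4_crtc.c may differ in detail.
 */
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (vc4_crtc->event) {
		/* Deliver the completion event to userspace... */
		drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
		/*
		 * ...and clear the pointer that vc4_event_pending() tests,
		 * all while holding dev->event_lock, so a concurrent
		 * vc4_atomic_commit() sees a consistent answer.
		 */
		vc4_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}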
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -442,6 +442,7 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
 extern struct platform_driver vc4_crtc_driver;
 int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
 void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
+bool vc4_event_pending(struct drm_crtc *crtc);
 int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
 int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
 			    unsigned int flags, int *vpos, int *hpos,
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -119,18 +119,35 @@ static int vc4_atomic_commit(struct drm_device *dev,
 
 	/* Make sure that any outstanding modesets have finished. */
 	if (nonblock) {
-		ret = down_trylock(&vc4->async_modeset);
-		if (ret) {
+		struct drm_crtc *crtc;
+		struct drm_crtc_state *crtc_state;
+		unsigned long flags;
+		bool busy = false;
+
+		/*
+		 * If there's an undispatched event to send then we're
+		 * obviously still busy.  If there isn't, then we can
+		 * unconditionally wait for the semaphore because it
+		 * shouldn't be contended (for long).
+		 *
+		 * This is to prevent a race where queuing a new flip
+		 * from userspace immediately on receipt of an event
+		 * beats our clean-up and returns EBUSY.
+		 */
+		spin_lock_irqsave(&dev->event_lock, flags);
+		for_each_crtc_in_state(state, crtc, crtc_state, i)
+			busy |= vc4_event_pending(crtc);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		if (busy) {
 			kfree(c);
 			return -EBUSY;
 		}
-	} else {
-		ret = down_interruptible(&vc4->async_modeset);
-		if (ret) {
-			kfree(c);
-			return ret;
-		}
+	}
+	ret = down_interruptible(&vc4->async_modeset);
+	if (ret) {
+		kfree(c);
+		return ret;
 	}
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
 	if (ret) {