drm/omap: fix race condition in DMM

The omapdrm DMM code sometimes crashes with:

WARNING: CPU: 0 PID: 1235 at lib/list_debug.c:36 __list_add+0x8c/0xbc()
list_add double add: new=e9265368, prev=e90139c4, next=e9265368.

This is caused by the code calling release_engine() twice for the same
engine.

A dmm_txn_commit(wait=true) call is supposed to wait until the DMM
transaction has finished. It does that, but it does not wait for the
irq handler to finish.

What happens is that the irq handler is triggered and either wakes up
the thread that called dmm_txn_commit(), or that thread never even
slept because the transaction finished in the HW very quickly. That
thread then continues executing, even though the irq handler has not
yet finished, and it may initiate a new transaction. If that new
transaction is async (i.e. wait=false), the engine's 'async' flag is
set to true. The original irq handler, which has not yet finished, then
sees the transaction as 'async', even though it was supposed to be
'sync'.

When that happens, the irq handler does an extra release_engine() call,
because it thinks it needs to release the engine, leading to the crash.
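
Annotated, the race in the old irq handler (the lines changed in the
hunk further below) looks roughly like this; the comments are added
here for illustration only:

	/* old irq handler, per engine (annotated excerpt) */
	wake_up_interruptible(&dmm->engines[i].wait_for_refill);
	/*
	 * Window: the woken sync waiter releases the engine itself, a new
	 * async transaction can claim the same engine and set
	 * engines[i].async = true before the handler gets here ...
	 */
	if (dmm->engines[i].async)
		/* ... making the handler release the engine a second time */
		release_engine(&dmm->engines[i]);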

This patch fixes the issue by using a completion to ensure that the irq
handler has finished before a dmm_txn_commit(wait=true) call may continue.
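
Condensed from the hunks below (error prints and unrelated details
dropped), the resulting ordering is:

	/* dmm_txn_commit(): arm the completion before kicking the HW */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);
	smp_mb();	/* make both visible before the irq can fire */

	/* dmm_txn_commit(), sync path: wait for the irq handler itself */
	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
						 msecs_to_jiffies(1)))
			ret = -ETIMEDOUT;
	}

	/* irq handler, per engine: complete() is the last thing it does */
	if (dmm->engines[i].async)
		release_engine(&dmm->engines[i]);
	complete(&dmm->engines[i].compl);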

Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
Date:   2014-12-17 14:34:23 +02:00
Parent: 76c4055f2c
Commit: 7439507f01
2 changed files with 9 additions and 8 deletions


@@ -148,7 +148,7 @@ struct refill_engine {
 	bool async;
-	wait_queue_head_t wait_for_refill;
+	struct completion compl;
 	struct list_head idle_node;
 };


@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/time.h>
 #include <linux/list.h>
+#include <linux/completion.h>
 
 #include "omap_dmm_tiler.h"
 #include "omap_dmm_priv.h"
@@ -146,10 +147,10 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 	for (i = 0; i < dmm->num_engines; i++) {
 		if (status & DMM_IRQSTAT_LST) {
-			wake_up_interruptible(&dmm->engines[i].wait_for_refill);
 			if (dmm->engines[i].async)
 				release_engine(&dmm->engines[i]);
+			complete(&dmm->engines[i].compl);
 		}
 
 		status >>= 8;
@@ -273,7 +274,8 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 	/* mark whether it is async to denote list management in IRQ handler */
 	engine->async = wait ? false : true;
-	/* verify that the irq handler sees the 'async' value */
+	reinit_completion(&engine->compl);
+	/* verify that the irq handler sees the 'async' and completion value */
 	smp_mb();
 
 	/* kick reload */
@@ -281,9 +283,8 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 			dmm->base + reg[PAT_DESCR][engine->id]);
 
 	if (wait) {
-		if (wait_event_interruptible_timeout(engine->wait_for_refill,
-				wait_status(engine, DMM_PATSTATUS_READY) == 0,
-				msecs_to_jiffies(1)) <= 0) {
+		if (!wait_for_completion_timeout(&engine->compl,
+				msecs_to_jiffies(1))) {
 			dev_err(dmm->dev, "timed out waiting for done\n");
 			ret = -ETIMEDOUT;
 		}
@@ -719,7 +720,7 @@ static int omap_dmm_probe(struct platform_device *dev)
 						(REFILL_BUFFER_SIZE * i);
 		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
 						(REFILL_BUFFER_SIZE * i);
-		init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+		init_completion(&omap_dmm->engines[i].compl);
 
 		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
 	}