platform/mellanox: fix potential deadlock in the tmfifo driver

This commit fixes a potential deadlock caused by the console Rx and
Tx paths running at the same time. Both Rx and Tx take the console
(hvc) lock and the tmfifo spinlock, but in opposite order, which can
deadlock. The fix is to use separate tmfifo spinlocks for Rx and Tx:
they protect different resources, so it is safe to split the lock.
A simplified sketch of the inversion follows the call traces below.

Below are the reported call traces seen when copying/pasting a large
string in the console.

Rx:
    _raw_spin_lock_irqsave (hvc lock)
    __hvc_poll
    hvc_poll
    in_intr
    vring_interrupt
    mlxbf_tmfifo_rxtx_one_desc (tmfifo lock)
    mlxbf_tmfifo_rxtx
    mlxbf_tmfifo_work_rxtx
Tx:
    _raw_spin_lock_irqsave (tmfifo lock)
    mlxbf_tmfifo_virtio_notify
    virtqueue_notify
    virtqueue_kick
    put_chars
    hvc_push
    hvc_write (hvc lock)
    ...
    do_tty_write
    tty_write

Fixes: 1357dfd726 ("platform/mellanox: Add TmFifo driver for Mellanox BlueField Soc")
Cc: <stable@vger.kernel.org> # 5.4+
Reviewed-by: David Woods <dwoods@mellanox.com>
Signed-off-by: Liming Sun <lsun@mellanox.com>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>

@@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info {
  * @work: work struct for deferred process
  * @timer: background timer
  * @vring: Tx/Rx ring
- * @spin_lock: spin lock
+ * @spin_lock: Tx/Rx spin lock
  * @is_ready: ready flag
  */
 struct mlxbf_tmfifo {
@@ -164,7 +164,7 @@ struct mlxbf_tmfifo {
 	struct work_struct work;
 	struct timer_list timer;
 	struct mlxbf_tmfifo_vring *vring[2];
-	spinlock_t spin_lock;		/* spin lock */
+	spinlock_t spin_lock[2];	/* spin lock */
 	bool is_ready;
 };
@@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
 	writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
 
 	/* Use spin-lock to protect the 'cons->tx_buf'. */
-	spin_lock_irqsave(&fifo->spin_lock, flags);
+	spin_lock_irqsave(&fifo->spin_lock[0], flags);
 	while (size > 0) {
 		addr = cons->tx_buf.buf + cons->tx_buf.tail;
@@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
 		}
 	}
 
-	spin_unlock_irqrestore(&fifo->spin_lock, flags);
+	spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
 }
 
 /* Rx/Tx one word in the descriptor buffer. */
@@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
 		fifo->vring[is_rx] = NULL;
 		/* Notify upper layer that packet is done. */
-		spin_lock_irqsave(&fifo->spin_lock, flags);
+		spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
 		vring_interrupt(0, vring->vq);
-		spin_unlock_irqrestore(&fifo->spin_lock, flags);
+		spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
 	}
 
 mlxbf_tmfifo_desc_done:
@@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
 	 * worker handler.
 	 */
 	if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
-		spin_lock_irqsave(&fifo->spin_lock, flags);
+		spin_lock_irqsave(&fifo->spin_lock[0], flags);
 		tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
 		mlxbf_tmfifo_console_output(tm_vdev, vring);
-		spin_unlock_irqrestore(&fifo->spin_lock, flags);
+		spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
 	} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
 				    &fifo->pend_events)) {
 		return true;
@@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
 	if (!fifo)
 		return -ENOMEM;
 
-	spin_lock_init(&fifo->spin_lock);
+	spin_lock_init(&fifo->spin_lock[0]);
+	spin_lock_init(&fifo->spin_lock[1]);
 	INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
 	mutex_init(&fifo->lock);
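
With the lock split in place, the same two paths no longer acquire
any pair of locks in opposite order, so the cycle shown above cannot
form. A matching sketch of the post-fix ordering, again with stand-in
names rather than the real driver structures (here tmfifo_rx_lock
plays the role of fifo->spin_lock[1] and tmfifo_tx_lock the role of
fifo->spin_lock[0]):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(hvc_lock);        /* stand-in for the hvc console lock */
    static DEFINE_SPINLOCK(tmfifo_rx_lock);  /* plays the role of fifo->spin_lock[1] */
    static DEFINE_SPINLOCK(tmfifo_tx_lock);  /* plays the role of fifo->spin_lock[0] */

    /* Rx worker: Rx lock, then hvc lock */
    static void rx_path(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&tmfifo_rx_lock, flags);
            spin_lock(&hvc_lock);            /* hvc_poll() via vring_interrupt() */
            spin_unlock(&hvc_lock);
            spin_unlock_irqrestore(&tmfifo_rx_lock, flags);
    }

    /* Console write: hvc lock, then Tx lock */
    static void tx_path(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&hvc_lock, flags);
            spin_lock(&tmfifo_tx_lock);      /* mlxbf_tmfifo_virtio_notify() */
            spin_unlock(&tmfifo_tx_lock);
            spin_unlock_irqrestore(&hvc_lock, flags);
    }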