B43: Handle DMA RX descriptor underrun
Add handling of RX descriptor underflow. This fixes a fault that could
happen on slow machines, where data is received faster than the CPU can
handle. In such a case the device will use up all RX descriptors and
refuse to send any more data before confirming that it is OK. This patch
enables the necessary interrupt to discover such a situation and handles
it by dropping everything in the ring buffer.

Reviewed-by: Michael Buesch <m@bues.ch>
Signed-off-by: Thommy Jakobsson <thommyj@gmail.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
commit 73b82bf0bf
parent db17834043
drivers/net/wireless/b43/dma.c
@@ -1733,6 +1733,25 @@ drop_recycle_buffer:
 	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
 }
 
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
+{
+	int current_slot, previous_slot;
+
+	B43_WARN_ON(ring->tx);
+
+	/* Device has filled all buffers, drop all packets and let TCP
+	 * decrease speed.
+	 * Decrement RX index by one will let the device to see all slots
+	 * as free again
+	 */
+	/*
+	 *TODO: How to increase rx_drop in mac80211?
+	 */
+	current_slot = ring->ops->get_current_rxslot(ring);
+	previous_slot = prev_slot(ring, current_slot);
+	ring->ops->set_current_rxslot(ring, previous_slot);
+}
+
 void b43_dma_rx(struct b43_dmaring *ring)
 {
 	const struct b43_dma_ops *ops = ring->ops;
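Note (not part of the patch): the recovery above relies on the ring's
previous-slot arithmetic. A minimal sketch of what the existing prev_slot()
helper is assumed to do, for readers without the surrounding dma.c context:

	/* Step one slot backwards in the RX ring, wrapping around at slot 0. */
	static inline int prev_slot_sketch(int slot, int nr_slots)
	{
		return (slot == 0) ? nr_slots - 1 : slot - 1;
	}

Pointing the driver's current RX slot at the slot just behind the one the
device will fill next makes every other slot look free again, so the device
resumes DMA instead of stalling.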
drivers/net/wireless/b43/dma.h
@@ -9,7 +9,7 @@
 /* DMA-Interrupt reasons. */
 #define B43_DMAIRQ_FATALMASK	((1 << 10) | (1 << 11) | (1 << 12) \
 					 | (1 << 14) | (1 << 15))
-#define B43_DMAIRQ_NONFATALMASK	(1 << 13)
+#define B43_DMAIRQ_RDESC_UFLOW		(1 << 13)
 #define B43_DMAIRQ_RX_DONE		(1 << 16)
 
 /*** 32-bit DMA Engine. ***/
@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
 void b43_dma_handle_txstatus(struct b43_wldev *dev,
 			     const struct b43_txstatus *status);
 
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
+
 void b43_dma_rx(struct b43_dmaring *ring);
 
 void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
drivers/net/wireless/b43/main.c
@@ -1902,30 +1902,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
 		}
 	}
 
-	if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
-					  B43_DMAIRQ_NONFATALMASK))) {
-		if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
-			b43err(dev->wl, "Fatal DMA error: "
-			       "0x%08X, 0x%08X, 0x%08X, "
-			       "0x%08X, 0x%08X, 0x%08X\n",
-			       dma_reason[0], dma_reason[1],
-			       dma_reason[2], dma_reason[3],
-			       dma_reason[4], dma_reason[5]);
-			b43err(dev->wl, "This device does not support DMA "
+	if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
+		b43err(dev->wl,
+			"Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
+			dma_reason[0], dma_reason[1],
+			dma_reason[2], dma_reason[3],
+			dma_reason[4], dma_reason[5]);
+		b43err(dev->wl, "This device does not support DMA "
 			       "on your system. It will now be switched to PIO.\n");
-			/* Fall back to PIO transfers if we get fatal DMA errors! */
-			dev->use_pio = true;
-			b43_controller_restart(dev, "DMA error");
-			return;
-		}
-		if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
-			b43err(dev->wl, "DMA error: "
-			       "0x%08X, 0x%08X, 0x%08X, "
-			       "0x%08X, 0x%08X, 0x%08X\n",
-			       dma_reason[0], dma_reason[1],
-			       dma_reason[2], dma_reason[3],
-			       dma_reason[4], dma_reason[5]);
-		}
+		/* Fall back to PIO transfers if we get fatal DMA errors! */
+		dev->use_pio = true;
+		b43_controller_restart(dev, "DMA error");
+		return;
 	}
 
 	if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
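Note (illustration only, not part of the patch; constants copied from the
dma.h hunk above): with the old code an RX descriptor underflow (bit 13,
formerly covered by B43_DMAIRQ_NONFATALMASK) only landed in the generic DMA
error branch and was logged. The new fatal check leaves bit 13 out, so the
condition can be routed to b43_dma_handle_rx_overflow() instead:

	#include <stdio.h>

	#define FATALMASK ((1u << 10) | (1u << 11) | (1u << 12) | \
			   (1u << 14) | (1u << 15))
	#define UFLOW	  (1u << 13)	/* B43_DMAIRQ_RDESC_UFLOW */

	int main(void)
	{
		unsigned int reason = UFLOW;	/* simulated DMA reason word */

		/* Old condition: underflow falls into the DMA error branch. */
		printf("old error path taken: %d\n", !!(reason & (FATALMASK | UFLOW)));
		/* New condition: underflow is no longer treated as a DMA error. */
		printf("new error path taken: %d\n", !!(reason & FATALMASK));
		return 0;
	}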
@@ -1944,6 +1932,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
 		handle_irq_noise(dev);
 
 	/* Check the DMA reason registers for received data. */
+	if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
+		if (B43_DEBUG)
+			b43warn(dev->wl, "RX descriptor underrun\n");
+		b43_dma_handle_rx_overflow(dev->dma.rx_ring);
+	}
 	if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
 		if (b43_using_pio_transfers(dev))
 			b43_pio_rx(dev->pio.rx_queue);
@@ -2001,7 +1994,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
 		return IRQ_NONE;
 
 	dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
-	    & 0x0001DC00;
+	    & 0x0001FC00;
 	dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
 	    & 0x0000DC00;
 	dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
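Note (illustration only, assuming plain C and the constants shown in these
hunks): the widened DMA0 mask differs from the old one by exactly bit 13,
i.e. the RX descriptor underflow interrupt is now enabled and passed through:

	#include <assert.h>

	int main(void)
	{
		unsigned int old_mask = 0x0001DC00;
		unsigned int uflow = 1u << 13;	/* B43_DMAIRQ_RDESC_UFLOW */

		/* New DMA0 mask = old mask plus the underflow bit. */
		assert((old_mask | uflow) == 0x0001FC00);
		return 0;
	}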
@@ -3130,7 +3123,7 @@ static int b43_chip_init(struct b43_wldev *dev)
 		b43_write32(dev, 0x018C, 0x02000000);
 	}
 	b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
-	b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
+	b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
 	b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
 	b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
 	b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);