linux/drivers/net/wireless/mediatek/mt76/dma.c

// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"
#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
#define Q_READ(_dev, _q, _field) ({					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_dev, _q, _field, _val) do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif
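
/*
 * A txwi cache entry is a single allocation: the hardware tx descriptor
 * buffer (txwi_size bytes, DMA-mapped once here and then recycled via
 * dev->txwi_cache) followed by the struct mt76_txwi_cache bookkeeping.
 */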
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);
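
/*
 * dev->lock is also taken from the tx cleanup path in softirq context,
 * so BH must stay disabled while draining the cache here; lockdep once
 * reported a possible deadlock on module removal otherwise.
 */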
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}
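
/*
 * Each hardware descriptor carries up to two buffers (buf0/buf1), which
 * is why the buffer array is consumed two entries per iteration. When a
 * txwi is passed in, the first entry is marked so that buf0 (the txwi,
 * which is recycled rather than freed) is not unmapped on completion.
 */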
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;

			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}
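
/*
 * Publish new descriptors to the hardware: the write barrier ensures all
 * descriptor updates are visible before cpu_idx is advanced.
 */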
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}
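
/*
 * Reclaim completed tx entries. With flush set, every queued entry is
 * reclaimed regardless of hardware progress (used on reset/teardown);
 * otherwise cleanup stops at the hardware dma index.
 */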
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = e->dma_addr[0];
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}
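
/*
 * Map a frame for transmission: buf[0] is the recycled txwi descriptor,
 * followed by the skb head and one entry per paged fragment. The txwi
 * contents are filled in by the driver's tx_prepare_skb() between the
 * sync-for-cpu/sync-for-device pair below.
 */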
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	ieee80211_tx_status_ext(hw, &status);

	return ret;
}

static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
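		/* initialize explicitly: skip_unmap is only consumed on the
		 * tx cleanup path, but an uninitialized value trips a UBSAN
		 * invalid-load warning in mt76_dma_add_buf()
		 */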
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}
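
/*
 * Hand a ring over to the WED device. Tx rings get their register base
 * remapped into WED register space; the txfree ring has to be reset and
 * pre-filled before WED takes ownership of it.
 */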
static int
mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags = q->flags;

	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_dma_wed_setup(dev, q);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}
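
/*
 * Attach an rx buffer as a page fragment to the pending rx skb. If the
 * skb has run out of fragment slots, the data is dropped, but the chain
 * is still consumed to completion and the skb freed once it ends.
 */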
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	else
		dev_kfree_skb(skb);
}
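
/*
 * For the WED txfree queue, progress is additionally bounded by the
 * hardware dma index (the check_ddone logic below) rather than relying
 * solely on the per-descriptor DMA done bit.
 */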
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
		continue;

free_frag:
		skb_free_frag(data);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;

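	/* mt76_for_each_q_rx only walks initialized rx queues; touching
	 * uninitialized ones here used to trigger a spinlock-bad-magic crash
	 */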
	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
		mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
		if (dev->phy2)
			mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}

	mt76_free_pending_txwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);