mt76: sdio: fix use of q->head and q->tail

Their use is reversed compared to DMA. The order used for DMA makes more
sense, so switch to that.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
This commit is contained in:
Felix Fietkau
2020-08-23 12:43:19 +02:00
parent 95f61e17ef
commit 16254fc51f
2 changed files with 18 additions and 18 deletions

View File

@@ -97,7 +97,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
} }
for (i = 0; i < intr->rx.num[qid]; i++) { for (i = 0; i < intr->rx.num[qid]; i++) {
int index = (q->tail + i) % q->ndesc; int index = (q->head + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index]; struct mt76_queue_entry *e = &q->entry[index];
len = intr->rx.len[qid][i]; len = intr->rx.len[qid][i];
@@ -112,7 +112,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
__free_pages(page, order); __free_pages(page, order);
spin_lock_bh(&q->lock); spin_lock_bh(&q->lock);
q->tail = (q->tail + i) % q->ndesc; q->head = (q->head + i) % q->ndesc;
q->queued += i; q->queued += i;
spin_unlock_bh(&q->lock); spin_unlock_bh(&q->lock);
@@ -166,7 +166,7 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
struct mt76_sdio *sdio = &dev->sdio; struct mt76_sdio *sdio = &dev->sdio;
int nframes = 0; int nframes = 0;
while (q->first != q->tail) { while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first]; struct mt76_queue_entry *e = &q->entry[q->first];
int err, len = e->skb->len; int err, len = e->skb->len;

View File

@@ -98,8 +98,8 @@ mt76s_get_next_rx_entry(struct mt76_queue *q)
spin_lock_bh(&q->lock); spin_lock_bh(&q->lock);
if (q->queued > 0) { if (q->queued > 0) {
e = &q->entry[q->head]; e = &q->entry[q->tail];
q->head = (q->head + 1) % q->ndesc; q->tail = (q->tail + 1) % q->ndesc;
q->queued--; q->queued--;
} }
spin_unlock_bh(&q->lock); spin_unlock_bh(&q->lock);
@@ -142,17 +142,17 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
bool wake; bool wake;
while (q->queued > n_dequeued) { while (q->queued > n_dequeued) {
if (!q->entry[q->head].done) if (!q->entry[q->tail].done)
break; break;
if (q->entry[q->head].schedule) { if (q->entry[q->tail].schedule) {
q->entry[q->head].schedule = false; q->entry[q->tail].schedule = false;
n_sw_dequeued++; n_sw_dequeued++;
} }
entry = q->entry[q->head]; entry = q->entry[q->tail];
q->entry[q->head].done = false; q->entry[q->tail].done = false;
q->head = (q->head + 1) % q->ndesc; q->tail = (q->tail + 1) % q->ndesc;
n_dequeued++; n_dequeued++;
if (qid == MT_TXQ_MCU) if (qid == MT_TXQ_MCU)
@@ -222,7 +222,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
.skb = skb, .skb = skb,
}; };
int err, len = skb->len; int err, len = skb->len;
u16 idx = q->tail; u16 idx = q->head;
if (q->queued == q->ndesc) if (q->queued == q->ndesc)
return -ENOSPC; return -ENOSPC;
@@ -232,9 +232,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
if (err < 0) if (err < 0)
return err; return err;
q->entry[q->tail].skb = tx_info.skb; q->entry[q->head].skb = tx_info.skb;
q->entry[q->tail].buf_sz = len; q->entry[q->head].buf_sz = len;
q->tail = (q->tail + 1) % q->ndesc; q->head = (q->head + 1) % q->ndesc;
q->queued++; q->queued++;
return idx; return idx;
@@ -256,9 +256,9 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
spin_lock_bh(&q->lock); spin_lock_bh(&q->lock);
q->entry[q->tail].buf_sz = len; q->entry[q->head].buf_sz = len;
q->entry[q->tail].skb = skb; q->entry[q->head].skb = skb;
q->tail = (q->tail + 1) % q->ndesc; q->head = (q->head + 1) % q->ndesc;
q->queued++; q->queued++;
spin_unlock_bh(&q->lock); spin_unlock_bh(&q->lock);