wifi: iwlwifi: use already mapped data when TXing an AMSDU

The previous commits added mappings for the SKB and TSO page. This
switches the code to use these mappings instead of creating new ones.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Reviewed-by: Johannes Berg <johannes.berg@intel.com>
Link: https://patch.msgid.link/20240703125541.35d89c5e4ae8.I4feb8d34e7b30768d21365ec22c944bacc274d0b@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Author: Benjamin Berg <benjamin.berg@intel.com>
Date:   2024-07-03 12:58:55 +03:00
Committer: Johannes Berg
Commit: 90db507552 (parent: adc902cead)

2 changed files with 63 additions and 22 deletions
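For orientation, the pattern this series moves to can be sketched as follows. This is an illustrative fragment with hypothetical helpers (fill_hdr(), queue_tb()), not the driver's code: the header page is dma_map_single()'d once when it is allocated, and each TX then reuses that long-lived mapping and publishes its CPU writes with dma_sync_single_for_device() instead of creating and tearing down a fresh mapping per frame.

#include <linux/dma-mapping.h>

static void fill_hdr(void *dst, unsigned int len);	/* hypothetical */
static int queue_tb(dma_addr_t phys, unsigned int len);	/* hypothetical */

static int tx_one(struct device *dev, void *hdr_page,
		  dma_addr_t page_phys, unsigned int off, unsigned int len)
{
	/* CPU fills the header bytes the device will read. */
	fill_hdr(hdr_page + off, len);

	/*
	 * Reuse the long-lived mapping: no dma_map_single() here, just
	 * sync the written range so the device sees it.
	 */
	dma_sync_single_for_device(dev, page_phys + off, len, DMA_TO_DEVICE);

	return queue_tb(page_phys + off, len);
}

On cache-coherent platforms the sync is nearly free, so this trades a map/unmap pair per frame for (at most) one cheap sync.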


@@ -61,7 +61,8 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
 				       struct sk_buff *skb,
 				       struct iwl_tfh_tfd *tfd,
 				       dma_addr_t phys, void *virt,
-				       u16 len, struct iwl_cmd_meta *meta)
+				       u16 len, struct iwl_cmd_meta *meta,
+				       bool unmap)
 {
 	dma_addr_t oldphys = phys;
 	struct page *page;
@@ -105,10 +106,27 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
 	memcpy(page_address(page), virt, len);
-	phys = dma_map_single(trans->dev, page_address(page), len,
-			      DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(trans->dev, phys)))
-		return -ENOMEM;
+
+	/*
+	 * This is a bit odd, but performance does not matter here, what
+	 * matters are the expectations of the calling code and TB cleanup
+	 * function.
+	 *
+	 * As such, if unmap is set, then create another mapping for the TB
+	 * entry as it will be unmapped later. On the other hand, if it is not
+	 * set, then the TB entry will not be unmapped and instead we simply
+	 * reference and sync the mapping that get_workaround_page() created.
+	 */
+	if (unmap) {
+		phys = dma_map_single(trans->dev, page_address(page), len,
+				      DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, phys)))
+			return -ENOMEM;
+	} else {
+		phys = iwl_pcie_get_tso_page_phys(page_address(page));
+		dma_sync_single_for_device(trans->dev, phys, len,
+					   DMA_TO_DEVICE);
+	}
+
 	ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
 	if (ret < 0) {
 		/* unmap the new allocation as single */
@@ -116,6 +134,7 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
 		meta = NULL;
 		goto unmap;
 	}
+
 	IWL_DEBUG_TX(trans,
 		     "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
 		     len, (unsigned long long)oldphys,
@@ -123,6 +142,9 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
 	ret = 0;
 unmap:
+	if (!unmap)
+		goto trace;
+
 	if (meta)
 		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
 	else
@@ -146,6 +168,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
+	dma_addr_t start_hdr_phys;
 	u16 length, amsdu_pad;
 	u8 *start_hdr;
 	struct sg_table *sgt;
@@ -168,6 +191,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
 	if (!sgt)
 		return -ENOMEM;
 
+	start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
 	/*
 	 * Pull the ieee80211 header to be able to use TSO core,
 	 * we will restore it for the tx_status flow.
@@ -214,10 +239,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
 		pos_hdr += snap_ip_tcp_hdrlen;
 
 		tb_len = pos_hdr - start_hdr;
-		tb_phys = dma_map_single(trans->dev, start_hdr,
-					 tb_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-			goto out_err;
+		tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
 		/*
 		 * No need for _with_wa, this is from the TSO page and
 		 * we leave some space at the end of it so can't hit
@@ -237,11 +260,14 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
 			int ret;
 
 			tb_len = min_t(unsigned int, tso.size, data_left);
-			tb_phys = dma_map_single(trans->dev, tso.data,
-						 tb_len, DMA_TO_DEVICE);
+			tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+			/* Not a real mapping error, use direct comparison */
+			if (unlikely(tb_phys == DMA_MAPPING_ERROR))
+				goto out_err;
+
 			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
 							  tb_phys, tso.data,
-							  tb_len, NULL);
+							  tb_len, NULL, false);
 			if (ret)
 				goto out_err;
@@ -250,6 +276,9 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
 		}
 	}
 
+	dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+				   DMA_TO_DEVICE);
+
 	/* re-add the WiFi header */
 	skb_push(skb, hdr_len);
@@ -339,7 +368,7 @@ static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
 					   fragsz, DMA_TO_DEVICE);
 		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
 						  skb_frag_address(frag),
-						  fragsz, out_meta);
+						  fragsz, out_meta, true);
 		if (ret)
 			return ret;
 	}
@@ -413,7 +442,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
 					     tb2_len, DMA_TO_DEVICE);
 		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
 						  skb->data + hdr_len, tb2_len,
-						  NULL);
+						  NULL, true);
 		if (ret)
 			goto out_err;
 	}
@@ -428,7 +457,8 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
 						 skb_headlen(frag), DMA_TO_DEVICE);
 		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
 						  frag->data,
-						  skb_headlen(frag), NULL);
+						  skb_headlen(frag), NULL,
+						  true);
 		if (ret)
 			goto out_err;
 		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
@@ -623,6 +653,10 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
 		return;
 	}
 
+	/* TB1 is mapped directly, the rest is the TSO page and SG list. */
+	if (meta->sg_offset)
+		num_tbs = 2;
+
 	/* first TB is never freed - it's the bidirectional DMA data */
 	for (i = 1; i < num_tbs; i++) {
 		if (meta->tbs & BIT(i))
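The tfd_unmap change above, repeated for the gen1 path in the second file below, encodes the matching cleanup rule: when a TFD was built from the pre-mapped TSO page / SG list (meta->sg_offset is nonzero), TB1 is the only per-TX streaming mapping, so the unmap loop must stop after it and leave the long-lived mappings alone. A minimal sketch, with a hypothetical unmap_tb() helper:

static void unmap_tb(int idx);	/* hypothetical per-TB unmap */

static void tfd_unmap_sketch(struct iwl_cmd_meta *meta, int num_tbs)
{
	int i;

	/* TB1 is mapped directly, the rest is the TSO page and SG list. */
	if (meta->sg_offset)
		num_tbs = 2;

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++)
		if (meta->tbs & BIT(i))
			unmap_tb(i);
}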


@@ -303,6 +303,10 @@ static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
 		return;
 	}
 
+	/* TB1 is mapped directly, the rest is the TSO page and SG list. */
+	if (meta->sg_offset)
+		num_tbs = 2;
+
 	/* first TB is never freed - it's the bidirectional DMA data */
 	for (i = 1; i < num_tbs; i++) {
@@ -1892,6 +1896,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
 	u16 length, iv_len, amsdu_pad;
+	dma_addr_t start_hdr_phys;
 	u8 *start_hdr, *pos_hdr;
 	struct sg_table *sgt;
 	struct tso_t tso;
@@ -1920,6 +1925,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	if (!sgt)
 		return -ENOMEM;
 
+	start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
 	pos_hdr = start_hdr;
 	memcpy(pos_hdr, skb->data + hdr_len, iv_len);
 	pos_hdr += iv_len;
@@ -1971,10 +1977,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		pos_hdr += snap_ip_tcp_hdrlen;
 
 		hdr_tb_len = pos_hdr - start_hdr;
-		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
-					     hdr_tb_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
-			return -EINVAL;
+		hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
 		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
 				       hdr_tb_len, false);
 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
@@ -1991,9 +1995,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 							data_left);
 			dma_addr_t tb_phys;
 
-			tb_phys = dma_map_single(trans->dev, tso.data,
-						 size, DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+			tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+			/* Not a real mapping error, use direct comparison */
+			if (unlikely(tb_phys == DMA_MAPPING_ERROR))
 				return -EINVAL;
 
 			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
@@ -2006,6 +2010,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		}
 	}
 
+	dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+				   DMA_TO_DEVICE);
+
 	/* re-add the WiFi header and IV */
 	skb_push(skb, hdr_len + iv_len);
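A design note on the sync both A-MSDU builders add after their subframe loops: every subframe header is written first, and a single dma_sync_single_for_device() call then publishes the whole header area (hdr_room bytes starting at start_hdr_phys) to the device, rather than syncing once per subframe. A minimal sketch of that batching, with a hypothetical build_subframe() helper:

static unsigned int build_subframe(u8 *pos_hdr);	/* hypothetical */

static int build_amsdu_hdrs(struct device *dev, u8 *start_hdr,
			    dma_addr_t start_hdr_phys,
			    unsigned int hdr_room, unsigned int subframes)
{
	u8 *pos_hdr = start_hdr;
	unsigned int i;

	for (i = 0; i < subframes; i++)
		pos_hdr += build_subframe(pos_hdr);

	if (WARN_ON(pos_hdr - start_hdr > hdr_room))
		return -EINVAL;

	/* One sync covers every header written above. */
	dma_sync_single_for_device(dev, start_hdr_phys, hdr_room,
				   DMA_TO_DEVICE);
	return 0;
}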