ibmvnic: Unmap DMA address of TX descriptor buffers after use
There's no need to wait until a completion is received to unmap TX descriptor buffers that have been passed to the hypervisor. Instead, unmap them as soon as the hypervisor call returns. This patch avoids the possibility that a buffer is never unmapped because a TX completion is lost or mishandled.

Reported-by: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
Tested-by: Devesh K. Singh <devesh_singh@in.ibm.com>
Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 80f0fe0934
parent 37b0a733c1
@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
 					       (u64)tx_buff->indir_dma,
 					       (u64)num_entries);
+		dma_unmap_single(dev, tx_buff->indir_dma,
+				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
 	} else {
 		tx_buff->num_entries = num_entries;
 		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -2788,7 +2790,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 	union sub_crq *next;
 	int index;
 	int i, j;
-	u8 *first;
 
 restart_loop:
 	while (pending_scrq(adapter, scrq)) {
@@ -2818,14 +2819,6 @@ restart_loop:
 
 				txbuff->data_dma[j] = 0;
 			}
-			/* if sub_crq was sent indirectly */
-			first = &txbuff->indir_arr[0].generic.first;
-			if (*first == IBMVNIC_CRQ_CMD) {
-				dma_unmap_single(dev, txbuff->indir_dma,
-						 sizeof(txbuff->indir_arr),
-						 DMA_TO_DEVICE);
-				*first = 0;
-			}
 
 			if (txbuff->last_frag) {
 				dev_kfree_skb_any(txbuff->skb);
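
For context, the pattern the patch adopts looks roughly like the sketch below: the indirect descriptor table is DMA-mapped, handed to the hypervisor, and unmapped as soon as the submit call returns, so nothing has to be tracked and torn down later in the TX completion handler. This is a minimal illustration, not the actual ibmvnic code; the struct layout and submit_indirect_descs() are hypothetical stand-ins, while the dma_map_single()/dma_mapping_error()/dma_unmap_single() calls are the standard kernel DMA API.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical per-skb TX bookkeeping, standing in for the driver's tx_buff. */
struct tx_buff {
	u64 indir_arr[16];	/* indirect descriptor table handed to firmware */
	dma_addr_t indir_dma;	/* DMA address of that table */
};

/* Hypothetical wrapper around the hypervisor's indirect-subCRQ submit call. */
int submit_indirect_descs(dma_addr_t table, u64 num_entries);

static int xmit_indirect(struct device *dev, struct tx_buff *tx_buff,
			 u64 num_entries)
{
	int rc;

	tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
					    sizeof(tx_buff->indir_arr),
					    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, tx_buff->indir_dma))
		return -ENOMEM;

	rc = submit_indirect_descs(tx_buff->indir_dma, num_entries);

	/*
	 * The hypervisor has consumed the descriptor table by the time the
	 * call returns, so the mapping can be released immediately; a lost
	 * or mishandled TX completion can no longer leak it.
	 */
	dma_unmap_single(dev, tx_buff->indir_dma,
			 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);

	return rc;
}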