xdp: fix possible cq entry leak
A completion queue address reservation cannot be undone. In case of a
bad 'queue_id' or an skb allocation failure, the reserved entry is
leaked, reducing the total capacity of the completion queue.

Fix that by moving the reservation to a point where failure is no
longer possible. Additionally, move the 'queue_id' check out of the
loop, since there is no point in checking it on every iteration.
Fixes: 35fcde7f8d ("xsk: support for Tx")
Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: William Tu <u9012063@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
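
To make the failure mode concrete, below is a minimal userspace sketch of the
same reserve-then-fail pattern. It is not kernel code: 'struct toy_cq',
toy_cq_reserve() and fallible_step() are hypothetical stand-ins for the xsk
completion queue, xskq_reserve_addr() and the fallible skb allocation. It only
illustrates how bailing out after an irrevocable reservation permanently eats
queue capacity, while reserving after the fallible steps (as the patch does)
does not.

/*
 * Toy model only: none of these names exist in the kernel. The real fix
 * reorders xskq_reserve_addr() after the steps that can fail.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cq {
	unsigned int capacity;	/* total entries in the queue */
	unsigned int reserved;	/* entries currently reserved */
};

/* Reserve one entry; fails once the queue is full. There is no "unreserve". */
static bool toy_cq_reserve(struct toy_cq *cq)
{
	if (cq->reserved >= cq->capacity)
		return false;
	cq->reserved++;
	return true;
}

/* Stand-in for a step that may fail, e.g. the skb allocation. */
static bool fallible_step(int i)
{
	return (i % 2) == 0;	/* fail on every odd iteration */
}

/* Buggy ordering: reserve first, then bail out without releasing it. */
static void buggy_xmit(struct toy_cq *cq, int i)
{
	if (!toy_cq_reserve(cq))
		return;
	if (!fallible_step(i))
		return;		/* reservation never released: capacity leaks */
	cq->reserved--;		/* entry completed and released on success */
}

/* Fixed ordering: reserve only once nothing else can fail. */
static void fixed_xmit(struct toy_cq *cq, int i)
{
	if (!fallible_step(i))
		return;		/* nothing reserved yet, nothing leaked */
	if (!toy_cq_reserve(cq))
		return;
	cq->reserved--;
}

int main(void)
{
	struct toy_cq buggy = { .capacity = 4 }, fixed = { .capacity = 4 };

	for (int i = 0; i < 16; i++) {
		buggy_xmit(&buggy, i);
		fixed_xmit(&fixed, i);
	}
	/* buggy ends up permanently full (4/4); fixed drains back to 0/4 */
	printf("buggy reserved=%u/%u, fixed reserved=%u/%u\n",
	       buggy.reserved, buggy.capacity,
	       fixed.reserved, fixed.capacity);
	return 0;
}

Running the sketch shows the buggy queue stuck at full capacity after a handful
of failures, while the fixed ordering leaves it empty: the same reasoning that
motivates combining the reservation with the final error check in the patch.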
commit 675716400d
parent 36db2a94f1
@@ -240,6 +240,9 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
 
 	mutex_lock(&xs->mutex);
 
+	if (xs->queue_id >= xs->dev->real_num_tx_queues)
+		goto out;
+
 	while (xskq_peek_desc(xs->tx, &desc)) {
 		char *buffer;
 		u64 addr;
@@ -250,12 +253,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
 			goto out;
 		}
 
-		if (xskq_reserve_addr(xs->umem->cq))
-			goto out;
-
-		if (xs->queue_id >= xs->dev->real_num_tx_queues)
-			goto out;
-
 		len = desc.len;
 		skb = sock_alloc_send_skb(sk, len, 1, &err);
 		if (unlikely(!skb)) {
@@ -267,7 +264,7 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
 		addr = desc.addr;
 		buffer = xdp_umem_get_data(xs->umem, addr);
 		err = skb_store_bits(skb, 0, buffer, len);
-		if (unlikely(err)) {
+		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
 			kfree_skb(skb);
 			goto out;
 		}