// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>

#include "xsk_queue.h"
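
/* Record the UMEM's total size and chunk mask on a queue. A NULL queue is
 * ignored, so this can be called unconditionally for optional rings.
 */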
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
	if (!q)
		return;

	q->size = size;
	q->chunk_mask = chunk_mask;
}
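
/* Ring sizes in bytes: fill/completion rings carry 64-bit UMEM addresses,
 * while Rx/Tx rings carry full xdp_desc descriptors.
 */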
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);
}

static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
}
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	gfp_t gfp_flags;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP | __GFP_NORETRY;
	size = umem_queue ? xskq_umem_get_ring_size(q) :
	       xskq_rxtx_get_ring_size(q);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}
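
/* Free the ring pages and the queue. Safe to call with a NULL queue. */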
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	page_frag_free(q->ring);
	kfree(q);
}
|
2018-09-07 08:18:46 +00:00
|
|
|
|
|
|
|
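
/* Fill-queue reuse support: a driver can stash leftover UMEM addresses in a
 * per-UMEM array across a ring restart instead of dropping them.
 *
 * Rough driver-side usage sketch (illustrative only; "rx_ring" is a
 * placeholder and the exact flow varies between drivers):
 *
 *	struct xdp_umem_fq_reuse *reuseq;
 *
 *	reuseq = xsk_reuseq_prepare(rx_ring->count);
 *	if (!reuseq)
 *		return -ENOMEM;
 *	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
 *
 * xsk_reuseq_prepare() rounds nentries up to a power of two and returns NULL
 * on overflow or allocation failure.
 */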
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	struct xdp_umem_fq_reuse *newq;

	/* Check for overflow */
	if (nentries > (u32)roundup_pow_of_two(nentries))
		return NULL;
	nentries = roundup_pow_of_two(nentries);

	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
	if (!newq)
		return NULL;
	memset(newq, 0, offsetof(typeof(*newq), handles));

	newq->nentries = nentries;
	return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
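
/* Swap in a new reuse queue, copying any stashed addresses from the old one.
 * Returns the queue the caller should free: the old one on success, the new
 * one if it is too small to hold the old entries, or NULL if there was no
 * previous queue.
 */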
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq)
{
	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;

	if (!oldq) {
		umem->fq_reuse = newq;
		return NULL;
	}

	if (newq->nentries < oldq->length)
		return newq;

	memcpy(newq->handles, oldq->handles,
	       array_size(oldq->length, sizeof(u64)));
	newq->length = oldq->length;

	umem->fq_reuse = newq;
	return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
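
/* Free a reuse queue from xsk_reuseq_prepare(); a NULL pointer is allowed. */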
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
	kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);
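
/* Drop the UMEM's reuse queue, if any, and clear the pointer. */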
void xsk_reuseq_destroy(struct xdp_umem *umem)
{
	xsk_reuseq_free(umem->fq_reuse);
	umem->fq_reuse = NULL;
}