mirror of https://github.com/torvalds/linux.git
net, xdp: Introduce __xdp_build_skb_from_frame utility routine
Introduce the __xdp_build_skb_from_frame utility routine to build an sk_buff from an xdp_frame, and rely on it in the cpumap code.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Link: https://lore.kernel.org/bpf/4f9f4c6b3dd3933770c617eb6689dbc0c6e25863.1610475660.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 09c02d553c
commit 97a0e1ea7b
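For context (not part of the commit itself), a minimal sketch of how a caller might use the new helper, assuming it has already allocated a bare struct sk_buff (cpumap pre-allocates them in bulk before entering this path); the wrapper name xdp_frame_to_skb() is hypothetical:

static struct sk_buff *xdp_frame_to_skb(struct xdp_frame *xdpf,
					struct sk_buff *skb)
{
	/* Build the skb around the memory already backing the xdp_frame */
	skb = __xdp_build_skb_from_frame(xdpf, skb, xdpf->dev_rx);
	if (!skb) {
		/* On failure the frame is still owned by the caller */
		xdp_return_frame(xdpf);
		return NULL;
	}

	return skb;
}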
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -164,6 +164,9 @@ void xdp_warn(const char *msg, const char *func, const int line);
 #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
 
 struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
+struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+					   struct sk_buff *skb,
+					   struct net_device *dev);
 
 static inline
 void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -141,49 +141,6 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 	kthread_stop(rcpu->kthread);
 }
 
-static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf,
-					 struct sk_buff *skb)
-{
-	unsigned int hard_start_headroom;
-	unsigned int frame_size;
-	void *pkt_data_start;
-
-	/* Part of headroom was reserved to xdpf */
-	hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
-
-	/* Memory size backing xdp_frame data already have reserved
-	 * room for build_skb to place skb_shared_info in tailroom.
-	 */
-	frame_size = xdpf->frame_sz;
-
-	pkt_data_start = xdpf->data - hard_start_headroom;
-	skb = build_skb_around(skb, pkt_data_start, frame_size);
-	if (unlikely(!skb))
-		return NULL;
-
-	skb_reserve(skb, hard_start_headroom);
-	__skb_put(skb, xdpf->len);
-	if (xdpf->metasize)
-		skb_metadata_set(skb, xdpf->metasize);
-
-	/* Essential SKB info: protocol and skb->dev */
-	skb->protocol = eth_type_trans(skb, xdpf->dev_rx);
-
-	/* Optional SKB info, currently missing:
-	 * - HW checksum info		(skb->ip_summed)
-	 * - HW RX hash			(skb_set_hash)
-	 * - RX ring dev queue index	(skb_record_rx_queue)
-	 */
-
-	/* Until page_pool get SKB return path, release DMA here */
-	xdp_release_frame(xdpf);
-
-	/* Allow SKB to reuse area used by xdp_frame */
-	xdp_scrub_frame(xdpf);
-
-	return skb;
-}
-
 static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 {
 	/* The tear-down procedure should have made sure that queue is
@@ -350,7 +307,8 @@ static int cpu_map_kthread_run(void *data)
 			struct sk_buff *skb = skbs[i];
 			int ret;
 
-			skb = cpu_map_build_skb(xdpf, skb);
+			skb = __xdp_build_skb_from_frame(xdpf, skb,
+							 xdpf->dev_rx);
 			if (!skb) {
 				xdp_return_frame(xdpf);
 				continue;
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -513,3 +513,47 @@ void xdp_warn(const char *msg, const char *func, const int line)
 	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
 };
 EXPORT_SYMBOL_GPL(xdp_warn);
+
+struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+					   struct sk_buff *skb,
+					   struct net_device *dev)
+{
+	unsigned int headroom, frame_size;
+	void *hard_start;
+
+	/* Part of headroom was reserved to xdpf */
+	headroom = sizeof(*xdpf) + xdpf->headroom;
+
+	/* Memory size backing xdp_frame data already have reserved
+	 * room for build_skb to place skb_shared_info in tailroom.
+	 */
+	frame_size = xdpf->frame_sz;
+
+	hard_start = xdpf->data - headroom;
+	skb = build_skb_around(skb, hard_start, frame_size);
+	if (unlikely(!skb))
+		return NULL;
+
+	skb_reserve(skb, headroom);
+	__skb_put(skb, xdpf->len);
+	if (xdpf->metasize)
+		skb_metadata_set(skb, xdpf->metasize);
+
+	/* Essential SKB info: protocol and skb->dev */
+	skb->protocol = eth_type_trans(skb, dev);
+
+	/* Optional SKB info, currently missing:
+	 * - HW checksum info		(skb->ip_summed)
+	 * - HW RX hash			(skb_set_hash)
+	 * - RX ring dev queue index	(skb_record_rx_queue)
+	 */
+
+	/* Until page_pool get SKB return path, release DMA here */
+	xdp_release_frame(xdpf);
+
+	/* Allow SKB to reuse area used by xdp_frame */
+	xdp_scrub_frame(xdpf);
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);
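A natural extension (hedged, not part of this commit) is a wrapper that also allocates the skb head before handing it to __xdp_build_skb_from_frame(). A sketch under the assumptions that skbuff_head_cache is visible to the caller and that the hypothetical helper name xdp_alloc_and_build_skb() is free:

struct sk_buff *xdp_alloc_and_build_skb(struct xdp_frame *xdpf,
					struct net_device *dev)
{
	struct sk_buff *skb;

	/* Allocate a bare skb head; the data area comes from the xdp_frame */
	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	/* build_skb_around() assumes the caller cleared the skb head */
	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}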