netfilter: flowtable: refresh flow if hardware offload fails
If nf_flow_offload_add() fails to add the flow to hardware, then the
NF_FLOW_HW_REFRESH flag bit is set and the flow remains in the flowtable
software path. If flowtable hardware offload is enabled, this patch
enqueues a new request to offload this flow to hardware.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
commit f698fe4082
parent a5449cdcaa
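Before the diff itself, here is a minimal, self-contained userspace sketch of
the retry pattern this patch introduces. The names (FLOW_REFRESH, work_add,
needs_refresh) are hypothetical stand-ins; the kernel code below uses
set_bit() and test_and_clear_bit() on flow->flags. The idea: the offload
worker flags the flow when hardware insertion fails, and the next packet
through the software path atomically tests-and-clears that flag and
re-enqueues the offload request, so exactly one packet triggers the retry.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for NF_FLOW_HW and NF_FLOW_HW_REFRESH. */
#define FLOW_HW       (1UL << 0)	/* offload has been requested */
#define FLOW_REFRESH  (1UL << 1)	/* hardware add failed, retry later */

struct flow {
	atomic_ulong flags;
};

/* Worker side: if programming the hardware fails, mark the flow so the
 * datapath re-enqueues the request later (mirrors set_bit()). */
static void work_add(struct flow *f, bool hw_ok)
{
	if (!hw_ok)
		atomic_fetch_or(&f->flags, FLOW_REFRESH);
}

/* Datapath side: atomically clear the bit and report whether it was set,
 * so only one packet wins the retry (mirrors test_and_clear_bit()). */
static bool needs_refresh(struct flow *f)
{
	return atomic_fetch_and(&f->flags, ~FLOW_REFRESH) & FLOW_REFRESH;
}

int main(void)
{
	struct flow f = { .flags = FLOW_HW };

	work_add(&f, false);		/* hardware insertion failed */

	if (needs_refresh(&f))
		puts("first packet: re-enqueue offload request");
	if (!needs_refresh(&f))
		puts("second packet: bit already cleared, no duplicate");

	return 0;
}

The test-and-clear is what keeps the requeue one-shot: of many concurrent
packets hitting the software path, only one observes the bit set and
re-enqueues the work.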
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -95,6 +95,7 @@ enum nf_flow_flags {
 	NF_FLOW_HW,
 	NF_FLOW_HW_DYING,
 	NF_FLOW_HW_DEAD,
+	NF_FLOW_HW_REFRESH,
 };
 
 enum flow_offload_type {
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -243,8 +243,10 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 		return err;
 	}
 
-	if (nf_flowtable_hw_offload(flow_table))
+	if (nf_flowtable_hw_offload(flow_table)) {
+		__set_bit(NF_FLOW_HW, &flow->flags);
 		nf_flow_offload_add(flow_table, flow);
+	}
 
 	return 0;
 }
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -232,6 +232,13 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
 	return NF_STOLEN;
 }
 
+static bool nf_flow_offload_refresh(struct nf_flowtable *flow_table,
+				    struct flow_offload *flow)
+{
+	return nf_flowtable_hw_offload(flow_table) &&
+	       test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags);
+}
+
 unsigned int
 nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 			const struct nf_hook_state *state)
@@ -272,6 +279,9 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
 		return NF_ACCEPT;
 
+	if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
+		nf_flow_offload_add(flow_table, flow);
+
 	if (nf_flow_offload_dst_check(&rt->dst)) {
 		flow_offload_teardown(flow);
 		return NF_ACCEPT;
@@ -498,6 +508,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 				sizeof(*ip6h)))
 		return NF_ACCEPT;
 
+	if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
+		nf_flow_offload_add(flow_table, flow);
+
 	if (nf_flow_offload_dst_check(&rt->dst)) {
 		flow_offload_teardown(flow);
 		return NF_ACCEPT;
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -654,20 +654,20 @@ static int flow_offload_rule_add(struct flow_offload_work *offload,
 	return 0;
 }
 
-static int flow_offload_work_add(struct flow_offload_work *offload)
+static void flow_offload_work_add(struct flow_offload_work *offload)
 {
 	struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
 	int err;
 
 	err = nf_flow_offload_alloc(offload, flow_rule);
 	if (err < 0)
-		return -ENOMEM;
+		return;
 
 	err = flow_offload_rule_add(offload, flow_rule);
+	if (err < 0)
+		set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);
 
 	nf_flow_offload_destroy(flow_rule);
-
-	return err;
 }
 
 static void flow_offload_work_del(struct flow_offload_work *offload)
@@ -712,7 +712,6 @@ static void flow_offload_work_handler(struct work_struct *work)
 {
 	struct flow_offload_work *offload, *next;
 	LIST_HEAD(offload_pending_list);
-	int ret;
 
 	spin_lock_bh(&flow_offload_pending_list_lock);
 	list_replace_init(&flow_offload_pending_list, &offload_pending_list);
@@ -721,9 +720,7 @@ static void flow_offload_work_handler(struct work_struct *work)
 	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
 		switch (offload->cmd) {
 		case FLOW_CLS_REPLACE:
-			ret = flow_offload_work_add(offload);
-			if (ret < 0)
-				__clear_bit(NF_FLOW_HW, &offload->flow->flags);
+			flow_offload_work_add(offload);
 			break;
 		case FLOW_CLS_DESTROY:
 			flow_offload_work_del(offload);
@@ -776,7 +773,6 @@ void nf_flow_offload_add(struct nf_flowtable *flowtable,
 	if (!offload)
 		return;
 
-	__set_bit(NF_FLOW_HW, &flow->flags);
 	flow_offload_queue_work(offload);
 }
 