Merge branch 'flow_offload-fixes'

Pablo Neira Ayuso says:

====================
flow_offload fixes

The following patchset contains fixes for the flow_offload infrastructure:

1) Fix a possible build breakage ahead of patch 3/4. Both the flow_offload
   infrastructure and OVS define a flow_stats structure. Patch 3/4 in
   this batch indirectly pulls the flow_stats definition from
   include/net/flow_offload.h into OVS, leading to structure redefinition
   compile-time errors, so rename the OVS structure to sw_flow_stats.

2) Remove the netns parameter from flow_block_cb_alloc(); it is not
   required, as Jiri suggests. The flow_block_cb_is_busy() function uses
   the per-driver block list to check for busy blocks, which was the
   original intention behind this parameter. The updated calling
   convention is sketched after the commit metadata below.

3) Rename tc_setup_cb_t to flow_setup_cb_t. This callback is no longer
   exclusive to tc, so the old name might confuse the reader, as Jiri
   suggests; fix this semantic inconsistency.

   Also add #include <linux/list.h> to include/net/netfilter/nf_tables_offload.h
   to avoid a compile break with CONFIG_HEADER_TEST=y.

4) Fix the block sharing feature: add a flow_block structure and use it,
   and update flow_block_cb_lookup() to take this flow_block object.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller, 2019-07-19 21:27:45 -07:00
commit 89099d855b
21 changed files with 94 additions and 81 deletions
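Patches 2/4 and 4/4 together change the driver-facing calling convention
for block callbacks. Below is a minimal sketch of how a driver's
FLOW_BLOCK_BIND/FLOW_BLOCK_UNBIND handling looks on top of this series;
the foo_* driver, its private data and its flower handler are
hypothetical placeholders, while the flow_block_cb_alloc(),
flow_block_cb_lookup(), flow_block_cb_is_busy(), flow_block_cb_add() and
flow_block_cb_remove() usage follows the include/net/flow_offload.h and
driver hunks below:

#include <net/flow_offload.h>

struct foo_priv;					/* hypothetical driver state */
int foo_flower_offload(struct foo_priv *priv, void *f);	/* hypothetical */

/* Hypothetical driver callback; note the flow_setup_cb_t type
 * introduced by patch 3/4.
 */
static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct foo_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return foo_flower_offload(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Per-driver list of installed callbacks; after patch 2/4 this list,
 * checked via flow_block_cb_is_busy(), replaces the old netns check.
 */
static LIST_HEAD(foo_block_cb_list);

static int foo_setup_tc_block(struct foo_priv *priv,
			      struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(foo_setup_tc_block_cb, priv,
					  &foo_block_cb_list))
			return -EBUSY;

		/* Patch 2/4: no netns parameter anymore. */
		block_cb = flow_block_cb_alloc(foo_setup_tc_block_cb,
					       priv, priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		/* Patch 4/4: the lookup now walks f->block->cb_list
		 * instead of the per-driver block list.
		 */
		block_cb = flow_block_cb_lookup(f->block,
						foo_setup_tc_block_cb, priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

On the block-owner side, tc and nftables now embed a struct flow_block,
initialize it with flow_block_init(), and point bo.block at it before
invoking the driver callbacks, as the net/sched/cls_api.c and
net/netfilter/nf_tables_offload.c hunks below show.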

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -735,8 +735,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
 		list_add(&indr_priv->list,
 			 &rpriv->uplink_priv.tc_indr_block_priv_list);
 
-		block_cb = flow_block_cb_alloc(f->net,
-					       mlx5e_rep_indr_setup_block_cb,
+		block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
 					       indr_priv, indr_priv,
 					       mlx5e_rep_indr_tc_block_unbind);
 		if (IS_ERR(block_cb)) {
@@ -753,7 +752,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
 		if (!indr_priv)
 			return -ENOENT;
 
-		block_cb = flow_block_cb_lookup(f,
+		block_cb = flow_block_cb_lookup(f->block,
 						mlx5e_rep_indr_setup_block_cb,
 						indr_priv);
 		if (!block_cb)

drivers/net/ethernet/mellanox/mlxsw/spectrum.c

@@ -1604,14 +1604,14 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
 	bool register_block = false;
 	int err;
 
-	block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+	block_cb = flow_block_cb_lookup(f->block,
+					mlxsw_sp_setup_tc_block_cb_flower,
 					mlxsw_sp);
 	if (!block_cb) {
 		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
 		if (!acl_block)
 			return -ENOMEM;
-		block_cb = flow_block_cb_alloc(f->net,
-					       mlxsw_sp_setup_tc_block_cb_flower,
+		block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
 					       mlxsw_sp, acl_block,
 					       mlxsw_sp_tc_block_flower_release);
 		if (IS_ERR(block_cb)) {
@@ -1657,7 +1657,8 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
 	struct flow_block_cb *block_cb;
 	int err;
 
-	block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+	block_cb = flow_block_cb_lookup(f->block,
+					mlxsw_sp_setup_tc_block_cb_flower,
 					mlxsw_sp);
 	if (!block_cb)
 		return;
@@ -1680,7 +1681,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
 			       struct flow_block_offload *f)
 {
 	struct flow_block_cb *block_cb;
-	tc_setup_cb_t *cb;
+	flow_setup_cb_t *cb;
 	bool ingress;
 	int err;
 
@@ -1702,7 +1703,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
 					  &mlxsw_sp_block_cb_list))
 		return -EBUSY;
 
-	block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
+	block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
 				       mlxsw_sp_port, NULL);
 	if (IS_ERR(block_cb))
 		return PTR_ERR(block_cb);
@@ -1718,7 +1719,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
 	case FLOW_BLOCK_UNBIND:
 		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
 						      f, ingress);
-		block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
+		block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
 		if (!block_cb)
 			return -ENOENT;
 

drivers/net/ethernet/mscc/ocelot_flower.c

@@ -316,15 +316,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
 		return -EOPNOTSUPP;
 
-	block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
-					port);
+	block_cb = flow_block_cb_lookup(f->block,
+					ocelot_setup_tc_block_cb_flower, port);
 	if (!block_cb) {
 		port_block = ocelot_port_block_create(port);
 		if (!port_block)
 			return -ENOMEM;
 
-		block_cb = flow_block_cb_alloc(f->net,
-					       ocelot_setup_tc_block_cb_flower,
+		block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
 					       port, port_block,
 					       ocelot_tc_block_unbind);
 		if (IS_ERR(block_cb)) {
@@ -351,8 +350,8 @@ void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
 {
 	struct flow_block_cb *block_cb;
 
-	block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
-					port);
+	block_cb = flow_block_cb_lookup(f->block,
+					ocelot_setup_tc_block_cb_flower, port);
 	if (!block_cb)
 		return;
 

drivers/net/ethernet/mscc/ocelot_tc.c

@@ -134,7 +134,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
 				 struct flow_block_offload *f)
 {
 	struct flow_block_cb *block_cb;
-	tc_setup_cb_t *cb;
+	flow_setup_cb_t *cb;
 	int err;
 
 	netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
@@ -156,7 +156,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
 		if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
 			return -EBUSY;
 
-		block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL);
+		block_cb = flow_block_cb_alloc(cb, port, port, NULL);
 		if (IS_ERR(block_cb))
 			return PTR_ERR(block_cb);
 
@@ -169,7 +169,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
 		list_add_tail(&block_cb->driver_list, f->driver_block_list);
 		return 0;
 	case FLOW_BLOCK_UNBIND:
-		block_cb = flow_block_cb_lookup(f, cb, port);
+		block_cb = flow_block_cb_lookup(f->block, cb, port);
 		if (!block_cb)
 			return -ENOENT;
 

drivers/net/ethernet/netronome/nfp/flower/offload.c

@@ -1318,8 +1318,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
 					  &nfp_block_cb_list))
 			return -EBUSY;
 
-		block_cb = flow_block_cb_alloc(f->net,
-					       nfp_flower_setup_tc_block_cb,
+		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
 					       repr, repr, NULL);
 		if (IS_ERR(block_cb))
 			return PTR_ERR(block_cb);
@@ -1328,7 +1327,8 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
 		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
 		return 0;
 	case FLOW_BLOCK_UNBIND:
-		block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb,
+		block_cb = flow_block_cb_lookup(f->block,
+						nfp_flower_setup_tc_block_cb,
 						repr);
 		if (!block_cb)
 			return -ENOENT;
@@ -1424,8 +1424,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 	cb_priv->app = app;
 	list_add(&cb_priv->list, &priv->indr_block_cb_priv);
 
-	block_cb = flow_block_cb_alloc(f->net,
-				       nfp_flower_setup_indr_block_cb,
+	block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
 				       cb_priv, cb_priv,
 				       nfp_flower_setup_indr_tc_release);
 	if (IS_ERR(block_cb)) {
@@ -1442,7 +1441,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 	if (!cb_priv)
 		return -ENOENT;
 
-	block_cb = flow_block_cb_lookup(f,
+	block_cb = flow_block_cb_lookup(f->block,
 					nfp_flower_setup_indr_block_cb,
 					cb_priv);
 	if (!block_cb)

include/net/flow_offload.h

@@ -2,8 +2,8 @@
 #define _NET_FLOW_OFFLOAD_H
 
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <net/flow_dissector.h>
-#include <net/sch_generic.h>
 
 struct flow_match {
 	struct flow_dissector *dissector;
@@ -249,6 +249,10 @@ enum flow_block_binder_type {
 	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
 };
 
+struct flow_block {
+	struct list_head cb_list;
+};
+
 struct netlink_ext_ack;
 
 struct flow_block_offload {
@@ -256,29 +260,33 @@ struct flow_block_offload {
 	enum flow_block_binder_type binder_type;
 	bool block_shared;
 	struct net *net;
+	struct flow_block *block;
 	struct list_head cb_list;
 	struct list_head *driver_block_list;
 	struct netlink_ext_ack *extack;
 };
 
+enum tc_setup_type;
+typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
+			    void *cb_priv);
+
 struct flow_block_cb {
 	struct list_head	driver_list;
 	struct list_head	list;
-	struct net		*net;
-	tc_setup_cb_t		*cb;
+	flow_setup_cb_t		*cb;
 	void			*cb_ident;
 	void			*cb_priv;
 	void			(*release)(void *cb_priv);
 	unsigned int		refcnt;
};
 
-struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
+struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
 					  void *cb_ident, void *cb_priv,
 					  void (*release)(void *cb_priv));
 void flow_block_cb_free(struct flow_block_cb *block_cb);
 
-struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload,
-					   tc_setup_cb_t *cb, void *cb_ident);
+struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
+					   flow_setup_cb_t *cb, void *cb_ident);
 void *flow_block_cb_priv(struct flow_block_cb *block_cb);
 
 void flow_block_cb_incref(struct flow_block_cb *block_cb);
@@ -296,11 +304,12 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
 	list_move(&block_cb->list, &offload->cb_list);
 }
 
-bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
+bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
 			   struct list_head *driver_block_list);
 
 int flow_block_cb_setup_simple(struct flow_block_offload *f,
-			       struct list_head *driver_list, tc_setup_cb_t *cb,
+			       struct list_head *driver_list,
+			       flow_setup_cb_t *cb,
 			       void *cb_ident, void *cb_priv, bool ingress_only);
 
 enum flow_cls_command {
@@ -333,4 +342,9 @@ flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
 	return flow_cmd->rule;
 }
 
+static inline void flow_block_init(struct flow_block *flow_block)
+{
+	INIT_LIST_HEAD(&flow_block->cb_list);
+}
+
 #endif /* _NET_FLOW_OFFLOAD_H */

include/net/netfilter/nf_tables.h

@@ -11,6 +11,7 @@
 #include <linux/rhashtable.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <net/netlink.h>
+#include <net/flow_offload.h>
 
 struct module;
 
@@ -951,7 +952,7 @@ struct nft_stats {
  *	@stats: per-cpu chain stats
  *	@chain: the chain
  *	@dev_name: device name that this base chain is attached to (if any)
- *	@cb_list: list of flow block callbacks (for hardware offload)
+ *	@flow_block: flow block (for hardware offload)
  */
 struct nft_base_chain {
 	struct nf_hook_ops		ops;
@@ -961,7 +962,7 @@ struct nft_base_chain {
 	struct nft_stats __percpu	*stats;
 	struct nft_chain		chain;
 	char				dev_name[IFNAMSIZ];
-	struct list_head		cb_list;
+	struct flow_block		flow_block;
 };
 
 static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)

include/net/pkt_cls.h

@@ -6,7 +6,6 @@
 #include <linux/workqueue.h>
 #include <net/sch_generic.h>
 #include <net/act_api.h>
-#include <net/flow_offload.h>
 #include <net/net_namespace.h>
 
 /* TC action not accessible from user space */
@@ -126,14 +125,14 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 }
 
 static inline
-int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
+int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
 			       void *cb_priv)
 {
 	return 0;
 }
 
 static inline
-void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
+void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
 				  void *cb_priv)
 {
 }

include/net/sch_generic.h

@@ -15,6 +15,7 @@
 #include <linux/mutex.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
+#include <net/flow_offload.h>
 
 struct Qdisc_ops;
 struct qdisc_walker;
@@ -22,9 +23,6 @@ struct tcf_walker;
 struct module;
 struct bpf_flow_keys;
 
-typedef int tc_setup_cb_t(enum tc_setup_type type,
-			  void *type_data, void *cb_priv);
-
 typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
 				    enum tc_setup_type type, void *type_data);
 
@@ -313,7 +311,7 @@ struct tcf_proto_ops {
 	void			(*walk)(struct tcf_proto *tp,
 					struct tcf_walker *arg, bool rtnl_held);
 	int			(*reoffload)(struct tcf_proto *tp, bool add,
-					     tc_setup_cb_t *cb, void *cb_priv,
+					     flow_setup_cb_t *cb, void *cb_priv,
 					     struct netlink_ext_ack *extack);
 	void			(*bind_class)(void *, u32, unsigned long);
 	void *			(*tmplt_create)(struct net *net,
@@ -401,7 +399,7 @@ struct tcf_block {
 	refcount_t refcnt;
 	struct net *net;
 	struct Qdisc *q;
-	struct list_head cb_list;
+	struct flow_block flow_block;
 	struct list_head owner_list;
 	bool keep_dst;
 	unsigned int offloadcnt; /* Number of oddloaded filters */

net/core/flow_offload.c

@@ -165,7 +165,7 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule,
 }
 EXPORT_SYMBOL(flow_rule_match_enc_opts);
 
-struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
+struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
 					  void *cb_ident, void *cb_priv,
 					  void (*release)(void *cb_priv))
 {
@@ -175,7 +175,6 @@ struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
 	if (!block_cb)
 		return ERR_PTR(-ENOMEM);
 
-	block_cb->net = net;
 	block_cb->cb = cb;
 	block_cb->cb_ident = cb_ident;
 	block_cb->cb_priv = cb_priv;
@@ -194,14 +193,13 @@ void flow_block_cb_free(struct flow_block_cb *block_cb)
 }
 EXPORT_SYMBOL(flow_block_cb_free);
 
-struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f,
-					   tc_setup_cb_t *cb, void *cb_ident)
+struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
+					   flow_setup_cb_t *cb, void *cb_ident)
 {
 	struct flow_block_cb *block_cb;
 
-	list_for_each_entry(block_cb, f->driver_block_list, driver_list) {
-		if (block_cb->net == f->net &&
-		    block_cb->cb == cb &&
+	list_for_each_entry(block_cb, &block->cb_list, list) {
+		if (block_cb->cb == cb &&
 		    block_cb->cb_ident == cb_ident)
 			return block_cb;
 	}
@@ -228,7 +226,7 @@ unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
 }
 EXPORT_SYMBOL(flow_block_cb_decref);
 
-bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
+bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
 			   struct list_head *driver_block_list)
 {
 	struct flow_block_cb *block_cb;
@@ -245,7 +243,8 @@ EXPORT_SYMBOL(flow_block_cb_is_busy);
 
 int flow_block_cb_setup_simple(struct flow_block_offload *f,
 			       struct list_head *driver_block_list,
-			       tc_setup_cb_t *cb, void *cb_ident, void *cb_priv,
+			       flow_setup_cb_t *cb,
+			       void *cb_ident, void *cb_priv,
 			       bool ingress_only)
 {
 	struct flow_block_cb *block_cb;
@@ -261,8 +260,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
 			return -EBUSY;
 
-		block_cb = flow_block_cb_alloc(f->net, cb, cb_ident,
-					       cb_priv, NULL);
+		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
 		if (IS_ERR(block_cb))
 			return PTR_ERR(block_cb);
 
@@ -270,7 +268,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 		list_add_tail(&block_cb->driver_list, driver_block_list);
 		return 0;
 	case FLOW_BLOCK_UNBIND:
-		block_cb = flow_block_cb_lookup(f, cb, cb_ident);
+		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
 		if (!block_cb)
 			return -ENOENT;
 

net/dsa/slave.c

@@ -951,7 +951,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 					struct flow_block_offload *f)
 {
 	struct flow_block_cb *block_cb;
-	tc_setup_cb_t *cb;
+	flow_setup_cb_t *cb;
 
 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		cb = dsa_slave_setup_tc_block_cb_ig;
@@ -967,7 +967,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
 			return -EBUSY;
 
-		block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL);
+		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
 		if (IS_ERR(block_cb))
 			return PTR_ERR(block_cb);
 
@@ -975,7 +975,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
 		return 0;
 	case FLOW_BLOCK_UNBIND:
-		block_cb = flow_block_cb_lookup(f, cb, dev);
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
 		if (!block_cb)
 			return -ENOENT;
 

net/netfilter/nf_tables_api.c

@@ -1662,7 +1662,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 
 		chain->flags |= NFT_BASE_CHAIN | flags;
 		basechain->policy = NF_ACCEPT;
-		INIT_LIST_HEAD(&basechain->cb_list);
+		flow_block_init(&basechain->flow_block);
 	} else {
 		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 		if (chain == NULL)

net/netfilter/nf_tables_offload.c

@@ -116,7 +116,7 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
 	struct flow_block_cb *block_cb;
 	int err;
 
-	list_for_each_entry(block_cb, &basechain->cb_list, list) {
+	list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
 		if (err < 0)
 			return err;
@@ -154,7 +154,7 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
 static int nft_flow_offload_bind(struct flow_block_offload *bo,
 				 struct nft_base_chain *basechain)
 {
-	list_splice(&bo->cb_list, &basechain->cb_list);
+	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
 	return 0;
 }
 
@@ -198,6 +198,7 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
 		return -EOPNOTSUPP;
 
 	bo.command = cmd;
+	bo.block = &basechain->flow_block;
 	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 	bo.extack = &extack;
 	INIT_LIST_HEAD(&bo.cb_list);

net/openvswitch/flow.c

@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 			   const struct sk_buff *skb)
 {
-	struct flow_stats *stats;
+	struct sw_flow_stats *stats;
 	unsigned int cpu = smp_processor_id();
 	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 			if (likely(flow->stats_last_writer != -1) &&
 			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
 				/* Try to allocate CPU-specific stats. */
-				struct flow_stats *new_stats;
+				struct sw_flow_stats *new_stats;
 
 				new_stats =
 					kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
 
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
+		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 
 		if (stats) {
 			/* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
 
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-		struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
+		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 
 		if (stats) {
 			spin_lock_bh(&stats->lock);

net/openvswitch/flow.h

@@ -194,7 +194,7 @@ struct sw_flow_actions {
 	struct nlattr actions[];
 };
 
-struct flow_stats {
+struct sw_flow_stats {
 	u64 packet_count;		/* Number of packets matched. */
 	u64 byte_count;			/* Number of bytes matched. */
 	unsigned long used;		/* Last used time (in jiffies). */
@@ -216,7 +216,7 @@ struct sw_flow {
 	struct cpumask cpu_used_mask;
 	struct sw_flow_mask *mask;
 	struct sw_flow_actions __rcu *sf_acts;
-	struct flow_stats __rcu *stats[]; /* One for each CPU.  First one
+	struct sw_flow_stats __rcu *stats[]; /* One for each CPU.  First one
 					   * is allocated at flow creation time,
 					   * the rest are allocated on demand
 					   * while holding the 'stats[0].lock'.

net/openvswitch/flow_table.c

@@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 struct sw_flow *ovs_flow_alloc(void)
 {
 	struct sw_flow *flow;
-	struct flow_stats *stats;
+	struct sw_flow_stats *stats;
 
 	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
 	if (!flow)
@@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow)
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
 		if (flow->stats[cpu])
 			kmem_cache_free(flow_stats_cache,
-					(struct flow_stats __force *)flow->stats[cpu]);
+					(struct sw_flow_stats __force *)flow->stats[cpu]);
 	kmem_cache_free(flow_cache, flow);
 }
 
@@ -712,13 +712,13 @@ int ovs_flow_init(void)
 
 	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
 				       + (nr_cpu_ids
-					  * sizeof(struct flow_stats *)),
+					  * sizeof(struct sw_flow_stats *)),
 				       0, 0, NULL);
 	if (flow_cache == NULL)
 		return -ENOMEM;
 
 	flow_stats_cache
-		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
 				    0, SLAB_HWCACHE_ALIGN, NULL);
 	if (flow_stats_cache == NULL) {
 		kmem_cache_destroy(flow_cache);

net/sched/cls_api.c

@@ -691,6 +691,8 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
 	if (!indr_dev->block)
 		return;
 
+	bo.block = &indr_dev->block->flow_block;
+
 	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
 			  &bo);
 	tcf_block_setup(indr_dev->block, &bo);
@@ -775,6 +777,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
 		.command	= command,
 		.binder_type	= ei->binder_type,
 		.net		= dev_net(dev),
+		.block		= &block->flow_block,
 		.block_shared	= tcf_block_shared(block),
 		.extack		= extack,
 	};
@@ -810,6 +813,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
 	bo.net = dev_net(dev);
 	bo.command = command;
 	bo.binder_type = ei->binder_type;
+	bo.block = &block->flow_block;
 	bo.block_shared = tcf_block_shared(block);
 	bo.extack = extack;
 	INIT_LIST_HEAD(&bo.cb_list);
@@ -987,8 +991,8 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
 		return ERR_PTR(-ENOMEM);
 	}
 	mutex_init(&block->lock);
+	flow_block_init(&block->flow_block);
 	INIT_LIST_HEAD(&block->chain_list);
-	INIT_LIST_HEAD(&block->cb_list);
 	INIT_LIST_HEAD(&block->owner_list);
 	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
 
@@ -1514,7 +1518,7 @@ void tcf_block_put(struct tcf_block *block)
 EXPORT_SYMBOL(tcf_block_put);
 
 static int
-tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
+tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
 			    void *cb_priv, bool add, bool offload_in_use,
 			    struct netlink_ext_ack *extack)
 {
@@ -1570,7 +1574,7 @@ static int tcf_block_bind(struct tcf_block *block,
 		i++;
 	}
 
-	list_splice(&bo->cb_list, &block->cb_list);
+	list_splice(&bo->cb_list, &block->flow_block.cb_list);
 
 	return 0;
 
@@ -3156,7 +3160,7 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
 	if (block->nooffloaddevcnt && err_stop)
 		return -EOPNOTSUPP;
 
-	list_for_each_entry(block_cb, &block->cb_list, list) {
+	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
 		if (err) {
 			if (err_stop)

net/sched/cls_bpf.c

@@ -651,7 +651,7 @@ skip:
 	}
 }
 
-static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
 			     void *cb_priv, struct netlink_ext_ack *extack)
 {
 	struct cls_bpf_head *head = rtnl_dereference(tp->root);

net/sched/cls_flower.c

@@ -1800,7 +1800,7 @@ fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
 	return NULL;
 }
 
-static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
 			void *cb_priv, struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;

net/sched/cls_matchall.c

@@ -282,7 +282,7 @@ skip:
 	arg->count++;
 }
 
-static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
 			  void *cb_priv, struct netlink_ext_ack *extack)
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);

net/sched/cls_u32.c

@@ -1152,7 +1152,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 }
 
 static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
-			       bool add, tc_setup_cb_t *cb, void *cb_priv,
+			       bool add, flow_setup_cb_t *cb, void *cb_priv,
 			       struct netlink_ext_ack *extack)
 {
 	struct tc_cls_u32_offload cls_u32 = {};
@@ -1172,7 +1172,7 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
 }
 
 static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
-			       bool add, tc_setup_cb_t *cb, void *cb_priv,
+			       bool add, flow_setup_cb_t *cb, void *cb_priv,
 			       struct netlink_ext_ack *extack)
 {
 	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
@@ -1213,7 +1213,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	return 0;
 }
 
-static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
 			 void *cb_priv, struct netlink_ext_ack *extack)
 {
 	struct tc_u_common *tp_c = tp->data;