netfilter: nft_set_rbtree: Detect partial overlaps on insertion
...and return -ENOTEMPTY to the front-end in this case, instead of
proceeding. Currently, nft takes care of checking for these cases
and not sending them to the kernel, but if we drop the set_overlap()
call in nft we can end up in situations like:

  # nft add table t
  # nft add set t s '{ type inet_service ; flags interval ; }'
  # nft add element t s '{ 1 - 5 }'
  # nft add element t s '{ 6 - 10 }'
  # nft add element t s '{ 4 - 7 }'
  # nft list set t s
  table ip t {
          set s {
                  type inet_service
                  flags interval
                  elements = { 1-3, 4-5, 6-7 }
          }
  }

This change has the primary purpose of making the behaviour
consistent with nft_set_pipapo, but is also functional to avoid
inconsistent behaviour if userspace sends overlapping elements for
any reason.

v2: When we meet the same key data in the tree, as start element while
    inserting an end element, or as end element while inserting a start
    element, actually check that the existing element is active, before
    resetting the overlap flag (Pablo Neira Ayuso)

Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
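To make the enforced behaviour concrete, here is a minimal user-space model of the new semantics. It is not kernel code: the set is a plain array of intervals rather than an rbtree of boundary elements, and all names are illustrative. An element that exactly matches an existing one is still reported as -EEXIST, while one that overlaps an existing interval without matching it is rejected with -ENOTEMPTY instead of being silently split as in the listing above.

/* Illustrative user-space model only: the kernel detects overlaps while
 * descending an rbtree of interval boundaries, not by scanning an array.
 */
#include <errno.h>
#include <stdio.h>

struct interval { unsigned int start, end; };	/* inclusive bounds */

static struct interval elems[16];
static unsigned int nelems;

static int model_insert(struct interval new)
{
	unsigned int i;

	for (i = 0; i < nelems; i++) {
		/* Exact duplicate: same return code as before the patch. */
		if (elems[i].start == new.start && elems[i].end == new.end)
			return -EEXIST;

		/* Any other intersection: an overlap, now rejected. */
		if (new.start <= elems[i].end && elems[i].start <= new.end)
			return -ENOTEMPTY;
	}

	elems[nelems++] = new;
	return 0;
}

int main(void)
{
	/* The sequence from the example above: 1-5, 6-10, then 4-7. */
	printf("%d\n", model_insert((struct interval){ 1, 5 }));	/* 0 */
	printf("%d\n", model_insert((struct interval){ 6, 10 }));	/* 0 */
	printf("%d\n", model_insert((struct interval){ 4, 7 }));	/* -ENOTEMPTY */
	return 0;
}

Running the model prints 0, 0 and then the negative errno value for ENOTEMPTY, mirroring the error the front-end would now receive for the third element.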
commit 7c84d41416 (parent 6f7c9caf01)
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -213,8 +213,43 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 	u8 genmask = nft_genmask_next(net);
 	struct nft_rbtree_elem *rbe;
 	struct rb_node *parent, **p;
+	bool overlap = false;
 	int d;
 
+	/* Detect overlaps as we descend the tree. Set the flag in these cases:
+	 *
+	 * a1. |__ _ _?  >|__ _ _   (insert start after existing start)
+	 * a2.  _ _ __>|  ?_ _ __|  (insert end before existing end)
+	 * a3.  _ _ ___|  ?_ _ _>|  (insert end after existing end)
+	 * a4. >|__ _ _   _ _ __|   (insert start before existing end)
+	 *
+	 * and clear it later on, as we eventually reach the points indicated by
+	 * '?' above, in the cases described below. We'll always meet these
+	 * later, locally, due to tree ordering, and overlaps for the intervals
+	 * that are the closest together are always evaluated last.
+	 *
+	 * b1. |__ _ _!  >|__ _ _   (insert start after existing end)
+	 * b2.  _ _ __>|  !_ _ __|  (insert end before existing start)
+	 * b3.  !_____>|            (insert end after existing start)
+	 *
+	 * Case a4. resolves to b1.:
+	 * - if the inserted start element is the leftmost, because the '0'
+	 *   element in the tree serves as end element
+	 * - otherwise, if an existing end is found. Note that end elements are
+	 *   always inserted after corresponding start elements.
+	 *
+	 * For a new, rightmost pair of elements, we'll hit cases b1. and b3.,
+	 * in that order.
+	 *
+	 * The flag is also cleared in two special cases:
+	 *
+	 * b4. |__ _ _!|<_ _ _   (insert start right before existing end)
+	 * b5. |__ _ >|!__ _ _   (insert end right after existing start)
+	 *
+	 * which always happen as last step and imply that no further
+	 * overlapping is possible.
+	 */
+
 	parent = NULL;
 	p = &priv->root.rb_node;
 	while (*p != NULL) {
@@ -223,17 +258,42 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 		d = memcmp(nft_set_ext_key(&rbe->ext),
 			   nft_set_ext_key(&new->ext),
 			   set->klen);
-		if (d < 0)
+		if (d < 0) {
 			p = &parent->rb_left;
-		else if (d > 0)
+
+			if (nft_rbtree_interval_start(new)) {
+				overlap = nft_rbtree_interval_start(rbe) &&
+					  nft_set_elem_active(&rbe->ext,
+							      genmask);
+			} else {
+				overlap = nft_rbtree_interval_end(rbe) &&
+					  nft_set_elem_active(&rbe->ext,
+							      genmask);
+			}
+		} else if (d > 0) {
 			p = &parent->rb_right;
-		else {
+
+			if (nft_rbtree_interval_end(new)) {
+				overlap = nft_rbtree_interval_end(rbe) &&
+					  nft_set_elem_active(&rbe->ext,
+							      genmask);
+			} else if (nft_rbtree_interval_end(rbe) &&
+				   nft_set_elem_active(&rbe->ext, genmask)) {
+				overlap = true;
+			}
+		} else {
 			if (nft_rbtree_interval_end(rbe) &&
 			    nft_rbtree_interval_start(new)) {
 				p = &parent->rb_left;
+
+				if (nft_set_elem_active(&rbe->ext, genmask))
+					overlap = false;
 			} else if (nft_rbtree_interval_start(rbe) &&
 				   nft_rbtree_interval_end(new)) {
 				p = &parent->rb_right;
+
+				if (nft_set_elem_active(&rbe->ext, genmask))
+					overlap = false;
 			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
 				*ext = &rbe->ext;
 				return -EEXIST;
@@ -242,6 +302,10 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 			}
 		}
 	}
+
+	if (overlap)
+		return -ENOTEMPTY;
+
 	rb_link_node_rcu(&new->node, parent, p);
 	rb_insert_color(&new->node, &priv->root);
 	return 0;
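The two return codes keep distinct meanings for callers of the insertion path: -EEXIST still indicates an element that is already present as-is, while -ENOTEMPTY indicates a clash with an existing interval that cannot be merged, matching the convention of nft_set_pipapo mentioned in the commit message. The following is a purely illustrative sketch of how a caller might separate the two cases; the helper name is hypothetical and this is not kernel code.

#include <errno.h>
#include <stdio.h>

/* Hypothetical diagnostic helper for an insertion result. */
static void report_insert_result(int err)
{
	switch (err) {
	case 0:
		printf("element inserted\n");
		break;
	case -EEXIST:
		printf("identical element already in the set\n");
		break;
	case -ENOTEMPTY:
		printf("element overlaps an existing interval\n");
		break;
	default:
		printf("insertion failed: %d\n", err);
		break;
	}
}

int main(void)
{
	report_insert_result(-ENOTEMPTY);
	return 0;
}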