forked from Minki/linux
net: convert sk_buff_fclones.fclone_ref from atomic_t to refcount_t
refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows to avoid accidental refcounter overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
633547973f
commit
2638595afc
@@ -915,7 +915,7 @@ struct sk_buff_fclones {

 	struct sk_buff	skb2;

-	atomic_t	fclone_ref;
+	refcount_t	fclone_ref;
 };

 /**
@@ -935,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
 	fclones = container_of(skb, struct sk_buff_fclones, skb1);

 	return skb->fclone == SKB_FCLONE_ORIG &&
-	       atomic_read(&fclones->fclone_ref) > 1 &&
+	       refcount_read(&fclones->fclone_ref) > 1 &&
 	       fclones->skb2.sk == sk;
 }

@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,

 		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
 		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(&fclones->fclone_ref, 1);
+		refcount_set(&fclones->fclone_ref, 1);

 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
@@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		 * This test would have no chance to be true for the clone,
 		 * while here, branch prediction will be good.
 		 */
-		if (atomic_read(&fclones->fclone_ref) == 1)
+		if (refcount_read(&fclones->fclone_ref) == 1)
 			goto fastpath;
 		break;

@@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 		break;
 	}
-	if (!atomic_dec_and_test(&fclones->fclone_ref))
+	if (!refcount_dec_and_test(&fclones->fclone_ref))
 		return;
 fastpath:
 	kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 			return NULL;

 		if (skb->fclone == SKB_FCLONE_ORIG &&
-		    atomic_read(&fclones->fclone_ref) == 1) {
+		    refcount_read(&fclones->fclone_ref) == 1) {
 			n = &fclones->skb2;
-			atomic_set(&fclones->fclone_ref, 2);
+			refcount_set(&fclones->fclone_ref, 2);
 		} else {
 			if (skb_pfmemalloc(skb))
 				gfp_mask |= __GFP_MEMALLOC;
Loading…
Reference in New Issue
Block a user