Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -1943,18 +1943,31 @@ static inline int sk_tx_queue_get(const struct sock *sk)
 	return -1;
 }
 
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+				     const struct sk_buff *skb,
+				     bool force_set)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
 	if (skb_rx_queue_recorded(skb)) {
 		u16 rx_queue = skb_get_rx_queue(skb);
 
-		if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+		if (force_set ||
+		    unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
 			WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
 	}
 #endif
 }
 
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+	__sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+	__sk_rx_queue_set(sk, skb, false);
+}
+
 static inline void sk_rx_queue_clear(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
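Note: the hunk above splits the old sk_rx_queue_set() into a __sk_rx_queue_set() helper taking a force_set flag. With force_set true (sk_rx_queue_set()) the queue is stored unconditionally, skipping the read entirely; with force_set false (the new sk_rx_queue_update()) the mapping is read first and only stored on change, so a hot path that keeps seeing the same queue never dirties that cache line. A minimal user-space sketch of this logic, using stand-in types and plain accesses in place of READ_ONCE()/WRITE_ONCE():

/* User-space illustration only, not kernel code; struct fake_sock and
 * fake_rx_queue_set() are invented names for this sketch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_sock {
	uint16_t rx_queue_mapping;
};

static void fake_rx_queue_set(struct fake_sock *sk, uint16_t rx_queue,
			      bool force_set)
{
	/* force_set == true: store unconditionally, no read needed.
	 * force_set == false: compare first, store only on change, so
	 * repeated calls with the same queue do not dirty the line. */
	if (force_set || sk->rx_queue_mapping != rx_queue)
		sk->rx_queue_mapping = rx_queue;
}

int main(void)
{
	struct fake_sock sk = { .rx_queue_mapping = 3 };

	fake_rx_queue_set(&sk, 3, false);	/* no store: value unchanged */
	fake_rx_queue_set(&sk, 5, true);	/* unconditional store */
	printf("mapping=%u\n", sk.rx_queue_mapping);
	return 0;
}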
@@ -2457,19 +2470,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
  * @sk: socket
  *
  * Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
  * everything that's associated with %current.
  *
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
  *
  * Return: a per task page_frag if context allows that,
  * otherwise a per socket one.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-	if (gfpflags_normal_context(sk->sk_allocation))
+	if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+	    (__GFP_DIRECT_RECLAIM | __GFP_FS))
 		return &current->task_frag;
 
 	return &sk->sk_frag;
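Note: the hunk above replaces gfpflags_normal_context() with an explicit mask test. The per-task page_frag is used only when sk->sk_allocation permits both direct reclaim and FS recursion and the socket is not a memalloc one; per the comment, GFP_NOFS callers such as page fault handlers therefore fall back to the per-socket frag. A user-space sketch of the mask test, using stand-in flag values rather than the real kernel GFP bits:

/* Illustration only; FAKE_GFP_* values are invented for this sketch. */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_GFP_DIRECT_RECLAIM	0x1u	/* allocation may direct-reclaim */
#define FAKE_GFP_FS		0x2u	/* allocation may recurse into FS */
#define FAKE_GFP_MEMALLOC	0x4u	/* emergency reserves allowed */

static bool can_use_task_frag(unsigned int gfp)
{
	/* Both DIRECT_RECLAIM and FS must be set, and MEMALLOC must be
	 * clear: the masked value must equal exactly those two bits. */
	return (gfp & (FAKE_GFP_DIRECT_RECLAIM | FAKE_GFP_MEMALLOC | FAKE_GFP_FS)) ==
	       (FAKE_GFP_DIRECT_RECLAIM | FAKE_GFP_FS);
}

int main(void)
{
	unsigned int gfp_kernel_like = FAKE_GFP_DIRECT_RECLAIM | FAKE_GFP_FS;
	unsigned int gfp_nofs_like   = FAKE_GFP_DIRECT_RECLAIM;
	unsigned int gfp_atomic_like = 0;

	printf("GFP_KERNEL-like: %d\n", can_use_task_frag(gfp_kernel_like)); /* 1 */
	printf("GFP_NOFS-like:   %d\n", can_use_task_frag(gfp_nofs_like));   /* 0 */
	printf("GFP_ATOMIC-like: %d\n", can_use_task_frag(gfp_atomic_like)); /* 0 */
	return 0;
}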