Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) The value chosen for the new SO_MAX_PACING_RATE socket option on
    parisc was very poorly chosen; let's fix it while we still can.
    From Eric Dumazet.

 2) Our generic reciprocal divide was found to handle some edge cases
    incorrectly; part of this is encoded into BPF programs as deep as
    the JIT engines themselves.  Just use a real divide throughout for
    now.  From Eric Dumazet.

 3) Because the initial lookup is lockless, the TCP metrics engine can
    end up creating two entries for the same lookup key.  Fix this by
    doing a second lookup under the lock before we actually create the
    new entry.  From Christoph Paasch.

 4) Fix scatter-gather list init in usbnet driver, from Bjørn Mork.

 5) Fix unintended 32-bit truncation in cxgb4 driver's bit shifting.
    From Dan Carpenter.

 6) Netlink socket dumping uses the wrong socket state for timewait
    sockets.  Fix from Neal Cardwell.

 7) Fix netlink memory leak in ieee802154_add_iface(), from Christian
    Engelmayer.

 8) Multicast forwarding in ipv4 can overflow the per-rule reference
    counts, causing all multicast traffic to cease.  Fix from Hannes
    Frederic Sowa.

 9) via-rhine needs to stop all TX queues when it resets the device,
    from Richard Weinberger.

10) Fix RDS per-cpu accesses broken by the this_cpu_* conversions.
    From Gerald Schaefer.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  s390/bpf,jit: fix 32 bit divisions, use unsigned divide instructions
  parisc: fix SO_MAX_PACING_RATE typo
  ipv6: simplify detection of first operational link-local address on interface
  tcp: metrics: Avoid duplicate entries with the same destination-IP
  net: rds: fix per-cpu helper usage
  e1000e: Fix compilation warning when !CONFIG_PM_SLEEP
  bpf: do not use reciprocal divide
  be2net: add dma_mapping_error() check for dma_map_page()
  bnx2x: Don't release PCI bars on shutdown
  net,via-rhine: Fix tx_timeout handling
  batman-adv: fix batman-adv header overhead calculation
  qlge: Fix vlan netdev features.
  net: avoid reference counter overflows on fib_rules in multicast forwarding
  dm9601: add USB IDs for new dm96xx variants
  MAINTAINERS: add virtio-dev ML for virtio
  ieee802154: Fix memory leak in ieee802154_add_iface()
  net: usbnet: fix SG initialisation
  inet_diag: fix inet_diag_dump_icsk() to use correct state for timewait sockets
  cxgb4: silence shift wrapping static checker warning
commit 7d0d46da75
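Several hunks below replace the BPF reciprocal divide with a true divide (fix #2 above). As background, here is a minimal userspace sketch of the edge case, using the same formula the old kernel helper used (illustrative C, not kernel code): reciprocal_value(K) precomputes roughly 2^32/K so that a divide becomes a multiply plus a shift, but for K == 1 the constant truncates to 0 and every "division" returns 0. This is why the JIT hunks special-case K == 1 and otherwise emit a real divide instruction.

#include <stdint.h>
#include <stdio.h>

/* same formula the old kernel helper used */
static uint32_t reciprocal_value(uint32_t k)
{
        uint64_t val = (1ULL << 32) + (k - 1);
        return (uint32_t)(val / k);     /* truncates to 0 when k == 1 */
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
        return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
        uint32_t a = 12345;

        printf("%u\n", reciprocal_divide(a, reciprocal_value(1))); /* 0     */
        printf("%u\n", a / 1);                                     /* 12345 */
        return 0;
}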
@@ -9231,6 +9231,7 @@ F: include/media/videobuf2-*
 
 VIRTIO CONSOLE DRIVER
 M: Amit Shah <amit.shah@redhat.com>
+L: virtio-dev@lists.oasis-open.org
 L: virtualization@lists.linux-foundation.org
 S: Maintained
 F: drivers/char/virtio_console.c
@@ -9240,6 +9241,7 @@ F: include/uapi/linux/virtio_console.h
 VIRTIO CORE, NET AND BLOCK DRIVERS
 M: Rusty Russell <rusty@rustcorp.com.au>
 M: "Michael S. Tsirkin" <mst@redhat.com>
+L: virtio-dev@lists.oasis-open.org
 L: virtualization@lists.linux-foundation.org
 S: Maintained
 F: drivers/virtio/
@@ -9252,6 +9254,7 @@ F: include/uapi/linux/virtio_*.h
 VIRTIO HOST (VHOST)
 M: "Michael S. Tsirkin" <mst@redhat.com>
 L: kvm@vger.kernel.org
+L: virtio-dev@lists.oasis-open.org
 L: virtualization@lists.linux-foundation.org
 L: netdev@vger.kernel.org
 S: Maintained
@@ -641,10 +641,10 @@ load_ind:
 			emit(ARM_MUL(r_A, r_A, r_X), ctx);
 			break;
 		case BPF_S_ALU_DIV_K:
-			/* current k == reciprocal_value(userspace k) */
+			if (k == 1)
+				break;
 			emit_mov_i(r_scratch, k, ctx);
-			/* A = top 32 bits of the product */
-			emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
+			emit_udiv(r_A, r_A, r_scratch, ctx);
 			break;
 		case BPF_S_ALU_DIV_X:
 			update_on_xread(ctx);
@@ -75,6 +75,6 @@
 
 #define SO_BUSY_POLL		0x4027
 
-#define SO_MAX_PACING_RATE	0x4048
+#define SO_MAX_PACING_RATE	0x4028
 
 #endif /* _UAPI_ASM_SOCKET_H */
@@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			}
 			PPC_DIVWU(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+		case BPF_S_ALU_DIV_K: /* A /= K */
+			if (K == 1)
+				break;
 			PPC_LI32(r_scratch1, K);
-			/* Top 32 bits of 64bit result -> A */
-			PPC_MULHWU(r_A, r_A, r_scratch1);
+			PPC_DIVWU(r_A, r_A, r_scratch1);
 			break;
 		case BPF_S_ALU_AND_X:
 			ctx->seen |= SEEN_XREG;
@@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
 		/* lhi %r4,0 */
 		EMIT4(0xa7480000);
-		/* dr %r4,%r12 */
-		EMIT2(0x1d4c);
+		/* dlr %r4,%r12 */
+		EMIT4(0xb997004c);
 		break;
-	case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
-		/* m %r4,<d(K)>(%r13) */
-		EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
-		/* lr %r5,%r4 */
-		EMIT2(0x1854);
+	case BPF_S_ALU_DIV_K: /* A /= K */
+		if (K == 1)
+			break;
+		/* lhi %r4,0 */
+		EMIT4(0xa7480000);
+		/* dl %r4,<d(K)>(%r13) */
+		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
 		break;
 	case BPF_S_ALU_MOD_X: /* A %= X */
 		jit->seen |= SEEN_XREG | SEEN_RET0;
@@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
 		/* lhi %r4,0 */
 		EMIT4(0xa7480000);
-		/* dr %r4,%r12 */
-		EMIT2(0x1d4c);
+		/* dlr %r4,%r12 */
+		EMIT4(0xb997004c);
 		/* lr %r5,%r4 */
 		EMIT2(0x1854);
 		break;
 	case BPF_S_ALU_MOD_K: /* A %= K */
+		if (K == 1) {
+			/* lhi %r5,0 */
+			EMIT4(0xa7580000);
+			break;
+		}
 		/* lhi %r4,0 */
 		EMIT4(0xa7480000);
-		/* d %r4,<d(K)>(%r13) */
-		EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
+		/* dl %r4,<d(K)>(%r13) */
+		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
 		/* lr %r5,%r4 */
 		EMIT2(0x1854);
 		break;
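The instruction swap above ('dr' to 'dlr', 'd' to 'dl') is the unsigned-division half of the s390 fix. BPF's A and X registers are u32, so a 32-bit signed divide misinterprets any operand with the top bit set. A small userspace illustration (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t a = 0x80000000u;       /* 2147483648, top bit set */
        uint32_t x = 3;

        /* a signed 32-bit divide sees -2147483648 / 3 */
        printf("signed:   %d\n", (int32_t)a / (int32_t)x);  /* -715827882 */
        /* BPF semantics require the unsigned result */
        printf("unsigned: %u\n", a / x);                    /* 715827882  */
        return 0;
}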
@@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp)
 			case BPF_S_ALU_MUL_K:	/* A *= K */
 				emit_alu_K(MUL, K);
 				break;
-			case BPF_S_ALU_DIV_K:	/* A /= K */
-				emit_alu_K(MUL, K);
-				emit_read_y(r_A);
+			case BPF_S_ALU_DIV_K:	/* A /= K with K != 0*/
+				if (K == 1)
+					break;
+				emit_write_y(G0);
+#ifdef CONFIG_SPARC32
+				/* The Sparc v8 architecture requires
+				 * three instructions between a %y
+				 * register write and the first use.
+				 */
+				emit_nop();
+				emit_nop();
+				emit_nop();
+#endif
+				emit_alu_K(DIV, K);
 				break;
 			case BPF_S_ALU_DIV_X:	/* A /= X; */
 				emit_cmpi(r_X, 0);
@@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp)
 				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
 				break;
 			case BPF_S_ALU_MOD_K: /* A %= K; */
+				if (K == 1) {
+					CLEAR_A();
+					break;
+				}
 				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
 				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
 				EMIT2(0xf7, 0xf1);	/* div %ecx */
 				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
 				break;
-			case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
-				EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
-				EMIT(K, 4);
-				EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
+			case BPF_S_ALU_DIV_K: /* A /= K */
+				if (K == 1)
+					break;
+				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
+				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
+				EMIT2(0xf7, 0xf1);	/* div %ecx */
 				break;
 			case BPF_S_ALU_AND_X:
 				seen |= SEEN_XREG;
@@ -12942,25 +12942,26 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
-	if (bp->regview)
-		iounmap(bp->regview);
+	if (remove_netdev) {
+		if (bp->regview)
+			iounmap(bp->regview);
 
-	/* for vf doorbells are part of the regview and were unmapped along with
-	 * it. FW is only loaded by PF.
-	 */
-	if (IS_PF(bp)) {
-		if (bp->doorbells)
-			iounmap(bp->doorbells);
+		/* For vfs, doorbells are part of the regview and were unmapped
+		 * along with it. FW is only loaded by PF.
+		 */
+		if (IS_PF(bp)) {
+			if (bp->doorbells)
+				iounmap(bp->doorbells);
 
-		bnx2x_release_firmware(bp);
-	}
-	bnx2x_free_mem_bp(bp);
+			bnx2x_release_firmware(bp);
+		}
+		bnx2x_free_mem_bp(bp);
 
-	if (remove_netdev)
 		free_netdev(dev);
 
-	if (atomic_read(&pdev->enable_cnt) == 1)
-		pci_release_regions(pdev);
+		if (atomic_read(&pdev->enable_cnt) == 1)
+			pci_release_regions(pdev);
+	}
 
 	pci_disable_device(pdev);
 }
@@ -423,7 +423,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 	 * in the Compressed Filter Tuple.
 	 */
 	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
-		ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+		ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
 
 	if (tp->port_shift >= 0)
 		ntuple |= (u64)l2t->lport << tp->port_shift;
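The one-character nature of this fix hides a common C pitfall: both shift operands are 32-bit, so the shift is evaluated in 32 bits and the high bits are discarded before the result is widened into the 64-bit ntuple. Casting one operand to u64 first makes the whole shift 64-bit. A standalone illustration (the values below are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vlan_bits = 0x11234;   /* hypothetical F_FT_VLAN_VLD | vlan */
        int shift = 24;

        uint64_t wrong = vlan_bits << shift;            /* computed in 32 bits */
        uint64_t right = (uint64_t)vlan_bits << shift;  /* computed in 64 bits */

        printf("wrong: 0x%llx\n", (unsigned long long)wrong);  /* 0x34000000    */
        printf("right: 0x%llx\n", (unsigned long long)right);  /* 0x11234000000 */
        return 0;
}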
@@ -1776,6 +1776,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
 	struct be_queue_info *rxq = &rxo->q;
 	struct page *pagep = NULL;
+	struct device *dev = &adapter->pdev->dev;
 	struct be_eth_rx_d *rxd;
 	u64 page_dmaaddr = 0, frag_dmaaddr;
 	u32 posted, page_offset = 0;
@@ -1788,9 +1789,15 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 				rx_stats(rxo)->rx_post_fail++;
 				break;
 			}
-			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
-						    0, adapter->big_page_size,
+			page_dmaaddr = dma_map_page(dev, pagep, 0,
+						    adapter->big_page_size,
 						    DMA_FROM_DEVICE);
+			if (dma_mapping_error(dev, page_dmaaddr)) {
+				put_page(pagep);
+				pagep = NULL;
+				rx_stats(rxo)->rx_post_fail++;
+				break;
+			}
 			page_info->page_offset = 0;
 		} else {
 			get_page(pagep);
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int e1000_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
 
 	return __e1000_resume(pdev);
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_RUNTIME
 static int e1000_runtime_suspend(struct device *dev)
@@ -7015,13 +7015,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
-#ifdef CONFIG_PM
 static const struct dev_pm_ops e1000_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
 	SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
 			   e1000_idle)
 };
-#endif
 
 /* PCI Device API Driver */
 static struct pci_driver e1000_driver = {
@@ -7029,11 +7027,9 @@ static struct pci_driver e1000_driver = {
 	.id_table = e1000_pci_tbl,
 	.probe    = e1000_probe,
 	.remove   = e1000_remove,
-#ifdef CONFIG_PM
 	.driver   = {
 		.pm = &e1000_pm_ops,
 	},
-#endif
 	.shutdown = e1000_shutdown,
 	.err_handler = &e1000_err_handler
 };
@@ -4765,6 +4765,8 @@ static int qlge_probe(struct pci_dev *pdev,
 				 NETIF_F_RXCSUM;
 	ndev->features = ndev->hw_features;
 	ndev->vlan_features = ndev->hw_features;
+	/* vlan gets same features (except vlan filter) */
+	ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	if (test_bit(QL_DMA64, &qdev->flags))
 		ndev->features |= NETIF_F_HIGHDMA;
@@ -1618,6 +1618,7 @@ static void rhine_reset_task(struct work_struct *work)
 		goto out_unlock;
 
 	napi_disable(&rp->napi);
+	netif_tx_disable(dev);
 	spin_lock_bh(&rp->lock);
 
 	/* clear all descriptors */
@@ -614,6 +614,18 @@
 	 USB_DEVICE(0x0a46, 0x9621),	/* DM9621A USB to Fast Ethernet Adapter */
 	 .driver_info = (unsigned long)&dm9601_info,
 	},
+	{
+	 USB_DEVICE(0x0a46, 0x9622),	/* DM9622 USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
+	{
+	 USB_DEVICE(0x0a46, 0x0269),	/* DM962OA USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
+	{
+	 USB_DEVICE(0x0a46, 0x1269),	/* DM9621A USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
 	{},			// END
 };
 
@@ -1245,7 +1245,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
 		return -ENOMEM;
 
 	urb->num_sgs = num_sgs;
-	sg_init_table(urb->sg, urb->num_sgs);
+	sg_init_table(urb->sg, urb->num_sgs + 1);
 
 	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
 	total_len += skb_headlen(skb);
@@ -165,7 +165,6 @@ struct inet6_dev {
 	struct net_device	*dev;
 
 	struct list_head	addr_list;
-	int			valid_ll_addr_cnt;
 
 	struct ifmcaddr6	*mc_list;
 	struct ifmcaddr6	*mc_tomb;
@@ -277,7 +277,7 @@ int batadv_max_header_len(void)
 	       sizeof(struct batadv_coded_packet));
 #endif
 
-	return header_len;
+	return header_len + ETH_HLEN;
 }
 
 /**
@@ -36,7 +36,6 @@
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/filter.h>
-#include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
@@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A = reciprocal_divide(A, K);
+			A /= K;
 			continue;
 		case BPF_S_ALU_MOD_X:
 			if (X == 0)
@@ -553,11 +552,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 		/* Some instructions need special checks */
 		switch (code) {
 		case BPF_S_ALU_DIV_K:
-			/* check for division by zero */
-			if (ftest->k == 0)
-				return -EINVAL;
-			ftest->k = reciprocal_value(ftest->k);
-			break;
 		case BPF_S_ALU_MOD_K:
 			/* check for division by zero */
 			if (ftest->k == 0)
@@ -853,27 +847,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
 	to->code = decodes[code];
 	to->jt = filt->jt;
 	to->jf = filt->jf;
-
-	if (code == BPF_S_ALU_DIV_K) {
-		/*
-		 * When loaded this rule user gave us X, which was
-		 * translated into R = r(X). Now we calculate the
-		 * RR = r(R) and report it back. If next time this
-		 * value is loaded and RRR = r(RR) is calculated
-		 * then the R == RRR will be true.
-		 *
-		 * One exception. X == 1 translates into R == 0 and
-		 * we can't calculate RR out of it with r().
-		 */
-
-		if (filt->k == 0)
-			to->k = 1;
-		else
-			to->k = reciprocal_value(filt->k);
-
-		BUG_ON(reciprocal_value(to->k) != filt->k);
-	} else
-		to->k = filt->k;
+	to->k = filt->k;
 }
 
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
@@ -221,8 +221,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
 
 	if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
 		type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
-		if (type >= __IEEE802154_DEV_MAX)
-			return -EINVAL;
+		if (type >= __IEEE802154_DEV_MAX) {
+			rc = -EINVAL;
+			goto nla_put_failure;
+		}
 	}
 
 	dev = phy->add_iface(phy, devname, type);
|
||||
spin_lock_bh(lock);
|
||||
sk_nulls_for_each(sk, node, &head->chain) {
|
||||
int res;
|
||||
int state;
|
||||
|
||||
if (!net_eq(sock_net(sk), net))
|
||||
continue;
|
||||
if (num < s_num)
|
||||
goto next_normal;
|
||||
if (!(r->idiag_states & (1 << sk->sk_state)))
|
||||
state = (sk->sk_state == TCP_TIME_WAIT) ?
|
||||
inet_twsk(sk)->tw_substate : sk->sk_state;
|
||||
if (!(r->idiag_states & (1 << state)))
|
||||
goto next_normal;
|
||||
if (r->sdiag_family != AF_UNSPEC &&
|
||||
sk->sk_family != r->sdiag_family)
|
||||
|
@@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
 			   struct mr_table **mrt)
 {
-	struct ipmr_result res;
-	struct fib_lookup_arg arg = { .result = &res, };
 	int err;
+	struct ipmr_result res;
+	struct fib_lookup_arg arg = {
+		.result = &res,
+		.flags = FIB_LOOKUP_NOREF,
+	};
 
 	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
 			       flowi4_to_flowi(flp4), 0, &arg);
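The FIB_LOOKUP_NOREF flag added above (and in the matching ip6mr hunk below) means the per-packet lookup no longer takes a rule reference at all, which is the point of fix #8: taking one 32-bit reference per forwarded multicast packet will eventually wrap the counter. An order-of-magnitude sketch (userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t refcnt = UINT32_MAX;   /* one increment per packet... */

        refcnt++;                       /* ...wraps to 0 after 2^32 packets */
        printf("refcnt after wrap: %u\n", refcnt);

        /* at ~1M multicast packets/sec the counter wraps in roughly 71 minutes */
        printf("seconds to wrap at 1e6 pps: %.0f\n", 4294967296.0 / 1e6);
        return 0;
}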
@@ -22,6 +22,9 @@
 
 int sysctl_tcp_nometrics_save __read_mostly;
 
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
+						   struct net *net, unsigned int hash);
+
 struct tcp_fastopen_metrics {
 	u16	mss;
 	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
@@ -130,16 +133,41 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
 	}
 }
 
+#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+		tcpm_suck_dst(tm, dst, false);
+}
+
+#define TCP_METRICS_RECLAIM_DEPTH	5
+#define TCP_METRICS_RECLAIM_PTR	(struct tcp_metrics_block *) 0x1UL
+
 static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 					  struct inetpeer_addr *addr,
-					  unsigned int hash,
-					  bool reclaim)
+					  unsigned int hash)
 {
 	struct tcp_metrics_block *tm;
 	struct net *net;
+	bool reclaim = false;
 
 	spin_lock_bh(&tcp_metrics_lock);
 	net = dev_net(dst->dev);
+
+	/* While waiting for the spin-lock the cache might have been populated
+	 * with this entry and so we have to check again.
+	 */
+	tm = __tcp_get_metrics(addr, net, hash);
+	if (tm == TCP_METRICS_RECLAIM_PTR) {
+		reclaim = true;
+		tm = NULL;
+	}
+	if (tm) {
+		tcpm_check_stamp(tm, dst);
+		goto out_unlock;
+	}
+
 	if (unlikely(reclaim)) {
 		struct tcp_metrics_block *oldest;
@@ -169,17 +197,6 @@ out_unlock:
 	return tm;
 }
 
-#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)
-
-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
-{
-	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
-		tcpm_suck_dst(tm, dst, false);
-}
-
-#define TCP_METRICS_RECLAIM_DEPTH	5
-#define TCP_METRICS_RECLAIM_PTR	(struct tcp_metrics_block *) 0x1UL
-
 static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 {
 	if (tm)
@@ -282,7 +299,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 	struct inetpeer_addr addr;
 	unsigned int hash;
 	struct net *net;
-	bool reclaim;
 
 	addr.family = sk->sk_family;
 	switch (addr.family) {
@@ -304,13 +320,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
 
 	tm = __tcp_get_metrics(&addr, net, hash);
-	reclaim = false;
-	if (tm == TCP_METRICS_RECLAIM_PTR) {
-		reclaim = true;
+	if (tm == TCP_METRICS_RECLAIM_PTR)
 		tm = NULL;
-	}
 	if (!tm && create)
-		tm = tcpm_new(dst, &addr, hash, reclaim);
+		tm = tcpm_new(dst, &addr, hash);
 	else
 		tcpm_check_stamp(tm, dst);
 
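The tcp_metrics change is an instance of the classic "recheck under the lock" pattern: the fast path looks up locklessly, and the slow path must repeat the lookup after taking the lock, because another CPU may have created the entry in the window between the two. A minimal userspace sketch of the pattern (a hypothetical single-slot cache; cache_lookup()/cache_create() are stand-ins, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

struct entry { int key; };

static struct entry *slot;              /* the "cache" */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *cache_lookup(int key)
{
        struct entry *e = slot;         /* lockless read */
        return (e && e->key == key) ? e : NULL;
}

static struct entry *cache_create(int key)
{
        struct entry *e = malloc(sizeof(*e));
        if (!e)
                return NULL;
        e->key = key;
        slot = e;
        return e;
}

struct entry *cache_get_or_create(int key)
{
        struct entry *e = cache_lookup(key);    /* fast path, no lock */
        if (e)
                return e;

        pthread_mutex_lock(&slot_lock);
        e = cache_lookup(key);                  /* recheck: we may have raced */
        if (!e)
                e = cache_create(key);          /* only one creator wins */
        pthread_mutex_unlock(&slot_lock);
        return e;
}

int main(void)
{
        return cache_get_or_create(42) ? 0 : 1;
}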
@@ -3189,6 +3189,22 @@ out:
 	in6_ifa_put(ifp);
 }
 
+/* ifp->idev must be at least read locked */
+static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
+{
+	struct inet6_ifaddr *ifpiter;
+	struct inet6_dev *idev = ifp->idev;
+
+	list_for_each_entry(ifpiter, &idev->addr_list, if_list) {
+		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
+		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
+				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
+		    IFA_F_PERMANENT)
+			return false;
+	}
+	return true;
+}
+
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 {
 	struct net_device *dev = ifp->idev->dev;
@@ -3208,14 +3224,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 	 */
 
 	read_lock_bh(&ifp->idev->lock);
-	spin_lock(&ifp->lock);
-	send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL &&
-		   ifp->idev->valid_ll_addr_cnt == 1;
+	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
 	send_rs = send_mld &&
 		  ipv6_accept_ra(ifp->idev) &&
 		  ifp->idev->cnf.rtr_solicits > 0 &&
 		  (dev->flags&IFF_LOOPBACK) == 0;
-	spin_unlock(&ifp->lock);
 	read_unlock_bh(&ifp->idev->lock);
 
 	/* While dad is in progress mld report's source address is in6_addrany.
@@ -4512,19 +4525,6 @@ errout:
 	rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
 }
 
-static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count)
-{
-	write_lock_bh(&ifp->idev->lock);
-	spin_lock(&ifp->lock);
-	if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|
-	     IFA_F_DADFAILED)) == IFA_F_PERMANENT) &&
-	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL))
-		ifp->idev->valid_ll_addr_cnt += count;
-	WARN_ON(ifp->idev->valid_ll_addr_cnt < 0);
-	spin_unlock(&ifp->lock);
-	write_unlock_bh(&ifp->idev->lock);
-}
-
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 {
 	struct net *net = dev_net(ifp->idev->dev);
@@ -4533,8 +4533,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 
 	switch (event) {
 	case RTM_NEWADDR:
-		update_valid_ll_addr_cnt(ifp, 1);
-
 		/*
 		 * If the address was optimistic
 		 * we inserted the route at the start of
@@ -4550,8 +4548,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 					      ifp->idev->dev, 0, 0);
 		break;
 	case RTM_DELADDR:
-		update_valid_ll_addr_cnt(ifp, -1);
-
 		if (ifp->idev->cnf.forwarding)
 			addrconf_leave_anycast(ifp);
 		addrconf_leave_solict(ifp->idev, &ifp->addr);
@@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 			    struct mr6_table **mrt)
 {
-	struct ip6mr_result res;
-	struct fib_lookup_arg arg = { .result = &res, };
 	int err;
+	struct ip6mr_result res;
+	struct fib_lookup_arg arg = {
+		.result = &res,
+		.flags = FIB_LOOKUP_NOREF,
+	};
 
 	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 			       flowi6_to_flowi(flp6), 0, &arg);
@@ -421,8 +421,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 				 struct rds_ib_refill_cache *cache)
 {
 	unsigned long flags;
-	struct list_head *old;
-	struct list_head __percpu *chpfirst;
+	struct list_head *old, *chpfirst;
 
 	local_irq_save(flags);
 
@@ -432,7 +431,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	else /* put on front */
 		list_add_tail(new_item, chpfirst);
 
-	__this_cpu_write(chpfirst, new_item);
+	__this_cpu_write(cache->percpu->first, new_item);
 	__this_cpu_inc(cache->percpu->count);
 
 	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
@@ -452,7 +451,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	} while (old);
 
 
-	__this_cpu_write(chpfirst, NULL);
+	__this_cpu_write(cache->percpu->first, NULL);
 	__this_cpu_write(cache->percpu->count, 0);
 end:
 	local_irq_restore(flags);
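The RDS hunks fix a misuse of the this_cpu helpers: __this_cpu_write() must name the per-cpu variable itself (here cache->percpu->first), not a plain local that was previously read from it; otherwise the store updates the local copy rather than the per-cpu slot. A rough userspace analogy using thread-local storage (illustrative only; the semantics are deliberately simplified):

#include <stdio.h>

static _Thread_local int first;         /* stand-in for a per-cpu field */

/* like __this_cpu_write(): takes the lvalue to store into */
#define THIS_CPU_WRITE(pcp, val)        ((pcp) = (val))

int main(void)
{
        int snapshot = first;           /* like __this_cpu_read() */

        THIS_CPU_WRITE(snapshot, 42);   /* bug: only the copy changes */
        printf("first=%d snapshot=%d\n", first, snapshot);     /* first=0  */

        THIS_CPU_WRITE(first, 42);      /* fix: name the real slot */
        printf("first=%d\n", first);                            /* first=42 */
        return 0;
}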