Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix regression in SKB partial checksum handling, from Pravin B
   Shelar.

2) Fix VLAN inside of VXLAN handling in i40e driver, from Jesse
   Brandeburg.

3) Cure softlockups during accept() in SCTP, from Karl Heiss.

4) MSG_PEEK should return multiple SKBs worth of data in AF_UNIX, from
   Aaron Conole.

5) IPv6 erroneously ignores the output interface specifier in the lookup
   key for route lookups, fix from David Ahern.

6) In Marvell DSA driver, forward unknown frames to CPU port, from
   Andrew Lunn.

7) Missing flow flag initializations in some code paths, from David
   Ahern.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: Initialize flow flags in input path
  net: dsa: fix preparation of a port STP update
  testptp: Silence compiler warnings on ppc64
  net/mlx4: Handle return codes in mlx4_qp_attach_common
  dsa: mv88e6xxx: Enable forwarding for unknown to the CPU port
  skbuff: Fix skb checksum partial check.
  net: ipv6: Add RT6_LOOKUP_F_IFACE flag if oif is set
  net sysfs: Print link speed as signed integer
  bna: fix error handling
  af_unix: return data from multiple SKBs on recv() with MSG_PEEK flag
  af_unix: Convert the unix_sk macro to an inline function for type safety
  net: sctp: Don't use 64 kilobyte lookup table for four elements
  l2tp: protect tunnel->del_work by ref_count
  net/ibm/emac: bump version numbers for correct work with ethtool
  sctp: Prevent soft lockup when sctp_accept() is called during a timeout event
  sctp: Whitespace fix
  i40e/i40evf: check for stopped admin queue
  i40e: fix VLAN inside VXLAN
  r8169: fix handling rtl_readphy result
  net: hisilicon: fix handling platform_get_irq result
Committed by Linus Torvalds on 2015-10-01 21:55:35 -04:00 as commit 3deaa4f531.
22 changed files with 121 additions and 59 deletions.


@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define _GNU_SOURCE
#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
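
A note on the added define: on ppc64 the exported kernel headers default to typedef'ing __u64 as unsigned long, so testptp's printf("%llu", ...) calls draw -Wformat warnings there; __SANE_USERSPACE_TYPES__, defined before any include, switches the headers to the long-long scheme. A minimal standalone sketch of the effect (assuming a Linux toolchain):

#define __SANE_USERSPACE_TYPES__        /* must precede any kernel header */
#include <linux/types.h>
#include <stdio.h>

int main(void)
{
        __u64 ts = 1443743735ULL;

        /* with the define, __u64 is unsigned long long on every arch,
         * so %llu matches; without it, ppc64 would warn here */
        printf("%llu\n", ts);
        return 0;
}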


@@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
else
reg |= PORT_CONTROL_FRAME_MODE_DSA;
reg |= PORT_CONTROL_FORWARD_UNKNOWN |
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||


@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
}
/* Flush FLI data fifo. */
static u32
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
u32 i;
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
}
/* Read flash status. */
static u32
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
union bfa_flash_dev_status_reg dev_status;
u32 status;
int status;
u32 ret_status;
int i;
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar)
}
/* Start flash read operation. */
static u32
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
char *buf)
{
u32 status;
int status;
/* len must be multiple of 4 and not exceeding fifo size */
if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@ static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
u32 len)
{
u32 n, status;
u32 n;
int status;
u32 off, l, s, residue, fifo_sz;
residue = len;
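
All of the u32-to-int changes above serve one purpose: these helpers report failure as a negative value, which an unsigned return type silently turns into a huge positive number, so callers' error checks can never fire. The hip04 (irq) and r8169 (rg_saw_cnt) hunks below fix the same class of bug. A standalone illustration with hypothetical stand-in helpers:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static uint32_t status_unsigned(void) { return -EBUSY; }  /* wraps positive */
static int status_signed(void) { return -EBUSY; }         /* stays negative */

int main(void)
{
        uint32_t u = status_unsigned();
        int s = status_signed();

        /* compilers may even warn that the first test is always false */
        printf("%d %d\n", u < 0, s < 0);        /* prints: 0 1 */
        return 0;
}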


@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
struct net_device *ndev;
struct hip04_priv *priv;
struct resource *res;
unsigned int irq;
int irq;
int ret;
ndev = alloc_etherdev(sizeof(struct hip04_priv));


@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
u32 index;
};
#define EMAC_ETHTOOL_REGS_VER 0
#define EMAC4_ETHTOOL_REGS_VER 1
#define EMAC4SYNC_ETHTOOL_REGS_VER 2
#define EMAC_ETHTOOL_REGS_VER 3
#define EMAC4_ETHTOOL_REGS_VER 4
#define EMAC4SYNC_ETHTOOL_REGS_VER 5
#endif /* __IBM_NEWEMAC_CORE_H */


@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n");
ret_code = I40E_ERR_QUEUE_EMPTY;
goto clean_arq_element_err;
}
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
@@ -1007,6 +1014,8 @@ clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
if (i40e_is_nvm_update_op(&e->desc)) {
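
The added guard (mirrored in the i40evf hunk below) follows the usual lock-then-validate shape: once arq_mutex is held, the early error exit has to leave through a label that drops it. A simplified standalone sketch of that shape, with hypothetical names and pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <errno.h>

struct ring_queue {
        pthread_mutex_t lock;
        unsigned int count;
};

int clean_queue(struct ring_queue *q)
{
        int ret = 0;

        pthread_mutex_lock(&q->lock);
        if (q->count == 0) {
                ret = -ENODEV;          /* stand-in for I40E_ERR_QUEUE_EMPTY */
                goto out_unlock;        /* never return with the lock held */
        }
        /* ... normal ring processing would go here ... */
out_unlock:
        pthread_mutex_unlock(&q->lock);
        return ret;
}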


@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
/* this controls whether VLAN is stripped from inner headers */
rx_ctx.showiv = 0;
#ifdef I40E_FCOE
rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif


@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n");
ret_code = I40E_ERR_QUEUE_EMPTY;
goto clean_arq_element_err;
}
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
if (ntu == ntc) {
@@ -948,6 +955,8 @@ clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;


@@ -1184,10 +1184,11 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
new_steering_entry(dev, port, steer, index, qp->qpn);
err = new_steering_entry(dev, port, steer,
index, qp->qpn);
else
existing_steering_entry(dev, port, steer,
index, qp->qpn);
err = existing_steering_entry(dev, port, steer,
index, qp->qpn);
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)


@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
u16 rg_saw_cnt;
int rg_saw_cnt;
u32 data;
static const struct ephy_info e_info_8168h_1[] = {
{ 0x1e, 0x0800, 0x0001 },


@@ -2708,7 +2708,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
else if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_start_offset(skb) <= len)
skb_checksum_start_offset(skb) < 0)
skb->ip_summed = CHECKSUM_NONE;
}
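
The corrected condition is easiest to see with numbers. skb_checksum_start_offset() is measured from skb->data, which by the time this helper runs has already advanced past the pulled bytes (see the skb_pull_rcsum() change below), so:

/* worked example, hypothetical numbers:
 *   pulled length              = 14 (an Ethernet header)
 *   csum start before the pull = 20 bytes into the data
 *   offset seen here           = 20 - 14 = 6, still valid
 * old test: 6 <= 14 -> wrongly downgraded to CHECKSUM_NONE
 * new test: 6 <  0  -> false; only a pull that consumes the
 *                      checksum start discards the offload state
 */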


@@ -63,7 +63,11 @@ struct unix_sock {
#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
};
#define unix_sk(__sk) ((struct unix_sock *)__sk)
static inline struct unix_sock *unix_sk(struct sock *sk)
{
return (struct unix_sock *)sk;
}
#define peer_wait peer_wq.wait
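
The gain is purely compile-time: the generated code is identical, but the macro's bare cast accepted any pointer type without complaint, while the function prototype lets the compiler check the argument. A standalone sketch with stub types and a hypothetical misuse:

struct sock { int dummy; };
struct unix_sock { struct sock sk; };

#define unix_sk_macro(__sk) ((struct unix_sock *)__sk)

static inline struct unix_sock *unix_sk_inline(struct sock *sk)
{
        return (struct unix_sock *)sk;
}

int main(void)
{
        int not_a_sock = 0;

        struct unix_sock *a = unix_sk_macro(&not_a_sock);  /* compiles silently */
        struct unix_sock *b = unix_sk_inline(&not_a_sock); /* incompatible-pointer diagnostic */

        (void)a; (void)b;
        return 0;
}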


@@ -31,7 +31,6 @@
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
@@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev,
if (netif_running(netdev)) {
struct ethtool_cmd cmd;
if (!__ethtool_get_settings(netdev, &cmd))
ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
}
rtnl_unlock();
return ret;
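
The dropped %u format matters because __ethtool_get_settings() can report SPEED_UNKNOWN, which is defined as -1; pushed through %u it renders as 4294967295 in sysfs. Standalone illustration:

#include <stdio.h>

int main(void)
{
        int speed = -1;         /* SPEED_UNKNOWN */

        printf("%u\n", speed);  /* old format: prints 4294967295 */
        printf("%d\n", speed);  /* new format: prints -1 */
        return 0;
}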


@@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags);
*/
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
unsigned char *data = skb->data;
BUG_ON(len > skb->len);
skb->len -= len;
BUG_ON(skb->len < skb->data_len);
skb_postpull_rcsum(skb, skb->data, len);
return skb->data += len;
__skb_pull(skb, len);
skb_postpull_rcsum(skb, data, len);
return skb->data;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
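
The reorder exists for the benefit of the skb_postpull_rcsum() change above: the new CHECKSUM_PARTIAL test reads skb_checksum_start_offset(), which is relative to skb->data, so the fixup has to run after __skb_pull(); the old data pointer is saved first so the CHECKSUM_COMPLETE branch still folds out exactly the bytes that were removed. The generic shape, sketched with hypothetical types (fixup_csum is declaration-only here):

struct buf {
        unsigned char *data;
        unsigned int len;
};

void fixup_csum(struct buf *b, const unsigned char *pulled, unsigned int len);

unsigned char *pull(struct buf *b, unsigned int len)
{
        unsigned char *old = b->data;   /* save before mutating */

        b->data += len;                 /* mutate first */
        b->len -= len;
        fixup_csum(b, old, len);        /* fixup sees the post-pull state */
        return b->data;
}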


@@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
static int dsa_slave_port_attr_set(struct net_device *dev,
struct switchdev_attr *attr)
{
int ret = 0;
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
int ret;
switch (attr->id) {
case SWITCHDEV_ATTR_PORT_STP_STATE:
if (attr->trans == SWITCHDEV_TRANS_COMMIT)
ret = dsa_slave_stp_update(dev, attr->u.stp_state);
if (attr->trans == SWITCHDEV_TRANS_PREPARE)
ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
else
ret = ds->drv->port_stp_update(ds, p->port,
attr->u.stp_state);
break;
default:
ret = -EOPNOTSUPP;
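
This restores the switchdev two-phase contract: during SWITCHDEV_TRANS_PREPARE the driver only reports whether the operation is possible at all (so the framework can abort the transaction up front), and the real work happens on commit. A simplified standalone sketch of that dispatch, with hypothetical names:

#include <errno.h>

enum trans_phase { TRANS_PREPARE, TRANS_COMMIT };

struct switch_ops {
        int (*port_stp_update)(int port, int state);
};

static int port_attr_set(const struct switch_ops *ops,
                         enum trans_phase phase, int port, int state)
{
        if (phase == TRANS_PREPARE)
                /* validate only: is the op supported? */
                return ops->port_stp_update ? 0 : -EOPNOTSUPP;
        /* commit: actually apply the new state */
        return ops->port_stp_update(port, state);
}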


@@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_flags = 0;
no_addr = idev->ifa_list == NULL;


@@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.flowi4_mark = skb->mark;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = 0;
fl4.daddr = daddr;
fl4.saddr = saddr;
err = fib_lookup(net, &fl4, &res, 0);
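
This hunk and the __fib_validate_source() one above plug the same hole: fl4 lives on the stack and is filled in field by field, so a member that is skipped (here flowi4_flags) keeps whatever the stack held, and stray flag bits can steer the FIB lookup. Minimal standalone illustration of the bug class, with a hypothetical key struct:

struct flow_key {
        unsigned int oif;
        unsigned char tos;
        unsigned int flags;
};

unsigned int lookup(void)
{
        struct flow_key fl;     /* on-stack, not zeroed */

        fl.oif = 1;
        fl.tos = 0;
        /* without the next line, fl.flags is indeterminate and any
         * stray bit silently changes lookup behaviour */
        fl.flags = 0;
        return fl.flags;
}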


@@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
fl6->flowi6_iif = LOOPBACK_IFINDEX;
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
fl6->flowi6_oif)
flags |= RT6_LOOKUP_F_IFACE;
if (!ipv6_addr_any(&fl6->saddr))


@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
tunnel = container_of(work, struct l2tp_tunnel, del_work);
sk = l2tp_tunnel_sock_lookup(tunnel);
if (!sk)
return;
goto out;
sock = sk->sk_socket;
@@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
}
l2tp_tunnel_sock_put(sk);
out:
l2tp_tunnel_dec_refcount(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
@@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
*/
int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
l2tp_tunnel_inc_refcount(tunnel);
l2tp_tunnel_closeall(tunnel);
return (false == queue_work(l2tp_wq, &tunnel->del_work));
if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
l2tp_tunnel_dec_refcount(tunnel);
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
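
The pattern is take-a-reference-before-queueing: l2tp_tunnel_delete() now pins the tunnel for the worker, the worker's new out: label drops that reference on every path, and the failure branch (work already queued) undoes the grab immediately, so the tunnel can no longer be freed while del_work is pending. A condensed standalone sketch with C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct tunnel {
        atomic_int refcount;
};

static void tunnel_put(struct tunnel *t)
{
        if (atomic_fetch_sub(&t->refcount, 1) == 1)
                free(t);                        /* last reference gone */
}

/* queue() stands in for queue_work(); false means already queued */
static bool schedule_delete(struct tunnel *t, bool (*queue)(struct tunnel *))
{
        atomic_fetch_add(&t->refcount, 1);      /* pin for the worker */
        if (!queue(t)) {
                tunnel_put(t);                  /* queueing failed: unpin */
                return false;
        }
        return true;    /* the worker itself ends with tunnel_put(t) */
}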


@@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc,
* within this document.
*
* Our basic strategy is to round-robin transports in priorities
* according to sctp_state_prio_map[] e.g., if no such
* according to sctp_trans_score() e.g., if no such
* transport with state SCTP_ACTIVE exists, round-robin through
* SCTP_UNKNOWN, etc. You get the picture.
*/
static const u8 sctp_trans_state_to_prio_map[] = {
[SCTP_ACTIVE] = 3, /* best case */
[SCTP_UNKNOWN] = 2,
[SCTP_PF] = 1,
[SCTP_INACTIVE] = 0, /* worst case */
};
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
return sctp_trans_state_to_prio_map[trans->state];
switch (trans->state) {
case SCTP_ACTIVE:
return 3; /* best case */
case SCTP_UNKNOWN:
return 2;
case SCTP_PF:
return 1;
default: /* case SCTP_INACTIVE */
return 0; /* worst case */
}
}
static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
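
Why the old array weighed 64 kilobytes: a designated initializer sizes the array to the largest index plus one, and SCTP_UNKNOWN is 0xffff in the SCTP headers, so four one-byte entries became a 65536-byte table. Standalone reproduction with stand-in enum values:

#include <stdio.h>

enum state { ST_INACTIVE = 0, ST_PF = 1, ST_ACTIVE = 2, ST_UNKNOWN = 0xffff };

static const unsigned char prio_map[] = {
        [ST_ACTIVE]   = 3,
        [ST_UNKNOWN]  = 2,      /* index 0xffff sizes the whole array */
        [ST_PF]       = 1,
        [ST_INACTIVE] = 0,
};

int main(void)
{
        printf("%zu bytes for four entries\n", sizeof(prio_map));  /* 65536 */
        return 0;
}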


@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
int error;
struct sctp_transport *transport = (struct sctp_transport *) peer;
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
struct sock *sk = asoc->base.sk;
struct net *net = sock_net(sk);
/* Check whether a task is in the sock. */
bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy\n", __func__);
/* Try again later. */
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
transport, GFP_ATOMIC);
if (error)
asoc->base.sk->sk_err = -error;
sk->sk_err = -error;
out_unlock:
bh_unlock_sock(asoc->base.sk);
bh_unlock_sock(sk);
sctp_transport_put(transport);
}
@@ -285,11 +286,12 @@ out_unlock:
static void sctp_generate_timeout_event(struct sctp_association *asoc,
sctp_event_timeout_t timeout_type)
{
struct net *net = sock_net(asoc->base.sk);
struct sock *sk = asoc->base.sk;
struct net *net = sock_net(sk);
int error = 0;
bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy: timer %d\n", __func__,
timeout_type);
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
(void *)timeout_type, GFP_ATOMIC);
if (error)
asoc->base.sk->sk_err = -error;
sk->sk_err = -error;
out_unlock:
bh_unlock_sock(asoc->base.sk);
bh_unlock_sock(sk);
sctp_association_put(asoc);
}
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
int error = 0;
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
struct sock *sk = asoc->base.sk;
struct net *net = sock_net(sk);
bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy\n", __func__);
/* Try again later. */
@@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
asoc->state, asoc->ep, asoc,
transport, GFP_ATOMIC);
if (error)
asoc->base.sk->sk_err = -error;
if (error)
sk->sk_err = -error;
out_unlock:
bh_unlock_sock(asoc->base.sk);
bh_unlock_sock(sk);
sctp_transport_put(transport);
}
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
{
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
struct sock *sk = asoc->base.sk;
struct net *net = sock_net(sk);
bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy\n", __func__);
/* Try again later. */
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
out_unlock:
bh_unlock_sock(asoc->base.sk);
bh_unlock_sock(sk);
sctp_association_put(asoc);
}
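
The repeated asoc->base.sk to sk conversions are not just cleanup: sctp_accept() can migrate an association to a new socket while a timer fires, so evaluating asoc->base.sk separately for bh_lock_sock() and bh_unlock_sock() can lock one socket and unlock another, leaving the first one locked forever (the soft lockup in the subject line). Reading the pointer once into a local guarantees the pair matches. The hazard in miniature, with stub declarations and hypothetical names:

struct sock;
void lock_sock_bh(struct sock *sk);
void unlock_sock_bh(struct sock *sk);

struct assoc {
        struct sock *sk;        /* may be swapped by a concurrent accept() */
};

void timer_buggy(struct assoc *a)
{
        lock_sock_bh(a->sk);    /* reads the old socket */
        /* ... a->sk may be replaced here ... */
        unlock_sock_bh(a->sk);  /* may read the new one: mismatched pair */
}

void timer_fixed(struct assoc *a)
{
        struct sock *sk = a->sk;        /* read once */

        lock_sock_bh(sk);
        /* ... */
        unlock_sock_bh(sk);             /* guaranteed to match the lock */
}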


@@ -2179,8 +2179,21 @@ unlock:
if (UNIXCB(skb).fp)
scm.fp = scm_fp_dup(UNIXCB(skb).fp);
sk_peek_offset_fwd(sk, chunk);
if (skip) {
sk_peek_offset_fwd(sk, chunk);
skip -= chunk;
}
if (UNIXCB(skb).fp)
break;
last = skb;
last_len = skb->len;
unix_state_lock(sk);
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (skb)
goto again;
unix_state_unlock(sk);
break;
}
} while (size);
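
From userspace the change means a single MSG_PEEK recv() on a SOCK_STREAM AF_UNIX socket can now return data spanning several queued skbs instead of stopping at the first one (it still stops early at an SCM_RIGHTS boundary, which is the UNIXCB(skb).fp check above). A standalone sketch of the visible behaviour, with hypothetical write sizes:

#include <sys/socket.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        int fds[2];
        char buf[64];
        ssize_t n;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
                return 1;

        /* two writes typically land as two separate skbs */
        write(fds[0], "hello ", 6);
        write(fds[0], "world", 5);

        n = recv(fds[1], buf, sizeof(buf), MSG_PEEK);
        /* fixed kernels may return all 11 bytes here; pre-fix kernels
         * stopped at the first skb and returned 6 */
        printf("peeked %zd bytes\n", n);
        return 0;
}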