
Merge tag 'net-6.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Quite unusually, this does not contains any fix coming from subtrees
  (nf, ebpf, wifi, etc).

  Current release - regressions:

   - bcmasp: fix possible OOB write in bcmasp_netfilt_get_all_active()

  Previous releases - regressions:

   - ipv4: fix one memleak in __inet_del_ifa()

   - tcp: fix bind() regressions for v4-mapped-v6 addresses

   - tls: do not free tls_rec on async operation in
     bpf_exec_tx_verdict()

   - dsa: fixes for SJA1105 FDB regressions

   - veth: update XDP feature set when bringing up device

   - igb: fix hangup when enabling SR-IOV

  Previous releases - always broken:

   - kcm: fix memory leak in error path of kcm_sendmsg()

   - smc: fix data corruption in smcr_port_add

   - microchip: fix possible memory leak for vcap_dup_rule()"

* tag 'net-6.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (37 commits)
  kcm: Fix error handling for SOCK_DGRAM in kcm_sendmsg().
  net: renesas: rswitch: Add spin lock protection for irq {un}mask
  net: renesas: rswitch: Fix unmasking irq condition
  igb: clean up in all error paths when enabling SR-IOV
  ixgbe: fix timestamp configuration code
  selftest: tcp: Add v4-mapped-v6 cases in bind_wildcard.c.
  selftest: tcp: Move expected_errno into each test case in bind_wildcard.c.
  selftest: tcp: Fix address length in bind_wildcard.c.
  tcp: Fix bind() regression for v4-mapped-v6 non-wildcard address.
  tcp: Fix bind() regression for v4-mapped-v6 wildcard address.
  tcp: Factorise sk_family-independent comparison in inet_bind2_bucket_match(_addr_any).
  ipv6: fix ip6_sock_set_addr_preferences() typo
  veth: Update XDP feature set when bringing up device
  net: macb: fix sleep inside spinlock
  net/tls: do not free tls_rec on async operation in bpf_exec_tx_verdict()
  net: ethernet: mtk_eth_soc: fix pse_port configuration for MT7988
  net: ethernet: mtk_eth_soc: fix uninitialized variable
  kcm: Fix memory leak in error path of kcm_sendmsg()
  r8152: check budget for r8152_poll()
  net: dsa: sja1105: block FDB accesses that are concurrent with a switch reset
  ...
Merged by Linus Torvalds on 2023-09-14 10:03:34 -07:00 (commit 9fdfb15a3d)
36 changed files with 359 additions and 214 deletions

@@ -266,6 +266,8 @@ struct sja1105_private {
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
/* Serializes accesses to the FDB */
struct mutex fdb_lock;
/* PTP two-step TX timestamp ID, and its serialization lock */
spinlock_t ts_id_lock;
u8 ts_id;

@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
struct sja1105_dyn_cmd *cmd,
const struct sja1105_dynamic_table_ops *ops)
const struct sja1105_dynamic_table_ops *ops,
void *entry, bool check_valident,
bool check_errors)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
struct sja1105_dyn_cmd cmd = {};
int rc;
/* We don't _need_ to read the full entry, just the command area which
* is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
* buffer that contains the full entry too. Additionally, our API
* doesn't really know how many bytes into the buffer does the command
* area really begin. So just read back the whole entry.
*/
/* Read back the whole entry + command structure. */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
memset(cmd, 0, sizeof(*cmd));
ops->cmd_packing(packed_buf, cmd, UNPACK);
ops->cmd_packing(packed_buf, &cmd, UNPACK);
/* Hardware hasn't cleared VALID => still working on it */
return cmd->valid ? -EAGAIN : 0;
if (cmd.valid)
return -EAGAIN;
if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ENOENT;
if (check_errors && cmd.errors)
return -EINVAL;
/* Don't dereference possibly NULL pointer - maybe caller
* only wanted to see whether the entry existed or not.
*/
if (entry)
ops->entry_packing(packed_buf, entry, UNPACK);
return 0;
}
/* Poll the dynamic config entry's control area until the hardware has
@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
struct sja1105_dyn_cmd *cmd,
const struct sja1105_dynamic_table_ops *ops)
const struct sja1105_dynamic_table_ops *ops,
void *entry, bool check_valident,
bool check_errors)
{
int rc;
int err, rc;
return read_poll_timeout(sja1105_dynamic_config_poll_valid,
rc, rc != -EAGAIN,
SJA1105_DYNAMIC_CONFIG_SLEEP_US,
SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
false, priv, cmd, ops);
err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
rc, rc != -EAGAIN,
SJA1105_DYNAMIC_CONFIG_SLEEP_US,
SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
false, priv, ops, entry, check_valident,
check_errors);
return err < 0 ? err : rc;
}
/* Provides read access to the settings through the dynamic interface
@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
if (rc < 0) {
mutex_unlock(&priv->dynamic_config_lock);
return rc;
}
rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
return rc;
goto out;
if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ENOENT;
rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
out:
mutex_unlock(&priv->dynamic_config_lock);
/* Don't dereference possibly NULL pointer - maybe caller
* only wanted to see whether the entry existed or not.
*/
if (entry)
ops->entry_packing(packed_buf, entry, UNPACK);
return 0;
return rc;
}
int sja1105_dynamic_config_write(struct sja1105_private *priv,
@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
if (rc < 0) {
mutex_unlock(&priv->dynamic_config_lock);
return rc;
}
rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
return rc;
goto out;
cmd = (struct sja1105_dyn_cmd) {0};
ops->cmd_packing(packed_buf, &cmd, UNPACK);
if (cmd.errors)
return -EINVAL;
rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
out:
mutex_unlock(&priv->dynamic_config_lock);
return 0;
return rc;
}
static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
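
The rework above hinges on read_poll_timeout() from <linux/iopoll.h>: the poll callback returns -EAGAIN while the hardware is still busy and a final status once it is done, so "polling timed out" and "command failed" travel on separate return paths. A minimal sketch of that idiom, with a hypothetical foo_dev/foo_poll_once standing in for the SJA1105 specifics and illustrative 10 us / 100 ms poll constants:

#include <linux/iopoll.h>
#include <linux/errno.h>

struct foo_dev;		/* hypothetical device handle */

/* Returns -EAGAIN while the command is still in flight, 0 on
 * success, or a negative error decoded from the status bits.
 */
static int foo_poll_once(struct foo_dev *dev)
{
	/* ... read and decode hardware status here ... */
	return 0;
}

static int foo_wait_complete(struct foo_dev *dev)
{
	int err, rc;

	/* Calls foo_poll_once(dev) into rc every 10 us until it stops
	 * returning -EAGAIN; err becomes -ETIMEDOUT if 100 ms pass first.
	 */
	err = read_poll_timeout(foo_poll_once, rc, rc != -EAGAIN,
				10, 100000, false, dev);

	return err < 0 ? err : rc;
}

Returning rc rather than err on the success path is what lets -ENOENT and -EINVAL from the VALIDENT/ERRORS checks reach the caller unchanged.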

@@ -1798,6 +1798,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
int rc;
if (!vid) {
switch (db.type) {
@@ -1812,12 +1813,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
}
}
return priv->info->fdb_add_cmd(ds, port, addr, vid);
mutex_lock(&priv->fdb_lock);
rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
@@ -1837,6 +1842,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
int rc;
mutex_lock(&priv->fdb_lock);
rc = __sja1105_fdb_del(ds, port, addr, vid, db);
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -1868,13 +1887,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
if (!(l2_lookup.destports & BIT(port)))
continue;
/* We need to hide the FDB entry for unknown multicast */
if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
* only wants to see unicast
*/
if (is_multicast_ether_addr(macaddr))
continue;
/* We need to hide the dsa_8021q VLANs from the user. */
if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
@@ -1898,6 +1918,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
};
int i;
mutex_lock(&priv->fdb_lock);
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
@@ -1911,7 +1933,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
if (rc) {
dev_err(ds->dev, "Failed to read FDB: %pe\n",
ERR_PTR(rc));
return;
break;
}
if (!(l2_lookup.destports & BIT(port)))
@@ -1923,14 +1945,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
return;
break;
}
}
mutex_unlock(&priv->fdb_lock);
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
@@ -2273,6 +2297,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
int rc, i;
s64 now;
mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -2385,6 +2410,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
goto out;
out:
mutex_unlock(&priv->mgmt_lock);
mutex_unlock(&priv->fdb_lock);
return rc;
}
@@ -2954,7 +2980,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
int match;
int match, rc;
mutex_lock(&priv->fdb_lock);
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
@@ -2967,7 +2995,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
if (match == table->entry_count) {
NL_SET_ERR_MSG_MOD(extack,
"Could not find FDB entry for unknown multicast");
return -ENOSPC;
rc = -ENOSPC;
goto out;
}
if (flags.val & BR_MCAST_FLOOD)
@@ -2975,10 +3004,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
else
l2_lookup[match].destports &= ~BIT(to);
return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup[match].index,
&l2_lookup[match],
true);
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup[match].index,
&l2_lookup[match], true);
out:
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
@@ -3348,6 +3380,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
mutex_init(&priv->fdb_lock);
spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);
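
The pattern in this file is a classic lock-wrapper split: the dsa_switch_ops entry points take priv->fdb_lock themselves, while double-underscore variants assume the caller already holds it, so paths like sja1105_fast_age() and sja1105_static_config_reload() can cover a whole read-modify-delete sequence with one critical section. A minimal sketch of the idiom (bar_* names are hypothetical):

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct bar_table {
	struct mutex lock;	/* serializes all table accesses */
};

/* Caller must hold t->lock. */
static int __bar_del(struct bar_table *t, int id)
{
	lockdep_assert_held(&t->lock);
	/* ... hardware access that must not race a table reset ... */
	return 0;
}

static int bar_del(struct bar_table *t, int id)
{
	int rc;

	mutex_lock(&t->lock);
	rc = __bar_del(t, id);
	mutex_unlock(&t->lock);
	return rc;
}

static void bar_flush(struct bar_table *t)
{
	int id;

	mutex_lock(&t->lock);	/* one lock for the whole walk */
	for (id = 0; id < 16; id++)
		if (__bar_del(t, id))	/* locked bar_del() would deadlock */
			break;
	mutex_unlock(&t->lock);
}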

@@ -1385,7 +1385,7 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
return -ENOMEM;
other_port = priv->ports[!port_priv->nr];
port_rules = adin1110_port_rules(port_priv, false, true);
port_rules = adin1110_port_rules(other_port, false, true);
eth_broadcast_addr(mask);
return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,

@@ -528,13 +528,16 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
ASP_RX_FILTER_BLK_CTRL);
}
void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt)
int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt)
{
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
if (j == *rule_cnt)
return -EMSGSIZE;
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -548,6 +551,8 @@ void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
}
*rule_cnt = j;
return 0;
}
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
@@ -1300,6 +1305,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
of_node_put(intf_node);
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
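
The signature change from void to int is what makes the OOB fix possible: rule_locs is sized by the *rule_cnt the user passed in, so the fill loop must stop and report -EMSGSIZE instead of writing past the end. A sketch of the bounds-checked pattern (baz_* names are hypothetical), which the mvpp2 and mtk hunks further down repeat:

#include <linux/errno.h>
#include <linux/types.h>

bool baz_rule_active(int i);	/* hypothetical predicate */

static int baz_get_all_active(u32 *rule_locs, u32 *rule_cnt, int nrules)
{
	u32 j = 0;
	int i;

	for (i = 0; i < nrules; i++) {
		if (j == *rule_cnt)	/* user buffer full: stop early */
			return -EMSGSIZE;
		if (!baz_rule_active(i))
			continue;
		rule_locs[j++] = i;
	}

	*rule_cnt = j;	/* report how many entries were written */
	return 0;
}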

@@ -577,8 +577,8 @@ void bcmasp_netfilt_release(struct bcmasp_intf *intf,
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt);
int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt);
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);

@@ -335,7 +335,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
err = bcmasp_flow_get(intf, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
cmd->data = NUM_NET_FILTERS;
break;
default:

@@ -756,8 +756,6 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
macb_set_tx_clk(bp, speed);
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
@@ -777,6 +775,9 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
macb_set_tx_clk(bp, speed);
/* Enable Rx and Tx; Enable PTP unicast */
ctrl = macb_readl(bp, NCR);
if (gem_has_ptp(bp))
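
The macb hunk moves macb_set_tx_clk() from inside the bp->lock critical section to after it, because setting a clock rate may sleep while spin_lock_irqsave() forbids sleeping. The general shape of the fix, with hypothetical waldo_* names:

#include <linux/spinlock.h>

struct waldo_priv {
	spinlock_t lock;
};

void waldo_set_tx_clk(struct waldo_priv *bp, int speed);	/* may sleep */

static void waldo_link_up(struct waldo_priv *bp, int speed)
{
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	/* ... register writes only: nothing that can sleep ... */
	spin_unlock_irqrestore(&bp->lock, flags);

	/* Sleeping work (e.g. clk_set_rate()) runs outside the lock. */
	waldo_set_tx_clk(bp, speed);
}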

@@ -3827,8 +3827,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
}
/* only call pci_enable_sriov() if no VFs are allocated already */
if (!old_vfs)
if (!old_vfs) {
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
if (err)
goto err_out;
}
goto out;

@@ -979,6 +979,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
u32 aflags = adapter->flags;
bool is_l2 = false;
u32 regval;
@@ -996,20 +997,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1023,8 +1024,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -1035,7 +1036,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
if (hw->mac.type >= ixgbe_mac_X550) {
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
config->rx_filter = HWTSTAMP_FILTER_ALL;
adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
break;
}
fallthrough;
@@ -1046,8 +1047,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -1079,8 +1078,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_TSYNCRXCTL_TYPE_ALL |
IXGBE_TSYNCRXCTL_TSIP_UT_EN;
config->rx_filter = HWTSTAMP_FILTER_ALL;
adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
is_l2 = true;
break;
default:
@@ -1113,6 +1112,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_WRITE_FLUSH(hw);
/* configure adapter flags only when HW is actually configured */
adapter->flags = aflags;
/* clear TX/RX time stamp registers, just to be sure */
ixgbe_ptp_clear_tx_timestamp(adapter);
IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
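
The ixgbe change is a transactional update: every flag edit goes to a local aflags copy, and adapter->flags is overwritten only after the filter has been validated and the hardware actually configured, so an -ERANGE exit can no longer leave stale RX-timestamp flags behind. The shape of the pattern, with hypothetical qux_* names and two illustrative flag bits:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define QUX_FLAG_RX_TS_ENABLED	BIT(0)	/* illustrative flag bits */
#define QUX_FLAG_RX_TS_IN_REG	BIT(1)

struct qux_adapter {
	u32 flags;
};

static int qux_set_ts_mode(struct qux_adapter *adapter, int filter)
{
	u32 aflags = adapter->flags;	/* scratch copy */

	switch (filter) {
	case 0:
		aflags &= ~(QUX_FLAG_RX_TS_ENABLED | QUX_FLAG_RX_TS_IN_REG);
		break;
	case 1:
		aflags |= QUX_FLAG_RX_TS_ENABLED;
		break;
	default:
		return -ERANGE;	/* adapter->flags left untouched */
	}

	/* ... program the hardware here ... */

	adapter->flags = aflags;	/* commit only on success */
	return 0;
}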

@@ -5586,6 +5586,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
if (loc == info->rule_cnt) {
ret = -EMSGSIZE;
break;
}
if (port->rfs_rules[i])
rules[loc++] = i;
}

@@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
}
#define NPA_MAX_BURST 16
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
int num_ptrs = 1;
dma_addr_t bufptr;
int num_ptrs = 1;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
@@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
num_ptrs = 1;
}
}
return cnt - cq->pool_ptrs;
}
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)

@@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
return weight;
}
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);

@@ -574,20 +574,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
struct refill_work *work;
struct delayed_work *dwork;
work = &pfvf->refill_wrk[cq->cq_idx];
dwork = &work->pool_refill_work;
/* Schedule a task if no other task is running */
if (!cq->refill_task_sched) {
cq->refill_task_sched = true;
schedule_delayed_work(dwork,
msecs_to_jiffies(100));
}
if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
return -ENOMEM;
}
return 0;
}
@@ -1082,39 +1070,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
static void otx2_pool_refill_task(struct work_struct *work)
{
struct otx2_cq_queue *cq;
struct otx2_pool *rbpool;
struct refill_work *wrk;
int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
dma_addr_t bufptr;
int qidx;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
qidx = wrk - pfvf->refill_wrk;
cq = &pfvf->qset.cq[qidx];
rbpool = cq->rbpool;
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fails to free atleast half of the
* pointers else enable napi for this RQ.
*/
if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
struct delayed_work *dwork;
dwork = &wrk->pool_refill_work;
schedule_delayed_work(dwork,
msecs_to_jiffies(100));
} else {
cq->refill_task_sched = false;
}
return;
}
pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
cq->refill_task_sched = false;
local_bh_disable();
napi_schedule(wrk->napi);
local_bh_enable();
}
int otx2_config_nix_queues(struct otx2_nic *pfvf)

@@ -302,6 +302,7 @@ struct flr_work {
struct refill_work {
struct delayed_work pool_refill_work;
struct otx2_nic *pf;
struct napi_struct *napi;
};
/* PTPv2 originTimestamp structure */
@@ -370,7 +371,7 @@ struct dev_hw_ops {
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};

@@ -1943,6 +1943,10 @@ int otx2_stop(struct net_device *netdev)
netif_tx_disable(netdev);
for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
devm_kfree(pf->dev, pf->refill_wrk);
otx2_free_hw_resources(pf);
otx2_free_cints(pf, pf->hw.cint_cnt);
otx2_disable_napi(pf);
@@ -1950,9 +1954,6 @@ int otx2_stop(struct net_device *netdev)
for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
devm_kfree(pf->dev, pf->refill_wrk);
kfree(qset->sq);
kfree(qset->cq);

@@ -424,9 +424,10 @@ process_cqe:
return processed_cqe;
}
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
dma_addr_t bufptr;
while (cq->pool_ptrs) {
@@ -435,6 +436,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
return cnt - cq->pool_ptrs;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
@@ -521,6 +524,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
cq_poll = container_of(napi, struct otx2_cq_poll, napi);
pfvf = (struct otx2_nic *)cq_poll->dev;
@@ -541,7 +545,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
if (rx_cq && rx_cq->pool_ptrs)
pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -561,9 +565,25 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
otx2_config_irq_coalescing(pfvf, i);
}
/* Re-enable interrupts */
otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
BIT_ULL(0));
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
work = &pfvf->refill_wrk[cq->cq_idx];
dwork = &work->pool_refill_work;
/* Schedule a task if no other task is running */
if (!cq->refill_task_sched) {
work->napi = napi;
cq->refill_task_sched = true;
schedule_delayed_work(dwork,
msecs_to_jiffies(100));
}
} else {
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
BIT_ULL(0));
}
}
return workdone;
}
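
Taken together, the otx2 hunks invert the old flow: the refill now happens on the NAPI poll path, refill_pool_ptrs() reports how many buffers it managed to add, and only a fully failed refill falls back to delayed work, whose handler refills and then calls napi_schedule() through the stashed work->napi instead of re-enabling interrupts itself. A condensed sketch of the poll-side decision (quux_* names are hypothetical):

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct quux_queue {
	struct napi_struct napi;
	struct delayed_work refill_work;
	bool refill_sched;
};

int quux_refill(struct quux_queue *q);		/* buffers added, 0 if none */
void quux_irq_enable(struct quux_queue *q);

static void quux_poll_tail(struct quux_queue *q, int work_done, int budget)
{
	int filled = quux_refill(q);

	if (work_done >= budget)
		return;			/* stay in polling mode */

	if (!napi_complete_done(&q->napi, work_done))
		return;

	if (unlikely(!filled)) {
		/* Pool exhausted: retry from process context; the work
		 * handler refills and then reschedules NAPI itself.
		 */
		if (!q->refill_sched) {
			q->refill_sched = true;
			schedule_delayed_work(&q->refill_work,
					      msecs_to_jiffies(100));
		}
	} else {
		quux_irq_enable(q);	/* buffers exist, irqs are safe */
	}
}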

@@ -170,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */

@@ -2005,11 +2005,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
dma_addr_t dma_addr = DMA_MAPPING_ERROR;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -2186,7 +2186,8 @@ release_desc:
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
likely(dma_addr != DMA_MAPPING_ERROR))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
ring->calc_idx = idx;
@@ -2994,6 +2995,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
if (cnt == cmd->rule_cnt)
return -EMSGSIZE;
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;

@@ -214,9 +214,11 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dev == eth->netdev[0])
pse_port = 1;
pse_port = PSE_GDM1_PORT;
else if (dev == eth->netdev[1])
pse_port = 2;
pse_port = PSE_GDM2_PORT;
else if (dev == eth->netdev[2])
pse_port = PSE_GDM3_PORT;
else
return -EOPNOTSUPP;

@@ -1021,18 +1021,32 @@ static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
if (!newckf)
return ERR_PTR(-ENOMEM);
goto err;
list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
}
list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
if (!newcaf)
return ERR_PTR(-ENOMEM);
goto err;
list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
}
return duprule;
err:
list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
list_del(&ckf->ctrl.list);
kfree(ckf);
}
list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
list_del(&caf->ctrl.list);
kfree(caf);
}
kfree(duprule);
return ERR_PTR(-ENOMEM);
}
static void vcap_apply_width(u8 *dst, int width, int bytes)
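
vcap_dup_rule() previously returned ERR_PTR(-ENOMEM) mid-copy and leaked everything duplicated so far; the fix unwinds with list_for_each_entry_safe(), which tolerates deleting the node the cursor stands on. The idiom in isolation, with a hypothetical struct item:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	int val;
};

/* Duplicate every entry of src onto dst; on allocation failure free
 * the partial copy so nothing leaks.
 */
static int dup_list(struct list_head *dst, struct list_head *src)
{
	struct item *it, *tmp, *copy;

	list_for_each_entry(it, src, list) {
		copy = kmemdup(it, sizeof(*copy), GFP_KERNEL);
		if (!copy)
			goto err;
		list_add_tail(&copy->list, dst); /* also fixes copy->list */
	}
	return 0;

err:
	/* _safe variant: the cursor survives list_del() of its node */
	list_for_each_entry_safe(it, tmp, dst, list) {
		list_del(&it->list);
		kfree(it);
	}
	return -ENOMEM;
}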

@@ -799,6 +799,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct rswitch_private *priv;
struct rswitch_device *rdev;
unsigned long flags;
int quota = budget;
rdev = netdev_priv(ndev);
@@ -816,10 +817,12 @@ retry:
netif_wake_subqueue(ndev, 0);
napi_complete(napi);
rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
if (napi_complete_done(napi, budget - quota)) {
spin_lock_irqsave(&priv->lock, flags);
rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
spin_unlock_irqrestore(&priv->lock, flags);
}
out:
return budget - quota;
@@ -835,8 +838,10 @@ static void rswitch_queue_interrupt(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
if (napi_schedule_prep(&rdev->napi)) {
spin_lock(&rdev->priv->lock);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
spin_unlock(&rdev->priv->lock);
__napi_schedule(&rdev->napi);
}
}
@@ -1440,14 +1445,17 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
static int rswitch_open(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
unsigned long flags;
phy_start(ndev->phydev);
napi_enable(&rdev->napi);
netif_start_queue(ndev);
spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
@@ -1461,6 +1469,7 @@ static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
unsigned long flags;
netif_tx_stop_all_queues(ndev);
bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
@@ -1476,8 +1485,10 @@ static int rswitch_stop(struct net_device *ndev)
kfree(ts_info);
}
spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
@@ -1887,6 +1898,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
attr = soc_device_match(rswitch_soc_no_speed_change);
if (attr)
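
All the rswitch hunks orbit one rule: the interrupt enable/disable registers touched by rswitch_enadis_data_irq() appear to be shared across queues, so every read-modify-write now sits under the new priv->lock, and re-enabling happens only when napi_complete_done() actually completed NAPI. The pairing in outline (grault_* names are hypothetical):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct grault_priv {
	spinlock_t lock;	/* guards the shared irq-mask registers */
};

void grault_irq_enadis(struct grault_priv *priv, bool enable);

static void grault_isr_schedule(struct grault_priv *priv,
				struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		spin_lock(&priv->lock);		/* hardirq: irqs already off */
		grault_irq_enadis(priv, false);	/* mask before polling */
		spin_unlock(&priv->lock);
		__napi_schedule(napi);
	}
}

static void grault_poll_done(struct grault_priv *priv,
			     struct napi_struct *napi, int done)
{
	unsigned long flags;

	if (napi_complete_done(napi, done)) {
		spin_lock_irqsave(&priv->lock, flags);
		grault_irq_enadis(priv, true);	/* unmask again */
		spin_unlock_irqrestore(&priv->lock, flags);
	}
}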

@@ -1011,6 +1011,8 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
spinlock_t lock; /* lock interrupt registers' control */
bool etha_no_runtime_change;
bool gwca_halt;
};

@@ -2704,9 +2704,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
HRTIMER_MODE_REL);
stmmac_tx_timer_arm(priv, queue);
flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
tx_q->txq_stats.tx_packets += tx_packets;
@@ -2995,9 +2993,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
u32 tx_coal_timer = priv->tx_coal_timer[queue];
if (!tx_coal_timer)
return;
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
STMMAC_COAL_TIMER(tx_coal_timer),
HRTIMER_MODE_REL);
}

@@ -2636,6 +2636,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
struct r8152 *tp = container_of(napi, struct r8152, napi);
int work_done;
if (!budget)
return 0;
work_done = rx_bottom(tp, budget);
if (work_done < budget) {
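
The two added lines implement the NAPI zero-budget contract: netpoll may call ->poll() with budget 0 and expects only TX completion work, so the handler must neither process RX descriptors nor complete NAPI on that path. In outline (corge_* names are hypothetical):

#include <linux/netdevice.h>

int corge_rx(struct napi_struct *napi, int budget);	/* RX loop */
void corge_irq_enable(struct napi_struct *napi);	/* unmask */

static int corge_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	if (!budget)	/* netpoll: no RX, no napi_complete_done() */
		return 0;

	work_done = corge_rx(napi, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		corge_irq_enable(napi);

	return work_done;
}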

@@ -1446,6 +1446,8 @@ static int veth_open(struct net_device *dev)
netif_carrier_on(peer);
}
veth_set_xdp_features(dev);
return 0;
}

@@ -784,6 +784,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
}
static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
@@ -1360,7 +1365,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
return 0;
}
static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
int ret;

@@ -594,6 +594,7 @@ static int fill_frame_info(struct hsr_frame_info *frame,
proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
/* FIXME: */
netdev_warn_once(skb->dev, "VLAN not yet supported");
return -EINVAL;
}
frame->is_from_san = false;

@@ -355,14 +355,14 @@ static void __inet_del_ifa(struct in_device *in_dev,
{
struct in_ifaddr *promote = NULL;
struct in_ifaddr *ifa, *ifa1;
struct in_ifaddr *last_prim;
struct in_ifaddr __rcu **last_prim;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
ASSERT_RTNL();
ifa1 = rtnl_dereference(*ifap);
last_prim = rtnl_dereference(in_dev->ifa_list);
last_prim = ifap;
if (in_dev->dead)
goto no_promotions;
@@ -376,7 +376,7 @@
while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
last_prim = ifa;
last_prim = &ifa->ifa_next;
if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask ||
@@ -440,9 +440,9 @@ no_promotions:
rcu_assign_pointer(prev_prom->ifa_next, next_sec);
last_sec = rtnl_dereference(last_prim->ifa_next);
last_sec = rtnl_dereference(*last_prim);
rcu_assign_pointer(promote->ifa_next, last_sec);
rcu_assign_pointer(last_prim->ifa_next, promote);
rcu_assign_pointer(*last_prim, promote);
}
promote->ifa_flags &= ~IFA_F_SECONDARY;
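
The leak fix swaps last_prim from a node pointer to a pointer-to-link (struct in_ifaddr __rcu **): remembering the address of the ifa_next field to write through means the promotion splice still lands in the right place even when the node that used to be "last primary" is the one being removed. The generic double-pointer idiom, in plain C:

struct node {
	struct node *next;
	int key;
};

/* Track the insertion point as the address of a 'next' field rather
 * than as a node: *pos remains a valid place to splice even while
 * surrounding nodes are unlinked.
 */
static void insert_sorted(struct node **head, struct node *n)
{
	struct node **pos = head;

	while (*pos && (*pos)->key < n->key)
		pos = &(*pos)->next;

	n->next = *pos;
	*pos = n;
}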

@@ -815,41 +815,45 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
const struct net *net, unsigned short port,
int l3mdev, const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb->family)
if (!net_eq(ib2_net(tb), net) || tb->port != port ||
tb->l3mdev != l3mdev)
return false;
if (sk->sk_family == AF_INET6)
return net_eq(ib2_net(tb), net) && tb->port == port &&
tb->l3mdev == l3mdev &&
ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
else
#endif
return net_eq(ib2_net(tb), net) && tb->port == port &&
tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
}
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
unsigned short port, int l3mdev, const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb->family) {
if (sk->sk_family == AF_INET)
return net_eq(ib2_net(tb), net) && tb->port == port &&
tb->l3mdev == l3mdev &&
ipv6_addr_any(&tb->v6_rcv_saddr);
return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
return false;
}
if (sk->sk_family == AF_INET6)
return net_eq(ib2_net(tb), net) && tb->port == port &&
tb->l3mdev == l3mdev &&
ipv6_addr_any(&tb->v6_rcv_saddr);
else
return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
#endif
return net_eq(ib2_net(tb), net) && tb->port == port &&
tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
return tb->rcv_saddr == sk->sk_rcv_saddr;
}
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
unsigned short port, int l3mdev, const struct sock *sk)
{
if (!net_eq(ib2_net(tb), net) || tb->port != port ||
tb->l3mdev != l3mdev)
return false;
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb->family) {
if (sk->sk_family == AF_INET)
return ipv6_addr_any(&tb->v6_rcv_saddr) ||
ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);
return false;
}
if (sk->sk_family == AF_INET6)
return ipv6_addr_any(&tb->v6_rcv_saddr);
#endif
return tb->rcv_saddr == 0;
}
/* The socket's bhash2 hashbucket spinlock must be held when this is called */

@@ -930,15 +930,18 @@ partial_message:
out_error:
kcm_push(kcm);
if (copied && sock->type == SOCK_SEQPACKET) {
if (sock->type == SOCK_SEQPACKET) {
/* Wrote some bytes before encountering an
* error, return partial success.
*/
goto partial_message;
}
if (head != kcm->seq_skb)
if (copied)
goto partial_message;
if (head != kcm->seq_skb)
kfree_skb(head);
} else {
kfree_skb(head);
kcm->seq_skb = NULL;
}
err = sk_stream_error(sk, msg->msg_flags, err);

@@ -1662,6 +1662,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
struct smc_link_group *lgr, *n;
spin_lock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
struct smc_link *link;
@@ -1680,6 +1681,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
if (link)
smc_llc_add_link_local(link);
}
spin_unlock_bh(&smc_lgr_list.lock);
}
/* link is down - switch connections to alternate link,

@@ -243,8 +243,9 @@ while (0)
#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
typeof(_ini) i = (_ini); \
bool is_v2 = (i->smcd_version & SMC_V2); \
bool is_smcd = (i->is_smcd); \
u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
bool is_v2 = (version & SMC_V2); \
typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
if (is_v2 && is_smcd) \
this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \

@@ -817,7 +817,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
psock = sk_psock_get(sk);
if (!psock || !policy) {
err = tls_push_record(sk, flags, record_type);
if (err && sk->sk_err == EBADMSG) {
if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
@@ -846,7 +846,7 @@ more_data:
switch (psock->eval) {
case __SK_PASS:
err = tls_push_record(sk, flags, record_type);
if (err && sk->sk_err == EBADMSG) {
if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
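
The tls fix adds one predicate: with an asynchronous crypto backend, tls_push_record() can return -EINPROGRESS while the record is still owned by the completion callback, so freeing it on that path was a use-after-free in waiting. Schematically (garply_* names are hypothetical):

#include <linux/errno.h>

struct garply_rec;

int garply_push(struct garply_rec *rec);	/* may complete async */
void garply_free_rec(struct garply_rec *rec);

static int garply_xmit(struct garply_rec *rec)
{
	int err = garply_push(rec);

	if (err && err != -EINPROGRESS) {
		garply_free_rec(rec);	/* synchronous failure: safe */
		return err;
	}

	/* -EINPROGRESS: the completion callback releases rec later */
	return 0;
}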

@@ -2,7 +2,7 @@
# SPDX-License-Identifier: GPL-2.0
NR_FILES=32768
SAVED_NR_FILES=$(ulimit -n)
readonly NETNS="ns-$(mktemp -u XXXXXX)"
# default values
port=443
@@ -36,21 +36,21 @@ while getopts "ha:p:64" opt; do
done
setup() {
ip netns add "${NETNS}"
ip -netns "${NETNS}" link add veth0 type veth peer name veth1
ip -netns "${NETNS}" link set lo up
ip -netns "${NETNS}" link set veth0 up
ip -netns "${NETNS}" link set veth1 up
if [[ "$use_v6" == true ]]; then
ip addr add $addr_v6 nodad dev eth0
ip -netns "${NETNS}" addr add $addr_v6 nodad dev veth0
else
ip addr add $addr_v4 dev lo
ip -netns "${NETNS}" addr add $addr_v4 dev lo
fi
ulimit -n $NR_FILES
}
cleanup() {
if [[ "$use_v6" == true ]]; then
ip addr del $addr_v6 dev eth0
else
ip addr del $addr_v4/32 dev lo
fi
ulimit -n $SAVED_NR_FILES
ip netns del "${NETNS}"
}
if [[ "$addr" != "" ]]; then
@@ -59,8 +59,10 @@ if [[ "$addr" != "" ]]; then
fi
setup
if [[ "$use_v6" == true ]] ; then
./bind_bhash $port "ipv6" $addr_v6
ip netns exec "${NETNS}" sh -c \
"ulimit -n ${NR_FILES};./bind_bhash ${port} ipv6 ${addr_v6}"
else
./bind_bhash $port "ipv4" $addr_v4
ip netns exec "${NETNS}" sh -c \
"ulimit -n ${NR_FILES};./bind_bhash ${port} ipv4 ${addr_v4}"
fi
cleanup

@@ -6,41 +6,91 @@
#include "../kselftest_harness.h"
struct in6_addr in6addr_v4mapped_any = {
.s6_addr = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 255, 255,
0, 0, 0, 0
}
};
struct in6_addr in6addr_v4mapped_loopback = {
.s6_addr = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 255, 255,
127, 0, 0, 1
}
};
FIXTURE(bind_wildcard)
{
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
int expected_errno;
};
FIXTURE_VARIANT(bind_wildcard)
{
const __u32 addr4_const;
const struct in6_addr *addr6_const;
int expected_errno;
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
{
.addr4_const = INADDR_ANY,
.addr6_const = &in6addr_any,
.expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
{
.addr4_const = INADDR_ANY,
.addr6_const = &in6addr_loopback,
.expected_errno = 0,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_any)
{
.addr4_const = INADDR_ANY,
.addr6_const = &in6addr_v4mapped_any,
.expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_local)
{
.addr4_const = INADDR_ANY,
.addr6_const = &in6addr_v4mapped_loopback,
.expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
{
.addr4_const = INADDR_LOOPBACK,
.addr6_const = &in6addr_any,
.expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
{
.addr4_const = INADDR_LOOPBACK,
.addr6_const = &in6addr_loopback,
.expected_errno = 0,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_any)
{
.addr4_const = INADDR_LOOPBACK,
.addr6_const = &in6addr_v4mapped_any,
.expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_local)
{
.addr4_const = INADDR_LOOPBACK,
.addr6_const = &in6addr_v4mapped_loopback,
.expected_errno = EADDRINUSE,
};
FIXTURE_SETUP(bind_wildcard)
@@ -52,11 +102,6 @@ FIXTURE_SETUP(bind_wildcard)
self->addr6.sin6_family = AF_INET6;
self->addr6.sin6_port = htons(0);
self->addr6.sin6_addr = *variant->addr6_const;
if (variant->addr6_const == &in6addr_any)
self->expected_errno = EADDRINUSE;
else
self->expected_errno = 0;
}
FIXTURE_TEARDOWN(bind_wildcard)
@@ -65,6 +110,7 @@ FIXTURE_TEARDOWN(bind_wildcard)
void bind_sockets(struct __test_metadata *_metadata,
FIXTURE_DATA(bind_wildcard) *self,
int expected_errno,
struct sockaddr *addr1, socklen_t addrlen1,
struct sockaddr *addr2, socklen_t addrlen2)
{
@@ -86,9 +132,9 @@ void bind_sockets(struct __test_metadata *_metadata,
ASSERT_GT(fd[1], 0);
ret = bind(fd[1], addr2, addrlen2);
if (self->expected_errno) {
if (expected_errno) {
ASSERT_EQ(ret, -1);
ASSERT_EQ(errno, self->expected_errno);
ASSERT_EQ(errno, expected_errno);
} else {
ASSERT_EQ(ret, 0);
}
@@ -99,14 +145,14 @@ TEST_F(bind_wildcard, v4_v6)
TEST_F(bind_wildcard, v4_v6)
{
bind_sockets(_metadata, self,
(struct sockaddr *)&self->addr4, sizeof(self->addr6),
bind_sockets(_metadata, self, variant->expected_errno,
(struct sockaddr *)&self->addr4, sizeof(self->addr4),
(struct sockaddr *)&self->addr6, sizeof(self->addr6));
}
TEST_F(bind_wildcard, v6_v4)
{
bind_sockets(_metadata, self,
bind_sockets(_metadata, self, variant->expected_errno,
(struct sockaddr *)&self->addr6, sizeof(self->addr6),
(struct sockaddr *)&self->addr4, sizeof(self->addr4));
}
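
For reference, the two in6_addr constants added at the top of this selftest are just the IPv4 wildcard and loopback carried inside IPv6 (::ffff:0.0.0.0 and ::ffff:127.0.0.1), which is why the v4mapped variants expect EADDRINUSE against their IPv4 twins. A small standalone check of that mapping:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::ffff:127.0.0.1", &a);

	/* Bytes 10-11 are 0xff; the final four are the IPv4 address. */
	printf("%02x%02x %u.%u.%u.%u\n",
	       a.s6_addr[10], a.s6_addr[11],
	       a.s6_addr[12], a.s6_addr[13], a.s6_addr[14], a.s6_addr[15]);
	return 0;	/* prints: ffff 127.0.0.1 */
}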