Merge branch 'macsec-offload-fixes'

Sabrina Dubroca says:

====================
macsec: offload-related fixes

I'm working on a dummy offload for macsec on netdevsim. It just has
small SecY and RXSC tables so I can trigger failures easily on the
ndo_* side. This has exposed a couple of issues.

The first patch is a revert of commit c850240b6c ("net: macsec:
report real_dev features when HW offloading is enabled"). That commit
tried to improve the performance of macsec offload by taking advantage
of some of the NIC's features, but in doing so, broke macsec offload
when the lower device supports both macsec and ipsec offload, as the
ipsec offload feature flags were copied from the real device. Since
the macsec device doesn't provide xdo_* ops, the XFRM core rejects the
registration of the new macsec device in xfrm_api_check.
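For reference, the check that trips here is in net/xfrm/xfrm_device.c;
paraphrased (this is a sketch, not the verbatim kernel source), it
amounts to:

    /* paraphrased sketch of xfrm_api_check(): a netdev that advertises
     * NETIF_F_HW_ESP must also provide xfrmdev_ops (the xdo_* callbacks)
     */
    static int xfrm_api_check(struct net_device *dev)
    {
    	/* the macsec device copied NETIF_F_HW_ESP from real_dev but has
    	 * no xfrmdev_ops, so the notifier returns NOTIFY_BAD and the
    	 * device fails to register
    	 */
    	if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
    		return NOTIFY_BAD;

    	return NOTIFY_DONE;
    }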

I'm working on re-adding those feature flags when offload is
available, but I haven't fully solved that yet. I think it would be
safer to do that second part in net-next considering how complex
feature interactions tend to be.

v2:
 - better describe the issue introduced by commit c850240b6c (Leon
   Romanovsky)
 - patch #3: drop unnecessary !! (Leon Romanovsky)

v3:
 - patch #3: drop extra newline (Jakub Kicinski)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

commit f4e405f566 (David S. Miller, 2022-11-04 10:43:57 +00:00)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1413,7 +1413,8 @@ static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
 	return NULL;
 }
 
-static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
+static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
+					 bool active)
 {
 	struct macsec_rx_sc *rx_sc;
 	struct macsec_dev *macsec;
@@ -1437,7 +1438,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
 	}
 
 	rx_sc->sci = sci;
-	rx_sc->active = true;
+	rx_sc->active = active;
 	refcount_set(&rx_sc->refcnt, 1);
 
 	secy = &macsec_priv(dev)->secy;
@@ -1838,6 +1839,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
 			   secy->key_len);
 
 		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+		memzero_explicit(ctx.sa.key, secy->key_len);
 		if (err)
 			goto cleanup;
 	}
@@ -1876,7 +1878,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
 	struct macsec_rx_sc *rx_sc;
 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
 	struct macsec_secy *secy;
-	bool was_active;
+	bool active = true;
 	int ret;
 
 	if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1898,16 +1900,15 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
 	secy = &macsec_priv(dev)->secy;
 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
 
-	rx_sc = create_rx_sc(dev, sci);
+	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
+		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+
+	rx_sc = create_rx_sc(dev, sci, active);
 	if (IS_ERR(rx_sc)) {
 		rtnl_unlock();
 		return PTR_ERR(rx_sc);
 	}
 
-	was_active = rx_sc->active;
-	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
-		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
-
 	if (macsec_is_offloaded(netdev_priv(dev))) {
 		const struct macsec_ops *ops;
 		struct macsec_context ctx;
@@ -1931,7 +1932,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 
 cleanup:
-	rx_sc->active = was_active;
+	del_rx_sc(secy, sci);
+	free_rx_sc(rx_sc);
 	rtnl_unlock();
 	return ret;
 }
@@ -2080,6 +2082,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 			   secy->key_len);
 
 		err = macsec_offload(ops->mdo_add_txsa, &ctx);
+		memzero_explicit(ctx.sa.key, secy->key_len);
 		if (err)
 			goto cleanup;
 	}
@@ -2570,7 +2573,7 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 	int i;
 
-	if (secy->n_rx_sc > 0)
+	if (secy->rx_sc)
 		return true;
 
 	for (i = 0; i < MACSEC_NUM_AN; i++)
@@ -2654,11 +2657,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
 	if (ret)
 		goto rollback;
 
-	/* Force features update, since they are different for SW MACSec and
-	 * HW offloading cases.
-	 */
-	netdev_update_features(dev);
-
 	rtnl_unlock();
 	return 0;
 
@@ -3432,16 +3430,9 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
 	return ret;
 }
 
-#define SW_MACSEC_FEATURES \
+#define MACSEC_FEATURES \
 	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
 
-/* If h/w offloading is enabled, use real device features save for
- *   VLAN_FEATURES - they require additional ops
- *   HW_MACSEC - no reason to report it
- */
-#define REAL_DEV_FEATURES(dev) \
-	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
-
 static int macsec_dev_init(struct net_device *dev)
 {
 	struct macsec_dev *macsec = macsec_priv(dev);
@@ -3458,12 +3449,8 @@ static int macsec_dev_init(struct net_device *dev)
 		return err;
 	}
 
-	if (macsec_is_offloaded(macsec)) {
-		dev->features = REAL_DEV_FEATURES(real_dev);
-	} else {
-		dev->features = real_dev->features & SW_MACSEC_FEATURES;
-		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
-	}
+	dev->features = real_dev->features & MACSEC_FEATURES;
+	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
 
 	dev->needed_headroom = real_dev->needed_headroom +
 			       MACSEC_NEEDED_HEADROOM;
@@ -3495,10 +3482,7 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
 	struct macsec_dev *macsec = macsec_priv(dev);
 	struct net_device *real_dev = macsec->real_dev;
 
-	if (macsec_is_offloaded(macsec))
-		return REAL_DEV_FEATURES(real_dev);
-
-	features &= (real_dev->features & SW_MACSEC_FEATURES) |
+	features &= (real_dev->features & MACSEC_FEATURES) |
 		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
 	features |= NETIF_F_LLTX;