diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 69703801d1b0..3f6187c16424 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -92,6 +92,7 @@ struct iavf_vsi { #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4) #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) #define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ +#define IAVF_MBPS_QUANTA 50 #define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \ (IAVF_MAX_VF_VSI * \ @@ -433,6 +434,11 @@ struct iavf_adapter { /* lock to protect access to the cloud filter list */ spinlock_t cloud_filter_list_lock; u16 num_cloud_filters; + /* snapshot of "num_active_queues" before setup_tc for qdisc add + * is invoked. This information is useful during qdisc del flow, + * to restore correct number of queues + */ + int orig_num_active_queues; #define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */ u16 fdir_active_fltr; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index e78c38d02432..45d097a164ad 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3410,6 +3410,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 total_max_rate = 0; + u32 tx_rate_rem = 0; int i, num_qps = 0; u64 tx_rate = 0; int ret = 0; @@ -3424,12 +3425,32 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, return -EINVAL; if (mqprio_qopt->min_rate[i]) { dev_err(&adapter->pdev->dev, - "Invalid min tx rate (greater than 0) specified\n"); + "Invalid min tx rate (greater than 0) specified for TC%d\n", + i); return -EINVAL; } - /*convert to Mbps */ + + /* convert to Mbps */ tx_rate = div_u64(mqprio_qopt->max_rate[i], IAVF_MBPS_DIVISOR); + + if (mqprio_qopt->max_rate[i] && + tx_rate < IAVF_MBPS_QUANTA) { + dev_err(&adapter->pdev->dev, + "Invalid max tx rate for TC%d, minimum %dMbps\n", + i, IAVF_MBPS_QUANTA); + return -EINVAL; + } + + (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem); + + if (tx_rate_rem != 0) { + dev_err(&adapter->pdev->dev, + "Invalid max tx rate for TC%d, not divisible by %d\n", + i, IAVF_MBPS_QUANTA); + return -EINVAL; + } + total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } @@ -3496,6 +3517,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) netif_tx_disable(netdev); iavf_del_all_cloud_filters(adapter); adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; + total_qps = adapter->orig_num_active_queues; goto exit; } else { return -EINVAL; @@ -3539,7 +3561,21 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) adapter->ch_config.ch_info[i].offset = 0; } } + + /* Take snapshot of original config such as "num_active_queues" + * It is used later when delete ADQ flow is exercised, so that + * once delete ADQ flow completes, VF shall go back to its + * original queue configuration + */ + + adapter->orig_num_active_queues = adapter->num_active_queues; + + /* Store queue info based on TC so that VF gets configured + * with correct number of queues when VF completes ADQ config + * flow + */ adapter->ch_config.total_qps = total_qps; + netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; @@ -3556,6 +3592,12 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 
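The validation added to iavf_validate_ch_config() converts each TC's max_rate from bytes per second to Mbps (dividing by IAVF_MBPS_DIVISOR, 125000) and then enforces a 50 Mbps quanta: a non-zero rate must be at least IAVF_MBPS_QUANTA and evenly divisible by it. A minimal userspace sketch of the same arithmetic, assuming the same divisor and quanta values; the function and constant names below are local to the example, not driver symbols.

#include <stdint.h>
#include <stdio.h>

#define MBPS_DIVISOR 125000ULL  /* bytes/s per Mbit/s */
#define MBPS_QUANTA  50ULL      /* smallest configurable step, in Mbps */

/* Return 0 if the per-TC max rate (bytes/s) is acceptable, -1 otherwise. */
static int validate_tc_max_rate(uint64_t max_rate_bps)
{
	uint64_t mbps = max_rate_bps / MBPS_DIVISOR;

	if (!max_rate_bps)
		return 0;		/* 0 means "no limit" */
	if (mbps < MBPS_QUANTA)
		return -1;		/* below the minimum quanta */
	if (mbps % MBPS_QUANTA)
		return -1;		/* not a multiple of the quanta */
	return 0;
}

int main(void)
{
	/* 100 Mbps is fine, 30 Mbps is too small, 110 Mbps is not a multiple of 50. */
	printf("%d %d %d\n",
	       validate_tc_max_rate(100ULL * MBPS_DIVISOR),
	       validate_tc_max_rate(30ULL * MBPS_DIVISOR),
	       validate_tc_max_rate(110ULL * MBPS_DIVISOR));
	return 0;
}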
} } exit: + if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) + return 0; + + netif_set_real_num_rx_queues(netdev, total_qps); + netif_set_real_num_tx_queues(netdev, total_qps); + return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 9a3b14d42835..eb40526ee179 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -433,7 +433,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) IFF_PROMISC; goto out_promisc; } - if (vsi->current_netdev_flags & + if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) vlan_ops->ena_rx_filtering(vsi); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index c88e8a436029..fbe62bbfb789 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -21,7 +21,7 @@ #define OTX2_HEAD_ROOM OTX2_ALIGN #define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN) -#define OTX2_MIN_MTU 64 +#define OTX2_MIN_MTU 60 #define OTX2_MAX_GSO_SEGS 255 #define OTX2_MAX_FRAGS_IN_SQE 9 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bea4a1329474..a560df446bac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -109,7 +109,7 @@ struct page_pool; #define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1)) #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) #define MLX5E_MAX_RQ_NUM_MTTS \ - ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ + (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \ (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS)) @@ -174,8 +174,8 @@ struct page_pool; ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT) #define MLX5E_MAX_KLM_PER_WQE(mdev) \ - MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \ - << MLX5_MKEY_BSF_OCTO_SIZE) + MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \ + mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev))) #define MLX5E_MSG_LEVEL NETIF_MSG_LINK @@ -233,7 +233,7 @@ static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev) MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB); } -static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs) +static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs) { /* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS. * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16, @@ -242,11 +242,12 @@ static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs) * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be * cache-aligned. 
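The new MLX5E_MAX_RQ_NUM_MTTS bound is picked so that the octoword count derived from it still fits in a u16. Assuming MLX5_MTT_OCTW(n) is essentially n / 2 (each 16-byte octoword holds two 8-byte MTT entries), the old bound of (1 << 16) * 2 yields 65536 octowords, which overflows u16, while ALIGN_DOWN(U16_MAX, 4) * 2 yields 65532. A small check program; the octoword conversion is written out locally as that assumption, not copied from the driver.

#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Assumed model of MLX5_MTT_OCTW(): two 8-byte MTT entries per 16-byte octoword. */
static unsigned int mtt_octw(unsigned int num_mtts)
{
	return num_mtts / 2;
}

int main(void)
{
	unsigned int old_max = (1u << 16) * 2;                 /* 131072 */
	unsigned int new_max = ALIGN_DOWN(UINT16_MAX, 4) * 2;  /* 65532 * 2 = 131064 */

	printf("old: %u mtts -> %u octw (fits u16: %s)\n", old_max,
	       mtt_octw(old_max), mtt_octw(old_max) <= UINT16_MAX ? "yes" : "no");
	printf("new: %u mtts -> %u octw (fits u16: %s)\n", new_max,
	       mtt_octw(new_max), mtt_octw(new_max) <= UINT16_MAX ? "yes" : "no");
	return 0;
}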
*/ -#if L1_CACHE_BYTES < 128 - return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1); -#else - return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2); + u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1); + +#if L1_CACHE_BYTES >= 128 + wqebbs = ALIGN_DOWN(wqebbs, 2); #endif + return wqebbs; } struct mlx5e_tx_wqe { @@ -456,7 +457,7 @@ struct mlx5e_txqsq { struct netdev_queue *txq; u32 sqn; u16 stop_room; - u16 max_sq_mpw_wqebbs; + u8 max_sq_mpw_wqebbs; u8 min_inline_mode; struct device *pdev; __be32 mkey_be; @@ -571,7 +572,7 @@ struct mlx5e_xdpsq { struct device *pdev; __be32 mkey_be; u16 stop_room; - u16 max_sq_mpw_wqebbs; + u8 max_sq_mpw_wqebbs; u8 min_inline_mode; unsigned long state; unsigned int hw_mtu; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 3c1edfa33aa7..e025040350ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -790,8 +790,20 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev, return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc)); + + /* If XDP program is attached, XSK may be turned on at any time without + * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of + * both regular RQ and XSK RQ. + * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it + * doesn't affect its return value, as long as params->xdp_prog != NULL, + * so we can just multiply by 2. + */ + if (params->xdp_prog) + wqebbs *= 2; + if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp); + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c index 33c1411ed8db..4e48946c4c2a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -128,6 +128,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at post_attr->inner_match_level = MLX5_MATCH_NONE; post_attr->outer_match_level = MLX5_MATCH_NONE; post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP; + post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT; handle->ns_type = post_act->ns_type; /* Splits were handled before post action */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h index a8cfab4a393c..cc18d97d8ee0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h @@ -7,6 +7,8 @@ #include "en.h" #include +#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL + /* RX data path */ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, @@ -21,6 +23,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { +retry: dma_info->xsk = xsk_buff_alloc(rq->xsk_pool); if (!dma_info->xsk) return -ENOMEM; @@ -32,6 +35,17 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq, */ dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk); + /* MTT page mapping has alignment requirements. If they are not + * satisfied, leak the descriptor so that it won't come again, and try + * to allocate a new one. 
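The params.c hunk doubles the UMR WQEBB budget whenever an XDP program is attached, since XSK can be enabled later without a channel restart and the ICOSQ then has to carry UMR WQEs for both the regular RQ and the XSK RQ. A minimal model of the resulting log-size computation; the constants in main() are made-up stand-ins for MLX5E_UMR_WQEBBS, the RQ log size, and the minimum SQ log size.

#include <stdint.h>
#include <stdio.h>

/* order_base_2(x): smallest n such that (1 << n) >= x, for x > 0. */
static unsigned int order_base_2(uint32_t x)
{
	unsigned int n = 0;

	while ((1u << n) < x)
		n++;
	return n;
}

static unsigned int icosq_log_wq_sz(unsigned int umr_wqebbs, unsigned int log_rq_sz,
				    int xdp_attached, unsigned int min_log_sq_sz)
{
	uint32_t wqebbs = umr_wqebbs << log_rq_sz;
	unsigned int order;

	if (xdp_attached)
		wqebbs *= 2;	/* room for both regular-RQ and XSK-RQ UMR WQEs */

	order = order_base_2(wqebbs);
	return order > min_log_sq_sz ? order : min_log_sq_sz;
}

int main(void)
{
	/* Example: 4 WQEBBs per UMR, 2^10 RQ entries, minimum log size 6. */
	printf("without XDP: %u\n", icosq_log_wq_sz(4, 10, 0, 6)); /* log2(4096) = 12 */
	printf("with XDP:    %u\n", icosq_log_wq_sz(4, 10, 1, 6)); /* log2(8192) = 13 */
	return 0;
}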
+ */ + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) { + xsk_buff_discard(dma_info->xsk); + goto retry; + } + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 814f2a56f633..30a70d139046 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -54,7 +54,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, struct mlx5_core_dev *mdev = priv->mdev; int err; - if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info))) + if (!mlx5e_ktls_type_check(mdev, crypto_info)) return -EOPNOTSUPP; if (direction == TLS_OFFLOAD_CTX_DIR_TX) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index da1959caae41..ed73132129aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -230,10 +230,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest, } static void -esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, - struct mlx5_flow_act *flow_act, - struct mlx5_fs_chains *chains, - int i) +esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, + struct mlx5_fs_chains *chains, int i) { if (mlx5_chains_ignore_flow_level_supported(chains)) flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; @@ -241,6 +239,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, dest[i].ft = mlx5_chains_get_tc_end_ft(chains); } +static void +esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, int i) +{ + if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) + flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = esw->fdb_table.offloads.slow_fdb; +} + static int esw_setup_chain_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, @@ -475,8 +483,11 @@ esw_setup_dests(struct mlx5_flow_destination *dest, } else if (attr->dest_ft) { esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); (*i)++; - } else if (mlx5e_tc_attr_flags_skip(attr->flags)) { - esw_setup_slow_path_dest(dest, flow_act, chains, *i); + } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) { + esw_setup_slow_path_dest(dest, flow_act, esw, *i); + (*i)++; + } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) { + esw_setup_accept_dest(dest, flow_act, chains, *i); (*i)++; } else if (attr->dest_chain) { err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c index d758848d34d0..696e45e2bd06 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c @@ -32,20 +32,17 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type dev->timeouts->to[type] = val; } -void mlx5_tout_set_def_val(struct mlx5_core_dev *dev) +int mlx5_tout_init(struct mlx5_core_dev *dev) { int i; - for (i = 0; i < MAX_TIMEOUT_TYPES; i++) - tout_set(dev, tout_def_sw_val[i], i); -} - -int mlx5_tout_init(struct mlx5_core_dev *dev) -{ dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL); if (!dev->timeouts) return -ENOMEM; + for (i = 0; i < MAX_TIMEOUT_TYPES; i++) + 
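The eswitch_offloads refactor stops keying the slow-path destination off mlx5e_tc_attr_flags_skip() and dispatches on the individual flags instead: MLX5_ATTR_FLAG_SLOW_PATH rules now point at the slow FDB table, while MLX5_ATTR_FLAG_ACCEPT rules keep pointing at the TC end flow table. A simplified model of that dispatch; the flag names follow the patch, everything else (types, table enum) is illustrative.

#include <stdio.h>

enum attr_flags {
	ATTR_FLAG_SLOW_PATH = 1 << 0,
	ATTR_FLAG_ACCEPT    = 1 << 1,
};

enum dest_table { DEST_SLOW_FDB, DEST_TC_END_FT, DEST_CHAIN_TABLE };

/* Pick the forwarding destination the way the refactored helper pair does:
 * slow-path rules go to the slow FDB, accept rules to the TC end table,
 * and everything else falls back to the chain destination.
 */
static enum dest_table pick_dest(unsigned int flags)
{
	if (flags & ATTR_FLAG_SLOW_PATH)
		return DEST_SLOW_FDB;
	if (flags & ATTR_FLAG_ACCEPT)
		return DEST_TC_END_FT;
	return DEST_CHAIN_TABLE;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_dest(ATTR_FLAG_SLOW_PATH),	/* 0: slow FDB */
	       pick_dest(ATTR_FLAG_ACCEPT),	/* 1: TC end FT */
	       pick_dest(0));			/* 2: chain table */
	return 0;
}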
tout_set(dev, tout_def_sw_val[i], i); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h index 257c03eeab36..bc9e9aeda847 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h @@ -35,7 +35,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev); void mlx5_tout_cleanup(struct mlx5_core_dev *dev); void mlx5_tout_query_iseg(struct mlx5_core_dev *dev); int mlx5_tout_query_dtor(struct mlx5_core_dev *dev); -void mlx5_tout_set_def_val(struct mlx5_core_dev *dev); u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type); #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 1de9b39a6359..bec8d6d0b5f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -544,7 +544,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) /* Check log_max_qp from HCA caps to set in current profile */ if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { - prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp)); + prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp)); } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", prof->log_max_qp, @@ -1050,8 +1050,6 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout) if (mlx5_core_is_pf(dev)) pcie_print_link_status(dev->pdev); - mlx5_tout_set_def_val(dev); - /* wait for firmware to accept initialization segments configurations */ err = wait_fw_init(dev, timeout, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c index d5998ef59be4..7adcf0eec13b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c @@ -21,10 +21,11 @@ enum dr_dump_rec_type { DR_DUMP_REC_TYPE_TABLE_TX = 3102, DR_DUMP_REC_TYPE_MATCHER = 3200, - DR_DUMP_REC_TYPE_MATCHER_MASK = 3201, + DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201, DR_DUMP_REC_TYPE_MATCHER_RX = 3202, DR_DUMP_REC_TYPE_MATCHER_TX = 3203, DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204, + DR_DUMP_REC_TYPE_MATCHER_MASK = 3205, DR_DUMP_REC_TYPE_RULE = 3300, DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301, @@ -114,13 +115,15 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id, break; case DR_ACTION_TYP_FT: if (action->dest_tbl->is_fw_tbl) - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n", DR_DUMP_REC_TYPE_ACTION_FT, action_id, - rule_id, action->dest_tbl->fw_tbl.id); + rule_id, action->dest_tbl->fw_tbl.id, + -1); else - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n", DR_DUMP_REC_TYPE_ACTION_FT, action_id, - rule_id, action->dest_tbl->tbl->table_id); + rule_id, action->dest_tbl->tbl->table_id, + DR_DBG_PTR_TO_ID(action->dest_tbl->tbl)); break; case DR_ACTION_TYP_CTR: diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index f3568901eb91..1443f788ee37 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1437,7 +1437,7 @@ static int 
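The dr_dbg change widens the ACTION_FT dump record from four fields to five by always appending a table identifier: the software-steering table pointer ID when one exists, or -1 for FW-managed tables. It also retires the old MATCHER_MASK record number and allocates a new one rather than silently changing that record's layout. A toy emitter for the two ACTION_FT shapes; the record type value and all field values below are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Placeholder only; the real DR_DUMP_REC_TYPE_ACTION_FT value is not shown in this hunk. */
#define REC_TYPE_ACTION_FT 3000

static void dump_action_ft(uint64_t action_id, uint64_t rule_id,
			   uint32_t table_id, int is_fw_tbl, uint64_t tbl_ptr_id)
{
	if (is_fw_tbl)
		/* FW-managed table: no software table object, emit -1 as the fifth field. */
		printf("%d,0x%llx,0x%llx,0x%x,0x%x\n", REC_TYPE_ACTION_FT,
		       (unsigned long long)action_id, (unsigned long long)rule_id,
		       table_id, -1);
	else
		printf("%d,0x%llx,0x%llx,0x%x,0x%llx\n", REC_TYPE_ACTION_FT,
		       (unsigned long long)action_id, (unsigned long long)rule_id,
		       table_id, (unsigned long long)tbl_ptr_id);
}

int main(void)
{
	dump_action_ft(0x1, 0x10, 0x2a, 1, 0);
	dump_action_ft(0x2, 0x10, 0x2b, 0, 0xffff8800deadbeefULL);
	return 0;
}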
ionic_set_nic_features(struct ionic_lif *lif, if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH) ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); - if ((vlan_flags & features) && + if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) && !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 94e7512bef94..a1f91ff8ec56 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -54,6 +54,7 @@ struct nsim_fib_data { struct rhashtable nexthop_ht; struct devlink *devlink; struct work_struct fib_event_work; + struct work_struct fib_flush_work; struct list_head fib_event_queue; spinlock_t fib_event_queue_lock; /* Protects fib event queue list */ struct mutex nh_lock; /* Protects NH HT */ @@ -61,6 +62,7 @@ struct nsim_fib_data { bool fail_route_offload; bool fail_res_nexthop_group_replace; bool fail_nexthop_bucket_replace; + bool fail_route_delete; }; struct nsim_fib_rt_key { @@ -914,6 +916,10 @@ static int nsim_fib4_prepare_event(struct fib_notifier_info *info, } break; case FIB_EVENT_ENTRY_DEL: + if (data->fail_route_delete) { + NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion"); + return -EINVAL; + } nsim_fib_account(&data->ipv4.fib, false); break; } @@ -952,6 +958,11 @@ static int nsim_fib6_prepare_event(struct fib_notifier_info *info, } break; case FIB_EVENT_ENTRY_DEL: + if (data->fail_route_delete) { + err = -EINVAL; + NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion"); + goto err_fib6_event_fini; + } nsim_fib_account(&data->ipv6.fib, false); break; } @@ -978,7 +989,7 @@ static int nsim_fib_event_schedule_work(struct nsim_fib_data *data, fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC); if (!fib_event) - return NOTIFY_BAD; + goto err_fib_event_alloc; fib_event->data = data; fib_event->event = event; @@ -1006,6 +1017,9 @@ static int nsim_fib_event_schedule_work(struct nsim_fib_data *data, err_fib_prepare_event: kfree(fib_event); +err_fib_event_alloc: + if (event == FIB_EVENT_ENTRY_DEL) + schedule_work(&data->fib_flush_work); return NOTIFY_BAD; } @@ -1483,6 +1497,24 @@ static void nsim_fib_event_work(struct work_struct *work) mutex_unlock(&data->fib_lock); } +static void nsim_fib_flush_work(struct work_struct *work) +{ + struct nsim_fib_data *data = container_of(work, struct nsim_fib_data, + fib_flush_work); + struct nsim_fib_rt *fib_rt, *fib_rt_tmp; + + /* Process pending work. 
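The netdevsim change covers the case where a route deletion notification cannot even be queued (the event allocation fails): rather than leave a stale entry, and with it a leaked reference, in the simulated hardware table, it schedules a work item that flushes the whole table once pending events drain. A condensed single-threaded model of that decision with a toy route list; the structure and names are illustrative only, not netdevsim internals.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct route {
	char prefix[32];
	struct route *next;
};

struct fib_state {
	struct route *routes;
	bool flush_pending;	/* set when a DEL event could not be queued */
};

/* Queueing a DEL event failed: we can no longer tell which entry went away,
 * so request a full flush instead of leaving stale state behind.
 */
static void on_del_event_enqueue_failure(struct fib_state *s)
{
	s->flush_pending = true;
}

static void flush_work(struct fib_state *s)
{
	struct route *rt = s->routes;

	while (rt) {
		struct route *next = rt->next;

		printf("flushing %s\n", rt->prefix);
		free(rt);
		rt = next;
	}
	s->routes = NULL;
	s->flush_pending = false;
}

int main(void)
{
	struct fib_state s = { .routes = NULL, .flush_pending = false };
	struct route *rt = calloc(1, sizeof(*rt));

	if (!rt)
		return 1;
	strcpy(rt->prefix, "192.0.2.0/24");
	s.routes = rt;

	on_del_event_enqueue_failure(&s);	/* e.g. the event kzalloc() failed */
	if (s.flush_pending)
		flush_work(&s);			/* in the driver this runs from a workqueue */
	return 0;
}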
*/ + flush_work(&data->fib_event_work); + + mutex_lock(&data->fib_lock); + list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) { + rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node, + nsim_fib_rt_ht_params); + nsim_fib_rt_free(fib_rt, data); + } + mutex_unlock(&data->fib_lock); +} + static int nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev) { @@ -1504,6 +1536,10 @@ nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev) debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir, data, &nsim_nexthop_bucket_activity_fops); + + data->fail_route_delete = false; + debugfs_create_bool("fail_route_delete", 0600, data->ddir, + &data->fail_route_delete); return 0; } @@ -1541,6 +1577,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink, goto err_rhashtable_nexthop_destroy; INIT_WORK(&data->fib_event_work, nsim_fib_event_work); + INIT_WORK(&data->fib_flush_work, nsim_fib_flush_work); INIT_LIST_HEAD(&data->fib_event_queue); spin_lock_init(&data->fib_event_queue_lock); @@ -1587,6 +1624,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink, err_nexthop_nb_unregister: unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb); err_rhashtable_fib_destroy: + cancel_work_sync(&data->fib_flush_work); flush_work(&data->fib_event_work); rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free, data); @@ -1616,6 +1654,7 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data) NSIM_RESOURCE_IPV4_FIB); unregister_fib_notifier(devlink_net(devlink), &data->fib_nb); unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb); + cancel_work_sync(&data->fib_flush_work); flush_work(&data->fib_event_work); rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free, data); diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index e62fc4f2aee0..76659c1c525a 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -637,8 +637,9 @@ config USB_NET_AQC111 * Aquantia AQtion USB to 5GbE config USB_RTL8153_ECM - tristate "RTL8153 ECM support" + tristate depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n) + default y help This option supports ECM mode for RTL8153 ethernet adapter, when CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 3b0e8f93e3c8..0ad468a00064 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1844,55 +1844,55 @@ static const struct driver_info at_umc2000sp_info = { static const struct usb_device_id products[] = { { /* ASIX AX88179 10/100/1000 */ - USB_DEVICE(0x0b95, 0x1790), + USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x1790, 0xff, 0xff, 0), .driver_info = (unsigned long)&ax88179_info, }, { /* ASIX AX88178A 10/100/1000 */ - USB_DEVICE(0x0b95, 0x178a), + USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x178a, 0xff, 0xff, 0), .driver_info = (unsigned long)&ax88178a_info, }, { /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */ - USB_DEVICE(0x04b4, 0x3610), + USB_DEVICE_AND_INTERFACE_INFO(0x04b4, 0x3610, 0xff, 0xff, 0), .driver_info = (unsigned long)&cypress_GX3_info, }, { /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ - USB_DEVICE(0x2001, 0x4a00), + USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x4a00, 0xff, 0xff, 0), .driver_info = (unsigned long)&dlink_dub1312_info, }, { /* Sitecom USB 3.0 to Gigabit Adapter */ - USB_DEVICE(0x0df6, 0x0072), + USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0072, 0xff, 
0xff, 0), .driver_info = (unsigned long)&sitecom_info, }, { /* Samsung USB Ethernet Adapter */ - USB_DEVICE(0x04e8, 0xa100), + USB_DEVICE_AND_INTERFACE_INFO(0x04e8, 0xa100, 0xff, 0xff, 0), .driver_info = (unsigned long)&samsung_info, }, { /* Lenovo OneLinkDock Gigabit LAN */ - USB_DEVICE(0x17ef, 0x304b), + USB_DEVICE_AND_INTERFACE_INFO(0x17ef, 0x304b, 0xff, 0xff, 0), .driver_info = (unsigned long)&lenovo_info, }, { /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */ - USB_DEVICE(0x050d, 0x0128), + USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x0128, 0xff, 0xff, 0), .driver_info = (unsigned long)&belkin_info, }, { /* Toshiba USB 3.0 GBit Ethernet Adapter */ - USB_DEVICE(0x0930, 0x0a13), + USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x0a13, 0xff, 0xff, 0), .driver_info = (unsigned long)&toshiba_info, }, { /* Magic Control Technology U3-A9003 USB 3.0 Gigabit Ethernet Adapter */ - USB_DEVICE(0x0711, 0x0179), + USB_DEVICE_AND_INTERFACE_INFO(0x0711, 0x0179, 0xff, 0xff, 0), .driver_info = (unsigned long)&mct_info, }, { /* Allied Telesis AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */ - USB_DEVICE(0x07c9, 0x000e), + USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000e, 0xff, 0xff, 0), .driver_info = (unsigned long)&at_umc2000_info, }, { /* Allied Telesis AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter */ - USB_DEVICE(0x07c9, 0x000f), + USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000f, 0xff, 0xff, 0), .driver_info = (unsigned long)&at_umc200_info, }, { /* Allied Telesis AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */ - USB_DEVICE(0x07c9, 0x0010), + USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x0010, 0xff, 0xff, 0), .driver_info = (unsigned long)&at_umc2000sp_info, }, { }, diff --git a/include/net/ax25.h b/include/net/ax25.h index a427a05672e2..f8cf3629a419 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -236,6 +236,7 @@ typedef struct ax25_cb { ax25_address source_addr, dest_addr; ax25_digi *digipeat; ax25_dev *ax25_dev; + netdevice_tracker dev_tracker; unsigned char iamdigi; unsigned char state, modulus, pidincl; unsigned short vs, vr, va; diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index f259e1ae14ba..56f1286583d3 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -110,8 +110,6 @@ static inline bool inet6_match(struct net *net, const struct sock *sk, const __portpair ports, const int dif, const int sdif) { - int bound_dev_if; - if (!net_eq(sock_net(sk), net) || sk->sk_family != AF_INET6 || sk->sk_portpair != ports || @@ -119,8 +117,9 @@ static inline bool inet6_match(struct net *net, const struct sock *sk, !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) return false; - bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); - return bound_dev_if == dif || bound_dev_if == sdif; + /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */ + return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, + sdif); } #endif /* IS_ENABLED(CONFIG_IPV6) */ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index fd6b510d114b..e9cf2157ed8a 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -175,17 +175,6 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) hashinfo->ehash_locks = NULL; } -static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, - int dif, int sdif) -{ -#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), - bound_dev_if, dif, 
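Switching the ax88179_178a ID table from USB_DEVICE() to USB_DEVICE_AND_INTERFACE_INFO(..., 0xff, 0xff, 0) narrows matching to the vendor-specific interface (class 0xff, subclass 0xff, protocol 0), so other interface types these devices may expose, such as a CDC ECM alternative, are left to the class drivers. A toy matcher over two table entries reusing vendor/product IDs from the patch; the struct is a simplified stand-in for struct usb_device_id, not the real one.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_usb_id {
	uint16_t idVendor;
	uint16_t idProduct;
	uint8_t  bInterfaceClass;
	uint8_t  bInterfaceSubClass;
	uint8_t  bInterfaceProtocol;
};

static const struct toy_usb_id table[] = {
	/* ASIX AX88179: only the vendor-specific (0xff/0xff/0x00) interface matches. */
	{ 0x0b95, 0x1790, 0xff, 0xff, 0x00 },
	/* ASIX AX88178A */
	{ 0x0b95, 0x178a, 0xff, 0xff, 0x00 },
};

static bool matches(uint16_t vid, uint16_t pid, uint8_t cls, uint8_t sub, uint8_t proto)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].idVendor == vid && table[i].idProduct == pid &&
		    table[i].bInterfaceClass == cls &&
		    table[i].bInterfaceSubClass == sub &&
		    table[i].bInterfaceProtocol == proto)
			return true;
	return false;
}

int main(void)
{
	/* The vendor-specific interface binds; a CDC ECM interface (class 0x02) does not. */
	printf("%d %d\n",
	       matches(0x0b95, 0x1790, 0xff, 0xff, 0x00),
	       matches(0x0b95, 0x1790, 0x02, 0x06, 0x00));
	return 0;
}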
sdif); -#else - return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); -#endif -} - struct inet_bind_bucket * inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, @@ -271,16 +260,14 @@ static inline bool inet_match(struct net *net, const struct sock *sk, const __addrpair cookie, const __portpair ports, int dif, int sdif) { - int bound_dev_if; - if (!net_eq(sock_net(sk), net) || sk->sk_portpair != ports || sk->sk_addrpair != cookie) return false; - /* Paired with WRITE_ONCE() from sock_bindtoindex_locked() */ - bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); - return bound_dev_if == dif || bound_dev_if == sdif; + /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */ + return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, + sdif); } /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 6395f6b9a5d2..bf5654ce711e 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -149,6 +149,17 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if, return bound_dev_if == dif || bound_dev_if == sdif; } +static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, + int dif, int sdif) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), + bound_dev_if, dif, sdif); +#else + return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); +#endif +} + struct inet_cork { unsigned int flags; __be32 addr; diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 4277b0dcee05..0e58c38ce0c1 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -104,6 +104,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) xp_free(xskb); } +static inline void xsk_buff_discard(struct xdp_buff *xdp) +{ + struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); + + xp_release(xskb); +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; @@ -252,6 +259,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) { } +static inline void xsk_buff_discard(struct xdp_buff *xdp) +{ +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index bbac3cb4dc99..d82a51e69386 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1066,7 +1066,7 @@ static int ax25_release(struct socket *sock) del_timer_sync(&ax25->t3timer); del_timer_sync(&ax25->idletimer); } - netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker); + netdev_put(ax25_dev->dev, &ax25->dev_tracker); ax25_dev_put(ax25_dev); } @@ -1147,7 +1147,7 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (ax25_dev) { ax25_fillin_cb(ax25, ax25_dev); - netdev_hold(ax25_dev->dev, &ax25_dev->dev_tracker, GFP_ATOMIC); + netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC); } done: diff --git a/net/dccp/proto.c b/net/dccp/proto.c index eb8e128e43e8..e13641c65f88 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -736,11 +736,6 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) lock_sock(sk); - if (dccp_qpolicy_full(sk)) { - rc = -EAGAIN; - goto out_release; - } - timeo = sock_sndtimeo(sk, noblock); /* @@ -759,6 +754,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (skb == NULL) goto out_release; + if 
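Moving inet_sk_bound_dev_eq() into inet_sock.h lets inet_match() and inet6_match() share it, and it changes behaviour for sockets not bound to a device: instead of requiring bound_dev_if == dif || bound_dev_if == sdif, the helper defers to inet_bound_dev_eq(), which accepts an unbound socket unless the packet came through an L3 master device and tcp_l3mdev_accept is off. A minimal model of that predicate; the l3mdev_accept flag stands in for the sysctl.

#include <stdbool.h>
#include <stdio.h>

/* Model of inet_bound_dev_eq(): bound_dev_if == 0 means "not bound to a device". */
static bool bound_dev_eq(bool l3mdev_accept, int bound_dev_if, int dif, int sdif)
{
	if (!bound_dev_if)
		return !sdif || l3mdev_accept;
	return bound_dev_if == dif || bound_dev_if == sdif;
}

int main(void)
{
	/* Unbound socket, packet from a VRF slave (sdif != 0):
	 * accepted only when tcp_l3mdev_accept is enabled.
	 */
	printf("%d %d\n",
	       bound_dev_eq(false, 0, 3, 2),
	       bound_dev_eq(true, 0, 3, 2));

	/* A socket bound to ifindex 2 still matches via sdif either way. */
	printf("%d %d\n",
	       bound_dev_eq(false, 2, 3, 2),
	       bound_dev_eq(true, 2, 3, 2));
	return 0;
}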
(dccp_qpolicy_full(sk)) { + rc = -EAGAIN; + goto out_discard; + } + if (sk->sk_state == DCCP_CLOSED) { rc = -ENOTCONN; goto out_discard; diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 6f1a50d50d06..fba82d36593a 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -742,7 +742,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, NULL, 0, rs, &local_odp_mr->r_key, NULL, iov->addr, iov->bytes, ODP_VIRTUAL); if (IS_ERR(local_odp_mr->r_trans_private)) { - ret = IS_ERR(local_odp_mr->r_trans_private); + ret = PTR_ERR(local_odp_mr->r_trans_private); rdsdebug("get_mr ret %d %p\"", ret, local_odp_mr->r_trans_private); kfree(local_odp_mr); diff --git a/tools/testing/selftests/drivers/net/netdevsim/fib.sh b/tools/testing/selftests/drivers/net/netdevsim/fib.sh index fc794cd30389..6800de816e8b 100755 --- a/tools/testing/selftests/drivers/net/netdevsim/fib.sh +++ b/tools/testing/selftests/drivers/net/netdevsim/fib.sh @@ -16,6 +16,7 @@ ALL_TESTS=" ipv4_replay ipv4_flush ipv4_error_path + ipv4_delete_fail ipv6_add ipv6_metric ipv6_append_single @@ -29,11 +30,13 @@ ALL_TESTS=" ipv6_replay_single ipv6_replay_multipath ipv6_error_path + ipv6_delete_fail " NETDEVSIM_PATH=/sys/bus/netdevsim/ DEV_ADDR=1337 DEV=netdevsim${DEV_ADDR} SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV/net/ +DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV/ NUM_NETIFS=0 source $lib_dir/lib.sh source $lib_dir/fib_offload_lib.sh @@ -157,6 +160,27 @@ ipv4_error_path() ipv4_error_path_replay } +ipv4_delete_fail() +{ + RET=0 + + echo "y" > $DEBUGFS_DIR/fib/fail_route_delete + + ip -n testns1 link add name dummy1 type dummy + ip -n testns1 link set dev dummy1 up + + ip -n testns1 route add 192.0.2.0/24 dev dummy1 + ip -n testns1 route del 192.0.2.0/24 dev dummy1 &> /dev/null + + # We should not be able to delete the netdev if we are leaking a + # reference. + ip -n testns1 link del dev dummy1 + + log_test "IPv4 route delete failure" + + echo "n" > $DEBUGFS_DIR/fib/fail_route_delete +} + ipv6_add() { fib_ipv6_add_test "testns1" @@ -304,6 +328,27 @@ ipv6_error_path() ipv6_error_path_replay } +ipv6_delete_fail() +{ + RET=0 + + echo "y" > $DEBUGFS_DIR/fib/fail_route_delete + + ip -n testns1 link add name dummy1 type dummy + ip -n testns1 link set dev dummy1 up + + ip -n testns1 route add 2001:db8:1::/64 dev dummy1 + ip -n testns1 route del 2001:db8:1::/64 dev dummy1 &> /dev/null + + # We should not be able to delete the netdev if we are leaking a + # reference. + ip -n testns1 link del dev dummy1 + + log_test "IPv6 route delete failure" + + echo "n" > $DEBUGFS_DIR/fib/fail_route_delete +} + fib_notify_on_flag_change_set() { local notify=$1; shift
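The rds fix replaces `ret = IS_ERR(...)` with `ret = PTR_ERR(...)`: IS_ERR() only reports whether the pointer encodes an error, so the old code returned 1 instead of the negative errno. A userspace model of the ERR_PTR encoding that shows the difference; the helpers mirror the kernel's semantics but are redefined locally for the example.

#include <stdio.h>

#define MAX_ERRNO 4095

/* Local stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static void *err_ptr(long error)      { return (void *)error; }
static long  is_err(const void *ptr)  { return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO; }
static long  ptr_err(const void *ptr) { return (long)ptr; }

int main(void)
{
	void *p = err_ptr(-12);	/* -ENOMEM */

	/* Bug pattern: storing IS_ERR() loses the errno and yields a positive value. */
	printf("ret = IS_ERR(p)  -> %ld\n", is_err(p));   /* 1   */
	/* Fixed pattern: PTR_ERR() recovers the negative errno. */
	printf("ret = PTR_ERR(p) -> %ld\n", ptr_err(p));  /* -12 */
	return 0;
}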