Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Off by one in netlink parsing of mac802154_hwsim, from Alexander Aring.
 2) nf_tables RCU usage fix from Taehee Yoo.
 3) Flow dissector needs nhoff and thoff clamping, from Stanislav Fomichev.
 4) Missing sin6_flowinfo initialization in SCTP, from Xin Long.
 5) Spectrev1 in ipmr and ip6mr, from Gustavo A. R. Silva.
 6) Fix r8169 crash when DEBUG_SHIRQ is enabled, from Heiner Kallweit.
 7) Fix SKB leak in rtlwifi, from Larry Finger.
 8) Fix state pruning in bpf verifier, from Jakub Kicinski.
 9) Don't handle completely duplicate fragments as overlapping, from Michal Kubecek.
10) Fix memory corruption with macb and 64-bit DMA, from Anssi Hannula.
11) Fix TCP fallback socket release in smc, from Myungho Jung.
12) gro_cells_destroy needs to napi_disable, from Lorenzo Bianconi.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (130 commits)
  rds: Fix warning.
  neighbor: NTF_PROXY is a valid ndm_flag for a dump request
  net: mvpp2: fix the phylink mode validation
  net/sched: cls_flower: Remove old entries from rhashtable
  net/tls: allocate tls context using GFP_ATOMIC
  iptunnel: make TUNNEL_FLAGS available in uapi
  gro_cell: add napi_disable in gro_cells_destroy
  lan743x: Remove MAC Reset from initialization
  net/mlx5e: Remove the false indication of software timestamping support
  net/mlx5: Typo fix in del_sw_hw_rule
  net/mlx5e: RX, Fix wrong early return in receive queue poll
  ipv6: explicitly initialize udp6_addr in udp_sock_create6()
  bnxt_en: Fix ethtool self-test loopback.
  net/rds: remove user triggered WARN_ON in rds_sendmsg
  net/rds: fix warn in rds_message_alloc_sgs
  ath10k: skip sending quiet mode cmd for WCN3990
  mac80211: free skb fraglist before freeing the skb
  nl80211: fix memory leak if validate_pae_over_nl80211() fails
  net/smc: fix TCP fallback socket release
  vxge: ensure data0 is initialized in when fetching firmware version information
  ...
commit 519be6995c

MAINTAINERS
@@ -8943,7 +8943,7 @@ F:	arch/mips/boot/dts/img/pistachio_marduk.dts

MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
M:	Andrew Lunn <andrew@lunn.ch>
M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M:	Vivien Didelot <vivien.didelot@gmail.com>
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/dsa/mv88e6xxx/
@@ -9448,6 +9448,13 @@ F:	drivers/media/platform/mtk-vpu/
F:	Documentation/devicetree/bindings/media/mediatek-vcodec.txt
F:	Documentation/devicetree/bindings/media/mediatek-vpu.txt

MEDIATEK MT76 WIRELESS LAN DRIVER
M:	Felix Fietkau <nbd@nbd.name>
M:	Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
L:	linux-wireless@vger.kernel.org
S:	Maintained
F:	drivers/net/wireless/mediatek/mt76/

MEDIATEK MT7601U WIRELESS LAN DRIVER
M:	Jakub Kicinski <kubakici@wp.pl>
L:	linux-wireless@vger.kernel.org
@@ -10418,7 +10425,7 @@ F:	drivers/net/wireless/

NETWORKING [DSA]
M:	Andrew Lunn <andrew@lunn.ch>
M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M:	Vivien Didelot <vivien.didelot@gmail.com>
M:	Florian Fainelli <f.fainelli@gmail.com>
S:	Maintained
F:	Documentation/devicetree/bindings/net/dsa/
@@ -153,6 +153,11 @@ struct chtls_dev {
	unsigned int cdev_state;
};

struct chtls_listen {
	struct chtls_dev *cdev;
	struct sock *sk;
};

struct chtls_hws {
	struct sk_buff_head sk_recv_queue;
	u8 txqid;
@@ -215,6 +220,8 @@ struct chtls_sock {
	u16 resv2;
	u32 delack_mode;
	u32 delack_seq;
	u32 snd_win;
	u32 rcv_win;

	void *passive_reap_next;        /* placeholder for passive */
	struct chtls_hws tlshws;
@@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
@@ -887,24 +888,6 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
	return mtu_idx;
}

static unsigned int select_rcv_wnd(struct chtls_sock *csk)
{
	unsigned int rcvwnd;
	unsigned int wnd;
	struct sock *sk;

	sk = csk->sk;
	wnd = tcp_full_space(sk);

	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	rcvwnd = MAX_RCV_WND;

	csk_set_flag(csk, CSK_UPDATE_RCV_WND);
	return min(wnd, rcvwnd);
}

static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
	int wscale = 0;
@@ -951,7 +934,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
					req);
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
	       WND_SCALE_V(RCV_WSCALE(tp)) |
	       MSS_IDX_V(csk->mtu_idx) |
	       L2T_IDX_V(csk->l2t_entry->idx) |
	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
@@ -1005,6 +988,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	return 0;
}

static void chtls_set_tcp_window(struct chtls_sock *csk)
{
	struct net_device *ndev = csk->egress_dev;
	struct port_info *pi = netdev_priv(ndev);
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;
#define CHTLS_10G_RCVWIN (256 * 1024)
	csk->rcv_win = CHTLS_10G_RCVWIN;
	if (scale)
		csk->rcv_win *= scale;
#define CHTLS_10G_SNDWIN (256 * 1024)
	csk->snd_win = CHTLS_10G_SNDWIN;
	if (scale)
		csk->snd_win *= scale;
}

static struct sock *chtls_recv_sock(struct sock *lsk,
				    struct request_sock *oreq,
				    void *network_hdr,
@@ -1067,6 +1069,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
	csk->port_id = port_id;
	csk->egress_dev = ndev;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	chtls_set_tcp_window(csk);
	tp->rcv_wnd = csk->rcv_win;
	csk->sndbuf = csk->snd_win;
	csk->ulp_mode = ULP_MODE_TLS;
	step = cdev->lldi->nrxq / cdev->lldi->nchan;
	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
@@ -1076,9 +1081,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
	csk->sndbuf = newsk->sk_sndbuf;
	csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
					 cxgb4_port_viid(ndev));
	tp->rcv_wnd = select_rcv_wnd(csk);
	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
					   WSCALE_OK(tp),
					   sock_net(newsk)->
						ipv4.sysctl_tcp_window_scaling,
					   tp->window_clamp);
	neigh_release(n);
	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1130,6 +1135,7 @@ static void chtls_pass_accept_request(struct sock *sk,
	struct cpl_t5_pass_accept_rpl *rpl;
	struct cpl_pass_accept_req *req;
	struct listen_ctx *listen_ctx;
	struct vlan_ethhdr *vlan_eh;
	struct request_sock *oreq;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
@@ -1142,6 +1148,10 @@ static void chtls_pass_accept_request(struct sock *sk,
	unsigned int stid;
	unsigned int len;
	unsigned int tid;
	bool th_ecn, ect;
	__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
	u16 eth_hdr_len;
	bool ecn_ok;

	req = cplhdr(skb) + RSS_HDR;
	tid = GET_TID(req);
@@ -1180,24 +1190,40 @@ static void chtls_pass_accept_request(struct sock *sk,
	oreq->mss = 0;
	oreq->ts_recent = 0;

	eh = (struct ethhdr *)(req + 1);
	iph = (struct iphdr *)(eh + 1);
	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
		network_hdr = (void *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		network_hdr = (void *)(vlan_eh + 1);
	}
	if (iph->version != 0x4)
		goto free_oreq;

	network_hdr = (void *)(eh + 1);
	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)req);

	tcp_rsk(oreq)->tfo_listener = false;
	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
	chtls_set_req_port(oreq, tcph->source, tcph->dest);
	inet_rsk(oreq)->ecn_ok = 0;
	chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
	if (req->tcpopt.wsf <= 14) {
	ip_dsfield = ipv4_get_dsfield(iph);
	if (req->tcpopt.wsf <= 14 &&
	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
		inet_rsk(oreq)->wscale_ok = 1;
		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
	}
	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
	th_ecn = tcph->ece && tcph->cwr;
	if (th_ecn) {
		ect = !INET_ECN_is_not_ect(ip_dsfield);
		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
			inet_rsk(oreq)->ecn_ok = 1;
	}

	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
	if (!newsk)
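The chtls_set_tcp_window() addition above sizes both TCP windows from the egress port's link speed: a 10Gb link gets a 256KB send and receive window, and faster links scale that base linearly. A minimal standalone C sketch of the same arithmetic follows; the constant names mirror the driver's, everything else is illustrative:

#include <stdio.h>

#define SPEED_10000	10000		/* link speed in Mbps */
#define WIN_10G		(256 * 1024)	/* mirrors CHTLS_10G_RCVWIN/SNDWIN */

/* Like chtls_set_tcp_window(): scale is the integer ratio
 * linkspeed / 10G; below 10G it is 0 and the base window is kept. */
static unsigned int select_window(unsigned int linkspeed_mbps)
{
	unsigned int scale = linkspeed_mbps / SPEED_10000;
	unsigned int win = WIN_10G;

	if (scale)
		win *= scale;
	return win;
}

int main(void)
{
	printf("1G:   %u\n", select_window(1000));	/* 262144 */
	printf("10G:  %u\n", select_window(10000));	/* 262144 */
	printf("100G: %u\n", select_window(100000));	/* 2621440 */
	return 0;
}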
@@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,

	req_wr->lsodisable_to_flags =
		htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
		      FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
		      TX_URG_V(skb_urgent(skb)) |
		      T6_TX_FORCE_F | wr_ulp_mode_force |
		      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
				 skb_queue_empty(&csk->txq)));
@@ -534,10 +534,9 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
			FW_OFLD_TX_DATA_WR_SHOVE_F);

	req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
			FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
			FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
					(sk, CSK_TX_MORE_DATA)) &&
					skb_queue_empty(&csk->txq)));
			TX_URG_V(skb_urgent(skb)) |
			TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
				   skb_queue_empty(&csk->txq)));
	req->plen = htonl(len);
}
@@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
	int mss, flags, err;
	int recordsz = 0;
	int copied = 0;
	int hdrlen = 0;
	long timeo;

	lock_sock(sk);
@@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)

			recordsz = tls_header_read(&hdr, &msg->msg_iter);
			size -= TLS_HEADER_LENGTH;
			hdrlen += TLS_HEADER_LENGTH;
			copied += TLS_HEADER_LENGTH;
			csk->tlshws.txleft = recordsz;
			csk->tlshws.type = hdr.type;
			if (skb)
@@ -1083,10 +1081,8 @@ new_buf:
			int off = TCP_OFF(sk);
			bool merge;

			if (!page)
				goto wait_for_memory;

			pg_size <<= compound_order(page);
			if (page)
				pg_size <<= compound_order(page);
			if (off < pg_size &&
			    skb_can_coalesce(skb, i, page, off)) {
				merge = 1;
@@ -1187,7 +1183,7 @@ out:
	chtls_tcp_push(sk, flags);
done:
	release_sock(sk);
	return copied + hdrlen;
	return copied;
do_fault:
	if (!skb->len) {
		__skb_unlink(skb, &csk->txq);
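The chtls_sendmsg() changes above drop the separate hdrlen counter and fold the TLS header length directly into copied at the point where the header is consumed, so every return path reports the same total. A tiny standalone C sketch of the corrected accounting (values illustrative):

#include <stdio.h>

#define TLS_HEADER_LENGTH 5

/* Fold the header into 'copied' once, up front, instead of keeping a
 * second counter that only some return paths remembered to add. */
static int sendmsg_account(int payload)
{
	int copied = 0;

	copied += TLS_HEADER_LENGTH;	/* header consumed */
	copied += payload;		/* data queued */
	return copied;			/* one consistent total */
}

int main(void)
{
	printf("bytes reported: %d\n", sendmsg_account(1024));
	return 0;
}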
@@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb)
static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_dev *cdev;
	struct sock *sk;
	int ret;
	struct chtls_listen *clisten;
	int ret = NOTIFY_DONE;

	sk = data;
	ret = NOTIFY_DONE;
	clisten = (struct chtls_listen *)data;

	switch (event) {
	case CHTLS_LISTEN_START:
		ret = chtls_listen_start(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	case CHTLS_LISTEN_STOP:
		mutex_lock(&cdev_list_lock);
		list_for_each_entry(cdev, &cdev_list, list) {
			if (event == CHTLS_LISTEN_START)
				ret = chtls_listen_start(cdev, sk);
			else
				chtls_listen_stop(cdev, sk);
		}
		mutex_unlock(&cdev_list_lock);
		chtls_listen_stop(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	}
	return ret;
@@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	return 0;
}

static int chtls_start_listen(struct sock *sk)
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;
	int err;

	if (sk->sk_protocol != IPPROTO_TCP)
@@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk)
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return -ENOMEM;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	err = raw_notifier_call_chain(&listen_notify_list,
				      CHTLS_LISTEN_START, sk);
				      CHTLS_LISTEN_START, clisten);
	mutex_unlock(&notify_mutex);
	return err;
}

static void chtls_stop_listen(struct sock *sk)
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, sk);
				CHTLS_LISTEN_STOP, clisten);
	mutex_unlock(&notify_mutex);
}

@@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev)

static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(sk);
		return chtls_start_listen(cdev, sk);
	return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(sk);
		chtls_stop_listen(cdev, sk);
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	tls_unregister_device(&cdev->tlsdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
	struct chtls_dev *cdev;
	struct tls_device *dev;

	dev = container_of(kref, struct tls_device, kref);
	cdev = to_chtls_dev(dev);
	chtls_free_uld(cdev);
}

static void chtls_register_dev(struct chtls_dev *cdev)
@@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev)
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tls_register_device(&cdev->tlsdev);
	tlsdev->release = chtls_dev_release;
	kref_init(&tlsdev->kref);
	tls_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

static void chtls_unregister_dev(struct chtls_dev *cdev)
{
	tls_unregister_device(&cdev->tlsdev);
}

static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
@@ -262,28 +295,16 @@ out:
	return NULL;
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	chtls_unregister_dev(cdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
			chtls_free_uld(cdev);
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&cdev_mutex);
}
@@ -304,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		chtls_free_uld(cdev);
		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		break;
	default:
		break;
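The listen-notifier rework above replaces a bare socket pointer (plus a walk of the global device list in the handler) with a heap-allocated chtls_listen context naming both the device and the socket, which the handler frees after use. A userspace C analog of that hand-off pattern follows; the names are illustrative, not the kernel notifier API:

#include <stdio.h>
#include <stdlib.h>

struct dev;
struct sock;

/* Context bundling everything the handler needs, like chtls_listen. */
struct listen_ctx {
	struct dev *cdev;
	struct sock *sk;
};

enum { LISTEN_START, LISTEN_STOP };

static int listen_start(struct dev *cdev, struct sock *sk)
{
	printf("start listen on dev %p, sk %p\n", (void *)cdev, (void *)sk);
	return 0;
}

static int notify(unsigned long event, void *data)
{
	struct listen_ctx *ctx = data;
	int ret = 0;

	switch (event) {
	case LISTEN_START:
		ret = listen_start(ctx->cdev, ctx->sk);
		free(ctx);	/* handler owns and releases the context */
		break;
	case LISTEN_STOP:
		/* stop path is analogous */
		free(ctx);
		break;
	}
	return ret;
}

int main(void)
{
	struct listen_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return 1;
	ctx->cdev = NULL;	/* stand-ins for real objects */
	ctx->sk = NULL;
	return notify(LISTEN_START, ctx);
}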
@@ -1124,7 +1124,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
	u16 *p = _p;
	int i;

	regs->version = 0;
	regs->version = chip->info->prod_num;

	memset(p, 0xff, 32 * sizeof(u16));
@@ -29,9 +29,6 @@
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
@@ -1282,6 +1282,7 @@ enum sp_rtnl_flag {
	BNX2X_SP_RTNL_TX_STOP,
	BNX2X_SP_RTNL_GET_DRV_VERSION,
	BNX2X_SP_RTNL_CHANGE_UDP_PORT,
	BNX2X_SP_RTNL_UPDATE_SVID,
};

enum bnx2x_iov_flag {
@@ -2520,6 +2521,7 @@ void bnx2x_update_mfw_dump(struct bnx2x *bp);
void bnx2x_init_ptp(struct bnx2x *bp);
int bnx2x_configure_ptp_filters(struct bnx2x *bp);
void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
void bnx2x_register_phc(struct bnx2x *bp);

#define BNX2X_MAX_PHC_DRIFT 31000000
#define BNX2X_PTP_TX_TIMEOUT
@@ -2842,6 +2842,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
	bnx2x_set_rx_mode_inner(bp);

	if (bp->flags & PTP_SUPPORTED) {
		bnx2x_register_phc(bp);
		bnx2x_init_ptp(bp);
		bnx2x_configure_ptp_filters(bp);
	}
@@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
		int func = BP_ABS_FUNC(bp);
		u32 val;
@@ -4311,7 +4315,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
				bnx2x_handle_eee_event(bp);

			if (val & DRV_STATUS_OEM_UPDATE_SVID)
				bnx2x_handle_update_svid_cmd(bp);
				bnx2x_schedule_sp_rtnl(bp,
						       BNX2X_SP_RTNL_UPDATE_SVID, 0);

			if (bp->link_vars.periodic_flags &
			    PERIODIC_FLAGS_LINK_EVENT) {
@@ -7723,6 +7728,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
		REG_WR(bp, reg_addr, val);
	}

	if (CHIP_IS_E3B0(bp))
		bp->flags |= PTP_SUPPORTED;

	return 0;
}

@@ -8472,6 +8480,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
	/* Fill a user request section if needed */
	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
		ramrod_param.user_req.u.vlan.vlan = vlan;
		__set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8492,6 +8501,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
	return rc;
}

static int bnx2x_del_all_vlans(struct bnx2x *bp)
{
	struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
	unsigned long ramrod_flags = 0, vlan_flags = 0;
	struct bnx2x_vlan_entry *vlan;
	int rc;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	__set_bit(BNX2X_VLAN, &vlan_flags);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
	if (rc)
		return rc;

	/* Mark that hw forgot all entries */
	list_for_each_entry(vlan, &bp->vlan_reg, link)
		vlan->hw = false;
	bp->vlan_cnt = 0;

	return 0;
}

int bnx2x_del_all_macs(struct bnx2x *bp,
		       struct bnx2x_vlan_mac_obj *mac_obj,
		       int mac_type, bool wait_for_comp)
@@ -9330,6 +9360,11 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
			  rc);

	/* Remove all currently configured VLANs */
	rc = bnx2x_del_all_vlans(bp);
	if (rc < 0)
		BNX2X_ERR("Failed to delete all VLANs\n");

	/* Disable LLH */
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -9417,8 +9452,13 @@ unload_error:
	 * function stop ramrod is sent, since as part of this ramrod FW access
	 * PTP registers.
	 */
	if (bp->flags & PTP_SUPPORTED)
	if (bp->flags & PTP_SUPPORTED) {
		bnx2x_stop_ptp(bp);
		if (bp->ptp_clock) {
			ptp_clock_unregister(bp->ptp_clock);
			bp->ptp_clock = NULL;
		}
	}

	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);
@@ -10359,6 +10399,9 @@ sp_rtnl_not_reset:
			       &bp->sp_rtnl_state))
		bnx2x_update_mng_version(bp);

	if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
		bnx2x_handle_update_svid_cmd(bp);

	if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
			       &bp->sp_rtnl_state)) {
		if (bnx2x_udp_port_update(bp)) {
@@ -11750,8 +11793,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
	 * If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_fcoe_conn)
	if (!bp->cnic_eth_dev.max_fcoe_conn) {
		bp->flags |= NO_FCOE_FLAG;
		eth_zero_addr(bp->fip_mac);
	}
}

static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -12494,9 +12539,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)

	bp->dump_preset_idx = 1;

	if (CHIP_IS_E3B0(bp))
		bp->flags |= PTP_SUPPORTED;

	return rc;
}

@@ -13024,13 +13066,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)

int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
{
	struct bnx2x_vlan_entry *vlan;

	/* The hw forgot all entries after reload */
	list_for_each_entry(vlan, &bp->vlan_reg, link)
		vlan->hw = false;
	bp->vlan_cnt = 0;

	/* Don't set rx mode here. Our caller will do it. */
	bnx2x_vlan_configure(bp, false);

@@ -13895,7 +13930,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
	return -ENOTSUPP;
}

static void bnx2x_register_phc(struct bnx2x *bp)
void bnx2x_register_phc(struct bnx2x *bp)
{
	/* Fill the ptp_clock_info struct and register PTP clock*/
	bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14097,8 +14132,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
		       dev->base_addr, bp->pdev->irq, dev->dev_addr);
	pcie_print_link_status(bp->pdev);

	bnx2x_register_phc(bp);

	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);

@@ -14131,11 +14164,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
			   struct bnx2x *bp,
			   bool remove_netdev)
{
	if (bp->ptp_clock) {
		ptp_clock_unregister(bp->ptp_clock);
		bp->ptp_clock = NULL;
	}

	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
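The bnx2x change above stops calling bnx2x_handle_update_svid_cmd() directly from the attention handler and instead sets BNX2X_SP_RTNL_UPDATE_SVID for the sp_rtnl task to pick up later under the right context. A userspace C analog of that set-bit-then-test-and-clear hand-off (names illustrative, C11 atomics standing in for the kernel's bitops):

#include <stdatomic.h>
#include <stdio.h>

enum { SP_RTNL_UPDATE_SVID = 0 };	/* illustrative flag bit */

static atomic_ulong sp_rtnl_state;

/* Interrupt-safe side: only records that work is pending. */
static void schedule_sp_rtnl(int flag)
{
	atomic_fetch_or(&sp_rtnl_state, 1UL << flag);
	/* ...a real driver would also kick the worker here... */
}

/* Returns nonzero iff the flag was set, clearing it atomically. */
static int test_and_clear_flag(int flag)
{
	unsigned long mask = 1UL << flag;

	return (atomic_fetch_and(&sp_rtnl_state, ~mask) & mask) != 0;
}

/* Process-context worker does the heavy lifting. */
static void sp_rtnl_task(void)
{
	if (test_and_clear_flag(SP_RTNL_UPDATE_SVID))
		printf("handling SVID update in process context\n");
}

int main(void)
{
	schedule_sp_rtnl(SP_RTNL_UPDATE_SVID);
	sp_rtnl_task();
	return 0;
}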
@@ -265,6 +265,7 @@ enum {
	BNX2X_ETH_MAC,
	BNX2X_ISCSI_ETH_MAC,
	BNX2X_NETQ_ETH_MAC,
	BNX2X_VLAN,
	BNX2X_DONT_CONSUME_CAM_CREDIT,
	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
@@ -272,7 +273,8 @@ enum {
#define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \
				 1 << BNX2X_ETH_MAC | \
				 1 << BNX2X_ISCSI_ETH_MAC | \
				 1 << BNX2X_NETQ_ETH_MAC)
				 1 << BNX2X_NETQ_ETH_MAC | \
				 1 << BNX2X_VLAN)
#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
@@ -2572,6 +2572,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
@@ -2579,7 +2580,9 @@ static int bnxt_run_loopback(struct bnxt *bp)
	u8 *data;
	int rc;

	cpr = &txr->bnapi->cp_ring;
	cpr = &rxr->bnapi->cp_ring;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
@@ -61,7 +61,8 @@
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
					| MACB_BIT(ISR_RLE)	\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
@@ -680,6 +681,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
@@ -928,14 +934,19 @@ static void gem_rx_refill(struct macb_queue *queue)

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			macb_set_addr(bp, desc, paddr);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->addr &= ~MACB_BIT(RX_USED);
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}
@@ -989,11 +1000,15 @@ static int gem_rx(struct macb_queue *queue, int budget)

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);
		ctrl = desc->ctrl;

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		queue->rx_tail++;
		count++;

@@ -1168,11 +1183,14 @@ static int macb_rx(struct macb_queue *queue, int budget)
		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		/* Ensure ctrl is at least as up-to-date as addr */
		dma_rmb();

		ctrl = desc->ctrl;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
@@ -1312,6 +1330,21 @@ static void macb_hresp_error_task(unsigned long data)
	netif_tx_start_all_queues(dev);
}

static void macb_tx_restart(struct macb_queue *queue)
{
	unsigned int head = queue->tx_head;
	unsigned int tail = queue->tx_tail;
	struct macb *bp = queue->bp;

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TXUBR));

	if (head == tail)
		return;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
@@ -1369,6 +1402,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		if (status & MACB_BIT(TXUBR))
			macb_tx_restart(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */
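The macb changes above all enforce one descriptor-ring ordering rule: everything the consumer may read (the ctrl word, the high address bits) must be made visible before the write that flips RX_USED transfers ownership, and the reader must issue dma_rmb() between observing RX_USED and re-reading ctrl. A compilable userspace analog using C11 fences in place of dma_wmb()/dma_rmb(); the structure and names are illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RX_USED (1u << 0)	/* low addr bit doubles as ownership flag */

struct desc {
	_Atomic uint32_t addr;
	uint32_t ctrl;
};

/* Refill side: publish ctrl before the store that clears RX_USED. */
static void producer(struct desc *d, uint32_t paddr)
{
	d->ctrl = 0;
	atomic_thread_fence(memory_order_release);	/* ~dma_wmb() */
	atomic_store_explicit(&d->addr, paddr & ~RX_USED,
			      memory_order_relaxed);
}

/* Receive side: only read ctrl after RX_USED is seen set. */
static int consumer(const struct desc *d, uint32_t *ctrl)
{
	if (!(atomic_load_explicit(&d->addr, memory_order_relaxed) & RX_USED))
		return 0;				/* not ours yet */
	atomic_thread_fence(memory_order_acquire);	/* ~dma_rmb() */
	*ctrl = d->ctrl;	/* at least as new as the RX_USED read */
	return 1;
}

int main(void)
{
	struct desc d = { .addr = 0, .ctrl = 0 };
	uint32_t ctrl;

	producer(&d, 0x1000);
	atomic_store(&d.addr, atomic_load(&d.addr) | RX_USED); /* "hw" side */
	printf("owned=%d\n", consumer(&d, &ctrl));
	return 0;
}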
@@ -319,6 +319,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
	desc_ptp = macb_ptp_desc(queue->bp, desc);
	tx_timestamp = &queue->tx_timestamps[head];
	tx_timestamp->skb = skb;
	/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
	dma_rmb();
	tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
	tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
	/* move head */
@@ -1453,6 +1453,9 @@ struct cpl_tx_data {
#define T6_TX_FORCE_V(x)	((x) << T6_TX_FORCE_S)
#define T6_TX_FORCE_F		T6_TX_FORCE_V(1U)

#define TX_URG_S	16
#define TX_URG_V(x)	((x) << TX_URG_S)

#define TX_SHOVE_S	14
#define TX_SHOVE_V(x)	((x) << TX_SHOVE_S)
@@ -379,6 +379,9 @@ static void hns_ae_stop(struct hnae_handle *handle)

	hns_ae_ring_enable_all(handle, 0);

	/* clean rx fbd. */
	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);

	(void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
}
@@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
	struct mac_driver *drv = (struct mac_driver *)mac_drv;

	/*enable GE rX/tX */
	if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
	if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);

	if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
	if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
		/* enable rx pcs */
		dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
	}
}

static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
@@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
	struct mac_driver *drv = (struct mac_driver *)mac_drv;

	/*disable GE rX/tX */
	if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
	if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);

	if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
	if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
		/* disable rx pcs */
		dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
	}
}

/* hns_gmac_get_en - get port enable
@@ -778,6 +778,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
	return rc;
}

static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
{
	if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
		return;

	phy_device_remove(mac_cb->phy_dev);
	phy_device_free(mac_cb->phy_dev);

	mac_cb->phy_dev = NULL;
}

#define MAC_MEDIA_TYPE_MAX_LEN	16

static const struct {
@@ -1117,7 +1128,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
	int max_port_num = hns_mac_get_max_port_num(dsaf_dev);

	for (i = 0; i < max_port_num; i++) {
		if (!dsaf_dev->mac_cb[i])
			continue;

		dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
		hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
		dsaf_dev->mac_cb[i] = NULL;
	}
}
@@ -934,6 +934,62 @@ static void hns_dsaf_tcam_mc_cfg(
	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/**
 * hns_dsaf_tcam_uc_cfg_vague - INT
 * @dsaf_dev: dsa fabric device struct pointer
 * @address,
 * @ptbl_tcam_data,
 */
static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
				       u32 address,
				       struct dsaf_tbl_tcam_data *tcam_data,
				       struct dsaf_tbl_tcam_data *tcam_mask,
				       struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
{
	spin_lock_bh(&dsaf_dev->tcam_lock);
	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
	hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
	hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);

	/*Restore Match Data*/
	tcam_mask->tbl_tcam_data_high = 0xffffffff;
	tcam_mask->tbl_tcam_data_low = 0xffffffff;
	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);

	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/**
 * hns_dsaf_tcam_mc_cfg_vague - INT
 * @dsaf_dev: dsa fabric device struct pointer
 * @address,
 * @ptbl_tcam_data,
 * @ptbl_tcam_mask
 * @ptbl_tcam_mcast
 */
static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
				       u32 address,
				       struct dsaf_tbl_tcam_data *tcam_data,
				       struct dsaf_tbl_tcam_data *tcam_mask,
				       struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
{
	spin_lock_bh(&dsaf_dev->tcam_lock);
	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
	hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
	hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);

	/*Restore Match Data*/
	tcam_mask->tbl_tcam_data_high = 0xffffffff;
	tcam_mask->tbl_tcam_data_low = 0xffffffff;
	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);

	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/**
 * hns_dsaf_tcam_mc_invld - INT
 * @dsaf_id: dsa fabric id
@@ -1492,6 +1548,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
	return DSAF_INVALID_ENTRY_IDX;
}

/**
 * hns_dsaf_find_empty_mac_entry_reverse
 * search dsa fabric soft empty-entry from the end
 * @dsaf_dev: dsa fabric device struct pointer
 */
static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
{
	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
	int i;

	soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
	for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
		/* search all entry from end to start.*/
		if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
			return i;
		soft_mac_entry--;
	}
	return DSAF_INVALID_ENTRY_IDX;
}

/**
 * hns_dsaf_set_mac_key - set mac key
 * @dsaf_dev: dsa fabric device struct pointer
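hns_dsaf_find_empty_mac_entry_reverse() above scans the soft TCAM shadow table from the last slot toward the front, so the promiscuous-mode "vague" entries added later in this series land at the end of the table, away from normal entries allocated from the front. A standalone C sketch of the same search; the table size and invalid marker are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TCAM_SUM	64	/* stand-in for DSAF_TCAM_SUM */
#define INVALID_IDX	0xffff	/* stand-in for DSAF_INVALID_ENTRY_IDX */

struct soft_mac_entry {
	uint16_t index;
};

/* Walk from the last slot toward the front; return the first free one. */
static uint16_t find_empty_entry_reverse(struct soft_mac_entry *tbl)
{
	struct soft_mac_entry *e = tbl + (TCAM_SUM - 1);
	int i;

	for (i = TCAM_SUM - 1; i > 0; i--) {
		if (e->index == INVALID_IDX)
			return i;
		e--;
	}
	return INVALID_IDX;
}

int main(void)
{
	struct soft_mac_entry tbl[TCAM_SUM];
	int i;

	for (i = 0; i < TCAM_SUM; i++)
		tbl[i].index = (i < 3) ? i : INVALID_IDX; /* 3 in use */
	printf("free slot from the end: %u\n", find_empty_entry_reverse(tbl));
	return 0;
}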
@ -2166,9 +2243,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
|
||||
DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
|
||||
|
||||
hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
|
||||
DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
|
||||
DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
|
||||
hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
|
||||
DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
|
||||
DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
|
||||
|
||||
/* pfc pause frame statistics stored in dsaf inode*/
|
||||
if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
|
||||
@ -2285,237 +2362,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
|
||||
DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
|
||||
p[223 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
|
||||
p[224 + i] = dsaf_read_dev(ddev,
|
||||
p[226 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
|
||||
}
|
||||
|
||||
p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
|
||||
p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
|
||||
|
||||
for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
|
||||
j = i * DSAF_COMM_CHN + port;
|
||||
p[228 + i] = dsaf_read_dev(ddev,
|
||||
p[230 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
|
||||
}
|
||||
|
||||
p[231] = dsaf_read_dev(ddev,
|
||||
DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
|
||||
p[233] = dsaf_read_dev(ddev,
|
||||
DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
|
||||
|
||||
/* dsaf inode registers */
|
||||
for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
|
||||
j = i * DSAF_COMM_CHN + port;
|
||||
p[232 + i] = dsaf_read_dev(ddev,
|
||||
p[234 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_CFG_REG_0_REG + j * 0x80);
|
||||
p[235 + i] = dsaf_read_dev(ddev,
|
||||
p[237 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
|
||||
p[238 + i] = dsaf_read_dev(ddev,
|
||||
p[240 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
|
||||
p[241 + i] = dsaf_read_dev(ddev,
|
||||
p[243 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
|
||||
p[244 + i] = dsaf_read_dev(ddev,
|
||||
p[246 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
|
||||
p[245 + i] = dsaf_read_dev(ddev,
|
||||
p[249 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
|
||||
p[248 + i] = dsaf_read_dev(ddev,
|
||||
p[252 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
|
||||
p[251 + i] = dsaf_read_dev(ddev,
|
||||
p[255 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
|
||||
p[254 + i] = dsaf_read_dev(ddev,
|
||||
p[258 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
|
||||
p[257 + i] = dsaf_read_dev(ddev,
|
||||
p[261 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
|
||||
p[260 + i] = dsaf_read_dev(ddev,
|
||||
p[264 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_INER_ST_0_REG + j * 0x80);
|
||||
p[263 + i] = dsaf_read_dev(ddev,
|
||||
p[267 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
|
||||
p[266 + i] = dsaf_read_dev(ddev,
|
||||
p[270 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
|
||||
p[269 + i] = dsaf_read_dev(ddev,
|
||||
p[273 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
|
||||
p[272 + i] = dsaf_read_dev(ddev,
|
||||
p[276 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
|
||||
p[275 + i] = dsaf_read_dev(ddev,
|
||||
p[279 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
|
||||
p[278 + i] = dsaf_read_dev(ddev,
|
||||
p[282 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
|
||||
p[281 + i] = dsaf_read_dev(ddev,
|
||||
p[285 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
|
||||
p[284 + i] = dsaf_read_dev(ddev,
|
||||
p[288 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
|
||||
p[287 + i] = dsaf_read_dev(ddev,
|
||||
p[291 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
|
||||
p[290 + i] = dsaf_read_dev(ddev,
|
||||
p[294 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
|
||||
p[293 + i] = dsaf_read_dev(ddev,
|
||||
p[297 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
|
||||
p[296 + i] = dsaf_read_dev(ddev,
|
||||
p[300 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
|
||||
p[299 + i] = dsaf_read_dev(ddev,
|
||||
p[303 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
|
||||
p[302 + i] = dsaf_read_dev(ddev,
|
||||
p[306 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
|
||||
p[305 + i] = dsaf_read_dev(ddev,
|
||||
p[309 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
|
||||
p[308 + i] = dsaf_read_dev(ddev,
|
||||
p[312 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
|
||||
}
|
||||
|
||||
/* dsaf onode registers */
|
||||
for (i = 0; i < DSAF_XOD_NUM; i++) {
|
||||
p[311 + i] = dsaf_read_dev(ddev,
|
||||
p[315 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
|
||||
p[319 + i] = dsaf_read_dev(ddev,
|
||||
p[323 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
|
||||
p[327 + i] = dsaf_read_dev(ddev,
|
||||
p[331 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
|
||||
p[335 + i] = dsaf_read_dev(ddev,
|
||||
p[339 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
|
||||
p[343 + i] = dsaf_read_dev(ddev,
|
||||
p[347 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
|
||||
p[351 + i] = dsaf_read_dev(ddev,
|
||||
p[355 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
|
||||
}
|
||||
|
||||
p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
|
||||
p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
|
||||
p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
|
||||
p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
|
||||
p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
|
||||
p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
|
||||
|
||||
for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
|
||||
j = i * DSAF_COMM_CHN + port;
|
||||
p[362 + i] = dsaf_read_dev(ddev,
|
||||
p[366 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_GNT_L_0_REG + j * 0x90);
|
||||
p[365 + i] = dsaf_read_dev(ddev,
|
||||
p[369 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_GNT_H_0_REG + j * 0x90);
|
||||
p[368 + i] = dsaf_read_dev(ddev,
|
||||
p[372 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
|
||||
p[371 + i] = dsaf_read_dev(ddev,
|
||||
p[375 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
|
||||
p[374 + i] = dsaf_read_dev(ddev,
|
||||
p[378 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
|
||||
p[377 + i] = dsaf_read_dev(ddev,
|
||||
p[381 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
|
||||
p[380 + i] = dsaf_read_dev(ddev,
|
||||
p[384 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
|
||||
p[383 + i] = dsaf_read_dev(ddev,
|
||||
p[387 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
|
||||
p[386 + i] = dsaf_read_dev(ddev,
|
||||
p[390 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
|
||||
p[389 + i] = dsaf_read_dev(ddev,
|
||||
p[393 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
|
||||
}
|
||||
|
||||
p[392] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
|
||||
p[393] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
|
||||
p[394] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
|
||||
p[395] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
|
||||
p[396] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
|
||||
DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
|
||||
p[397] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
|
||||
DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
|
||||
p[398] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
|
||||
DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
|
||||
p[399] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
|
||||
DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
|
||||
p[400] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
|
||||
DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
|
||||
p[401] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
|
||||
DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
|
||||
p[402] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
|
||||
DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
|
||||
p[403] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
|
||||
DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
|
||||
p[404] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
|
||||
p[405] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
|
||||
p[406] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
|
||||
p[407] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
|
||||
p[408] = dsaf_read_dev(ddev,
|
||||
DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
|
||||
|
||||
/* dsaf voq registers */
|
||||
for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
|
||||
j = (i * DSAF_COMM_CHN + port) * 0x90;
|
||||
p[405 + i] = dsaf_read_dev(ddev,
|
||||
p[409 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
|
||||
p[408 + i] = dsaf_read_dev(ddev,
|
||||
p[412 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
|
||||
p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
|
||||
p[414 + i] = dsaf_read_dev(ddev,
|
||||
p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
|
||||
p[418 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
|
||||
p[417 + i] = dsaf_read_dev(ddev,
|
||||
p[421 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
|
||||
p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
|
||||
p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
|
||||
p[426 + i] = dsaf_read_dev(ddev,
|
||||
p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
|
||||
p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
|
||||
p[430 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
|
||||
p[429 + i] = dsaf_read_dev(ddev,
|
||||
p[433 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
|
||||
p[432 + i] = dsaf_read_dev(ddev,
|
||||
p[436 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
|
||||
p[435 + i] = dsaf_read_dev(ddev,
|
||||
p[439 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
|
||||
p[438 + i] = dsaf_read_dev(ddev,
|
||||
p[442 + i] = dsaf_read_dev(ddev,
|
||||
DSAF_VOQ_BP_ALL_THRD_0_REG + j);
|
||||
}
|
||||
|
||||
/* dsaf tbl registers */
|
||||
p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
|
||||
p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
|
||||
p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
|
||||
p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
|
||||
p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
|
||||
p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
|
||||
p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
|
||||
p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
|
||||
p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
|
||||
p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
|
||||
p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
|
||||
p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
|
||||
p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
|
||||
p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
|
||||
p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
|
||||
p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
|
||||
p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
|
||||
p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
|
||||
p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
|
||||
p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
|
||||
p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
|
||||
p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
|
||||
p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
|
||||
p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
|
||||
p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
|
||||
p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
|
||||
p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
|
||||
p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
|
||||
p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
|
||||
p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
|
||||
p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
|
||||
p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
|
||||
p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
|
||||
p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
|
||||
p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
|
||||
p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
|
||||
p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
|
||||
p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
|
||||
p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
|
||||
p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
|
||||
p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
|
||||
p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
|
||||
p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
|
||||
p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
|
||||
p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
|
||||
p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
|
||||
|
||||
for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
|
||||
j = i * 0x8;
|
||||
p[464 + 2 * i] = dsaf_read_dev(ddev,
|
||||
p[468 + 2 * i] = dsaf_read_dev(ddev,
|
||||
DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
|
||||
p[465 + 2 * i] = dsaf_read_dev(ddev,
|
||||
p[469 + 2 * i] = dsaf_read_dev(ddev,
|
||||
DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
|
||||
}
|
||||
|
||||
p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
|
||||
p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
|
||||
p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
|
||||
p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
|
||||
p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
|
||||
p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
|
||||
p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
|
||||
p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
|
||||
p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
|
||||
p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
|
||||
p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
|
||||
p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
|
||||
p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
|
||||
p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
|
||||
p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
|
||||
p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
|
||||
p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
|
||||
p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
|
||||
p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
|
||||
p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
|
||||
p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
|
||||
p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
|
||||
p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
|
||||
p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
|
||||
|
||||
/* dsaf other registers */
|
||||
p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
|
||||
p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
|
||||
p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
|
||||
p[495] = dsaf_read_dev(ddev,
|
||||
p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
|
||||
p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
|
||||
p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
|
||||
p[499] = dsaf_read_dev(ddev,
|
||||
DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
|
||||
p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
|
||||
p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
|
||||
p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
|
||||
p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
|
||||
|
||||
if (!is_ver1)
|
||||
p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
|
||||
p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
|
||||
|
||||
/* mark end of dsaf regs */
|
||||
for (i = 499; i < 504; i++)
|
||||
for (i = 503; i < 504; i++)
|
||||
p[i] = 0xdddddddd;
|
||||
}
|
||||
|
||||
@@ -2673,58 +2750,156 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}

static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
struct dsaf_drv_mac_single_dest_entry mask_entry;
struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
u16 entry_index = DSAF_INVALID_ENTRY_IDX;
struct dsaf_drv_tbl_tcam_key mac_key;
struct hns_mac_cb *mac_cb;
u8 addr[ETH_ALEN] = {0};
u8 port_num;
u16 mskid;

/* promisc use vague table match with vlanid = 0 & macaddr = 0 */
hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
if (entry_index != DSAF_INVALID_ENTRY_IDX)
return;

/* put promisc tcam entry in the end. */
/* 1. set promisc unicast vague tcam entry. */
entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
if (entry_index == DSAF_INVALID_ENTRY_IDX) {
dev_err(dsaf_dev->dev,
"enable uc promisc failed (port:%#x)\n",
port);
return;
}

mac_cb = dsaf_dev->mac_cb[port];
(void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
tbl_tcam_ucast.tbl_ucast_out_port = port_num;

/* config uc vague table */
hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
&tbl_tcam_mask_uc, &tbl_tcam_ucast);

/* update software entry */
soft_mac_entry = priv->soft_mac_tbl;
soft_mac_entry += entry_index;
soft_mac_entry->index = entry_index;
soft_mac_entry->tcam_key.high.val = mac_key.high.val;
soft_mac_entry->tcam_key.low.val = mac_key.low.val;
/* step back to the START for mc. */
soft_mac_entry = priv->soft_mac_tbl;

/* 2. set promisc multicast vague tcam entry. */
entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
if (entry_index == DSAF_INVALID_ENTRY_IDX) {
dev_err(dsaf_dev->dev,
"enable mc promisc failed (port:%#x)\n",
port);
return;
}

memset(&mask_entry, 0x0, sizeof(mask_entry));
memset(&mask_key, 0x0, sizeof(mask_key));
memset(&temp_key, 0x0, sizeof(temp_key));
mask_entry.addr[0] = 0x01;
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
port, mask_entry.addr);
tbl_tcam_mcast.tbl_mcast_item_vld = 1;
tbl_tcam_mcast.tbl_mcast_old_en = 0;

if (port < DSAF_SERVICE_NW_NUM) {
mskid = port;
} else if (port >= DSAF_BASE_INNER_PORT_NUM) {
mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
} else {
dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port,
mask_key.high.val, mask_key.low.val);
return;
}

dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
memcpy(&temp_key, &mask_key, sizeof(mask_key));
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
(struct dsaf_tbl_tcam_data *)(&mask_key),
&tbl_tcam_mcast);

/* update software entry */
soft_mac_entry += entry_index;
soft_mac_entry->index = entry_index;
soft_mac_entry->tcam_key.high.val = temp_key.high.val;
soft_mac_entry->tcam_key.low.val = temp_key.low.val;
}

static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
u16 entry_index = DSAF_INVALID_ENTRY_IDX;
struct dsaf_drv_tbl_tcam_key mac_key;
u8 addr[ETH_ALEN] = {0};

/* 1. delete uc vague tcam entry. */
/* promisc use vague table match with vlanid = 0 & macaddr = 0 */
hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);

if (entry_index == DSAF_INVALID_ENTRY_IDX)
return;

/* config uc vague table */
hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
&tbl_tcam_mask, &tbl_tcam_ucast);
/* update soft management table. */
soft_mac_entry = priv->soft_mac_tbl;
soft_mac_entry += entry_index;
soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
/* step back to the START for mc. */
soft_mac_entry = priv->soft_mac_tbl;

/* 2. delete mc vague tcam entry. */
addr[0] = 0x01;
memset(&mac_key, 0x0, sizeof(mac_key));
hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);

if (entry_index == DSAF_INVALID_ENTRY_IDX)
return;

/* config mc vague table */
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
&tbl_tcam_mask, &tbl_tcam_mcast);
/* update soft management table. */
soft_mac_entry += entry_index;
soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
}

/* Reserve the last TCAM entry for promisc support */
#define dsaf_promisc_tcam_entry(port) \
	(DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
u32 port, bool enable)
{
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
u16 entry_index;
struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};

if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
return;

/* find the tcam entry index for promisc */
entry_index = dsaf_promisc_tcam_entry(port);

memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));

/* config key mask */
if (enable) {
dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, port);
dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, 0xf);

/* SUB_QID */
dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
DSAF_SERVICE_NW_NUM, true);
mac_data.tbl_mcast_item_vld = true;	/* item_vld bit */
} else {
mac_data.tbl_mcast_item_vld = false;	/* item_vld bit */
}

dev_dbg(dsaf_dev->dev,
"set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
tbl_tcam_data.low.val, entry_index);

/* config promisc entry with mask */
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
(struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
(struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
&mac_data);

/* config software entry */
soft_mac_entry += entry_index;
soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
if (enable)
set_promisc_tcam_enable(dsaf_dev, port);
else
set_promisc_tcam_disable(dsaf_dev, port);
}

int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)

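For reference, a standalone sketch of the reserve-at-the-tail indexing that dsaf_promisc_tcam_entry() performs above. The table size and reserved-entry count here are illustrative stand-ins, not the driver's real values:

#include <stdio.h>

/* Illustrative values only; the real driver takes these from its headers. */
#define TCAM_SUM       512 /* total TCAM entries (assumed) */
#define FUZZY_TCAM_NUM   8 /* entries reserved at the tail (assumed) */

/* Per-port promisc entry lives in the reserved tail region. */
static int promisc_tcam_entry(int port)
{
	return TCAM_SUM - FUZZY_TCAM_NUM + port;
}

int main(void)
{
	int port;

	/* Ports 0..7 map to entries 504..511, leaving 0..503 for MAC entries. */
	for (port = 0; port < FUZZY_TCAM_NUM; port++)
		printf("port %d -> tcam entry %d\n", port, promisc_tcam_entry(port));
	return 0;
}
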
@@ -176,7 +176,7 @@
#define DSAF_INODE_IN_DATA_STP_DISC_0_REG	0x1A50
#define DSAF_INODE_GE_FC_EN_0_REG		0x1B00
#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG		0x1B50
#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x1C00
#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x103C
#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG	0x1C00
#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET	0x100
#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET		0x50

@@ -404,11 +404,11 @@
#define RCB_ECC_ERR_ADDR4_REG			0x460
#define RCB_ECC_ERR_ADDR5_REG			0x464

#define RCB_COM_SF_CFG_INTMASK_RING		0x480
#define RCB_COM_SF_CFG_RING_STS			0x484
#define RCB_COM_SF_CFG_RING			0x488
#define RCB_COM_SF_CFG_INTMASK_BD		0x48C
#define RCB_COM_SF_CFG_BD_RINT_STS		0x470
#define RCB_COM_SF_CFG_INTMASK_RING		0x470
#define RCB_COM_SF_CFG_RING_STS			0x474
#define RCB_COM_SF_CFG_RING			0x478
#define RCB_COM_SF_CFG_INTMASK_BD		0x47C
#define RCB_COM_SF_CFG_BD_RINT_STS		0x480
#define RCB_COM_RCB_RD_BD_BUSY			0x490
#define RCB_COM_RCB_FBD_CRT_EN			0x494
#define RCB_COM_AXI_WR_ERR_INTMASK		0x498

@@ -534,6 +534,7 @@
#define GMAC_LD_LINK_COUNTER_REG		0x01D0UL
#define GMAC_LOOP_REG				0x01DCUL
#define GMAC_RECV_CONTROL_REG			0x01E0UL
#define GMAC_PCS_RX_EN_REG			0x01E4UL
#define GMAC_VLAN_CODE_REG			0x01E8UL
#define GMAC_RX_OVERRUN_CNT_REG			0x01ECUL
#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG		0x01F4UL

@@ -1186,6 +1186,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;

if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
phy_stop(phy_dev);

return 0;
}

@@ -1281,6 +1284,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
return cpu;
}

static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
{
int i;

for (i = 0; i < q_num * 2; i++) {
if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
irq_set_affinity_hint(priv->ring_data[i].ring->irq,
NULL);
free_irq(priv->ring_data[i].ring->irq,
&priv->ring_data[i]);
priv->ring_data[i].ring->irq_init_flag =
RCB_IRQ_NOT_INITED;
}
}
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;

@@ -1306,7 +1325,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
rd->ring->irq);
return ret;
goto out_free_irq;
}
disable_irq(rd->ring->irq);

@@ -1321,6 +1340,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
}

return 0;

out_free_irq:
hns_nic_free_irq(h->q_num, priv);
return ret;
}

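The goto out_free_irq change above is the usual acquire-in-a-loop, unwind-on-failure idiom. A minimal userspace sketch of the same pattern, with generic resources standing in for IRQs (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NR_RES 4

/* Stand-ins for request_irq()/free_irq(); failure is simulated on index 2. */
static int acquire(int i) { return i == 2 ? -1 : 0; }
static void release(int i) { printf("released %d\n", i); }

static int init_all(int acquired[NR_RES])
{
	int i, ret = 0;

	for (i = 0; i < NR_RES; i++) {
		ret = acquire(i);
		if (ret)
			goto out_release; /* unwind instead of returning directly */
		acquired[i] = 1;
	}
	return 0;

out_release:
	/* free only what was marked acquired, mirroring irq_init_flag */
	for (i = 0; i < NR_RES; i++)
		if (acquired[i]) {
			release(i);
			acquired[i] = 0;
		}
	return ret;
}

int main(void)
{
	int acquired[NR_RES] = {0};

	return init_all(acquired) ? EXIT_FAILURE : EXIT_SUCCESS;
}
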
static int hns_nic_net_up(struct net_device *ndev)

@@ -1330,6 +1353,9 @@ static int hns_nic_net_up(struct net_device *ndev)
int i, j;
int ret;

if (!test_bit(NIC_STATE_DOWN, &priv->state))
return 0;

ret = hns_nic_init_irq(priv);
if (ret != 0) {
netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);

@@ -1365,6 +1391,7 @@ out_has_some_queues:
for (j = i - 1; j >= 0; j--)
hns_nic_ring_close(ndev, j);

hns_nic_free_irq(h->q_num, priv);
set_bit(NIC_STATE_DOWN, &priv->state);

return ret;

@@ -1482,11 +1509,19 @@ static int hns_nic_net_stop(struct net_device *ndev)
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
#define HNS_TX_TIMEO_LIMIT (40 * HZ)
static void hns_nic_net_timeout(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);

hns_tx_timeout_reset(priv);
if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
ndev->watchdog_timeo *= 2;
netdev_info(ndev, "watchdog_timeo changed to %d.\n",
ndev->watchdog_timeo);
} else {
ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
hns_tx_timeout_reset(priv);
}
}

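A small sketch of the doubling-with-reset behaviour hns_nic_net_timeout() implements above; the tick rate and base timeout are illustrative, only the cap is taken from the hunk:

#include <stdio.h>

#define HZ             100       /* illustrative tick rate */
#define TX_TIMEOUT     (5 * HZ)  /* assumed base watchdog timeout */
#define TX_TIMEO_LIMIT (40 * HZ) /* cap, as in the hunk above */

/* Double the watchdog timeout on each TX stall until it hits the cap,
 * then fall back to the default (at which point the driver also forces
 * a full reset).
 */
static int on_tx_timeout(int timeo)
{
	if (timeo < TX_TIMEO_LIMIT)
		return timeo * 2;
	return TX_TIMEOUT;
}

int main(void)
{
	int t = TX_TIMEOUT, i;

	for (i = 0; i < 6; i++) {
		printf("timeout = %d jiffies\n", t);
		t = on_tx_timeout(t);
	}
	return 0;
}
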
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,

@@ -2049,11 +2084,11 @@ static void hns_nic_service_task(struct work_struct *work)
= container_of(work, struct hns_nic_priv, service_task);
struct hnae_handle *h = priv->ae_handle;

hns_nic_reset_subtask(priv);
hns_nic_update_link_status(priv->netdev);
h->dev->ops->update_led_status(h);
hns_nic_update_stats(priv->netdev);

hns_nic_reset_subtask(priv);
hns_nic_service_event_complete(priv);
}

@@ -2339,7 +2374,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
ndev->min_mtu = MAC_MIN_MTU;
switch (priv->enet_ver) {
case AE_VERSION_2:
ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;

@@ -1939,8 +1939,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rwi *rwi;
unsigned long flags;

mutex_lock(&adapter->rwi_lock);
spin_lock_irqsave(&adapter->rwi_lock, flags);

if (!list_empty(&adapter->rwi_list)) {
rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,

@@ -1950,7 +1951,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
rwi = NULL;
}

mutex_unlock(&adapter->rwi_lock);
spin_unlock_irqrestore(&adapter->rwi_lock, flags);
return rwi;
}

@@ -2025,6 +2026,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
struct list_head *entry, *tmp_entry;
struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
unsigned long flags;
int ret;

if (adapter->state == VNIC_REMOVING ||

@@ -2041,21 +2043,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}

mutex_lock(&adapter->rwi_lock);
spin_lock_irqsave(&adapter->rwi_lock, flags);

list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset\n");
mutex_unlock(&adapter->rwi_lock);
spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ret = EBUSY;
goto err;
}
}

rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
if (!rwi) {
mutex_unlock(&adapter->rwi_lock);
spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ibmvnic_close(netdev);
ret = ENOMEM;
goto err;

@@ -2069,7 +2071,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
}
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
mutex_unlock(&adapter->rwi_lock);
spin_unlock_irqrestore(&adapter->rwi_lock, flags);
adapter->resetting = true;
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);

@@ -4759,7 +4761,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)

INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
mutex_init(&adapter->rwi_lock);
spin_lock_init(&adapter->rwi_lock);
adapter->resetting = false;

adapter->mac_change_pending = false;

@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
enum ibmvnic_reset_reason reset_reason;
struct mutex rwi_lock;
spinlock_t rwi_lock;
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;

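The ibmvnic hunks above convert rwi_lock from a mutex to a spinlock so the reset-work list can be touched from contexts that must not sleep, and the allocation moves to GFP_ATOMIC for the same reason. A userspace analogue of the resulting pattern, with a pthread spinlock standing in for spin_lock_irqsave() (the structure and names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace analogue of the rwi_lock conversion: a spinlock protecting a
 * work-item list that may be appended to from non-sleepable contexts.
 * In the kernel this pairs spin_lock_irqsave() with a GFP_ATOMIC kzalloc().
 */
struct rwi {
	int reason;
	struct rwi *next;
};

static pthread_spinlock_t rwi_lock;
static struct rwi *rwi_list;

static int queue_reset(int reason)
{
	struct rwi *rwi = malloc(sizeof(*rwi)); /* kernel: kzalloc(..., GFP_ATOMIC) */

	if (!rwi)
		return -1;
	rwi->reason = reason;

	pthread_spin_lock(&rwi_lock);   /* kernel: spin_lock_irqsave() */
	rwi->next = rwi_list;
	rwi_list = rwi;
	pthread_spin_unlock(&rwi_lock); /* kernel: spin_unlock_irqrestore() */
	return 0;
}

int main(void)
{
	pthread_spin_init(&rwi_lock, PTHREAD_PROCESS_PRIVATE);
	queue_reset(1);
	queue_reset(2);

	for (struct rwi *r = rwi_list; r; r = r->next)
		printf("queued reset reason %d\n", r->reason);
	return 0;
}
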
@@ -1543,17 +1543,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

/* Copy the address first, so that we avoid a possible race with
 * .set_rx_mode(). If we copy after changing the address in the filter
 * list, we might open ourselves to a narrow race window where
 * .set_rx_mode could delete our dev_addr filter and prevent traffic
 * from passing.
 * .set_rx_mode().
 * - Remove old address from MAC filter
 * - Copy new address
 * - Add new address to MAC filter
 */
ether_addr_copy(netdev->dev_addr, addr->sa_data);

spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
i40e_add_mac_filter(vsi, addr->sa_data);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
i40e_add_mac_filter(vsi, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);

if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;

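A toy sketch of the remove-copy-add sequence the new comment describes, compressed to a single filter slot; the lock here is only indicated by comments, and all names are invented:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* One-slot stand-in for the MAC filter list. */
static unsigned char filter[ETH_ALEN];
static unsigned char dev_addr[ETH_ALEN] = {0x02, 0, 0, 0, 0, 1};

static void del_filter(const unsigned char *a)
{
	if (!memcmp(filter, a, ETH_ALEN))
		memset(filter, 0, ETH_ALEN);
}

static void add_filter(const unsigned char *a)
{
	memcpy(filter, a, ETH_ALEN);
}

/* All three steps happen under the same lock, so a concurrent
 * .set_rx_mode() can never observe a missing dev_addr filter.
 */
static void set_mac(const unsigned char *new_addr)
{
	/* lock(filter_lock); */
	del_filter(dev_addr);                 /* remove old address */
	memcpy(dev_addr, new_addr, ETH_ALEN); /* copy new address   */
	add_filter(dev_addr);                 /* add new address    */
	/* unlock(filter_lock); */
}

int main(void)
{
	unsigned char na[ETH_ALEN] = {0x02, 0, 0, 0, 0, 2};

	set_mac(na);
	printf("filter now %02x:...:%02x\n", filter[0], filter[5]);
	return 0;
}
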
@@ -1558,24 +1558,6 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
return true;
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
void i40e_receive_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag)
{
struct i40e_q_vector *q_vector = rx_ring->q_vector;

if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on

@@ -1793,8 +1775,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
 * other fields within the skb.
 **/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb,
u8 rx_ptype)
union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>

@@ -1802,6 +1783,8 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;

if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);

@@ -1812,6 +1795,13 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,

skb_record_rx_queue(skb, rx_ring->queue_index);

if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;

__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(vlan_tag));
}

/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

@@ -2350,8 +2340,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;

/* return some buffers to hardware, one at a time is too slow */

@@ -2444,18 +2432,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;

qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;

/* populate checksum, VLAN, and protocol */
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
i40e_process_skb_fields(rx_ring, rx_desc, skb);

i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
i40e_receive_skb(rx_ring, skb, vlan_tag);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
skb = NULL;

/* update budget accounting */

@@ -12,10 +12,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
union i40e_rx_desc *rx_desc,
u64 qw);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb,
u8 rx_ptype);
void i40e_receive_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag);
union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
unsigned int total_rx_bytes,

@@ -634,8 +634,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
struct i40e_rx_buffer *bi;
union i40e_rx_desc *rx_desc;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;

if (cleaned_count >= I40E_RX_BUFFER_WRITE) {

@@ -713,14 +711,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
total_rx_packets++;

qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
i40e_process_skb_fields(rx_ring, rx_desc, skb);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

i40e_finalize_xdp_rx(rx_ring, xdp_xmit);

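The i40e hunks above fold both ptype and VLAN extraction into the "process fields" step, so the regular and zero-copy RX paths share one sequence and hand the skb straight to GRO. A toy descriptor parse in the same spirit; the bit positions and layout here are invented purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Toy descriptor parse mirroring the new flow: the ptype and the VLAN tag
 * are both pulled out of the descriptor inside one "process fields" step,
 * so callers no longer pre-compute them. Bit positions are assumed.
 */
#define STATUS_L2TAG1P_BIT (1ULL << 26) /* assumed position */

struct toy_desc {
	uint64_t qword1;
	uint16_t l2tag1;
};

static void process_fields(const struct toy_desc *d)
{
	uint8_t ptype = (d->qword1 >> 30) & 0xff; /* assumed layout */

	if (d->qword1 & STATUS_L2TAG1P_BIT)
		printf("vlan tag %u (ptype %u)\n", d->l2tag1, ptype);
	else
		printf("no vlan (ptype %u)\n", ptype);
	/* ...then the skb would go straight to napi_gro_receive() */
}

int main(void)
{
	struct toy_desc d = { .qword1 = STATUS_L2TAG1P_BIT | (42ULL << 30),
			      .l2tag1 = 100 };

	process_fields(&d);
	return 0;
}
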
@@ -700,7 +700,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
u8 num_tcs = adapter->hw_tcs;
u32 reg_val;
u32 queue;
u32 word;

/* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);

@@ -758,6 +757,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
}
}

IXGBE_WRITE_FLUSH(hw);
}

static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 word;

/* Clear VF's mailbox memory */
for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);

@@ -831,6 +838,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* reset the filters for the device */
ixgbe_vf_reset_event(adapter, vf);

ixgbe_vf_clear_mbx(adapter, vf);

/* set vf mac address */
if (!is_zero_ether_addr(vf_mac))
ixgbe_set_vf_mac(adapter, vf, vf_mac);

@@ -408,7 +408,6 @@ struct mvneta_port {
struct mvneta_pcpu_stats __percpu *stats;

int pkt_size;
unsigned int frag_size;
void __iomem *base;
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;

@@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
if (!pp->bm_priv) {
/* Set Offset */
mvneta_rxq_offset_set(pp, rxq, 0);
mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
PAGE_SIZE :
MVNETA_RX_BUF_SIZE(pp->pkt_size));
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
} else {

@@ -3760,7 +3761,6 @@ static int mvneta_open(struct net_device *dev)
int ret;

pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
pp->frag_size = PAGE_SIZE;

ret = mvneta_setup_rxqs(pp);
if (ret)

@@ -4405,12 +4405,15 @@ static void mvpp2_phylink_validate(struct net_device *dev,
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_NA:
phylink_set(mask, 10000baseCR_Full);
phylink_set(mask, 10000baseSR_Full);
phylink_set(mask, 10000baseLR_Full);
phylink_set(mask, 10000baseLRM_Full);
phylink_set(mask, 10000baseER_Full);
phylink_set(mask, 10000baseKR_Full);
if (port->gop_id == 0) {
phylink_set(mask, 10000baseT_Full);
phylink_set(mask, 10000baseCR_Full);
phylink_set(mask, 10000baseSR_Full);
phylink_set(mask, 10000baseLR_Full);
phylink_set(mask, 10000baseLRM_Full);
phylink_set(mask, 10000baseER_Full);
phylink_set(mask, 10000baseKR_Full);
}
/* Fall-through */
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:

@@ -4421,7 +4424,6 @@ static void mvpp2_phylink_validate(struct net_device *dev,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 10000baseT_Full);
/* Fall-through */
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:

@@ -1190,11 +1190,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
int ret;

ret = ethtool_op_get_ts_info(priv->netdev, info);
if (ret)
return ret;

info->phc_index = mlx5_clock_get_ptp_index(mdev);

@@ -1202,9 +1197,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
info->phc_index == -1)
return 0;

info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;

info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);

@@ -128,6 +128,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
return !params->lro_en && frag_sz <= PAGE_SIZE;
}

#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{

@@ -138,6 +140,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
if (!mlx5e_rx_is_linear_skb(mdev, params))
return false;

if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
return false;

if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
return true;

@@ -1396,6 +1401,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_core_dev *mdev = c->mdev;
struct mlx5_rate_limit rl = {0};

cancel_work_sync(&sq->dim.work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
rl.rate = sq->rate_limit;

@@ -46,6 +46,7 @@

#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

@@ -466,8 +467,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,

ASSERT_RTNL();

if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
!ether_addr_equal(e->h_dest, ha))
if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
(!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
mlx5e_tc_encap_flows_del(priv, e);

if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {

@@ -1083,9 +1084,7 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
if (err)
return err;

priv->channels.params.num_channels =
mlx5e_get_netdev_max_channels(netdev);
priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
mlx5e_build_rep_netdev(netdev);

@@ -1190,7 +1190,7 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
struct mlx5e_xdpsq *xdpsq;
struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
struct mlx5_cqe64 *cqe;
int work_done = 0;

@@ -1201,10 +1201,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);

cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
if (!cqe) {
if (unlikely(work_done))
goto out;
return 0;

xdpsq = &rq->xdpsq;
}

do {
if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {

@@ -1219,6 +1220,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

out:
if (xdpsq->doorbell) {
mlx5e_xmit_xdp_doorbell(xdpsq);
xdpsq->doorbell = false;

@@ -74,7 +74,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },

@@ -198,7 +197,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_nop += sq_stats->nop;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_cqe_err += sq_stats->cqe_err;
s->tx_recover += sq_stats->recover;

@@ -87,7 +87,6 @@ struct mlx5e_sw_stats {
u64 tx_recover;
u64 tx_cqes;
u64 tx_queue_wake;
u64 tx_udp_seg_rem;
u64 tx_cqe_err;
u64 tx_xdp_xmit;
u64 tx_xdp_full;

@@ -221,7 +220,6 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
u64 udp_seg_rem;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;

@@ -870,9 +870,9 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule;

memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
slow_attr->mirror_count = 0,
slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->mirror_count = 0;
slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))

@@ -887,6 +887,9 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *slow_attr)
{
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->mirror_count = 0;
slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}

@@ -907,11 +910,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_priv *out_priv;
int err = 0, encap_err = 0;

/* if prios are not supported, keep the old behaviour of using same prio
 * for all offloaded rules.
 */
if (!mlx5_eswitch_prios_supported(esw))
attr->prio = 1;
if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}

if (attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");

@@ -1094,10 +1096,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
flow->rule[0] = rule;
}

if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}
/* we know that the encap is valid */
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)

@@ -2966,8 +2967,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
return -EOPNOTSUPP;
}
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = dest_chain;

continue;

@@ -2980,6 +2980,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;

if (attr->dest_chain) {
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}

if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");

@@ -452,7 +452,7 @@ static void del_sw_hw_rule(struct fs_node *node)

if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
update_fte = true;
}
out:

@@ -81,6 +81,7 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool reload_fail;
bool fw_flash_in_progress;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};

@@ -428,12 +429,16 @@ struct mlxsw_reg_trans {
struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS 200
#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
#define MLXSW_EMAD_TIMEOUT_MS 200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

if (trans->core->fw_flash_in_progress)
timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);

queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

@@ -1854,6 +1859,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);

void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
{
mlxsw_core->fw_flash_in_progress = true;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_start);

void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
{
mlxsw_core->fw_flash_in_progress = false;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_end);

static int __init mlxsw_core_module_init(void)
{
int err;

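A compact sketch of the timeout selection mlxsw_emad_trans_timeout_schedule() performs above: a single boolean on the core widens the transaction timeout while firmware flashing is active (the constants are taken from the hunk; the surrounding structure is simplified):

#include <stdio.h>

#define EMAD_TIMEOUT_MS                 200
#define EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000

struct core {
	int fw_flash_in_progress;
};

/* Mirrors the hunk above: pick the long timeout only while flashing. */
static int pick_timeout_ms(const struct core *c)
{
	return c->fw_flash_in_progress ? EMAD_TIMEOUT_DURING_FW_FLASH_MS
				       : EMAD_TIMEOUT_MS;
}

int main(void)
{
	struct core c = { 0 };

	printf("normal: %d ms\n", pick_timeout_ms(&c));
	c.fw_flash_in_progress = 1;
	printf("during flash: %d ms\n", pick_timeout_ms(&c));
	return 0;
}
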
@@ -292,6 +292,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);

void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);

@@ -309,8 +309,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
},
.mlxsw_sp = mlxsw_sp
};
int err;

return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
mlxsw_core_fw_flash_start(mlxsw_sp->core);
err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
mlxsw_core_fw_flash_end(mlxsw_sp->core);

return err;
}

static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)

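The flash path above brackets the one fallible call with start/end markers, keeping the flag balanced on every exit path. A minimal sketch of that bracketing, with stand-in functions:

#include <stdio.h>

static int flash_in_progress;

static void flash_start(void) { flash_in_progress = 1; }
static void flash_end(void)   { flash_in_progress = 0; }

/* Stand-in for mlxfw_firmware_flash(); returns nonzero on failure. */
static int do_flash(int fail) { return fail ? -5 : 0; }

/* The flag is cleared on both the success and failure paths because the
 * bracketing surrounds the only call that can fail.
 */
static int firmware_flash(int fail)
{
	int err;

	flash_start();
	err = do_flash(fail);
	flash_end();
	return err;
}

int main(void)
{
	printf("ok run: err=%d, flag=%d\n", firmware_flash(0), flash_in_progress);
	printf("bad run: err=%d, flag=%d\n", firmware_flash(1), flash_in_progress);
	return 0;
}
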
@@ -3521,6 +3526,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
};

static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)

@@ -977,6 +977,6 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
mlxsw_sp->nve = NULL;
kfree(mlxsw_sp->nve);
mlxsw_sp->nve = NULL;
}

@@ -60,6 +60,7 @@ enum {
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,

@@ -802,14 +802,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
u32 mac_addr_hi = 0;
u32 mac_addr_lo = 0;
u32 data;
int ret;

netdev = adapter->netdev;
lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
0, 1000, 20000, 100);
if (ret)
return ret;

/* setup auto duplex, and speed detection */
data = lan743x_csr_read(adapter, MAC_CR);

@@ -2719,8 +2713,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
"pci-%s", pci_name(adapter->pdev));

/* set to internal PHY id */
adapter->mdiobus->phy_mask = ~(u32)BIT(1);
if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
/* LAN7430 uses internal phy at address 1 */
adapter->mdiobus->phy_mask = ~(u32)BIT(1);

/* register mdiobus */
ret = mdiobus_register(adapter->mdiobus);

@@ -808,7 +808,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
u64 data0, data1 = 0, steer_ctrl = 0;
u64 data0 = 0, data1 = 0, steer_ctrl = 0;
enum vxge_hw_status status;

status = vxge_hw_vpath_fw_api(vpath,

@@ -345,13 +345,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
!(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
return -EOPNOTSUPP;

/* We need to store TCP flags in the IPv4 key space, thus
 * we need to ensure we include an IPv4 key layer if we have
 * not done so already.
/* We need to store TCP flags in either the IPv4 or IPv6 key
 * space, thus we need to ensure we include an IPv4/IPv6 key
 * layer if we have not done so already.
 */
if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
if (!key_basic)
return -EOPNOTSUPP;

if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
!(key_layer & NFP_FLOWER_LAYER_IPV6)) {
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;

case cpu_to_be16(ETH_P_IPV6):
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;

default:
return -EOPNOTSUPP;
}
}
}

@@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};

static void __init get_mac_address(struct net_device *dev)
static void get_mac_address(struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
struct platform_device *pdev;

@@ -12831,8 +12831,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
MFW_DRV_MSG_OEM_CFG_UPDATE,
MFW_DRV_MSG_RESERVED,
MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_OEM_CFG_UPDATE,
MFW_DRV_MSG_MAX
};

@@ -2496,6 +2496,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
DP_NOTICE(cdev,
"Unable to map frag - dropping packet\n");
rc = -ENOMEM;
goto err;
}

@@ -6469,7 +6469,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
goto out;
}

if (status & LinkChg)
if (status & LinkChg && tp->dev->phydev)
phy_mac_interrupt(tp->dev->phydev);

if (unlikely(status & RxFIFOOver &&

@@ -4250,6 +4250,7 @@ int stmmac_dvr_probe(struct device *device,
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
dev_err(priv->device, "failed to create workqueue\n");
ret = -ENOMEM;
goto error_wq;
}

@@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
static void ca8210_rx_done(struct cas_control *cas_ctl)
{
u8 *buf;
u8 len;
unsigned int len;
struct work_priv_container *mlme_reset_wpc;
struct ca8210_priv *priv = cas_ctl->priv;

@@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
if (len > CA8210_SPI_BUF_SIZE) {
dev_crit(
&priv->spi->dev,
"Received packet len (%d) erroneously long\n",
"Received packet len (%u) erroneously long\n",
len
);
goto finish;

@@ -492,7 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;

if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
hwsim_edge_policy, NULL))
return -EINVAL;

@@ -542,7 +542,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;

if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
hwsim_edge_policy, NULL))
return -EINVAL;

@@ -308,11 +308,8 @@ static int mdio_bus_phy_restore(struct device *dev)
if (ret < 0)
return ret;

/* The PHY needs to renegotiate. */
phydev->link = 0;
phydev->state = PHY_UP;

phy_start_machine(phydev);
if (phydev->attached_dev && phydev->adjust_link)
phy_start_machine(phydev);

return 0;
}

@@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface)
return -EIO;
}

/* check if we have a valid interface */
if (if_num > 16) {
kfree(config_data);
return -EINVAL;
}

switch (config_data[if_num]) {
case 0x0:
result = 0;

@@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface,

/* Get the interface/port specification from either driver_info or from
 * the device itself */
if (id->driver_info)
if (id->driver_info) {
/* if_num is controlled by the device, driver_info is a 0-terminated
 * array. Make sure the access is in bounds! */
for (i = 0; i <= if_num; ++i)
if (((u32 *)(id->driver_info))[i] == 0)
goto exit;
port_spec = ((u32 *)(id->driver_info))[if_num];
else
} else {
port_spec = hso_get_config_data(interface);
if (port_spec < 0)
goto exit;
}

/* Check if we need to switch to alt interfaces prior to port
 * configuration */

@@ -2320,6 +2320,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

/* Added to support MAC address changes */
ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

return 0;
}

@@ -1117,6 +1117,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd191, 4)},	/* Wistron NeWeb D19Q1 */
{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)},	/* Fibocom NL668 series */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */

@@ -1229,6 +1230,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */

@@ -129,6 +129,7 @@
#define USB_UPS_CTRL		0xd800
#define USB_POWER_CUT		0xd80a
#define USB_MISC_0		0xd81a
#define USB_MISC_1		0xd81f
#define USB_AFE_CTRL2		0xd824
#define USB_UPS_CFG		0xd842
#define USB_UPS_FLAGS		0xd848

@@ -555,6 +556,7 @@ enum spd_duplex {

/* MAC PASSTHRU */
#define AD_MASK			0xfee0
#define BND_MASK		0x0004
#define EFUSE			0xcfdb
#define PASS_THRU_MASK		0x1

@@ -1150,7 +1152,7 @@ out1:
return ret;
}

/* Devices containing RTL8153-AD can support a persistent
/* Devices containing proper chips can support a persistent
 * host system provided MAC address.
 * Examples of this are Dell TB15 and Dell WD15 docks
 */

@@ -1165,13 +1167,23 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)

/* test for -AD variant of RTL8153 */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
if ((ocp_data & AD_MASK) != 0x1000)
return -ENODEV;

/* test for MAC address pass-through bit */
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
if ((ocp_data & PASS_THRU_MASK) != 1)
return -ENODEV;
if ((ocp_data & AD_MASK) == 0x1000) {
/* test for MAC address pass-through bit */
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
if ((ocp_data & PASS_THRU_MASK) != 1) {
netif_dbg(tp, probe, tp->netdev,
"No efuse for RTL8153-AD MAC pass through\n");
return -ENODEV;
}
} else {
/* test for RTL8153-BND */
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
if ((ocp_data & BND_MASK) == 0) {
netif_dbg(tp, probe, tp->netdev,
"Invalid variant for MAC pass through\n");
return -ENODEV;
}
}

/* returns _AUXMAC_#AABBCCDDEEFF# */
status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);

@@ -1217,9 +1229,8 @@ static int set_ethernet_addr(struct r8152 *tp)
if (tp->version == RTL_VER_01) {
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
} else {
/* if this is not an RTL8153-AD, no eFuse mac pass thru set,
 * or system doesn't provide valid _SB.AMAC this will be
 * be expected to non-zero
/* if device doesn't support MAC pass through this will
 * be expected to be non-zero
 */
ret = vendor_mac_passthru_addr_read(tp, &sa);
if (ret < 0)

@@ -568,6 +568,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
rd->remote_port = port;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
rd->offloaded = false;
return 1;
}

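A compact sketch of the two-branch variant test that vendor_mac_passthru_addr_read() performs in the r8152 hunks above; the masks are taken from the diff, but the register file and values below are invented for illustration:

#include <stdio.h>

#define AD_MASK  0xfee0
#define BND_MASK 0x0004
#define PASS_BIT 0x1

/* Toy register file standing in for the OCP reads. */
struct regs {
	unsigned int usb_misc_0;
	unsigned int efuse;
	unsigned int usb_misc_1;
};

/* RTL8153-AD proves support via an efuse bit; other variants are probed
 * for the BND bit instead, as in the hunk above.
 */
static int supports_mac_passthru(const struct regs *r)
{
	if ((r->usb_misc_0 & AD_MASK) == 0x1000)
		return (r->efuse & PASS_BIT) == 1;
	return (r->usb_misc_1 & BND_MASK) != 0;
}

int main(void)
{
	struct regs ad  = { .usb_misc_0 = 0x1000, .efuse = 1 };
	struct regs bnd = { .usb_misc_0 = 0x2000, .usb_misc_1 = BND_MASK };

	printf("AD variant: %d\n", supports_mac_passthru(&ad));
	printf("BND variant: %d\n", supports_mac_passthru(&bnd));
	return 0;
}
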
||||
@ -3258,6 +3259,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
|
||||
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
|
||||
struct vxlan_dev *vxlan = netdev_priv(dev);
|
||||
struct vxlan_fdb *f = NULL;
|
||||
bool unregister = false;
|
||||
int err;
|
||||
|
||||
err = vxlan_dev_configure(net, dev, conf, false, extack);
|
||||
@ -3283,12 +3285,11 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
|
||||
err = register_netdevice(dev);
|
||||
if (err)
|
||||
goto errout;
|
||||
unregister = true;
|
||||
|
||||
err = rtnl_configure_link(dev, NULL);
|
||||
if (err) {
|
||||
unregister_netdevice(dev);
|
||||
if (err)
|
||||
goto errout;
|
||||
}
|
||||
|
||||
/* notify default fdb entry */
|
||||
if (f)
|
||||
@ -3296,9 +3297,16 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
|
||||
|
||||
list_add(&vxlan->next, &vn->vxlan_list);
|
||||
return 0;
|
||||
|
||||
errout:
|
||||
/* unregister_netdevice() destroys the default FDB entry with deletion
|
||||
* notification. But the addition notification was not sent yet, so
|
||||
* destroy the entry by hand here.
|
||||
*/
|
||||
if (f)
|
||||
vxlan_fdb_destroy(vxlan, f, false);
|
||||
if (unregister)
|
||||
unregister_netdevice(dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -3534,7 +3542,6 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
|
||||
struct vxlan_rdst *dst = &vxlan->default_dst;
|
||||
struct vxlan_rdst old_dst;
|
||||
struct vxlan_config conf;
|
||||
struct vxlan_fdb *f = NULL;
|
||||
int err;
|
||||
|
||||
err = vxlan_nl2conf(tb, data,
|
||||
@ -3560,19 +3567,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
|
||||
old_dst.remote_ifindex, 0);
|
||||
|
||||
if (!vxlan_addr_any(&dst->remote_ip)) {
|
||||
err = vxlan_fdb_create(vxlan, all_zeros_mac,
|
||||
err = vxlan_fdb_update(vxlan, all_zeros_mac,
|
||||
&dst->remote_ip,
|
||||
NUD_REACHABLE | NUD_PERMANENT,
|
||||
NLM_F_APPEND | NLM_F_CREATE,
|
||||
vxlan->cfg.dst_port,
|
||||
dst->remote_vni,
|
||||
dst->remote_vni,
|
||||
dst->remote_ifindex,
|
||||
NTF_SELF, &f);
|
||||
NTF_SELF);
|
||||
if (err) {
|
||||
spin_unlock_bh(&vxlan->hash_lock);
|
||||
return err;
|
||||
}
|
||||
vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
|
||||
}
|
||||
spin_unlock_bh(&vxlan->hash_lock);
|
||||
}
|
||||
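__vxlan_dev_create() above tracks whether register_netdevice() succeeded with a bool and funnels every failure through one errout label. A generic sketch of that error-unwind shape (all functions below are stand-ins):

#include <stdio.h>

static int register_dev(int fail)   { return fail ? -1 : 0; }
static void unregister_dev(void)    { printf("unregister\n"); }
static int configure_link(int fail) { return fail ? -2 : 0; }
static void destroy_fdb(void)       { printf("destroy default fdb entry\n"); }

/* One error label; a boolean records which teardown steps are owed.
 * This mirrors the "bool unregister" added in the hunk above.
 */
static int dev_create(int fail_reg, int fail_cfg, int have_fdb)
{
	int unregister = 0;
	int err;

	err = register_dev(fail_reg);
	if (err)
		goto errout;
	unregister = 1;

	err = configure_link(fail_cfg);
	if (err)
		goto errout;

	return 0;

errout:
	if (have_fdb)
		destroy_fdb();
	if (unregister)
		unregister_dev();
	return err;
}

int main(void)
{
	printf("err=%d\n", dev_create(0, 1, 1));
	return 0;
}
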
|
@ -2418,6 +2418,28 @@ static int ath10k_core_reset_rx_filter(struct ath10k *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_core_compat_services(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
|
||||
|
||||
/* all 10.x firmware versions support thermal throttling but don't
|
||||
* advertise the support via service flags so we have to hardcode
|
||||
* it here
|
||||
*/
|
||||
switch (fw_file->wmi_op_version) {
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_1:
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_2:
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_4:
|
||||
set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
|
||||
const struct ath10k_fw_components *fw)
|
||||
{
|
||||
@ -2617,6 +2639,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
|
||||
goto err_hif_stop;
|
||||
}
|
||||
|
||||
status = ath10k_core_compat_services(ar);
|
||||
if (status) {
|
||||
ath10k_err(ar, "compat services failed: %d\n", status);
|
||||
goto err_hif_stop;
|
||||
}
|
||||
|
||||
/* Some firmware revisions do not properly set up hardware rx filter
|
||||
* registers.
|
||||
*
|
||||
|
@ -2578,8 +2578,9 @@ int ath10k_debug_register(struct ath10k *ar)
|
||||
debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
|
||||
&fops_pktlog_filter);
|
||||
|
||||
debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
|
||||
&fops_quiet_period);
|
||||
if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
|
||||
debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
|
||||
&fops_quiet_period);
|
||||
|
||||
debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
|
||||
&fops_tpc_stats);
|
||||
|
@ -140,6 +140,9 @@ void ath10k_thermal_set_throttling(struct ath10k *ar)
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
|
||||
return;
|
||||
|
||||
if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
|
||||
return;
|
||||
|
||||
@ -165,6 +168,9 @@ int ath10k_thermal_register(struct ath10k *ar)
|
||||
struct device *hwmon_dev;
|
||||
int ret;
|
||||
|
||||
if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
|
||||
return 0;
|
||||
|
||||
cdev = thermal_cooling_device_register("ath10k_thermal", ar,
|
||||
&ath10k_thermal_ops);
|
||||
|
||||
@ -216,6 +222,9 @@ err_cooling_destroy:
|
||||
|
||||
void ath10k_thermal_unregister(struct ath10k *ar)
|
||||
{
|
||||
if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
|
||||
return;
|
||||
|
||||
sysfs_remove_link(&ar->dev->kobj, "cooling_device");
|
||||
thermal_cooling_device_unregister(ar->thermal.cdev);
|
||||
}
|
||||
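The thermal hunks above gate every entry point on the same service bit, so unsupported firmware sees symmetric no-op register/unregister calls. A minimal sketch of that gating pattern (the flag below stands in for test_bit() on the service map):

#include <stdio.h>

static int svc_therm_throt; /* stands in for test_bit(..., svc_map) */

static int thermal_register(void)
{
	if (!svc_therm_throt)
		return 0; /* silently skip on unsupported firmware */
	printf("cooling device registered\n");
	return 0;
}

static void thermal_unregister(void)
{
	/* The same test keeps register/unregister symmetric, so we never
	 * tear down something that was never set up.
	 */
	if (!svc_therm_throt)
		return;
	printf("cooling device unregistered\n");
}

int main(void)
{
	thermal_register();
	thermal_unregister();

	svc_therm_throt = 1;
	thermal_register();
	thermal_unregister();
	return 0;
}
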
|
@ -1564,6 +1564,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
|
||||
SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
|
||||
WMI_SERVICE_SPOOF_MAC_SUPPORT,
|
||||
WMI_TLV_MAX_SERVICE);
|
||||
SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
|
||||
WMI_SERVICE_THERM_THROT,
|
||||
WMI_TLV_MAX_SERVICE);
|
||||
}
|
||||
|
||||
#undef SVCMAP
|
||||
|
@ -205,6 +205,7 @@ enum wmi_service {
|
||||
WMI_SERVICE_SPOOF_MAC_SUPPORT,
|
||||
WMI_SERVICE_TX_DATA_ACK_RSSI,
|
||||
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
|
||||
WMI_SERVICE_THERM_THROT,
|
||||
|
||||
/* keep last */
|
||||
WMI_SERVICE_MAX,
|
||||
|
@ -881,6 +881,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
|
||||
int ret, i, j;
|
||||
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
|
||||
|
||||
/*
|
||||
* This command is not supported on earlier firmware versions.
|
||||
* Unfortunately, we don't have a TLV API flag to rely on, so
|
||||
* rely on the major version which is in the first byte of
|
||||
* ucode_ver.
|
||||
*/
|
||||
if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
|
||||
return 0;
|
||||
|
||||
ret = iwl_mvm_sar_get_wgds_table(mvm);
|
||||
if (ret < 0) {
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
|
@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
|
||||
"Send delba to tid=%d, %pM\n",
|
||||
tid, rx_reor_tbl_ptr->ta);
|
||||
mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
|
||||
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
|
||||
flags);
|
||||
return;
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
exit:
|
||||
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
|
||||
}
|
||||
|
||||
|
@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
 * There could be holes in the buffer, which are skipped by the function.
 * Since the buffer is linear, the function uses rotation to simulate
 * circular buffer.
 *
 * The caller must hold rx_reorder_tbl_lock spinlock.
 */
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
{
	int pkt_to_send, i;
	void *rx_tmp_ptr;
	unsigned long flags;

	pkt_to_send = (start_win > tbl->start_win) ?
		      min((start_win - tbl->start_win), tbl->win_size) :
		      tbl->win_size;

	for (i = 0; i < pkt_to_send; ++i) {
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		rx_tmp_ptr = NULL;
		if (tbl->rx_reorder_ptr[i]) {
			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
			tbl->rx_reorder_ptr[i] = NULL;
		}
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		if (rx_tmp_ptr)
			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer
@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
	}

	tbl->start_win = start_win;
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
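
The "rotation" the comments above describe can be pictured outside the driver: once the first n slots of the window have been dispatched, the linear buffer is shifted so that slot n becomes slot 0. A minimal sketch, illustrative only and not mwifiex code (the driver does the equivalent with in-place swaps under the spinlock):

/* Shift a linear window buffer left by n slots so it behaves like a
 * circular buffer whose head moved forward by n.
 */
static void rotate_window(void **slots, int win_size, int n)
{
	int i;

	for (i = 0; i + n < win_size; i++)
		slots[i] = slots[i + n];	/* old slot n becomes slot 0 */
	for (; i < win_size; i++)
		slots[i] = NULL;		/* the tail is now empty */
}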

/*
@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 * The start window is adjusted automatically when a hole is located.
 * Since the buffer is linear, the function uses rotation to simulate
 * circular buffer.
 *
 * The caller must hold rx_reorder_tbl_lock spinlock.
 */
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
{
	int i, j, xchg;
	void *rx_tmp_ptr;
	unsigned long flags;

	for (i = 0; i < tbl->win_size; ++i) {
		if (!tbl->rx_reorder_ptr[i])
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		if (!tbl->rx_reorder_ptr[i]) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			break;
		}
		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
		tbl->rx_reorder_ptr[i] = NULL;
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer
@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
		}
	}
	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}

/*
@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 *
 * The function stops the associated timer and dispatches all the
 * pending packets in the Rx reorder table before deletion.
 *
 * The caller must hold rx_reorder_tbl_lock spinlock.
 */
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,

	del_timer_sync(&tbl->timer_context.timer);
	tbl->timer_context.timer_is_set = false;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_del(&tbl->list);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	kfree(tbl->rx_reorder_ptr);
	kfree(tbl);
@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
/*
 * This function returns the pointer to an entry in Rx reordering
 * table which matches the given TA/TID pair.
 *
 * The caller must hold rx_reorder_tbl_lock spinlock.
 */
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	unsigned long flags;

	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			return tbl;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return NULL;
}
@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
		return;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
		if (!memcmp(tbl->ta, ta, ETH_ALEN))
	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			mwifiex_del_rx_reorder_entry(priv, tbl);
			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return;
@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
/*
 * This function finds the last sequence number used in the packets
 * buffered in Rx reordering table.
 *
 * The caller must hold rx_reorder_tbl_lock spinlock.
 */
static int
mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
	struct mwifiex_private *priv = ctx->priv;
	unsigned long flags;
	int i;

	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return -1;
}
@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
	struct reorder_tmr_cnxt *ctx =
			from_timer(ctx, t, timer);
	int start_win, seq_num;
	unsigned long flags;

	ctx->timer_is_set = false;
	spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
	seq_num = mwifiex_11n_find_last_seq_num(ctx);

	if (seq_num < 0) {
		spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
	if (seq_num < 0)
		return;
	}

	mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
						 start_win);
	spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
}

/*
@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
	 * If we get a TID, TA pair which is already present, dispatch all
	 * the packets and move the window size until the ssn
	 */
	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (tbl) {
		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	/* if !tbl then create one */
	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
	if (!new_node)
@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
	int prev_start_win, start_win, end_win, win_size;
	u16 pkt_index;
	bool init_window_shift = false;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (!tbl) {
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		if (pkt_type != PKT_TYPE_BAR)
			mwifiex_11n_dispatch_pkt(priv, payload);
		return ret;
	}

	if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		mwifiex_11n_dispatch_pkt(priv, payload);
		return ret;
	}
@ -651,8 +666,6 @@ done:
	if (!tbl->timer_context.timer_is_set ||
	    prev_start_win != tbl->start_win)
		mwifiex_11n_rxreorder_timer_restart(tbl);

	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	return ret;
}
@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
		   peer_mac, tid, initiator);

	if (cleanup_rx_reorder_tbl) {
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
						     peer_mac);
		if (!tbl) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			mwifiex_dbg(priv->adapter, EVENT,
				    "event: TID, TA not found in table\n");
			return;
		}
		mwifiex_del_rx_reorder_entry(priv, tbl);
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	} else {
		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
		if (!ptx_tbl) {
@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
	int tid, win_size;
	struct mwifiex_rx_reorder_tbl *tbl;
	uint16_t block_ack_param_set;
	unsigned long flags;

	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);

@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
		mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
			    add_ba_rsp->peer_mac_addr, tid);

		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
						     add_ba_rsp->peer_mac_addr);
		if (tbl)
			mwifiex_del_rx_reorder_entry(priv, tbl);

		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		return 0;
	}

	win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
		    >> BLOCKACKPARAM_WINSIZE_POS;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
					     add_ba_rsp->peer_mac_addr);
	if (tbl) {
@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
		else
			tbl->amsdu = false;
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	mwifiex_dbg(priv->adapter, CMD,
		    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->rx_reorder_tbl_ptr, list)
				 &priv->rx_reorder_tbl_ptr, list) {
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	}
	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
	int tlv_buf_left = len;
	int ret;
	u8 *tmp;
	unsigned long flags;

	mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
			 event_buf, len);
@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
			    tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
			    tlv_bitmap_len);

		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		rx_reor_tbl_ptr =
			mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
						       tlv_rxba->mac);
		if (!rx_reor_tbl_ptr) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			mwifiex_dbg(priv->adapter, ERROR,
				    "Can not find rx_reorder_tbl!");
			return;
		}
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

		for (i = 0; i < tlv_bitmap_len; i++) {
			for (j = 0 ; j < 8; j++) {
@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
	}

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	if (!priv->ap_11n_enabled ||
	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
		ret = mwifiex_handle_uap_rx_forward(priv, skb);
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		return ret;
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	/* Reorder and send to kernel */
	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
@ -400,7 +400,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
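
The reshuffle above is the usual check-before-use fix: sta->txq[i] can be NULL, and the old initializer computed txq->drv_priv before any NULL test, which is undefined behaviour on a NULL pointer even though it is only an address calculation. The safe shape, in generic form (struct foo and lookup_foo() are hypothetical, not mt76 API):

	struct foo *f = lookup_foo(i);		/* may return NULL */
	struct foo_priv *p;

	if (!f)
		return;				/* bail out before touching f */

	p = (struct foo_priv *)f->priv;		/* only touch f after the check */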

@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)

	if (rtl_c2h_fast_cmd(hw, skb)) {
		rtl_c2h_content_parsing(hw, skb);
		kfree_skb(skb);
		return;
	}
@ -905,7 +905,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			BUG_ON(pull_to < skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
@ -513,7 +513,13 @@ static void vhost_net_busy_poll(struct vhost_net *net,
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we could not guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = rvq->private_data;
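
The trylock replaces an unconditional nested lock because the TX and RX paths take the two virtqueue mutexes in opposite orders. A comment-level sketch of the ABBA interleaving this sidesteps (illustrative, not kernel code):

/* handle_tx busy poll:  lock(tx_vq->mutex);  lock(rx_vq->mutex);
 * handle_rx busy poll:  lock(rx_vq->mutex);  lock(tx_vq->mutex);
 *
 * Run concurrently, each side can block on the mutex the other holds.
 * mutex_trylock() turns the second acquisition into a backoff, so the
 * busy-poll path just skips polling when the paired queue is busy.
 */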

@ -295,11 +295,8 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
		mutex_unlock(&d->vqs[i]->mutex);
	}
}

static void vhost_vq_reset(struct vhost_dev *dev,
@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}
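
Taking several mutexes of the same lock class in a loop would normally look like a self-deadlock to lockdep; passing the loop index as the mutex_lock_nested() subclass gives each virtqueue its own nesting level, and the fixed index order keeps the hierarchy well defined. The general pattern, as a sketch (not vhost-specific API; lockdep only distinguishes a small fixed number of subclasses, which is enough for vhost-net's two queues):

static void lock_all(struct mutex *locks, int n)
{
	int i;

	for (i = 0; i < n; i++)
		mutex_lock_nested(&locks[i], i);	/* subclass = lock order */
}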

static int vhost_new_umem_range(struct vhost_umem *umem,
				u64 start, u64 size, u64 end,
				u64 userspace_addr, int perm)
@ -976,6 +987,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
	int ret = 0;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
@ -1009,6 +1021,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
@ -2220,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure used idx is seen before log. */
		smp_wmb();
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
@ -861,7 +861,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern int bpf_jit_limit;
extern long bpf_jit_limit;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@ -582,11 +582,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
};

struct mlx5_ifc_flow_table_eswitch_cap_bits {
	u8 reserved_at_0[0x1c];
	u8 fdb_multi_path_to_table[0x1];
	u8 reserved_at_1d[0x1];
	u8 reserved_at_0[0x1a];
	u8 multi_fdb_encap[0x1];
	u8 reserved_at_1e[0x1e1];
	u8 reserved_at_1b[0x1];
	u8 fdb_multi_path_to_table[0x1];
	u8 reserved_at_1d[0x3];

	u8 reserved_at_20[0x1e0];

	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@ -565,7 +565,7 @@ struct platform_device_id {
/**
 * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
 * @phy_id: The result of
 *     (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
 *     (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
 *     for this PHY type
 * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
 *     is used to terminate an array of struct mdio_device_id.
@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
}
#endif /* CONFIG_PROVE_LOCKING */

/*
 * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
 *
 * @p: The pointer to read, prior to dereferencing
 * @ss: The nfnetlink subsystem ID
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
 */
#define nfnl_dereference(p, ss) \
	rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))

#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
	MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
@ -144,25 +144,6 @@ struct ip_tunnel {
	bool			ignore_df;
};

#define TUNNEL_CSUM		__cpu_to_be16(0x01)
#define TUNNEL_ROUTING		__cpu_to_be16(0x02)
#define TUNNEL_KEY		__cpu_to_be16(0x04)
#define TUNNEL_SEQ		__cpu_to_be16(0x08)
#define TUNNEL_STRICT		__cpu_to_be16(0x10)
#define TUNNEL_REC		__cpu_to_be16(0x20)
#define TUNNEL_VERSION		__cpu_to_be16(0x40)
#define TUNNEL_NO_KEY		__cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT	__cpu_to_be16(0x0100)
#define TUNNEL_OAM		__cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT		__cpu_to_be16(0x0400)
#define TUNNEL_GENEVE_OPT	__cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT	__cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE		__cpu_to_be16(0x2000)
#define TUNNEL_ERSPAN_OPT	__cpu_to_be16(0x4000)

#define TUNNEL_OPTIONS_PRESENT \
		(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)

struct tnl_ptk_info {
	__be16 flags;
	__be16 proto;
@ -2340,22 +2340,39 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);

/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @sk: socket sending this packet
 * @tsflags: timestamping flags to use
 * @tx_flags: completed with instructions for time stamping
 * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
 *
 * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
 */
static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
				     __u8 *tx_flags)
static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
				      __u8 *tx_flags, __u32 *tskey)
{
	if (unlikely(tsflags))
	if (unlikely(tsflags)) {
		__sock_tx_timestamp(tsflags, tx_flags);
		if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
		    tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			*tskey = sk->sk_tskey++;
	}
	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
		*tx_flags |= SKBTX_WIFI_STATUS;
}

static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
				     __u8 *tx_flags)
{
	_sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
}

static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
{
	_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
			   &skb_shinfo(skb)->tskey);
}
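
Callers that used to reach into skb_shinfo() directly can switch to the new helper, which also fills in tskey when SOF_TIMESTAMPING_OPT_ID is set, so error-queue timestamps can be matched to packets. The raw-socket hunks later in this diff show exactly this migration:

	/* old */
	sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
	/* new */
	skb_setup_tx_timestamp(skb, sockc->tsflags);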

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
@ -76,6 +76,10 @@
 *
 * void (*unhash)(struct tls_device *device, struct sock *sk);
 *     This function cleans listen state set by Inline TLS driver
 *
 * void (*release)(struct kref *kref);
 *     Release the registered device and allocated resources
 * @kref: Number of reference to tls_device
 */
struct tls_device {
	char name[TLS_DEVICE_NAME_MAX];
@ -83,6 +87,8 @@ struct tls_device {
	int (*feature)(struct tls_device *device);
	int (*hash)(struct tls_device *device, struct sock *sk);
	void (*unhash)(struct tls_device *device, struct sock *sk);
	void (*release)(struct kref *kref);
	struct kref kref;
};

enum {
@ -1552,6 +1552,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*), void *);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
struct xfrm_state *xfrm_state_alloc(struct net *net);
void xfrm_state_free(struct xfrm_state *x);
struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
				   const xfrm_address_t *saddr,
				   const struct flowi *fl,
@ -3,6 +3,7 @@
#
mandatory-y += auxvec.h
mandatory-y += bitsperlong.h
mandatory-y += bpf_perf_event.h
mandatory-y += byteorder.h
mandatory-y += errno.h
mandatory-y += fcntl.h
@ -160,4 +160,24 @@ enum {
};

#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1)

#define TUNNEL_CSUM		__cpu_to_be16(0x01)
#define TUNNEL_ROUTING		__cpu_to_be16(0x02)
#define TUNNEL_KEY		__cpu_to_be16(0x04)
#define TUNNEL_SEQ		__cpu_to_be16(0x08)
#define TUNNEL_STRICT		__cpu_to_be16(0x10)
#define TUNNEL_REC		__cpu_to_be16(0x20)
#define TUNNEL_VERSION		__cpu_to_be16(0x40)
#define TUNNEL_NO_KEY		__cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT	__cpu_to_be16(0x0100)
#define TUNNEL_OAM		__cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT		__cpu_to_be16(0x0400)
#define TUNNEL_GENEVE_OPT	__cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT	__cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE		__cpu_to_be16(0x2000)
#define TUNNEL_ERSPAN_OPT	__cpu_to_be16(0x4000)

#define TUNNEL_OPTIONS_PRESENT \
		(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)

#endif /* _UAPI_IF_TUNNEL_H_ */
@ -266,10 +266,14 @@ struct sockaddr_in {

#define	IN_CLASSD(a)		((((long int) (a)) & 0xf0000000) == 0xe0000000)
#define	IN_MULTICAST(a)		IN_CLASSD(a)
#define	IN_MULTICAST_NET	0xF0000000
#define	IN_MULTICAST_NET	0xe0000000

#define	IN_EXPERIMENTAL(a)	((((long int) (a)) & 0xf0000000) == 0xf0000000)
#define	IN_BADCLASS(a)		IN_EXPERIMENTAL((a))
#define	IN_BADCLASS(a)		(((long int) (a)) == 0xffffffff)
#define	IN_EXPERIMENTAL(a)	IN_BADCLASS((a))

#define	IN_CLASSE(a)		((((long int) (a)) & 0xf0000000) == 0xf0000000)
#define	IN_CLASSE_NET		0xffffffff
#define	IN_CLASSE_NSHIFT	0

/* Address to accept any incoming messages. */
#define	INADDR_ANY		((unsigned long int) 0x00000000)
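
With 240/4 reclassified, only the limited-broadcast address is still "bad". A few worked values, host byte order:

/* IN_MULTICAST(0xe0000001) -> true   (224.0.0.1, class D multicast)
 * IN_BADCLASS(0xf0000001)  -> false  (240.0.0.1, now a usable address)
 * IN_BADCLASS(0xffffffff)  -> true   (255.255.255.255, limited broadcast)
 */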

@ -155,8 +155,8 @@ enum txtime_flags {
};

struct sock_txtime {
	clockid_t	clockid;	/* reference clockid */
	__u32		flags;		/* as defined by enum txtime_flags */
	__kernel_clockid_t	clockid;	/* reference clockid */
	__u32			flags;		/* as defined by enum txtime_flags */
};

#endif /* _NET_TIMESTAMPING_H */
@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
#define NETLINK_LIST_MEMBERSHIPS	9
#define NETLINK_CAP_ACK			10
#define NETLINK_EXT_ACK			11
#define NETLINK_DUMP_STRICT_CHK		12
#define NETLINK_GET_STRICT_CHK		12

struct nl_pktinfo {
	__u32	group;
@ -365,13 +365,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
}

#ifdef CONFIG_BPF_JIT
# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)

/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
long bpf_jit_limit   __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
@ -580,16 +578,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
					    PAGE_SIZE), INT_MAX);
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
#endif
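
The int-to-long switch is not cosmetic: even the removed default could overflow a 32-bit value, since with 64 KiB pages PAGE_SIZE * 40000 = 2,621,440,000, which exceeds INT_MAX (2,147,483,647). The new derivation takes a quarter of the JIT region and caps it at LONG_MAX instead; a worked example under assumed parameters:

/* Assuming a 1 GiB modules area and 4 KiB pages:
 *
 *   bpf_jit_alloc_exec_limit() = 1 << 30
 *   bpf_jit_limit = min(round_up((1 << 30) >> 2, 4096), LONG_MAX)
 *                 = 256 MiB
 */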

static int bpf_jit_charge_modmem(u32 pages)
{
@ -5102,9 +5102,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
	}
	new_sl->next = env->explored_states[insn_idx];
	env->explored_states[insn_idx] = new_sl;
	/* connect new state to parentage chain */
	for (i = 0; i < BPF_REG_FP; i++)
		cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
	/* connect new state to parentage chain. Current frame needs all
	 * registers connected. Only r6 - r9 of the callers are alive (pushed
	 * to the stack implicitly by JITs) so in callers' frames connect just
	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
	 * the state of the call instruction (with WRITTEN set), and r0 comes
	 * from callee with its full parentage chain, anyway.
	 */
	for (j = 0; j <= cur->curframe; j++)
		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
	/* clear write marks in current state: the writes we did are not writes
	 * our child did, so they don't screen off its reads from us.
	 * (There are no read marks in current state, because reads always mark
@ -771,7 +771,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
	if (err < 0)
		goto free_skb;

	sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
	skb_setup_tx_timestamp(skb, sk->sk_tsflags);

	skb->dev = dev;
	skb->sk = sk;
@ -783,6 +783,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
		/* Pass parameters to the BPF program */
		cb->qdisc_cb.flow_keys = &flow_keys;
		flow_keys.nhoff = nhoff;
		flow_keys.thoff = nhoff;

		bpf_compute_data_pointers((struct sk_buff *)skb);
		result = BPF_PROG_RUN(attached, skb);
@ -790,9 +791,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
		/* Restore state */
		memcpy(cb, &cb_saved, sizeof(cb_saved));

		flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
		flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
					  flow_keys.nhoff, skb->len);

		__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
					 target_container);
		key_control->thoff = min_t(u16, key_control->thoff, skb->len);
		rcu_read_unlock();
		return result == BPF_OK;
	}
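
The clamping forces the BPF program's outputs back into a trusted range before the dissector consumes them; an equivalent formulation of what the two calls guarantee:

/* clamp_t(u16, v, lo, hi) == min_t(u16, max_t(u16, v, lo), hi), so the
 * two calls above ensure 0 <= nhoff <= skb->len and
 * nhoff <= thoff <= skb->len, whatever the BPF program wrote.
 */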

@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
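
Ordering is the point of this one-line fix: napi_disable() waits for a poll that is already running on another CPU to finish, so by the time the cell is unlinked and its queue purged, no softirq can still be draining napi_skbs. Condensed teardown sequence per cell (restated for clarity):

/* napi_disable()       - wait out any in-flight poll
 * netif_napi_del()     - unlink from the NAPI list
 * __skb_queue_purge()  - now safe: no concurrent reader remains
 */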

@ -2494,11 +2494,16 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
					 NULL, extack);
	} else {
@ -28,6 +28,8 @@ static int two __maybe_unused = 2;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
static long long_one __maybe_unused = 1;
static long long_max __maybe_unused = LONG_MAX;

static int net_msg_warn;	/* Unused, but still a sysctl */
@ -289,6 +291,17 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,

	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}

static int
proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
				     void __user *buffer, size_t *lenp,
				     loff_t *ppos)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static struct ctl_table net_core_table[] = {
@ -398,10 +411,11 @@ static struct ctl_table net_core_table[] = {
	{
		.procname	= "bpf_jit_limit",
		.data		= &bpf_jit_limit,
		.maxlen		= sizeof(int),
		.maxlen		= sizeof(long),
		.mode		= 0600,
		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
		.extra1		= &one,
		.proc_handler	= proc_dolongvec_minmax_bpf_restricted,
		.extra1		= &long_one,
		.extra2		= &long_max,
	},
#endif
	{
@ -952,17 +952,18 @@ static int inet_abc_len(__be32 addr)
{
	int rc = -1;	/* Something else, probably a multicast. */

	if (ipv4_is_zeronet(addr))
	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
		rc = 0;
	else {
		__u32 haddr = ntohl(addr);

		if (IN_CLASSA(haddr))
			rc = 8;
		else if (IN_CLASSB(haddr))
			rc = 16;
		else if (IN_CLASSC(haddr))
			rc = 24;
		else if (IN_CLASSE(haddr))
			rc = 32;
	}

	return rc;
@ -72,6 +72,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	skb->tstamp = 0;
	return dst_output(net, sk, skb);
}
@ -346,10 +346,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct rb_node **rbn, *parent;
	struct sk_buff *skb1, *prev_tail;
	int ihl, end, skb1_run_end;
	struct net_device *dev;
	unsigned int fragsize;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;
@ -419,7 +419,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments) MUST be silently discarded.
	 *
	 * We do the same here for IPv4 (and increment an snmp counter).
	 * We do the same here for IPv4 (and increment an snmp counter) but
	 * we do not want to drop the whole queue in response to a duplicate
	 * fragment.
	 */

	err = -EINVAL;
@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
		do {
			parent = *rbn;
			skb1 = rb_to_skb(parent);
			skb1_run_end = skb1->ip_defrag_offset +
				       FRAG_CB(skb1)->frag_run_len;
			if (end <= skb1->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= skb1->ip_defrag_offset +
				 FRAG_CB(skb1)->frag_run_len)
			else if (offset >= skb1_run_end)
				rbn = &parent->rb_right;
			else /* Found an overlap with skb1. */
				goto overlap;
			else if (offset >= skb1->ip_defrag_offset &&
				 end <= skb1_run_end)
				goto err; /* No new data, potential duplicate */
			else
				goto overlap; /* Found an overlap */
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
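
Restated, the walk compares the new fragment's byte range against each run already in the tree; a sketch of the decision table:

/* new fragment [offset, end) vs. existing run [run_start, run_end):
 *
 *   end    <= run_start                    -> descend left
 *   offset >= run_end                      -> descend right
 *   offset >= run_start && end <= run_end  -> full duplicate: drop quietly
 *   otherwise                              -> real overlap: discard the queue
 */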

@ -429,6 +429,8 @@ static int __init ic_defaults(void)
		ic_netmask = htonl(IN_CLASSB_NET);
	else if (IN_CLASSC(ntohl(ic_myaddr)))
		ic_netmask = htonl(IN_CLASSC_NET);
	else if (IN_CLASSE(ntohl(ic_myaddr)))
		ic_netmask = htonl(IN_CLASSE_NET);
	else {
		pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
		       &ic_myaddr);
@ -69,6 +69,8 @@
#include <net/nexthop.h>
#include <net/switchdev.h>

#include <linux/nospec.h>

struct ipmr_rule {
	struct fib_rule		common;
};
@ -1612,6 +1614,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
@ -1686,6 +1689,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
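
Both ioctl paths apply the standard Spectre-v1 hardening: after the bounds check, which the CPU may speculate past, the index is sanitized so a mis-speculated load cannot use the raw user-supplied value. The generic shape, with idx, size, table and val as hypothetical names:

	if (idx >= size)
		return -EINVAL;
	idx = array_index_nospec(idx, size);	/* idx < size even speculatively */
	val = table[idx];			/* now safe as an array index */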

@ -391,7 +391,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,

	skb->ip_summed = CHECKSUM_NONE;

	sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
	skb_setup_tx_timestamp(skb, sockc->tsflags);

	if (flags & MSG_CONFIRM)
		skb_set_dst_pending_confirm(skb, 1);
@ -378,6 +378,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);

	skb->tstamp = 0;
	return dst_output(net, sk, skb);
}