Merge branch 'enic-next'
Govindarajulu Varadarajan says:

====================
enic: Check for DMA mapping error

After DMA-mapping the buffers, enic does not call dma_mapping_error()
to check whether the mapping succeeded. This series fixes the issue by
checking the return value of pci_dma_mapping_error() after
pci_map_single().

Reported by Red Hat at https://bugzilla.redhat.com/show_bug.cgi?id=1145016
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f230332fa4
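The core pattern of the series: a streaming DMA mapping can fail (for example under an IOMMU, or when swiotlb bounce buffers are exhausted), so the handle returned by pci_map_single() must be validated with pci_dma_mapping_error() before it is handed to the hardware. Below is a minimal sketch of that pattern using the legacy PCI DMA wrappers the driver used at the time; the my_nic type and helper name are hypothetical illustrations, not part of the commit:

	/* Sketch only: map an skb's linear data and verify the mapping.
	 * my_nic is a hypothetical stand-in for the driver's per-device
	 * context (holding the struct pci_dev *pdev).
	 */
	static int my_nic_map_tx_head(struct my_nic *nic, struct sk_buff *skb,
				      dma_addr_t *dma_addr)
	{
		*dma_addr = pci_map_single(nic->pdev, skb->data,
					   skb_headlen(skb), PCI_DMA_TODEVICE);
		/* Never trust the returned handle blindly; on failure the
		 * caller must unwind anything already queued and drop the skb.
		 */
		if (pci_dma_mapping_error(nic->pdev, *dma_addr))
			return -ENOMEM;

		return 0;
	}

In the series this check is centralized in enic_dma_map_check(), which also bumps the new dma_map_error counter exposed through ethtool.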
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -188,6 +188,7 @@ struct enic {
 	struct enic_rfs_flw_tbl rfs_h;
 	u32 rx_copybreak;
 	u8 rss_key[ENIC_RSS_LEN];
+	struct vnic_gen_stats gen_stats;
 };
 
 static inline struct device *enic_get_dev(struct enic *enic)
@@ -242,6 +243,19 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
 	return enic->rq_count + enic->wq_count + 1;
 }
 
+static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
+{
+	if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
+				     enic->netdev->name);
+		enic->gen_stats.dma_map_error++;
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 void enic_reset_addr_lists(struct enic *enic);
 int enic_sriov_enabled(struct enic *enic);
 int enic_is_valid_vf(struct enic *enic, int vf);
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -24,6 +24,7 @@
 #include "enic_dev.h"
 #include "enic_clsf.h"
 #include "vnic_rss.h"
+#include "vnic_stats.h"
 
 struct enic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -40,6 +41,11 @@ struct enic_stat {
 	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
 }
 
+#define ENIC_GEN_STAT(stat) { \
+	.name = #stat, \
+	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
+}
+
 static const struct enic_stat enic_tx_stats[] = {
 	ENIC_TX_STAT(tx_frames_ok),
 	ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -78,8 +84,13 @@ static const struct enic_stat enic_rx_stats[] = {
 	ENIC_RX_STAT(rx_frames_to_max),
 };
 
+static const struct enic_stat enic_gen_stats[] = {
+	ENIC_GEN_STAT(dma_map_error),
+};
+
 static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
 static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
 
 void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
 {
@@ -146,6 +157,10 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
 			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
 			data += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < enic_n_gen_stats; i++) {
+			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
 		break;
 	}
 }
@@ -154,7 +169,7 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
-		return enic_n_tx_stats + enic_n_rx_stats;
+		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -173,6 +188,8 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
 		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
 	for (i = 0; i < enic_n_rx_stats; i++)
 		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+	for (i = 0; i < enic_n_gen_stats; i++)
+		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
 }
 
 static u32 enic_get_msglevel(struct net_device *netdev)
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -351,80 +351,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static inline void enic_queue_wq_skb_cont(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	unsigned int len_left, int loopback)
+static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
+				  struct sk_buff *skb, unsigned int len_left,
+				  int loopback)
 {
 	const skb_frag_t *frag;
+	dma_addr_t dma_addr;
 
 	/* Queue additional data fragments */
 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
 		len_left -= skb_frag_size(frag);
-		enic_queue_wq_desc_cont(wq, skb,
-			skb_frag_dma_map(&enic->pdev->dev,
-					 frag, 0, skb_frag_size(frag),
-					 DMA_TO_DEVICE),
-			skb_frag_size(frag),
-			(len_left == 0),	/* EOP? */
-			loopback);
+		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
+					    skb_frag_size(frag),
+					    DMA_TO_DEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
+					(len_left == 0),	/* EOP? */
+					loopback);
 	}
+
+	return 0;
 }
 
-static inline void enic_queue_wq_skb_vlan(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
+				  struct sk_buff *skb, int vlan_tag_insert,
+				  unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;
 
 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			       head_len, PCI_DMA_TODEVICE),
-		head_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+			   vlan_tag, eop, loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }
 
-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+				     struct sk_buff *skb, int vlan_tag_insert,
+				     unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	unsigned int hdr_len = skb_checksum_start_offset(skb);
 	unsigned int csum_offset = hdr_len + skb->csum_offset;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;
 
 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc_csum_l4(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			       head_len, PCI_DMA_TODEVICE),
-		head_len,
-		csum_offset,
-		hdr_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+				   hdr_len, vlan_tag_insert, vlan_tag, eop,
+				   loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }
 
-static inline void enic_queue_wq_skb_tso(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+				 struct sk_buff *skb, unsigned int mss,
+				 int vlan_tag_insert, unsigned int vlan_tag,
+				 int loopback)
 {
 	unsigned int frag_len_left = skb_headlen(skb);
 	unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +468,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	 */
 	while (frag_len_left) {
 		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
-				len, PCI_DMA_TODEVICE);
-		enic_queue_wq_desc_tso(wq, skb,
-			dma_addr,
-			len,
-			mss, hdr_len,
-			vlan_tag_insert, vlan_tag,
-			eop && (len == frag_len_left), loopback);
+		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+					  PCI_DMA_TODEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+				       vlan_tag_insert, vlan_tag,
+				       eop && (len == frag_len_left), loopback);
 		frag_len_left -= len;
 		offset += len;
 	}
 
 	if (eop)
-		return;
+		return 0;
 
 	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
 	 * for additional data fragments
@@ -483,16 +496,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
 						    offset, len,
 						    DMA_TO_DEVICE);
-			enic_queue_wq_desc_cont(wq, skb,
-				dma_addr,
-				len,
-				(len_left == 0) &&
-				(len == frag_len_left),		/* EOP? */
-				loopback);
+			if (unlikely(enic_dma_map_check(enic, dma_addr)))
+				return -ENOMEM;
+			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						(len == frag_len_left),/*EOP*/
						loopback);
 			frag_len_left -= len;
 			offset += len;
 		}
 	}
+
+	return 0;
 }
 
 static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,6 +517,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 	unsigned int vlan_tag = 0;
 	int vlan_tag_insert = 0;
 	int loopback = 0;
+	int err;
 
 	if (vlan_tx_tag_present(skb)) {
 		/* VLAN tag from trunking driver */
@@ -513,14 +529,30 @@
 	}
 
 	if (mss)
-		enic_queue_wq_skb_tso(enic, wq, skb, mss,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+					    vlan_tag_insert, vlan_tag,
+					    loopback);
 	else if (skb->ip_summed == CHECKSUM_PARTIAL)
-		enic_queue_wq_skb_csum_l4(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+						vlan_tag, loopback);
 	else
-		enic_queue_wq_skb_vlan(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+					     vlan_tag, loopback);
+	if (unlikely(err)) {
+		struct vnic_wq_buf *buf;
+
+		buf = wq->to_use->prev;
+		/* while not EOP of previous pkt && queue not empty.
+		 * For all non EOP bufs, os_buf is NULL.
+		 */
+		while (!buf->os_buf && (buf->next != wq->to_clean)) {
+			enic_free_wq_buf(wq, buf);
+			wq->ring.desc_avail++;
+			buf = buf->prev;
+		}
+		wq->to_use = buf->next;
+		dev_kfree_skb(skb);
+	}
 }
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +982,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	if (!skb)
 		return -ENOMEM;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data,
-		len, PCI_DMA_FROMDEVICE);
+	dma_addr = pci_map_single(enic->pdev, skb->data, len,
+				  PCI_DMA_FROMDEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
 
 	enic_queue_rq_desc(rq, skb, os_buf_index,
 		dma_addr, len);
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -62,6 +62,11 @@ struct vnic_rx_stats {
 	u64 rsvd[16];
 };
 
+/* Generic statistics */
+struct vnic_gen_stats {
+	u64 dma_map_error;
+};
+
 struct vnic_stats {
 	struct vnic_tx_stats tx;
 	struct vnic_rx_stats rx;
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -47,11 +47,14 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 			wq->ring.desc_size * buf->index;
 		if (buf->index + 1 == count) {
 			buf->next = wq->bufs[0];
+			buf->next->prev = buf;
 			break;
 		} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
 			buf->next = wq->bufs[i + 1];
+			buf->next->prev = buf;
 		} else {
 			buf->next = buf + 1;
+			buf->next->prev = buf;
 			buf++;
 		}
 	}
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,6 +62,7 @@ struct vnic_wq_buf {
 	uint8_t cq_entry; /* Gets completion event from hw */
 	uint8_t desc_skip_cnt; /* Num descs to occupy */
 	uint8_t compressed_send; /* Both hdr and payload in one desc */
+	struct vnic_wq_buf *prev;
 };
 
 /* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
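For context, the vnic_wq changes above exist to support the new TX error unwind in enic_queue_wq_skb(): when a mapping fails partway through an skb, the driver walks backwards from wq->to_use and releases the descriptors it has already posted, which is only possible once the buffer ring is linked in both directions. A self-contained toy model of that backward walk, in plain user-space C with hypothetical names (the real code also stops at wq->to_clean so it cannot walk past the queue head):

	#include <stdio.h>

	#define RING_SIZE 8

	struct ring_buf {
		int in_use;		/* stands in for a queued, non-EOP buffer */
		struct ring_buf *next;
		struct ring_buf *prev;
	};

	int main(void)
	{
		struct ring_buf bufs[RING_SIZE] = { 0 };
		struct ring_buf *buf;
		int i;

		/* Wire the ring in both directions, as vnic_wq_alloc_bufs()
		 * now does with the added buf->next->prev assignments.
		 */
		for (i = 0; i < RING_SIZE; i++) {
			bufs[i].next = &bufs[(i + 1) % RING_SIZE];
			bufs[i].next->prev = &bufs[i];
		}

		/* Pretend slots 0..2 were queued before a mapping failure;
		 * the next free slot would have been 3.
		 */
		for (i = 0; i < 3; i++)
			bufs[i].in_use = 1;

		/* Unwind: step back from the next-to-use slot, freeing each
		 * queued buffer until a free slot is reached.
		 */
		for (buf = bufs[3].prev; buf->in_use; buf = buf->prev) {
			buf->in_use = 0;
			printf("freed slot %td\n", buf - bufs);
		}
		return 0;
	}

Running it prints "freed slot 2", then 1, then 0, mirroring how enic_free_wq_buf() is called on each partially queued descriptor before the skb is dropped.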