s390/qeth: add a L3 xmit wrapper

In preparation for future work, move the high-level xmit work into a
separate wrapper. This matches the L2 xmit code.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Julian Wiedmann <jwi@linux.ibm.com>
Date:      2018-07-11 17:42:45 +02:00
Committer: David S. Miller <davem@davemloft.net>
Parent:    371a1e7a07
Commit:    ea1d4a0c7f

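The change follows the usual netdev split: the new helper reports plain errno values, and only the ndo_start_xmit entry point maps them onto the netdev_tx_t codes the core stack understands. Below is a minimal, self-contained sketch of that pattern; all example_* names are illustrative stand-ins, not qeth code (the real functions are qeth_l3_xmit() and qeth_l3_hard_start_xmit() in the diff that follows).

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_card {
	int dummy;	/* stand-in for per-device driver state */
};

/* Helper: builds headers and queues the skb; reports errno-style codes
 * and leaves the netdev_tx_t translation to the caller.
 */
static int example_xmit(struct example_card *card, struct sk_buff *skb)
{
	/* ... build HW header, map buffers, hand skb to the device ... */
	return 0;	/* or -ENOMEM, -E2BIG, -EINVAL, -EBUSY */
}

static netdev_tx_t example_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct example_card *card = netdev_priv(dev);
	int rc = example_xmit(card, skb);

	if (!rc)
		return NETDEV_TX_OK;	/* skb was consumed by the helper */
	if (rc == -EBUSY)
		return NETDEV_TX_BUSY;	/* skb kept; stack will retry */

	/* any other error: account for the drop and free the skb */
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

The L2 driver already splits its transmit path this way, which is what the commit message means by matching the L2 xmit code.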

--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2156,48 +2156,21 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 	return elements;
 }
 
-static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
-					   struct net_device *dev)
+static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
+			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
 	int rc;
 	__be16 *tag;
 	struct qeth_hdr *hdr = NULL;
 	int hdr_elements = 0;
 	int elements;
-	struct qeth_card *card = dev->ml_priv;
 	struct sk_buff *new_skb = NULL;
-	int ipv = qeth_get_ip_version(skb);
-	int cast_type = qeth_l3_get_cast_type(skb);
-	struct qeth_qdio_out_q *queue;
 	int tx_bytes = skb->len;
 	unsigned int hd_len = 0;
 	bool use_tso;
 	int data_offset = -1;
 	unsigned int nr_frags;
 
-	if (((card->info.type == QETH_CARD_TYPE_IQD) &&
-	     (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
-	      ((card->options.cq == QETH_CQ_ENABLED) &&
-	       (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) ||
-	    card->options.sniffer)
-		goto tx_drop;
-
-	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
-		card->stats.tx_carrier_errors++;
-		goto tx_drop;
-	}
-
-	if ((cast_type == RTN_BROADCAST) &&
-	    (card->info.broadcast_capable == 0))
-		goto tx_drop;
-
-	queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
-	if (card->options.performance_stats) {
-		card->perf_stats.outbound_cnt++;
-		card->perf_stats.outbound_start_time = qeth_get_micros();
-	}
-
 	/* Ignore segment size from skb_is_gso(), 1 page is always used. */
 	use_tso = skb_is_gso(skb) &&
 		  (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
@@ -2208,14 +2181,14 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 		hd_len = sizeof(*hdr);
 		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 		if (!hdr)
-			goto tx_drop;
+			return -ENOMEM;
 		hdr_elements++;
 	} else {
 		/* create a clone with writeable headroom */
 		new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
 					       + VLAN_HLEN);
 		if (!new_skb)
-			goto tx_drop;
+			return -ENOMEM;
 
 		if (ipv == 4) {
 			skb_pull(new_skb, ETH_HLEN);
@@ -2234,24 +2207,22 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 		}
 	}
 
-	netif_stop_queue(dev);
-
 	/* fix hardware limitation: as long as we do not have sbal
 	 * chaining we can not send long frag lists
 	 */
 	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
 	    ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
 	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
-		int lin_rc = skb_linearize(new_skb);
+		rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
-			if (lin_rc)
+			if (rc)
 				card->perf_stats.tx_linfail++;
 			else
 				card->perf_stats.tx_lin++;
 		}
-		if (lin_rc)
-			goto tx_drop;
+		if (rc)
+			goto out;
 	}
 	nr_frags = skb_shinfo(new_skb)->nr_frags;
 
@@ -2290,9 +2261,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 			qeth_get_elements_no(card, new_skb, hdr_elements,
 					     (data_offset > 0) ? data_offset : 0);
 	if (!elements) {
-		if (data_offset >= 0)
-			kmem_cache_free(qeth_core_header_cache, hdr);
-		goto tx_drop;
+		rc = -E2BIG;
+		goto out;
 	}
 	elements += hdr_elements;
 
@@ -2306,17 +2276,18 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 			len = sizeof(struct qeth_hdr_layer3);
 		}
 
-		if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
-			goto tx_drop;
+		if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
+			rc = -EINVAL;
+			goto out;
+		}
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
 					 hd_len, elements);
 	} else
 		rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset,
 					      hd_len);
 
+out:
 	if (!rc) {
-		card->stats.tx_packets++;
-		card->stats.tx_bytes += tx_bytes;
 		if (new_skb != skb)
 			dev_kfree_skb_any(skb);
 		if (card->options.performance_stats) {
@@ -2330,30 +2301,67 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 			card->perf_stats.sg_frags_sent += nr_frags + 1;
 			}
 		}
-		rc = NETDEV_TX_OK;
 	} else {
+		if (new_skb != skb)
+			dev_kfree_skb_any(new_skb);
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
+	}
+	return rc;
+}
 
-		if (rc == -EBUSY) {
-			if (new_skb != skb)
-				dev_kfree_skb_any(new_skb);
-			return NETDEV_TX_BUSY;
-		} else
-			goto tx_drop;
-	}
-
-	netif_wake_queue(dev);
-	if (card->options.performance_stats)
-		card->perf_stats.outbound_time += qeth_get_micros() -
-			card->perf_stats.outbound_start_time;
-	return rc;
+static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
+					   struct net_device *dev)
+{
+	int cast_type = qeth_l3_get_cast_type(skb);
+	struct qeth_card *card = dev->ml_priv;
+	int ipv = qeth_get_ip_version(skb);
+	struct qeth_qdio_out_q *queue;
+	int tx_bytes = skb->len;
+	int rc;
+
+	if (IS_IQD(card)) {
+		if (card->options.sniffer)
+			goto tx_drop;
+		if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
+		    (card->options.cq == QETH_CQ_ENABLED &&
+		     skb->protocol != htons(ETH_P_AF_IUCV)))
+			goto tx_drop;
+	}
+
+	if (card->state != CARD_STATE_UP || !card->lan_online) {
+		card->stats.tx_carrier_errors++;
+		goto tx_drop;
+	}
+
+	if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
+		goto tx_drop;
+
+	queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
+	if (card->options.performance_stats) {
+		card->perf_stats.outbound_cnt++;
+		card->perf_stats.outbound_start_time = qeth_get_micros();
+	}
+	netif_stop_queue(dev);
+
+	rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+
+	if (!rc) {
+		card->stats.tx_packets++;
+		card->stats.tx_bytes += tx_bytes;
+		if (card->options.performance_stats)
+			card->perf_stats.outbound_time += qeth_get_micros() -
+				card->perf_stats.outbound_start_time;
+		netif_wake_queue(dev);
+		return NETDEV_TX_OK;
+	} else if (rc == -EBUSY) {
+		return NETDEV_TX_BUSY;
+	} /* else fall through */
 
 tx_drop:
 	card->stats.tx_dropped++;
 	card->stats.tx_errors++;
-	if ((new_skb != skb) && new_skb)
-		dev_kfree_skb_any(new_skb);
 	dev_kfree_skb_any(skb);
 	netif_wake_queue(dev);
 	return NETDEV_TX_OK;