net: add skb_[inner_]tcp_all_headers helpers
Most drivers use "skb_transport_offset(skb) + tcp_hdrlen(skb)" to
compute headers length for a TCP packet, but others use more
convoluted (but equivalent) ways.

Add skb_tcp_all_headers() and skb_inner_tcp_all_headers() helpers to
harmonize this a bit.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent 04740c53ca
commit 504148fedb
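The conversion below is mechanical at every call site. As a reading aid, here is a minimal before/after sketch of the pattern being replaced (the example_* wrapper names are illustrative only, not part of the patch; the skb_* accessors are the real APIs, and the two new *_all_headers() helpers are defined in the include/linux/tcp.h hunk near the end of this diff):

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Before: drivers open-coded "all headers" length for a TCP packet. */
static unsigned int example_tso_hdr_len_old(const struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/* After: one helper call, same result (MAC + IP + TCP headers). */
static unsigned int example_tso_hdr_len_new(const struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_tcp_all_headers(skb);
	return skb_tcp_all_headers(skb);
}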
@@ -573,7 +573,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
 
 	if (skb_is_gso(skb)) {
-		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hlen = skb_tcp_all_headers(skb);
 		phead = skb->data;
 		if (unlikely(!skb_pull(skb, hlen))) {
 			ipoib_warn(priv, "linear data too small\n");

@@ -1673,12 +1673,10 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
 		return ret;
 
 	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
-		packet->header_len = skb_inner_transport_offset(skb) +
-				     inner_tcp_hdrlen(skb);
+		packet->header_len = skb_inner_tcp_all_headers(skb);
 		packet->tcp_header_len = inner_tcp_hdrlen(skb);
 	} else {
-		packet->header_len = skb_transport_offset(skb) +
-				     tcp_hdrlen(skb);
+		packet->header_len = skb_tcp_all_headers(skb);
 		packet->tcp_header_len = tcp_hdrlen(skb);
 	}
 	packet->tcp_payload_len = skb->len - packet->header_len;

@@ -2072,7 +2072,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
 	tpd_req = skb_shinfo(skb)->nr_frags + 1;
 
 	if (skb_is_gso(skb)) {
-		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		proto_hdr_len = skb_tcp_all_headers(skb);
 		if (proto_hdr_len < skb_headlen(skb))
 			tpd_req++;
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)

@@ -2107,7 +2107,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
 			if (real_len < skb->len)
 				pskb_trim(skb, real_len);
 
-			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+			hdr_len = skb_tcp_all_headers(skb);
 			if (unlikely(skb->len == hdr_len)) {
 				/* only xsum need */
 				if (netif_msg_tx_queued(adapter))

@@ -2132,7 +2132,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
 			*tpd = atl1c_get_tpd(adapter, queue);
 			ipv6_hdr(skb)->payload_len = 0;
 			/* check payload == 0 byte ? */
-			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+			hdr_len = skb_tcp_all_headers(skb);
 			if (unlikely(skb->len == hdr_len)) {
 				/* only xsum need */
 				if (netif_msg_tx_queued(adapter))

@@ -2219,7 +2219,8 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
 	tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
 	if (tso) {
 		/* TSO */
-		map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
+		map_len = hdr_len;
 		use_tpd = tpd;
 
 		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);

@@ -1609,8 +1609,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
 	if (skb_is_gso(skb)) {
 		if (skb->protocol == htons(ETH_P_IP) ||
 		   (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
-			proto_hdr_len = skb_transport_offset(skb) +
-					tcp_hdrlen(skb);
+			proto_hdr_len = skb_tcp_all_headers(skb);
 			if (proto_hdr_len < skb_headlen(skb)) {
 				tpd_req += ((skb_headlen(skb) - proto_hdr_len +
 					   MAX_TX_BUF_LEN - 1) >>

@@ -1645,7 +1644,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
 			if (real_len < skb->len)
 				pskb_trim(skb, real_len);
 
-			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+			hdr_len = skb_tcp_all_headers(skb);
 			if (unlikely(skb->len == hdr_len)) {
 				/* only xsum need */
 				netdev_warn(adapter->netdev,

@@ -1713,7 +1712,8 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
 	segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
 	if (segment) {
 		/* TSO */
-		map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
+		map_len = hdr_len;
 		use_tpd = tpd;
 
 		tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);

@@ -2115,7 +2115,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
 							 ntohs(iph->tot_len));
 			if (real_len < skb->len)
 				pskb_trim(skb, real_len);
-			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+			hdr_len = skb_tcp_all_headers(skb);
 			if (skb->len == hdr_len) {
 				iph->check = 0;
 				tcp_hdr(skb)->check =

@@ -2206,7 +2206,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
 	if (retval) {
 		/* TSO */
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		buffer_info->length = hdr_len;
 		page = virt_to_page(skb->data);
 		offset = offset_in_page(skb->data);

@@ -2367,8 +2367,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss) {
 		if (skb->protocol == htons(ETH_P_IP)) {
-			proto_hdr_len = (skb_transport_offset(skb) +
-					 tcp_hdrlen(skb));
+			proto_hdr_len = skb_tcp_all_headers(skb);
 			if (unlikely(proto_hdr_len > len)) {
 				dev_kfree_skb_any(skb);
 				return NETDEV_TX_OK;

@@ -3421,12 +3421,9 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
 
 		/* Headers length */
 		if (xmit_type & XMIT_GSO_ENC)
-			hlen = (int)(skb_inner_transport_header(skb) -
-				     skb->data) +
-				     inner_tcp_hdrlen(skb);
+			hlen = skb_inner_tcp_all_headers(skb);
 		else
-			hlen = (int)(skb_transport_header(skb) -
-				     skb->data) + tcp_hdrlen(skb);
+			hlen = skb_tcp_all_headers(skb);
 
 		/* Amount of data (w/o headers) on linear part of SKB*/
 		first_bd_sz = skb_headlen(skb) - hlen;

@@ -3534,15 +3531,13 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
 			  ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
 			 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 
-		return skb_inner_transport_header(skb) +
-			inner_tcp_hdrlen(skb) - skb->data;
+		return skb_inner_tcp_all_headers(skb);
 	}
 
 	/* We support checksum offload for TCP and UDP only.
 	 * No need to pass the UDP header length - it's a constant.
 	 */
-	return skb_inner_transport_header(skb) +
-		sizeof(struct udphdr) - skb->data;
+	return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
 }

@@ -3568,12 +3563,12 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 			  ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
 			 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 
-		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+		return skb_tcp_all_headers(skb);
 	}
 	/* We support checksum offload for TCP and UDP only.
 	 * No need to pass the UDP header length - it's a constant.
 	 */
-	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+	return skb_transport_offset(skb) + sizeof(struct udphdr);
 }
 
 /* set FW indication according to inner or outer protocols if tunneled */

@@ -535,12 +535,9 @@ normal_tx:
 		u32 hdr_len;
 
 		if (skb->encapsulation)
-			hdr_len = skb_inner_network_offset(skb) +
-				skb_inner_network_header_len(skb) +
-				inner_tcp_hdrlen(skb);
+			hdr_len = skb_inner_tcp_all_headers(skb);
 		else
-			hdr_len = skb_transport_offset(skb) +
-				tcp_hdrlen(skb);
+			hdr_len = skb_tcp_all_headers(skb);
 
 		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
 					TX_BD_FLAGS_T_IPID |

@@ -7944,7 +7944,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		iph = ip_hdr(skb);
 		tcp_opt_len = tcp_optlen(skb);
 
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
 
 		/* HW/FW can not correctly segment packets that have been
 		 * vlan encapsulated.

@@ -2823,8 +2823,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
 			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
 			return -EINVAL;
 		}
-		if (unlikely((gso_size + skb_transport_offset(skb) +
-			      tcp_hdrlen(skb)) >= skb->len)) {
+		if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
 			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
 			txqent->hdr.wi.lso_mss = 0;
 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);

@@ -2872,8 +2871,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
 			BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
 
 			if (unlikely(skb_headlen(skb) <
-				     skb_transport_offset(skb) +
-				     tcp_hdrlen(skb))) {
+				     skb_tcp_all_headers(skb))) {
 				BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
 				return -EINVAL;
 			}

@@ -2267,7 +2267,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* only queue eth + ip headers separately for UDP */
 		hdrlen = skb_transport_offset(skb);
 	else
-		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdrlen = skb_tcp_all_headers(skb);
 	if (skb_headlen(skb) < hdrlen) {
 		netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
 		/* if this is required, would need to copy to single buffer */

@@ -1261,7 +1261,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 static int nicvf_tso_count_subdescs(struct sk_buff *skb)
 {
 	struct skb_shared_info *sh = skb_shinfo(skb);
-	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	unsigned int sh_len = skb_tcp_all_headers(skb);
 	unsigned int data_len = skb->len - sh_len;
 	unsigned int p_len = sh->gso_size;
 	long f_id = -1;	/* id of the current fragment */

@@ -1382,7 +1382,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 
 	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
 		hdr->tso = 1;
-		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr->tso_start = skb_tcp_all_headers(skb);
 		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
 		/* For non-tunneled pkts, point this to L2 ethertype */
 		hdr->inner_l3_offset = skb_network_offset(skb) - 2;

@@ -1531,7 +1531,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 	if (cxgb4_is_ktls_skb(skb) &&
-	    (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+	    (skb->len - skb_tcp_all_headers(skb)))
 		return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
 #endif /* CHELSIO_TLS_DEVICE */
 

@@ -1012,7 +1012,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 	/* packet length = eth hdr len + ip hdr len + tcp hdr len
 	 * (including options).
 	 */
-	pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	pktlen = skb_tcp_all_headers(skb);
 
 	ctrl = sizeof(*cpl) + pktlen;
 	len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);

@@ -1907,7 +1907,7 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
 		return 0;
 
 	th = tcp_hdr(nskb);
-	skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+	skb_offset = skb_tcp_all_headers(nskb);
 	data_len = nskb->len - skb_offset;
 	skb_tx_timestamp(nskb);
 

@@ -1938,7 +1938,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 
 	tcp_seq = ntohl(th->seq);
-	skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	skb_offset = skb_tcp_all_headers(skb);
 	skb_data_len = skb->len - skb_offset;
 	data_len = skb_data_len;
 

@@ -680,11 +680,10 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
 	skb_frag_t *frag;
 
 	if (skb->encapsulation) {
-		hdr_len = skb_inner_transport_header(skb) - skb->data;
-		hdr_len += inner_tcp_hdrlen(skb);
+		hdr_len = skb_inner_tcp_all_headers(skb);
 		enic_preload_tcp_csum_encap(skb);
 	} else {
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		enic_preload_tcp_csum(skb);
 	}
 

@@ -737,9 +737,9 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 static int be_gso_hdr_len(struct sk_buff *skb)
 {
 	if (skb->encapsulation)
-		return skb_inner_transport_offset(skb) +
-		       inner_tcp_hdrlen(skb);
-	return skb_transport_offset(skb) + tcp_hdrlen(skb);
+		return skb_inner_tcp_all_headers(skb);
+
+	return skb_tcp_all_headers(skb);
 }
 
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)

@@ -691,7 +691,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
 			 struct bufdesc *bdp, int index)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int hdr_len = skb_tcp_all_headers(skb);
 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
 	void *bufaddr;
 	unsigned long dmabuf;

@@ -83,7 +83,7 @@ static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
 	const struct fun_ktls_tx_ctx *tls_ctx;
 	u32 datalen, seq;
 
-	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	datalen = skb->len - skb_tcp_all_headers(skb);
 	if (!datalen)
 		return skb;
 

@@ -386,7 +386,7 @@ static int gve_prep_tso(struct sk_buff *skb)
 				     (__force __wsum)htonl(paylen));
 
 		/* Compute length of segmentation header. */
-		header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		header_len = skb_tcp_all_headers(skb);
 		break;
 	default:
 		return -EINVAL;

@@ -598,9 +598,9 @@ static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
  */
 static bool gve_can_send_tso(const struct sk_buff *skb)
 {
-	const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
 	const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	const int header_len = skb_tcp_all_headers(skb);
 	const int gso_size = shinfo->gso_size;
 	int cur_seg_num_bufs;
 	int cur_seg_size;

@@ -31,8 +31,6 @@
 #define HNS_BUFFER_SIZE_2048 2048
 
 #define BD_MAX_SEND_SIZE 8191
-#define SKB_TMP_LEN(SKB) \
-	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
 
 static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
 			    int send_sz, dma_addr_t dma, int frag_end,

@@ -94,7 +92,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
 					       HNSV2_TXD_TSE_B, 1);
 				l4_len = tcp_hdrlen(skb);
 				mss = skb_shinfo(skb)->gso_size;
-				paylen = skb->len - SKB_TMP_LEN(skb);
+				paylen = skb->len - skb_tcp_all_headers(skb);
 			}
 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
 			hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);

@@ -108,7 +106,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
 					       HNSV2_TXD_TSE_B, 1);
 				l4_len = tcp_hdrlen(skb);
 				mss = skb_shinfo(skb)->gso_size;
-				paylen = skb->len - SKB_TMP_LEN(skb);
+				paylen = skb->len - skb_tcp_all_headers(skb);
 			}
 		}
 		desc->tx.ip_offset = ip_offset;

@@ -1838,9 +1838,9 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
 {
 	if (!skb->encapsulation)
-		return skb_transport_offset(skb) + tcp_hdrlen(skb);
+		return skb_tcp_all_headers(skb);
 
-	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+	return skb_inner_tcp_all_headers(skb);
 }
 
 /* HW need every continuous max_non_tso_bd_num buffer data to be larger

@@ -37,8 +37,7 @@ DECLARE_EVENT_CLASS(hns3_skb_template,
 		__entry->gso_segs = skb_shinfo(skb)->gso_segs;
 		__entry->gso_type = skb_shinfo(skb)->gso_type;
 		__entry->hdr_len = skb->encapsulation ?
-		skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
-		skb_transport_offset(skb) + tcp_hdrlen(skb);
+		skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb);
 		__entry->ip_summed = skb->ip_summed;
 		__entry->fraglist = skb_has_frag_list(skb);
 		hns3_shinfo_pack(skb_shinfo(skb), __entry->size);

@@ -1617,7 +1617,7 @@ static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
 		 * For TSO packets we only copy the headers into the
 		 * immediate area.
 		 */
-		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+		immediate_len = skb_tcp_all_headers(skb);
 	}
 
 	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {

@@ -2708,7 +2708,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
 		if (err < 0)
 			return err;
 
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		mss = skb_shinfo(skb)->gso_size;
 		if (protocol == htons(ETH_P_IP)) {
 			struct iphdr *iph = ip_hdr(skb);

@@ -3139,7 +3139,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		max_per_txd = min(mss << 2, max_per_txd);
 		max_txd_pwr = fls(max_per_txd) - 1;
 
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		if (skb->data_len && hdr_len == len) {
 			switch (hw->mac_type) {
 			case e1000_82544: {

@@ -5474,7 +5474,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	hdr_len = skb_tcp_all_headers(skb);
 	mss = skb_shinfo(skb)->gso_size;
 	if (protocol == htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);

@@ -5846,7 +5846,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		 * points to just header, pull a few bytes of payload from
 		 * frags into skb->data
 		 */
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		/* we do this workaround for ES2LAN, but it is un-necessary,
 		 * avoiding it could save a lot of cycles
 		 */

@@ -1187,7 +1187,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 		if (err < 0)
 			return err;
 
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		mss = skb_shinfo(skb)->gso_size;
 		iph = ip_hdr(skb);
 		iph->tot_len = 0;

@@ -775,7 +775,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
 		u32 *first_cmd_sts, bool first_desc)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int hdr_len = skb_tcp_all_headers(skb);
 	int tx_index;
 	struct tx_desc *desc;
 	int ret;

@@ -2664,8 +2664,8 @@ err_drop_frame:
 static inline void
 mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
 {
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+	int hdr_len = skb_tcp_all_headers(skb);
 	struct mvneta_tx_desc *tx_desc;
 
 	tx_desc = mvneta_txq_next_desc_get(txq);

@@ -2727,7 +2727,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
 	if ((txq->count + tso_count_descs(skb)) >= txq->size)
 		return 0;
 
-	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+	if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
 		pr_info("*** Is this even possible?\n");
 		return 0;
 	}

@@ -624,7 +624,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 	ext->subdc = NIX_SUBDC_EXT;
 	if (skb_shinfo(skb)->gso_size) {
 		ext->lso = 1;
-		ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		ext->lso_sb = skb_tcp_all_headers(skb);
 		ext->lso_mps = skb_shinfo(skb)->gso_size;
 
 		/* Only TSOv4 and TSOv6 GSO offloads are supported */

@@ -931,7 +931,7 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
 	 * be correctly modified, hence don't offload such TSO segments.
 	 */
 
-	payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	payload_len = skb->len - skb_tcp_all_headers(skb);
 	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
 	if (last_seg_size && last_seg_size < 16)
 		return false;

@@ -1863,7 +1863,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
 	if (mss != 0) {
 
 		if (!(hw->flags & SKY2_HW_NEW_LE))
-			mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+			mss += skb_tcp_all_headers(skb);
 
 		if (mss != sky2->tx_last_mss) {
 			le = get_tx_le(sky2, &slot);

@@ -645,7 +645,7 @@ static int get_real_size(const struct sk_buff *skb,
 	*inline_ok = false;
 	*hopbyhop = 0;
 	if (skb->encapsulation) {
-		*lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+		*lso_header_size = skb_inner_tcp_all_headers(skb);
 	} else {
 		/* Detects large IPV6 TCP packets and prepares for removal of
 		 * HBH header that has been pushed by ip6_xmit(),

@@ -653,7 +653,7 @@ static int get_real_size(const struct sk_buff *skb,
 		 */
 		if (ipv6_has_hopopt_jumbo(skb))
 			*hopbyhop = sizeof(struct hop_jumbo_hdr);
-		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		*lso_header_size = skb_tcp_all_headers(skb);
 	}
 	real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
 		ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);

@@ -458,7 +458,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
 	int datalen;
 	u32 seq;
 
-	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	datalen = skb->len - skb_tcp_all_headers(skb);
 	if (!datalen)
 		return true;
 

@@ -152,14 +152,14 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
 
 	*hopbyhop = 0;
 	if (skb->encapsulation) {
-		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+		ihs = skb_tcp_all_headers(skb);
 		stats->tso_inner_packets++;
 		stats->tso_inner_bytes += skb->len - ihs;
 	} else {
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
 			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
 		} else {
-			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+			ihs = skb_tcp_all_headers(skb);
 			if (ipv6_has_hopopt_jumbo(skb)) {
 				*hopbyhop = sizeof(struct hop_jumbo_hdr);
 				ihs -= sizeof(struct hop_jumbo_hdr);

@@ -2692,7 +2692,7 @@ again:
 			 * send loop that we are still in the
 			 * header portion of the TSO packet.
 			 * TSO header can be at most 1KB long */
-			cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
+			cum_len = -skb_tcp_all_headers(skb);
 
 			/* for IPv6 TSO, the checksum offset stores the
 			 * TCP header length, to save the firmware from

@@ -81,12 +81,11 @@ nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
 	if (!skb->encapsulation) {
 		l3_offset = skb_network_offset(skb);
 		l4_offset = skb_transport_offset(skb);
-		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdrlen = skb_tcp_all_headers(skb);
 	} else {
 		l3_offset = skb_inner_network_offset(skb);
 		l4_offset = skb_inner_transport_offset(skb);
-		hdrlen = skb_inner_transport_header(skb) - skb->data +
-			 inner_tcp_hdrlen(skb);
+		hdrlen = skb_inner_tcp_all_headers(skb);
 	}
 
 	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;

@@ -46,12 +46,11 @@ nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
 	if (!skb->encapsulation) {
 		l3_offset = skb_network_offset(skb);
 		l4_offset = skb_transport_offset(skb);
-		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdrlen = skb_tcp_all_headers(skb);
 	} else {
 		l3_offset = skb_inner_network_offset(skb);
 		l4_offset = skb_inner_transport_offset(skb);
-		hdrlen = skb_inner_transport_header(skb) - skb->data +
-			 inner_tcp_hdrlen(skb);
+		hdrlen = skb_inner_tcp_all_headers(skb);
 	}
 
 	segs = skb_shinfo(skb)->gso_segs;

@@ -598,7 +598,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
 		return skb;
 
-	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	datalen = skb->len - skb_tcp_all_headers(skb);
 	seq = ntohl(tcp_hdr(skb)->seq);
 	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
 	resync_pending = tls_offload_tx_resync_pending(skb->sk);

@@ -666,7 +666,7 @@ void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
 	if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
 		return;
 
-	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	datalen = skb->len - skb_tcp_all_headers(skb);
 	seq = ntohl(tcp_hdr(skb)->seq);
 
 	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);

@@ -1758,8 +1758,7 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
 	if (skb_is_gso(skb)) {
 		u32 hdrlen;
 
-		hdrlen = skb_inner_transport_header(skb) - skb->data +
-			 inner_tcp_hdrlen(skb);
+		hdrlen = skb_inner_tcp_all_headers(skb);
 
 		/* Assume worst case scenario of having longest possible
 		 * metadata prepend - 8B

@@ -947,10 +947,9 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
 	}
 
 	if (encap)
-		hdrlen = skb_inner_transport_header(skb) - skb->data +
-			 inner_tcp_hdrlen(skb);
+		hdrlen = skb_inner_tcp_all_headers(skb);
 	else
-		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdrlen = skb_tcp_all_headers(skb);
 
 	tso_rem = len;
 	seg_rem = min(tso_rem, hdrlen + mss);

@@ -1877,7 +1877,7 @@ netxen_tso_check(struct net_device *netdev,
 	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
 	    skb_shinfo(skb)->gso_size > 0) {
 
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 
 		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 		first_desc->total_hdr_length = hdr_len;

@@ -260,11 +260,9 @@ static int map_frag_to_bd(struct qede_tx_queue *txq,
 static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
 {
 	if (is_encap_pkt)
-		return (skb_inner_transport_header(skb) +
-			inner_tcp_hdrlen(skb) - skb->data);
-	else
-		return (skb_transport_header(skb) +
-			tcp_hdrlen(skb) - skb->data);
+		return skb_inner_tcp_all_headers(skb);
+
+	return skb_tcp_all_headers(skb);
 }
 
 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */

@@ -497,7 +497,7 @@ set_flags:
 	}
 	opcode = QLCNIC_TX_ETHER_PKT;
 	if (skb_is_gso(skb)) {
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 		first_desc->hdr_length = hdr_len;
 		opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :

@@ -1264,7 +1264,7 @@ static int emac_tso_csum(struct emac_adapter *adpt,
 			pskb_trim(skb, pkt_len);
 		}
 
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		if (unlikely(skb->len == hdr_len)) {
 			/* we only need to do csum */
 			netif_warn(adpt, tx_err, adpt->netdev,

@@ -1339,7 +1339,7 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
 
 	/* if Large Segment Offload is (in TCP Segmentation Offload struct) */
 	if (TPD_LSO(tpd)) {
-		mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		mapped_len = skb_tcp_all_headers(skb);
 
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
 		tpbuf->length = mapped_len;

@@ -3961,7 +3961,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
 		hdr = sizeof(struct udphdr);
 	} else {
-		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		proto_hdr_len = skb_tcp_all_headers(skb);
 		hdr = tcp_hdrlen(skb);
 	}
 

@@ -81,7 +81,7 @@ static int xlgmac_prep_tso(struct sk_buff *skb,
 	if (ret)
 		return ret;
 
-	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	pkt_info->header_len = skb_tcp_all_headers(skb);
 	pkt_info->tcp_header_len = tcp_hdrlen(skb);
 	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
 	pkt_info->mss = skb_shinfo(skb)->gso_size;

@@ -1782,9 +1782,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	}
 
 	/* Header Length = MAC header len + IP header len + TCP header len*/
-	hdrlen = ETH_HLEN +
-		(int)skb_network_header_len(skb) +
-		tcp_hdrlen(skb);
+	hdrlen = skb_tcp_all_headers(skb);
 
 	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
 	switch (gso_type) {

@@ -1201,9 +1201,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		}
 
 		mss = skb_shinfo(skb)->gso_size;
-		hdrlen = skb_transport_header(skb) -
-			skb_mac_header(skb) +
-			tcp_hdrlen(skb);
+		hdrlen = skb_tcp_all_headers(skb);
 
 		skb_shinfo(skb)->gso_segs =
 			DIV_ROUND_UP(skb->len - hdrlen, mss);

@@ -2461,7 +2461,7 @@ static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_io
 	mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
 	mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
 	mac_iocb_ptr->total_hdrs_len =
-		cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+		cpu_to_le16(skb_tcp_all_headers(skb));
 	mac_iocb_ptr->net_trans_offset =
 		cpu_to_le16(skb_network_offset(skb) |
 			    skb_transport_offset(skb)

@@ -46,6 +46,36 @@ static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
 	return inner_tcp_hdr(skb)->doff * 4;
 }
 
+/**
+ * skb_tcp_all_headers - Returns size of all headers for a TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb)) {
+ *         int hlen = skb_tcp_all_headers(skb);
+ *         ...
+ */
+static inline int skb_tcp_all_headers(const struct sk_buff *skb)
+{
+	return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
+/**
+ * skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb) && skb->encapsulation) {
+ *         int hlen = skb_inner_tcp_all_headers(skb);
+ *         ...
+ */
+static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
+{
+	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+}
+
 static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 {
 	return (tcp_hdr(skb)->doff - 5) * 4;

@@ -232,7 +232,7 @@ static int fill_sg_in(struct scatterlist *sg_in,
 		      s32 *sync_size,
 		      int *resync_sgs)
 {
-	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int tcp_payload_offset = skb_tcp_all_headers(skb);
 	int payload_len = skb->len - tcp_payload_offset;
 	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
 	struct tls_record_info *record;

@@ -310,8 +310,8 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
 				   struct sk_buff *skb,
 				   s32 sync_size, u64 rcd_sn)
 {
-	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+	int tcp_payload_offset = skb_tcp_all_headers(skb);
 	int payload_len = skb->len - tcp_payload_offset;
 	void *buf, *iv, *aad, *dummy_buf;
 	struct aead_request *aead_req;

@@ -372,7 +372,7 @@ free_nskb:
 
 static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
 {
-	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int tcp_payload_offset = skb_tcp_all_headers(skb);
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 	int payload_len = skb->len - tcp_payload_offset;
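A second recurring pattern in the TLS, kTLS and NIC hunks above derives the TCP payload length from the new helper; a minimal sketch of that pattern, assuming a TCP skb on the TX path (the function name is hypothetical, not part of the patch):

/* Payload bytes = total length minus MAC/IP/TCP headers (incl. options). */
static int example_tcp_payload_len(const struct sk_buff *skb)
{
	return skb->len - skb_tcp_all_headers(skb);
}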