inet: introduce dst_rtable() helper
I added dst_rt6_info() in commit e8dfd42c17 ("ipv6: introduce dst_rt6_info() helper").
This patch does a similar change for IPv4.
Instead of (struct rtable *)dst casts, we can use:
#define dst_rtable(_ptr) \
container_of_const(_ptr, struct rtable, dst)
This patch is smaller than the IPv6 one because IPv4 already has the skb_rtable() helper.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Link: https://lore.kernel.org/r/20240429133009.1227754-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent b451767036
commit 05d6d49209
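
Before the diff, a minimal sketch of what the helper buys (illustration only, not part of the patch; example_uses_gateway() is a hypothetical function): dst_rtable() is a drop-in replacement for the open-coded cast, and because it is built on container_of_const() it also preserves the const qualifier of its argument instead of silently casting it away.

#include <net/route.h>

/* Hypothetical example, not from the patch: struct rtable embeds its
 * struct dst_entry, so the old cast and the new helper yield the same
 * pointer, but dst_rtable() type-checks the argument and keeps const.
 */
static bool example_uses_gateway(const struct dst_entry *dst)
{
        /* was: const struct rtable *rt = (const struct rtable *)dst; */
        const struct rtable *rt = dst_rtable(dst);

        return rt->rt_uses_gateway;
}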
@@ -348,16 +348,10 @@ static int dst_fetch_ha(const struct dst_entry *dst,
 
 static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
 {
-        const struct rtable *rt;
-        const struct rt6_info *rt6;
-
-        if (family == AF_INET) {
-                rt = container_of(dst, struct rtable, dst);
-                return rt->rt_uses_gateway;
-        }
+        if (family == AF_INET)
+                return dst_rtable(dst)->rt_uses_gateway;
 
-        rt6 = dst_rt6_info(dst);
-        return rt6->rt6i_flags & RTF_GATEWAY;
+        return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY;
 }
 
 static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
@@ -860,7 +860,7 @@ static int vrf_rt6_create(struct net_device *dev)
 static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
         struct dst_entry *dst = skb_dst(skb);
-        struct rtable *rt = (struct rtable *)dst;
+        struct rtable *rt = dst_rtable(dst);
         struct net_device *dev = dst->dev;
         unsigned int hh_len = LL_RESERVED_SPACE(dev);
         struct neighbour *neigh;
@@ -970,9 +970,8 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
 static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
                                           struct dst_entry *dst)
 {
-        struct rtable *rt = (struct rtable *) dst;
-
-        return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;
+        return (dst) ? rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) :
+                       ip_hdr(skb)->daddr;
 }
 
 static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
@@ -1180,15 +1180,6 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb)
         return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
 }
 
-/**
- * skb_rtable - Returns the skb &rtable
- * @skb: buffer
- */
-static inline struct rtable *skb_rtable(const struct sk_buff *skb)
-{
-        return (struct rtable *)skb_dst(skb);
-}
-
 /* For mangling skb->pkt_type from user space side from applications
  * such as nft, tc, etc, we only allow a conservative subset of
  * possible pkt_types to be set.
@@ -423,7 +423,7 @@ int ip_decrease_ttl(struct iphdr *iph)
 
 static inline int ip_mtu_locked(const struct dst_entry *dst)
 {
-        const struct rtable *rt = (const struct rtable *)dst;
+        const struct rtable *rt = dst_rtable(dst);
 
         return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
 }
@@ -461,7 +461,7 @@ static inline bool ip_sk_ignore_df(const struct sock *sk)
 static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
                                                     bool forwarding)
 {
-        const struct rtable *rt = container_of(dst, struct rtable, dst);
+        const struct rtable *rt = dst_rtable(dst);
         struct net *net = dev_net(dst->dev);
         unsigned int mtu;
 
@@ -75,6 +75,17 @@ struct rtable {
                         rt_pmtu:31;
 };
 
+#define dst_rtable(_ptr) container_of_const(_ptr, struct rtable, dst)
+
+/**
+ * skb_rtable - Returns the skb &rtable
+ * @skb: buffer
+ */
+static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+{
+        return dst_rtable(skb_dst(skb));
+}
+
 static inline bool rt_is_input_route(const struct rtable *rt)
 {
         return rt->rt_is_input != 0;
@@ -345,7 +345,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                 dev->stats.tx_dropped++;
                 return NETDEV_TX_OK;
         }
-        rt = (struct rtable *) dst;
+        rt = dst_rtable(dst);
         if (rt->rt_gw_family == AF_INET)
                 daddr = &rt->rt_gw4;
         else
@@ -83,7 +83,7 @@ struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr)
                 return NULL;
 
         *saddr = idst->in_saddr.s_addr;
-        return container_of(dst, struct rtable, dst);
+        return dst_rtable(dst);
 }
 EXPORT_SYMBOL_GPL(dst_cache_get_ip4);
 
@@ -2317,8 +2317,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
 
         rcu_read_lock();
         if (!nh) {
-                struct dst_entry *dst = skb_dst(skb);
-                struct rtable *rt = container_of(dst, struct rtable, dst);
+                struct rtable *rt = skb_rtable(skb);
 
                 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
         } else if (nh->nh_family == AF_INET6) {
@@ -1307,8 +1307,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
 
 int inet_sk_rebuild_header(struct sock *sk)
 {
+        struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
         struct inet_sock *inet = inet_sk(sk);
-        struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
         __be32 daddr;
         struct ip_options_rcu *inet_opt;
         struct flowi4 *fl4;
@@ -483,6 +483,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
                                         struct icmp_bxm *param)
 {
         struct net_device *route_lookup_dev;
+        struct dst_entry *dst, *dst2;
         struct rtable *rt, *rt2;
         struct flowi4 fl4_dec;
         int err;
@@ -508,16 +509,17 @@ static struct rtable *icmp_route_lookup(struct net *net,
         /* No need to clone since we're just using its address. */
         rt2 = rt;
 
-        rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
-                                           flowi4_to_flowi(fl4), NULL, 0);
-        if (!IS_ERR(rt)) {
+        dst = xfrm_lookup(net, &rt->dst,
+                          flowi4_to_flowi(fl4), NULL, 0);
+        rt = dst_rtable(dst);
+        if (!IS_ERR(dst)) {
                 if (rt != rt2)
                         return rt;
-        } else if (PTR_ERR(rt) == -EPERM) {
+        } else if (PTR_ERR(dst) == -EPERM) {
                 rt = NULL;
-        } else
+        } else {
                 return rt;
-
+        }
         err = xfrm_decode_session_reverse(net, skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
         if (err)
                 goto relookup_failed;
@@ -551,19 +553,19 @@ static struct rtable *icmp_route_lookup(struct net *net,
         if (err)
                 goto relookup_failed;
 
-        rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
-                                            flowi4_to_flowi(&fl4_dec), NULL,
-                                            XFRM_LOOKUP_ICMP);
-        if (!IS_ERR(rt2)) {
+        dst2 = xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4_dec), NULL,
+                           XFRM_LOOKUP_ICMP);
+        rt2 = dst_rtable(dst2);
+        if (!IS_ERR(dst2)) {
                 dst_release(&rt->dst);
                 memcpy(fl4, &fl4_dec, sizeof(*fl4));
                 rt = rt2;
-        } else if (PTR_ERR(rt2) == -EPERM) {
+        } else if (PTR_ERR(dst2) == -EPERM) {
                 if (rt)
                         dst_release(&rt->dst);
                 return rt2;
         } else {
-                err = PTR_ERR(rt2);
+                err = PTR_ERR(dst2);
                 goto relookup_failed;
         }
         return rt;
@@ -616,7 +616,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
                 dst = skb_dst(skb);
                 if (curr_dst != dst) {
                         hint = ip_extract_route_hint(net, skb,
-                                        ((struct rtable *)dst)->rt_type);
+                                                     dst_rtable(dst)->rt_type);
 
                         /* dispatch old sublist */
                         if (!list_empty(&sublist))
@@ -198,7 +198,7 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
         struct dst_entry *dst = skb_dst(skb);
-        struct rtable *rt = (struct rtable *)dst;
+        struct rtable *rt = dst_rtable(dst);
         struct net_device *dev = dst->dev;
         unsigned int hh_len = LL_RESERVED_SPACE(dev);
         struct neighbour *neigh;
@@ -475,7 +475,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
                 goto packet_routed;
 
         /* Make sure we can route this packet. */
-        rt = (struct rtable *)__sk_dst_check(sk, 0);
+        rt = dst_rtable(__sk_dst_check(sk, 0));
         if (!rt) {
                 __be32 daddr;
 
@@ -971,7 +971,7 @@ static int __ip_append_data(struct sock *sk,
         bool zc = false;
         unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
         int csummode = CHECKSUM_NONE;
-        struct rtable *rt = (struct rtable *)cork->dst;
+        struct rtable *rt = dst_rtable(cork->dst);
         bool paged, hold_tskey, extra_uref = false;
         unsigned int wmem_alloc_delta = 0;
         u32 tskey = 0;
@@ -1390,7 +1390,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
         struct inet_sock *inet = inet_sk(sk);
         struct net *net = sock_net(sk);
         struct ip_options *opt = NULL;
-        struct rtable *rt = (struct rtable *)cork->dst;
+        struct rtable *rt = dst_rtable(cork->dst);
         struct iphdr *iph;
         u8 pmtudisc, ttl;
         __be16 df = 0;
@@ -819,7 +819,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
         u32 mark = skb->mark;
         __u8 tos = iph->tos;
 
-        rt = (struct rtable *) dst;
+        rt = dst_rtable(dst);
 
         __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
         __ip_do_redirect(rt, skb, &fl4, true);
@@ -827,7 +827,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
 
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 {
-        struct rtable *rt = (struct rtable *)dst;
+        struct rtable *rt = dst_rtable(dst);
         struct dst_entry *ret = dst;
 
         if (rt) {
@@ -1044,7 +1044,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu,
                               bool confirm_neigh)
 {
-        struct rtable *rt = (struct rtable *) dst;
+        struct rtable *rt = dst_rtable(dst);
         struct flowi4 fl4;
 
         ip_rt_build_flow_key(&fl4, sk, skb);
@@ -1115,7 +1115,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
         __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
 
-        rt = (struct rtable *)odst;
+        rt = dst_rtable(odst);
         if (odst->obsolete && !odst->ops->check(odst, 0)) {
                 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                 if (IS_ERR(rt))
@@ -1124,7 +1124,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
                 new = true;
         }
 
-        __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
+        __ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);
 
         if (!dst_check(&rt->dst, 0)) {
                 if (new)
@@ -1181,7 +1181,7 @@ EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
 INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
                                                           u32 cookie)
 {
-        struct rtable *rt = (struct rtable *) dst;
+        struct rtable *rt = dst_rtable(dst);
 
         /* All IPV4 dsts are created with ->obsolete set to the value
          * DST_OBSOLETE_FORCE_CHK which forces validation calls down
@@ -1516,10 +1516,8 @@ void rt_del_uncached_list(struct rtable *rt)
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
-        struct rtable *rt = (struct rtable *)dst;
-
         ip_dst_metrics_put(dst);
-        rt_del_uncached_list(rt);
+        rt_del_uncached_list(dst_rtable(dst));
 }
 
 void rt_flush_dev(struct net_device *dev)
@@ -2820,7 +2818,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 
 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
-        struct rtable *ort = (struct rtable *) dst_orig;
+        struct rtable *ort = dst_rtable(dst_orig);
         struct rtable *rt;
 
         rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
@@ -2865,9 +2863,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
 
         if (flp4->flowi4_proto) {
                 flp4->flowi4_oif = rt->dst.dev->ifindex;
-                rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
-                                                        flowi4_to_flowi(flp4),
-                                                        sk, 0);
+                rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
+                                                  flowi4_to_flowi(flp4),
+                                                  sk, 0));
         }
 
         return rt;
@@ -1217,7 +1217,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
         }
 
         if (connected)
-                rt = (struct rtable *)sk_dst_check(sk, 0);
+                rt = dst_rtable(sk_dst_check(sk, 0));
 
         if (!rt) {
                 struct net *net = sock_net(sk);
@@ -69,7 +69,7 @@ static int xfrm4_get_saddr(struct net *net, int oif,
 static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
                           const struct flowi *fl)
 {
-        struct rtable *rt = (struct rtable *)xdst->route;
+        struct rtable *rt = dst_rtable(xdst->route);
         const struct flowi4 *fl4 = &fl->u.ip4;
 
         xdst->u.rt.rt_iif = fl4->flowi4_iif;
@@ -459,7 +459,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
         fl4 = &inet->cork.fl.u.ip4;
         if (connected)
-                rt = (struct rtable *)__sk_dst_check(sk, 0);
+                rt = dst_rtable(__sk_dst_check(sk, 0));
 
         rcu_read_lock();
         if (!rt) {
@@ -81,7 +81,7 @@ static int mpls_xmit(struct sk_buff *skb)
                         ttl = net->mpls.default_ttl;
                 else
                         ttl = ip_hdr(skb)->ttl;
-                rt = (struct rtable *)dst;
+                rt = dst_rtable(dst);
         } else if (dst->ops->family == AF_INET6) {
                 if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
                         ttl = tun_encap_info->default_ttl;
@@ -318,7 +318,7 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
         if (dest) {
                 dest_dst = __ip_vs_dst_check(dest);
                 if (likely(dest_dst))
-                        rt = (struct rtable *) dest_dst->dst_cache;
+                        rt = dst_rtable(dest_dst->dst_cache);
                 else {
                         dest_dst = ip_vs_dest_dst_alloc();
                         spin_lock_bh(&dest->dst_lock);
@@ -434,7 +434,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
                 return NF_ACCEPT;
 
         if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
-                rt = (struct rtable *)tuplehash->tuple.dst_cache;
+                rt = dst_rtable(tuplehash->tuple.dst_cache);
                 memset(skb->cb, 0, sizeof(struct inet_skb_parm));
                 IPCB(skb)->iif = skb->dev->ifindex;
                 IPCB(skb)->flags = IPSKB_FORWARDED;
@@ -446,7 +446,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 
         switch (tuplehash->tuple.xmit_type) {
         case FLOW_OFFLOAD_XMIT_NEIGH:
-                rt = (struct rtable *)tuplehash->tuple.dst_cache;
+                rt = dst_rtable(tuplehash->tuple.dst_cache);
                 outdev = rt->dst.dev;
                 skb->dev = outdev;
                 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -73,7 +73,7 @@ void nft_rt_get_eval(const struct nft_expr *expr,
                 if (nft_pf(pkt) != NFPROTO_IPV4)
                         goto err;
 
-                *dest = (__force u32)rt_nexthop((const struct rtable *)dst,
+                *dest = (__force u32)rt_nexthop(dst_rtable(dst),
                                                 ip_hdr(skb)->daddr);
                 break;
         case NFT_RT_NEXTHOP6:
@@ -552,7 +552,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
                               struct flowi *fl)
 {
         union sctp_addr *saddr = &t->saddr;
-        struct rtable *rt = (struct rtable *)t->dst;
+        struct rtable *rt = dst_rtable(t->dst);
 
         if (rt) {
                 saddr->v4.sin_family = AF_INET;
@@ -1085,7 +1085,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
                 skb_reset_inner_mac_header(skb);
                 skb_reset_inner_transport_header(skb);
                 skb_set_inner_ipproto(skb, IPPROTO_SCTP);
-                udp_tunnel_xmit_skb((struct rtable *)dst, sk, skb, fl4->saddr,
+                udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
                                     fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
                                     sctp_sk(sk)->udp_port, t->encap_port, false, false);
                 return 0;
@@ -174,7 +174,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
         local_bh_disable();
         ndst = dst_cache_get(cache);
         if (dst->proto == htons(ETH_P_IP)) {
-                struct rtable *rt = (struct rtable *)ndst;
+                struct rtable *rt = dst_rtable(ndst);
 
                 if (!rt) {
                         struct flowi4 fl = {