// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	6LoWPAN next header compression
 *
 *	Authors:
 *	Alexander Aring		<aar@pengutronix.de>
 */

#include <linux/netdevice.h>

#include <net/ipv6.h>

#include "nhc.h"

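/* Table of registered next header compression methods, indexed by the
 * IPv6 next header value they handle; all accesses are serialized by
 * lowpan_nhc_lock.
 */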
static const struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
static DEFINE_SPINLOCK(lowpan_nhc_lock);

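/* Look up the registered NHC whose (id, idmask) pair matches the NHC
 * dispatch byte at the start of the skb data.  Returns NULL if no byte
 * can be pulled or no registered NHC matches.  Called with
 * lowpan_nhc_lock held.
 */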
static const struct lowpan_nhc *lowpan_nhc_by_nhcid(struct sk_buff *skb)
{
	const struct lowpan_nhc *nhc;
	int i;
	u8 id;

	if (!pskb_may_pull(skb, 1))
		return NULL;

	id = *skb->data;

	for (i = 0; i < NEXTHDR_MAX + 1; i++) {
		nhc = lowpan_nexthdr_nhcs[i];
		if (!nhc)
			continue;

		if ((id & nhc->idmask) == nhc->id)
			return nhc;
	}

	return NULL;
}

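/* Report whether a compression handler is registered for the next
 * header value in @hdr: returns 0 if one exists, -ENOENT otherwise.
 */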
int lowpan_nhc_check_compression(struct sk_buff *skb,
				 const struct ipv6hdr *hdr, u8 **hc_ptr)
{
	const struct lowpan_nhc *nhc;
	int ret = 0;

	spin_lock_bh(&lowpan_nhc_lock);

	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
	if (!(nhc && nhc->compress))
		ret = -ENOENT;

	spin_unlock_bh(&lowpan_nhc_lock);

	return ret;
}

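/* Compress the next header of @hdr through the registered handler and
 * pull the now-compressed transport header off the skb.  Fails with
 * -EINVAL if the handler was unregistered after
 * lowpan_nhc_check_compression, or with the handler's error code.
 */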
int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
			      u8 **hc_ptr)
{
	int ret;
	const struct lowpan_nhc *nhc;

	spin_lock_bh(&lowpan_nhc_lock);

	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
	/* Check if the nhc module was removed while we were unlocked.
	 * TODO: this is a workaround; we should prevent unloading of nhc
	 * modules while we are unlocked.  Hitting this path always drops
	 * the lowpan packet, but it is very unlikely.
	 *
	 * A clean solution isn't easy because we need to decide in
	 * lowpan_nhc_check_compression whether we compress or not, and
	 * because of the inline data already added to the skb we can't
	 * move this handling.
	 */
	if (unlikely(!nhc || !nhc->compress)) {
		ret = -EINVAL;
		goto out;
	}

	/* In the case of RAW sockets the transport header is not set by
	 * the ip6 stack, so we must set it ourselves.
	 */
	if (skb->transport_header == skb->network_header)
		skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	ret = nhc->compress(skb, hc_ptr);
	if (ret < 0)
		goto out;

	/* skip the transport header */
	skb_pull(skb, nhc->nexthdrlen);

out:
	spin_unlock_bh(&lowpan_nhc_lock);

	return ret;
}

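/* Uncompress the NHC-encoded header at the start of the skb data and
 * set hdr->nexthdr accordingly.  Returns 0 on success, -ENOENT if no
 * registered NHC matches the dispatch byte, -ENOTSUPP if the matching
 * NHC has no uncompress handler, or the handler's error code.
 */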
int lowpan_nhc_do_uncompression(struct sk_buff *skb,
				const struct net_device *dev,
				struct ipv6hdr *hdr)
{
	const struct lowpan_nhc *nhc;
	int ret;

	spin_lock_bh(&lowpan_nhc_lock);

	nhc = lowpan_nhc_by_nhcid(skb);
	if (nhc) {
		if (nhc->uncompress) {
			ret = nhc->uncompress(skb, sizeof(struct ipv6hdr) +
					      nhc->nexthdrlen);
			if (ret < 0) {
				spin_unlock_bh(&lowpan_nhc_lock);
				return ret;
			}
		} else {
			spin_unlock_bh(&lowpan_nhc_lock);
			netdev_warn(dev, "received nhc id for %s which is not implemented.\n",
				    nhc->name);
			return -ENOTSUPP;
		}
	} else {
		spin_unlock_bh(&lowpan_nhc_lock);
		netdev_warn(dev, "received unknown nhc id which was not found.\n");
		return -ENOENT;
	}

	hdr->nexthdr = nhc->nexthdr;
	skb_reset_transport_header(skb);
	raw_dump_table(__func__, "raw transport header dump",
		       skb_transport_header(skb), nhc->nexthdrlen);

	spin_unlock_bh(&lowpan_nhc_lock);

	return 0;
}

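/* Register an NHC for its next header value.  Returns -EEXIST if a
 * handler is already registered for that value.
 */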
int lowpan_nhc_add(const struct lowpan_nhc *nhc)
{
	int ret = 0;

	spin_lock_bh(&lowpan_nhc_lock);

	if (lowpan_nexthdr_nhcs[nhc->nexthdr]) {
		ret = -EEXIST;
		goto out;
	}

	lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc;
out:
	spin_unlock_bh(&lowpan_nhc_lock);
	return ret;
}
EXPORT_SYMBOL(lowpan_nhc_add);

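/* Unregister an NHC and wait for packets currently being processed to
 * finish before returning.
 */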
void lowpan_nhc_del(const struct lowpan_nhc *nhc)
{
	spin_lock_bh(&lowpan_nhc_lock);

	lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL;

	spin_unlock_bh(&lowpan_nhc_lock);

	synchronize_net();
}
EXPORT_SYMBOL(lowpan_nhc_del);