Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2015-10-08

Here's another set of Bluetooth & 802.15.4 patches for the 4.4 kernel.

802.15.4:
 - Many improvements & fixes to the mrf24j40 driver
 - Fixes and cleanups to nl802154, mac802154 & ieee802154 code

Bluetooth:
 - New chipset support in btmrvl driver
 - Fixes & cleanups to btbcm, btmrvl, bpa10x & btintel drivers
 - Support for vendor specific diagnostic data through common API
 - Cleanups to the 6lowpan code
 - New events & message types for monitor channel

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2015-10-11 05:15:30 -07:00
commit 7bcfeead48
60 changed files with 4001 additions and 1123 deletions


@ -0,0 +1,20 @@
* MRF24J40 IEEE 802.15.4 *
Required properties:
- compatible: should be "microchip,mrf24j40", "microchip,mrf24j40ma",
  or "microchip,mrf24j40mc", depending on your transceiver board
- spi-max-frequency: maximal bus speed; should be set to a value at or
  below 10000000
- reg: the chipselect index
- interrupts: the interrupt generated by the device.
Example:
	mrf24j40ma@0 {
		compatible = "microchip,mrf24j40ma";
		spi-max-frequency = <8500000>;
		reg = <0>;
		interrupts = <19 8>;
		interrupt-parent = <&gpio3>;
	};


@ -6978,6 +6978,7 @@ M: Alan Ott <alan@signal11.us>
L: linux-wpan@vger.kernel.org
S: Maintained
F: drivers/net/ieee802154/mrf24j40.c
F: Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
MSI LAPTOP SUPPORT
M: "Lee, Chun-Yi" <jlee@suse.com>


@ -4,6 +4,7 @@ menu "Bluetooth device drivers"
config BT_INTEL
tristate
select REGMAP
config BT_BCM
tristate
@ -183,6 +184,7 @@ config BT_HCIBCM203X
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
depends on USB
select BT_HCIUART_H4
help
Bluetooth HCI BPA10x USB driver.
This driver provides support for the Digianswer BPA 100/105 Bluetooth
@ -275,7 +277,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897.
Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8997.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@ -289,7 +291,7 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897
devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8997
chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver


@ -422,17 +422,12 @@ static int bfusb_open(struct hci_dev *hdev)
BT_DBG("hdev %p bfusb %p", hdev, data);
if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
return 0;
write_lock_irqsave(&data->lock, flags);
err = bfusb_rx_submit(data, NULL);
if (!err) {
for (i = 1; i < BFUSB_MAX_BULK_RX; i++)
bfusb_rx_submit(data, NULL);
} else {
clear_bit(HCI_RUNNING, &hdev->flags);
}
write_unlock_irqrestore(&data->lock, flags);
@ -458,9 +453,6 @@ static int bfusb_close(struct hci_dev *hdev)
BT_DBG("hdev %p bfusb %p", hdev, data);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
write_lock_irqsave(&data->lock, flags);
write_unlock_irqrestore(&data->lock, flags);
@ -479,9 +471,6 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;


@ -390,7 +390,7 @@ static void bluecard_receive(struct bluecard_info *info,
for (i = 0; i < len; i++) {
/* Allocate packet */
if (info->rx_skb == NULL) {
if (!info->rx_skb) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@ -628,9 +628,6 @@ static int bluecard_hci_open(struct hci_dev *hdev)
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
if (test_and_set_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
unsigned int iobase = info->p_dev->resource[0]->start;
@ -646,9 +643,6 @@ static int bluecard_hci_close(struct hci_dev *hdev)
{
struct bluecard_info *info = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
bluecard_hci_flush(hdev);
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {


@ -35,7 +35,9 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define VERSION "0.10"
#include "hci_uart.h"
#define VERSION "0.11"
static const struct usb_device_id bpa10x_table[] = {
/* Tektronix BPA 100/105 (Digianswer) */
@ -56,112 +58,6 @@ struct bpa10x_data {
struct sk_buff *rx_skb[2];
};
#define HCI_VENDOR_HDR_SIZE 5
struct hci_vendor_hdr {
__u8 type;
__le16 snum;
__le16 dlen;
} __packed;
static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
{
struct bpa10x_data *data = hci_get_drvdata(hdev);
BT_DBG("%s queue %d buffer %p count %d", hdev->name,
queue, buf, count);
if (queue < 0 || queue > 1)
return -EILSEQ;
hdev->stat.byte_rx += count;
while (count) {
struct sk_buff *skb = data->rx_skb[queue];
struct { __u8 type; int expect; } *scb;
int type, len = 0;
if (!skb) {
/* Start of the frame */
type = *((__u8 *) buf);
count--; buf++;
switch (type) {
case HCI_EVENT_PKT:
if (count >= HCI_EVENT_HDR_SIZE) {
struct hci_event_hdr *h = buf;
len = HCI_EVENT_HDR_SIZE + h->plen;
} else
return -EILSEQ;
break;
case HCI_ACLDATA_PKT:
if (count >= HCI_ACL_HDR_SIZE) {
struct hci_acl_hdr *h = buf;
len = HCI_ACL_HDR_SIZE +
__le16_to_cpu(h->dlen);
} else
return -EILSEQ;
break;
case HCI_SCODATA_PKT:
if (count >= HCI_SCO_HDR_SIZE) {
struct hci_sco_hdr *h = buf;
len = HCI_SCO_HDR_SIZE + h->dlen;
} else
return -EILSEQ;
break;
case HCI_VENDOR_PKT:
if (count >= HCI_VENDOR_HDR_SIZE) {
struct hci_vendor_hdr *h = buf;
len = HCI_VENDOR_HDR_SIZE +
__le16_to_cpu(h->dlen);
} else
return -EILSEQ;
break;
}
skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb) {
BT_ERR("%s no memory for packet", hdev->name);
return -ENOMEM;
}
data->rx_skb[queue] = skb;
scb = (void *) skb->cb;
scb->type = type;
scb->expect = len;
} else {
/* Continuation */
scb = (void *) skb->cb;
len = scb->expect;
}
len = min(len, count);
memcpy(skb_put(skb, len), buf, len);
scb->expect -= len;
if (scb->expect == 0) {
/* Complete frame */
data->rx_skb[queue] = NULL;
bt_cb(skb)->pkt_type = scb->type;
hci_recv_frame(hdev, skb);
}
count -= len; buf += len;
}
return 0;
}
static void bpa10x_tx_complete(struct urb *urb)
{
struct sk_buff *skb = urb->context;
@ -184,6 +80,22 @@ done:
kfree_skb(skb);
}
#define HCI_VENDOR_HDR_SIZE 5
#define HCI_RECV_VENDOR \
.type = HCI_VENDOR_PKT, \
.hlen = HCI_VENDOR_HDR_SIZE, \
.loff = 3, \
.lsize = 2, \
.maxlen = HCI_MAX_FRAME_SIZE
static const struct h4_recv_pkt bpa10x_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ HCI_RECV_VENDOR, .recv = hci_recv_diag },
};
static void bpa10x_rx_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
@ -197,11 +109,17 @@ static void bpa10x_rx_complete(struct urb *urb)
return;
if (urb->status == 0) {
if (bpa10x_recv(hdev, usb_pipebulk(urb->pipe),
bool idx = usb_pipebulk(urb->pipe);
data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length) < 0) {
urb->actual_length,
bpa10x_recv_pkts,
ARRAY_SIZE(bpa10x_recv_pkts));
if (IS_ERR(data->rx_skb[idx])) {
BT_ERR("%s corrupted event packet", hdev->name);
hdev->stat.err_rx++;
data->rx_skb[idx] = NULL;
}
}
@ -304,9 +222,6 @@ static int bpa10x_open(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
return 0;
err = bpa10x_submit_intr_urb(hdev);
if (err < 0)
goto error;
@ -320,8 +235,6 @@ static int bpa10x_open(struct hci_dev *hdev)
error:
usb_kill_anchored_urbs(&data->rx_anchor);
clear_bit(HCI_RUNNING, &hdev->flags);
return err;
}
@ -331,9 +244,6 @@ static int bpa10x_close(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
usb_kill_anchored_urbs(&data->rx_anchor);
return 0;
@ -350,6 +260,24 @@ static int bpa10x_flush(struct hci_dev *hdev)
return 0;
}
static int bpa10x_setup(struct hci_dev *hdev)
{
const u8 req[] = { 0x07 };
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
/* Read revision string */
skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
return 0;
}
static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
struct bpa10x_data *data = hci_get_drvdata(hdev);
@ -360,9 +288,6 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
skb->dev = (void *) hdev;
urb = usb_alloc_urb(0, GFP_ATOMIC);
@ -431,6 +356,25 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
{
const u8 req[] = { 0x00, enable };
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -ENETDOWN;
/* Enable sniffer operation */
skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
kfree_skb(skb);
return 0;
}
static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct bpa10x_data *data;
@ -465,7 +409,9 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
hdev->open = bpa10x_open;
hdev->close = bpa10x_close;
hdev->flush = bpa10x_flush;
hdev->setup = bpa10x_setup;
hdev->send = bpa10x_send_frame;
hdev->set_diag = bpa10x_set_diag;
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
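For context, the h4_recv_pkt table added above is how bpa10x now feeds vendor diagnostic packets through the shared H4 reassembly helper. A minimal sketch of one more descriptor follows; the 0xf1 packet type, the HCI_RECV_DEBUG name and its field values are hypothetical and serve only to illustrate what hlen, loff, lsize and maxlen mean.

/* Hypothetical descriptor: a vendor "debug" packet with type byte 0xf1 and
 * a 4-byte header whose 1-byte payload-length field sits at offset 3.
 *   .hlen   - header bytes h4_recv_buf collects before reading the length
 *   .loff   - offset of the length field inside that header
 *   .lsize  - width of the length field in bytes (0 means fixed-size packet)
 *   .maxlen - upper bound for the reassembled frame
 */
#define HCI_RECV_DEBUG \
	.type = 0xf1, \
	.hlen = 4, \
	.loff = 3, \
	.lsize = 1, \
	.maxlen = HCI_MAX_FRAME_SIZE

static const struct h4_recv_pkt example_recv_pkts[] = {
	{ H4_RECV_EVENT,  .recv = hci_recv_frame },
	{ HCI_RECV_DEBUG, .recv = hci_recv_diag },
};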


@ -233,7 +233,7 @@ static void bt3c_receive(struct bt3c_info *info)
info->hdev->stat.byte_rx++;
/* Allocate packet */
if (info->rx_skb == NULL) {
if (!info->rx_skb) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@ -270,7 +270,6 @@ static void bt3c_receive(struct bt3c_info *info)
/* Unknown packet */
BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
info->hdev->stat.err_rx++;
clear_bit(HCI_RUNNING, &(info->hdev->flags));
kfree_skb(info->rx_skb);
info->rx_skb = NULL;
@ -395,17 +394,12 @@ static int bt3c_hci_flush(struct hci_dev *hdev)
static int bt3c_hci_open(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &(hdev->flags));
return 0;
}
static int bt3c_hci_close(struct hci_dev *hdev)
{
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
bt3c_hci_flush(hdev);
return 0;


@ -181,6 +181,27 @@ static int btbcm_reset(struct hci_dev *hdev)
return 0;
}
static struct sk_buff *btbcm_read_local_name(struct hci_dev *hdev)
{
struct sk_buff *skb;
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: BCM: Reading local name failed (%ld)",
hdev->name, PTR_ERR(skb));
return skb;
}
if (skb->len != sizeof(struct hci_rp_read_local_name)) {
BT_ERR("%s: BCM: Local name length mismatch", hdev->name);
kfree_skb(skb);
return ERR_PTR(-EIO);
}
return skb;
}
static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev)
{
struct sk_buff *skb;
@ -393,6 +414,14 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
kfree_skb(skb);
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (IS_ERR(skb))
return PTR_ERR(skb);
BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
switch ((rev & 0xf000) >> 12) {
case 0:
case 3:
@ -464,6 +493,14 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
hw_name ? : "BCM", (subver & 0x7000) >> 13,
(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (IS_ERR(skb))
return PTR_ERR(skb);
BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
btbcm_check_bdaddr(hdev);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
@ -475,12 +512,25 @@ EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
int btbcm_setup_apple(struct hci_dev *hdev)
{
struct sk_buff *skb;
int err;
/* Reset */
err = btbcm_reset(hdev);
if (err)
return err;
/* Read Verbose Config Version Info */
skb = btbcm_read_verbose_config(hdev);
if (!IS_ERR(skb)) {
BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
get_unaligned_le16(skb->data + 5));
BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name,
skb->data[1], get_unaligned_le16(skb->data + 5));
kfree_skb(skb);
}
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (!IS_ERR(skb)) {
BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
}


@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@ -215,6 +216,201 @@ int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
}
EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
/* ------- REGMAP IBT SUPPORT ------- */
#define IBT_REG_MODE_8BIT 0x00
#define IBT_REG_MODE_16BIT 0x01
#define IBT_REG_MODE_32BIT 0x02
struct regmap_ibt_context {
struct hci_dev *hdev;
__u16 op_write;
__u16 op_read;
};
struct ibt_cp_reg_access {
__le32 addr;
__u8 mode;
__u8 len;
__u8 data[0];
} __packed;
struct ibt_rp_reg_access {
__u8 status;
__le32 addr;
__u8 data[0];
} __packed;
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
void *val, size_t val_size)
{
struct regmap_ibt_context *ctx = context;
struct ibt_cp_reg_access cp;
struct ibt_rp_reg_access *rp;
struct sk_buff *skb;
int err = 0;
if (reg_size != sizeof(__le32))
return -EINVAL;
switch (val_size) {
case 1:
cp.mode = IBT_REG_MODE_8BIT;
break;
case 2:
cp.mode = IBT_REG_MODE_16BIT;
break;
case 4:
cp.mode = IBT_REG_MODE_32BIT;
break;
default:
return -EINVAL;
}
/* regmap provides a little-endian formatted addr */
cp.addr = *(__le32 *)addr;
cp.len = val_size;
bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));
skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
le32_to_cpu(cp.addr), err);
return err;
}
if (skb->len != sizeof(*rp) + val_size) {
bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
le32_to_cpu(cp.addr));
err = -EINVAL;
goto done;
}
rp = (struct ibt_rp_reg_access *)skb->data;
if (rp->addr != cp.addr) {
bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
le32_to_cpu(rp->addr));
err = -EINVAL;
goto done;
}
memcpy(val, rp->data, val_size);
done:
kfree_skb(skb);
return err;
}
static int regmap_ibt_gather_write(void *context,
const void *addr, size_t reg_size,
const void *val, size_t val_size)
{
struct regmap_ibt_context *ctx = context;
struct ibt_cp_reg_access *cp;
struct sk_buff *skb;
int plen = sizeof(*cp) + val_size;
u8 mode;
int err = 0;
if (reg_size != sizeof(__le32))
return -EINVAL;
switch (val_size) {
case 1:
mode = IBT_REG_MODE_8BIT;
break;
case 2:
mode = IBT_REG_MODE_16BIT;
break;
case 4:
mode = IBT_REG_MODE_32BIT;
break;
default:
return -EINVAL;
}
cp = kmalloc(plen, GFP_KERNEL);
if (!cp)
return -ENOMEM;
/* regmap provides a little-endian formatted addr/value */
cp->addr = *(__le32 *)addr;
cp->mode = mode;
cp->len = val_size;
memcpy(&cp->data, val, val_size);
bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));
skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
le32_to_cpu(cp->addr), err);
goto done;
}
kfree_skb(skb);
done:
kfree(cp);
return err;
}
static int regmap_ibt_write(void *context, const void *data, size_t count)
{
/* data contains register+value, since we only support 32bit addr,
* minimum data size is 4 bytes.
*/
if (WARN_ONCE(count < 4, "Invalid register access"))
return -EINVAL;
return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4);
}
static void regmap_ibt_free_context(void *context)
{
kfree(context);
}
static struct regmap_bus regmap_ibt = {
.read = regmap_ibt_read,
.write = regmap_ibt_write,
.gather_write = regmap_ibt_gather_write,
.free_context = regmap_ibt_free_context,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
/* Config is the same for all register regions */
static const struct regmap_config regmap_ibt_cfg = {
.name = "btintel_regmap",
.reg_bits = 32,
.val_bits = 32,
};
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
u16 opcode_write)
{
struct regmap_ibt_context *ctx;
bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read,
opcode_write);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->op_read = opcode_read;
ctx->op_write = opcode_write;
ctx->hdev = hdev;
return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg);
}
EXPORT_SYMBOL_GPL(btintel_regmap_init);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
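As a usage illustration for the new btintel_regmap_init() helper above, a caller could read a controller register through the standard regmap API roughly as sketched below. This is a sketch only: the 0xfc8d/0xfc8e opcodes and the 0x80 register address are placeholders, not values defined by this patch set.

#include <linux/err.h>
#include <linux/regmap.h>
#include <net/bluetooth/hci_core.h>

#include "btintel.h"

/* Sketch: map a register region via vendor HCI commands and read one
 * 32-bit register. Opcodes and register address are placeholders.
 */
static int example_read_reg(struct hci_dev *hdev, unsigned int *val)
{
	struct regmap *map;
	int err;

	map = btintel_regmap_init(hdev, 0xfc8d, 0xfc8e);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = regmap_read(map, 0x80, val);

	regmap_exit(map);
	return err;
}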


@ -80,6 +80,9 @@ int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
const void *param);
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
u16 opcode_write);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@ -113,4 +116,10 @@ static inline int btintel_load_ddc_config(struct hci_dev *hdev,
return -EOPNOTSUPP;
}
static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
u16 opcode_read,
u16 opcode_write)
{
return ERR_PTR(-EINVAL);
}
#endif


@ -184,7 +184,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
}
skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
if (skb == NULL) {
if (!skb) {
BT_ERR("No free skb");
return -ENOMEM;
}
@ -436,13 +436,6 @@ static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
if (!test_bit(HCI_RUNNING, &hdev->flags)) {
BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
skb->data, skb->len);
return -EBUSY;
}
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
@ -477,9 +470,6 @@ static int btmrvl_close(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
skb_queue_purge(&priv->adapter->tx_queue);
return 0;
@ -487,8 +477,6 @@ static int btmrvl_close(struct hci_dev *hdev)
static int btmrvl_open(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &hdev->flags);
return 0;
}


@ -146,6 +146,29 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = {
.fw_dump_end = 0xea,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
.cfg = 0x00,
.host_int_mask = 0x08,
.host_intstatus = 0x0c,
.card_status = 0x5c,
.sq_read_base_addr_a0 = 0xf8,
.sq_read_base_addr_a1 = 0xf9,
.card_revision = 0xc8,
.card_fw_status0 = 0xe8,
.card_fw_status1 = 0xe9,
.card_rx_len = 0xea,
.card_rx_unit = 0xeb,
.io_port_0 = 0xe4,
.io_port_1 = 0xe5,
.io_port_2 = 0xe6,
.int_read_to_clear = true,
.host_int_rsr = 0x04,
.card_misc_cfg = 0xD8,
.fw_dump_ctrl = 0xf0,
.fw_dump_start = 0xf1,
.fw_dump_end = 0xf8,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
@ -191,25 +214,37 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.supports_fw_dump = true,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sd8997_uapsta.bin",
.reg = &btmrvl_reg_8997,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
.supports_fw_dump = true,
};
static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8688 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
.driver_data = (unsigned long) &btmrvl_sdio_sd8688 },
.driver_data = (unsigned long)&btmrvl_sdio_sd8688 },
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
/* Marvell SD8787 Bluetooth AMP device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
/* Marvell SD8797 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
.driver_data = (unsigned long)&btmrvl_sdio_sd8797 },
/* Marvell SD8887 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9136),
.driver_data = (unsigned long)&btmrvl_sdio_sd8887 },
/* Marvell SD8897 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
.driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
.driver_data = (unsigned long)&btmrvl_sdio_sd8897 },
/* Marvell SD8997 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
{ } /* Terminating entry */
};
@ -619,7 +654,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
/* Allocate buffer */
skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
if (skb == NULL) {
if (!skb) {
BT_ERR("No free skb");
ret = -ENOMEM;
goto exit;
@ -1278,6 +1313,12 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv)
if (memory_size == 0) {
BT_INFO("Firmware dump finished!");
sdio_writeb(card->func, FW_DUMP_READ_DONE,
card->reg->fw_dump_ctrl, &ret);
if (ret) {
BT_ERR("SDIO Write MEMDUMP_FINISH ERR");
goto done;
}
break;
}
@ -1616,3 +1657,4 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");


@ -194,21 +194,15 @@ static int btsdio_open(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
return 0;
sdio_claim_host(data->func);
err = sdio_enable_func(data->func);
if (err < 0) {
clear_bit(HCI_RUNNING, &hdev->flags);
if (err < 0)
goto release;
}
err = sdio_claim_irq(data->func, btsdio_interrupt);
if (err < 0) {
sdio_disable_func(data->func);
clear_bit(HCI_RUNNING, &hdev->flags);
goto release;
}
@ -229,9 +223,6 @@ static int btsdio_close(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
sdio_claim_host(data->func);
sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL);
@ -261,9 +252,6 @@ static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;


@ -38,7 +38,7 @@
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <linux/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@ -188,7 +188,7 @@ static void btuart_receive(struct btuart_info *info)
info->hdev->stat.byte_rx++;
/* Allocate packet */
if (info->rx_skb == NULL) {
if (!info->rx_skb) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@ -223,7 +223,6 @@ static void btuart_receive(struct btuart_info *info)
/* Unknown packet */
BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
info->hdev->stat.err_rx++;
clear_bit(HCI_RUNNING, &(info->hdev->flags));
kfree_skb(info->rx_skb);
info->rx_skb = NULL;
@ -409,17 +408,12 @@ static int btuart_hci_flush(struct hci_dev *hdev)
static int btuart_hci_open(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &(hdev->flags));
return 0;
}
static int btuart_hci_close(struct hci_dev *hdev)
{
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
btuart_hci_flush(hdev);
return 0;


@ -940,9 +940,6 @@ static int btusb_open(struct hci_dev *hdev)
data->intf->needs_remote_wakeup = 1;
if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
goto done;
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
@ -965,7 +962,6 @@ done:
failed:
clear_bit(BTUSB_INTR_RUNNING, &data->flags);
clear_bit(HCI_RUNNING, &hdev->flags);
usb_autopm_put_interface(data->intf);
return err;
}
@ -984,9 +980,6 @@ static int btusb_close(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
cancel_work_sync(&data->work);
cancel_work_sync(&data->waker);
@ -1156,9 +1149,6 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
urb = alloc_ctrl_urb(hdev, skb);
@ -1843,9 +1833,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {


@ -155,9 +155,6 @@ static int ti_st_open(struct hci_dev *hdev)
BT_DBG("%s %p", hdev->name, hdev);
if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
/* provide contexts for callbacks from ST */
hst = hci_get_drvdata(hdev);
@ -181,7 +178,6 @@ static int ti_st_open(struct hci_dev *hdev)
goto done;
if (err != -EINPROGRESS) {
clear_bit(HCI_RUNNING, &hdev->flags);
BT_ERR("st_register failed %d", err);
return err;
}
@ -195,7 +191,6 @@ static int ti_st_open(struct hci_dev *hdev)
(&hst->wait_reg_completion,
msecs_to_jiffies(BT_REGISTER_TIMEOUT));
if (!timeleft) {
clear_bit(HCI_RUNNING, &hdev->flags);
BT_ERR("Timeout(%d sec),didn't get reg "
"completion signal from ST",
BT_REGISTER_TIMEOUT / 1000);
@ -205,7 +200,6 @@ static int ti_st_open(struct hci_dev *hdev)
/* Is ST registration callback
* called with ERROR status? */
if (hst->reg_status != 0) {
clear_bit(HCI_RUNNING, &hdev->flags);
BT_ERR("ST registration completed with invalid "
"status %d", hst->reg_status);
return -EAGAIN;
@ -215,7 +209,6 @@ done:
hst->st_write = ti_st_proto[i].write;
if (!hst->st_write) {
BT_ERR("undefined ST write function");
clear_bit(HCI_RUNNING, &hdev->flags);
for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
/* Undo registration with ST */
err = st_unregister(&ti_st_proto[i]);
@ -236,9 +229,6 @@ static int ti_st_close(struct hci_dev *hdev)
int err, i;
struct ti_st *hst = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
err = st_unregister(&ti_st_proto[i]);
if (err)
@ -256,9 +246,6 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
struct ti_st *hst;
long len;
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
hst = hci_get_drvdata(hdev);
/* Prepend skb with frame type */


@ -357,8 +357,6 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
static int dtl1_hci_open(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &(hdev->flags));
return 0;
}
@ -376,9 +374,6 @@ static int dtl1_hci_flush(struct hci_dev *hdev)
static int dtl1_hci_close(struct hci_dev *hdev)
{
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
dtl1_hci_flush(hdev);
return 0;


@ -32,6 +32,8 @@
#include <linux/gpio/consumer.h>
#include <linux/tty.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>
#include <linux/pm_runtime.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@ -39,6 +41,11 @@
#include "btbcm.h"
#include "hci_uart.h"
#define BCM_LM_DIAG_PKT 0x07
#define BCM_LM_DIAG_SIZE 63
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
struct bcm_device {
struct list_head list;
@ -55,7 +62,7 @@ struct bcm_device {
int irq;
u8 irq_polarity;
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_PM
struct hci_uart *hu;
bool is_suspended; /* suspend/resume flag */
#endif
@ -152,13 +159,17 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
return 0;
}
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_PM
static irqreturn_t bcm_host_wake(int irq, void *data)
{
struct bcm_device *bdev = data;
bt_dev_dbg(bdev, "Host wake IRQ");
pm_runtime_get(&bdev->pdev->dev);
pm_runtime_mark_last_busy(&bdev->pdev->dev);
pm_runtime_put_autosuspend(&bdev->pdev->dev);
return IRQ_HANDLED;
}
@ -182,6 +193,12 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
device_init_wakeup(&bdev->pdev->dev, true);
pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
BCM_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&bdev->pdev->dev);
pm_runtime_set_active(&bdev->pdev->dev);
pm_runtime_enable(&bdev->pdev->dev);
}
unlock:
@ -197,7 +214,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
.bt_wake_active = 1, /* BT_WAKE active mode: 1 = high, 0 = low */
.host_wake_active = 0, /* HOST_WAKE active mode: 1 = high, 0 = low */
.allow_host_sleep = 1, /* Allow host sleep in SCO flag */
.combine_modes = 0, /* Combine sleep and LPM flag */
.combine_modes = 1, /* Combine sleep and LPM flag */
.tristate_control = 0, /* Allow tri-state control of UART tx flag */
/* Irrelevant USB flags */
.usb_auto_sleep = 0,
@ -232,6 +249,29 @@ static inline int bcm_request_irq(struct bcm_data *bcm) { return 0; }
static inline int bcm_setup_sleep(struct hci_uart *hu) { return 0; }
#endif
static int bcm_set_diag(struct hci_dev *hdev, bool enable)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct bcm_data *bcm = hu->priv;
struct sk_buff *skb;
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -ENETDOWN;
skb = bt_skb_alloc(3, GFP_KERNEL);
if (IS_ERR(skb))
return PTR_ERR(skb);
*skb_put(skb, 1) = BCM_LM_DIAG_PKT;
*skb_put(skb, 1) = 0xf0;
*skb_put(skb, 1) = enable;
skb_queue_tail(&bcm->txq, skb);
hci_uart_tx_wakeup(hu);
return 0;
}
static int bcm_open(struct hci_uart *hu)
{
struct bcm_data *bcm;
@ -258,7 +298,7 @@ static int bcm_open(struct hci_uart *hu)
if (hu->tty->dev->parent == dev->pdev->dev.parent) {
bcm->dev = dev;
hu->init_speed = dev->init_speed;
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_PM
dev->hu = hu;
#endif
bcm_gpio_set_power(bcm->dev, true);
@ -282,7 +322,10 @@ static int bcm_close(struct hci_uart *hu)
mutex_lock(&bcm_device_lock);
if (bcm_device_exists(bdev)) {
bcm_gpio_set_power(bdev, false);
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_PM
pm_runtime_disable(&bdev->pdev->dev);
pm_runtime_set_suspended(&bdev->pdev->dev);
if (device_can_wakeup(&bdev->pdev->dev)) {
devm_free_irq(&bdev->pdev->dev, bdev->irq, bdev);
device_init_wakeup(&bdev->pdev->dev, false);
@ -322,6 +365,7 @@ static int bcm_setup(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
hu->hdev->set_diag = bcm_set_diag;
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name));
@ -379,10 +423,18 @@ finalize:
return err;
}
#define BCM_RECV_LM_DIAG \
.type = BCM_LM_DIAG_PKT, \
.hlen = BCM_LM_DIAG_SIZE, \
.loff = 0, \
.lsize = 0, \
.maxlen = BCM_LM_DIAG_SIZE
static const struct h4_recv_pkt bcm_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
};
static int bcm_recv(struct hci_uart *hu, const void *data, int count)
@ -399,6 +451,15 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
bcm->rx_skb = NULL;
return err;
} else if (!bcm->rx_skb) {
/* Delay auto-suspend when receiving completed packet */
mutex_lock(&bcm_device_lock);
if (bcm->dev && bcm_device_exists(bcm->dev)) {
pm_runtime_get(&bcm->dev->pdev->dev);
pm_runtime_mark_last_busy(&bcm->dev->pdev->dev);
pm_runtime_put_autosuspend(&bcm->dev->pdev->dev);
}
mutex_unlock(&bcm_device_lock);
}
return count;
@ -420,10 +481,76 @@ static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb)
static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
struct sk_buff *skb = NULL;
struct bcm_device *bdev = NULL;
return skb_dequeue(&bcm->txq);
mutex_lock(&bcm_device_lock);
if (bcm_device_exists(bcm->dev)) {
bdev = bcm->dev;
pm_runtime_get_sync(&bdev->pdev->dev);
/* Shall be resumed here */
}
skb = skb_dequeue(&bcm->txq);
if (bdev) {
pm_runtime_mark_last_busy(&bdev->pdev->dev);
pm_runtime_put_autosuspend(&bdev->pdev->dev);
}
mutex_unlock(&bcm_device_lock);
return skb;
}
#ifdef CONFIG_PM
static int bcm_suspend_device(struct device *dev)
{
struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
bt_dev_dbg(bdev, "");
if (!bdev->is_suspended && bdev->hu) {
hci_uart_set_flow_control(bdev->hu, true);
/* Once this returns, driver suspends BT via GPIO */
bdev->is_suspended = true;
}
/* Suspend the device */
if (bdev->device_wakeup) {
gpiod_set_value(bdev->device_wakeup, false);
bt_dev_dbg(bdev, "suspend, delaying 15 ms");
mdelay(15);
}
return 0;
}
static int bcm_resume_device(struct device *dev)
{
struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
bt_dev_dbg(bdev, "");
if (bdev->device_wakeup) {
gpiod_set_value(bdev->device_wakeup, true);
bt_dev_dbg(bdev, "resume, delaying 15 ms");
mdelay(15);
}
/* When this executes, the device has woken up already */
if (bdev->is_suspended && bdev->hu) {
bdev->is_suspended = false;
hci_uart_set_flow_control(bdev->hu, false);
}
return 0;
}
#endif
#ifdef CONFIG_PM_SLEEP
/* Platform suspend callback */
static int bcm_suspend(struct device *dev)
@ -433,24 +560,17 @@ static int bcm_suspend(struct device *dev)
bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended);
/* bcm_suspend can be called at any time as long as platform device is
* bound, so it should use bcm_device_lock to protect access to hci_uart
* and device_wake-up GPIO.
*/
mutex_lock(&bcm_device_lock);
if (!bdev->hu)
goto unlock;
if (!bdev->is_suspended) {
hci_uart_set_flow_control(bdev->hu, true);
/* Once this callback returns, driver suspends BT via GPIO */
bdev->is_suspended = true;
}
/* Suspend the device */
if (bdev->device_wakeup) {
gpiod_set_value(bdev->device_wakeup, false);
bt_dev_dbg(bdev, "suspend, delaying 15 ms");
mdelay(15);
}
if (pm_runtime_active(dev))
bcm_suspend_device(dev);
if (device_may_wakeup(&bdev->pdev->dev)) {
error = enable_irq_wake(bdev->irq);
@ -471,6 +591,10 @@ static int bcm_resume(struct device *dev)
bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
/* bcm_resume can be called at any time as long as platform device is
* bound, so it should use bcm_device_lock to protect access to hci_uart
* and device_wake-up GPIO.
*/
mutex_lock(&bcm_device_lock);
if (!bdev->hu)
@ -481,22 +605,15 @@ static int bcm_resume(struct device *dev)
bt_dev_dbg(bdev, "BCM irq: disabled");
}
if (bdev->device_wakeup) {
gpiod_set_value(bdev->device_wakeup, true);
bt_dev_dbg(bdev, "resume, delaying 15 ms");
mdelay(15);
}
/* When this callback executes, the device has woken up already */
if (bdev->is_suspended) {
bdev->is_suspended = false;
hci_uart_set_flow_control(bdev->hu, false);
}
bcm_resume_device(dev);
unlock:
mutex_unlock(&bcm_device_lock);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
#endif
@ -513,6 +630,22 @@ static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
};
#ifdef CONFIG_ACPI
static u8 acpi_active_low = ACPI_ACTIVE_LOW;
/* IRQ polarity of some chipsets are not defined correctly in ACPI table. */
static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
{
.ident = "Asus T100TA",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR,
"ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
},
.driver_data = &acpi_active_low,
},
{ }
};
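The bcm_wrong_irq_dmi_table above can be extended for other machines whose ACPI tables describe the host-wake IRQ polarity incorrectly; a hypothetical additional entry (vendor and product strings invented purely for illustration) would follow the same pattern:

/* Hypothetical extra entry for bcm_wrong_irq_dmi_table[]; the vendor and
 * product strings are illustrative only.
 */
{
	.ident = "Example Vendor ExampleBook",
	.matches = {
		DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Vendor Inc."),
		DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ExampleBook"),
	},
	.driver_data = &acpi_active_low,
},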
static int bcm_resource(struct acpi_resource *ares, void *data)
{
struct bcm_device *dev = data;
@ -549,15 +682,10 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
static int bcm_acpi_probe(struct bcm_device *dev)
{
struct platform_device *pdev = dev->pdev;
const struct acpi_device_id *id;
struct acpi_device *adev;
LIST_HEAD(resources);
const struct dmi_system_id *dmi_id;
int ret;
id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
if (!id)
return -ENODEV;
/* Retrieve GPIO data */
dev->name = dev_name(&pdev->dev);
ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
@ -602,11 +730,18 @@ static int bcm_acpi_probe(struct bcm_device *dev)
}
/* Retrieve UART ACPI info */
adev = ACPI_COMPANION(&dev->pdev->dev);
if (!adev)
return 0;
ret = acpi_dev_get_resources(ACPI_COMPANION(&dev->pdev->dev),
&resources, bcm_resource, dev);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&resources);
acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
dmi_id = dmi_first_match(bcm_wrong_irq_dmi_table);
if (dmi_id) {
bt_dev_warn(dev, "%s: Overwriting IRQ polarity to active low",
dmi_id->ident);
dev->irq_polarity = *(u8 *)dmi_id->driver_data;
}
return 0;
}
@ -620,7 +755,6 @@ static int bcm_acpi_probe(struct bcm_device *dev)
static int bcm_probe(struct platform_device *pdev)
{
struct bcm_device *dev;
struct acpi_device_id *pdata = pdev->dev.platform_data;
int ret;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@ -629,15 +763,9 @@ static int bcm_probe(struct platform_device *pdev)
dev->pdev = pdev;
if (ACPI_HANDLE(&pdev->dev)) {
ret = bcm_acpi_probe(dev);
if (ret)
return ret;
} else if (pdata) {
dev->name = pdata->id;
} else {
return -ENODEV;
}
ret = bcm_acpi_probe(dev);
if (ret)
return ret;
platform_set_drvdata(pdev, dev);
@ -693,7 +821,10 @@ MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
#endif
/* Platform suspend and resume callbacks */
static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
static const struct dev_pm_ops bcm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bcm_suspend, bcm_resume)
SET_RUNTIME_PM_OPS(bcm_suspend_device, bcm_resume_device, NULL)
};
static struct platform_driver bcm_driver = {
.probe = bcm_probe,


@ -266,3 +266,4 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
return skb;
}
EXPORT_SYMBOL_GPL(h4_recv_buf);


@ -128,7 +128,7 @@ static void h5_timed_event(unsigned long arg)
{
const unsigned char sync_req[] = { 0x01, 0x7e };
unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
struct hci_uart *hu = (struct hci_uart *) arg;
struct hci_uart *hu = (struct hci_uart *)arg;
struct h5 *h5 = hu->priv;
struct sk_buff *skb;
unsigned long flags;
@ -210,7 +210,7 @@ static int h5_open(struct hci_uart *hu)
init_timer(&h5->timer);
h5->timer.function = h5_timed_event;
h5->timer.data = (unsigned long) hu;
h5->timer.data = (unsigned long)hu;
h5->tx_win = H5_TX_WIN_MAX;
@ -453,7 +453,7 @@ static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
return -ENOMEM;
}
h5->rx_skb->dev = (void *) hu->hdev;
h5->rx_skb->dev = (void *)hu->hdev;
return 0;
}
@ -696,7 +696,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
}
skb = skb_dequeue(&h5->unrel);
if (skb != NULL) {
if (skb) {
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
skb->data, skb->len);
if (nskb) {
@ -714,7 +714,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
goto unlock;
skb = skb_dequeue(&h5->rel);
if (skb != NULL) {
if (skb) {
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
skb->data, skb->len);
if (nskb) {


@ -1165,22 +1165,6 @@ static const struct acpi_device_id intel_acpi_match[] = {
{ },
};
MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
static int intel_acpi_probe(struct intel_device *idev)
{
const struct acpi_device_id *id;
id = acpi_match_device(intel_acpi_match, &idev->pdev->dev);
if (!id)
return -ENODEV;
return 0;
}
#else
static int intel_acpi_probe(struct intel_device *idev)
{
return -ENODEV;
}
#endif
#ifdef CONFIG_PM
@ -1248,14 +1232,6 @@ static int intel_probe(struct platform_device *pdev)
idev->pdev = pdev;
if (ACPI_HANDLE(&pdev->dev)) {
int err = intel_acpi_probe(idev);
if (err)
return err;
} else {
return -ENODEV;
}
idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(idev->reset)) {


@ -208,9 +208,6 @@ static int hci_uart_open(struct hci_dev *hdev)
BT_DBG("%s %p", hdev->name, hdev);
/* Nothing to do for UART driver */
set_bit(HCI_RUNNING, &hdev->flags);
return 0;
}
@ -241,9 +238,6 @@ static int hci_uart_close(struct hci_dev *hdev)
{
BT_DBG("hdev %p", hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
hci_uart_flush(hdev);
hdev->flush = NULL;
return 0;
@ -254,9 +248,6 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
hu->proto->enqueue(hu, skb);
@ -470,8 +461,6 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
spin_lock_init(&hu->rx_lock);
/* Flush any pending characters in the driver and line discipline. */
/* FIXME: why is this needed. Note don't use ldisc_ref here as the
@ -569,14 +558,14 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
return;
spin_lock(&hu->rx_lock);
/* It does not need a lock here as it is already protected by a mutex in
* tty caller
*/
hu->proto->recv(hu, data, count);
if (hu->hdev)
hu->hdev->stat.byte_rx += count;
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
}


@ -347,7 +347,7 @@ static void hci_ibs_wake_retrans_timeout(unsigned long arg)
struct hci_uart *hu = (struct hci_uart *)arg;
struct qca_data *qca = hu->priv;
unsigned long flags, retrans_delay;
unsigned long retransmit = 0;
bool retransmit = false;
BT_DBG("hu %p wake retransmit timeout in %d state",
hu, qca->tx_ibs_state);
@ -358,7 +358,7 @@ static void hci_ibs_wake_retrans_timeout(unsigned long arg)
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_WAKING:
/* No WAKE_ACK, retransmit WAKE */
retransmit = 1;
retransmit = true;
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
BT_ERR("Failed to acknowledge device wake up");
break;


@ -85,7 +85,6 @@ struct hci_uart {
struct sk_buff *tx_skb;
unsigned long tx_state;
spinlock_t rx_lock;
unsigned int init_speed;
unsigned int oper_speed;


@ -55,8 +55,6 @@ struct vhci_data {
static int vhci_open_dev(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &hdev->flags);
return 0;
}
@ -64,9 +62,6 @@ static int vhci_close_dev(struct hci_dev *hdev)
{
struct vhci_data *data = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
skb_queue_purge(&data->readq);
return 0;
@ -85,9 +80,6 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
struct vhci_data *data = hci_get_drvdata(hdev);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb_queue_tail(&data->readq, skb);


@ -43,6 +43,7 @@ config IEEE802154_MRF24J40
tristate "Microchip MRF24J40 transceiver driver"
depends on IEEE802154_DRIVERS && MAC802154
depends on SPI
select REGMAP_SPI
---help---
Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
controller.


@ -81,7 +81,7 @@ struct at86rf230_state_change {
u8 from_state;
u8 to_state;
bool irq_enable;
bool free;
};
struct at86rf230_trac {
@ -105,8 +105,6 @@ struct at86rf230_local {
struct completion state_complete;
struct at86rf230_state_change state;
struct at86rf230_state_change irq;
unsigned long cal_timeout;
bool is_tx;
bool is_tx_from_off;
@ -122,8 +120,7 @@ struct at86rf230_local {
static void
at86rf230_async_state_change(struct at86rf230_local *lp,
struct at86rf230_state_change *ctx,
const u8 state, void (*complete)(void *context),
const bool irq_enable);
const u8 state, void (*complete)(void *context));
static inline void
at86rf230_sleep(struct at86rf230_local *lp)
@ -352,8 +349,10 @@ at86rf230_async_error_recover(void *context)
struct at86rf230_local *lp = ctx->lp;
lp->is_tx = 0;
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false);
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
ieee802154_wake_queue(lp->hw);
if (ctx->free)
kfree(ctx);
}
static inline void
@ -363,15 +362,14 @@ at86rf230_async_error(struct at86rf230_local *lp,
dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
at86rf230_async_error_recover, false);
at86rf230_async_error_recover);
}
/* Generic function to get some register value in async mode */
static void
at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
at86rf230_async_read_reg(struct at86rf230_local *lp, u8 reg,
struct at86rf230_state_change *ctx,
void (*complete)(void *context),
const bool irq_enable)
void (*complete)(void *context))
{
int rc;
@ -379,14 +377,24 @@ at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
ctx->msg.complete = complete;
ctx->irq_enable = irq_enable;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
if (irq_enable)
enable_irq(ctx->irq);
if (rc)
at86rf230_async_error(lp, ctx, rc);
}
static void
at86rf230_async_write_reg(struct at86rf230_local *lp, u8 reg, u8 val,
struct at86rf230_state_change *ctx,
void (*complete)(void *context))
{
int rc;
ctx->buf[0] = (reg & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
ctx->buf[1] = val;
ctx->msg.complete = complete;
rc = spi_async(lp->spi, &ctx->msg);
if (rc)
at86rf230_async_error(lp, ctx, rc);
}
}
static void
@ -434,8 +442,7 @@ at86rf230_async_state_assert(void *context)
lp->tx_retry++;
at86rf230_async_state_change(lp, ctx, state,
ctx->complete,
ctx->irq_enable);
ctx->complete);
return;
}
}
@ -456,8 +463,7 @@ static enum hrtimer_restart at86rf230_async_state_timer(struct hrtimer *timer)
struct at86rf230_local *lp = ctx->lp;
at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
at86rf230_async_state_assert,
ctx->irq_enable);
at86rf230_async_state_assert);
return HRTIMER_NORESTART;
}
@ -562,14 +568,12 @@ at86rf230_async_state_change_start(void *context)
struct at86rf230_local *lp = ctx->lp;
u8 *buf = ctx->buf;
const u8 trx_state = buf[1] & TRX_STATE_MASK;
int rc;
/* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
udelay(1);
at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
at86rf230_async_state_change_start,
ctx->irq_enable);
at86rf230_async_state_change_start);
return;
}
@ -586,31 +590,20 @@ at86rf230_async_state_change_start(void *context)
/* Going into the next step for a state change which do a timing
* relevant delay.
*/
buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
buf[1] = ctx->to_state;
ctx->msg.complete = at86rf230_async_state_delay;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
if (ctx->irq_enable)
enable_irq(ctx->irq);
at86rf230_async_error(lp, ctx, rc);
}
at86rf230_async_write_reg(lp, RG_TRX_STATE, ctx->to_state, ctx,
at86rf230_async_state_delay);
}
static void
at86rf230_async_state_change(struct at86rf230_local *lp,
struct at86rf230_state_change *ctx,
const u8 state, void (*complete)(void *context),
const bool irq_enable)
const u8 state, void (*complete)(void *context))
{
/* Initialization for the state change context */
ctx->to_state = state;
ctx->complete = complete;
ctx->irq_enable = irq_enable;
at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
at86rf230_async_state_change_start,
irq_enable);
at86rf230_async_state_change_start);
}
static void
@ -632,8 +625,7 @@ at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state)
unsigned long rc;
at86rf230_async_state_change(lp, &lp->state, state,
at86rf230_sync_state_change_complete,
false);
at86rf230_sync_state_change_complete);
rc = wait_for_completion_timeout(&lp->state_complete,
msecs_to_jiffies(100));
@ -651,9 +643,8 @@ at86rf230_tx_complete(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
enable_irq(ctx->irq);
ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
kfree(ctx);
}
static void
@ -663,7 +654,7 @@ at86rf230_tx_on(void *context)
struct at86rf230_local *lp = ctx->lp;
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
at86rf230_tx_complete, true);
at86rf230_tx_complete);
}
static void
@ -697,8 +688,7 @@ at86rf230_tx_trac_check(void *context)
}
}
at86rf230_async_state_change(lp, &lp->irq, STATE_TX_ON,
at86rf230_tx_on, true);
at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on);
}
static void
@ -706,7 +696,6 @@ at86rf230_rx_read_frame_complete(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
u8 rx_local_buf[AT86RF2XX_MAX_BUF];
const u8 *buf = ctx->buf;
struct sk_buff *skb;
u8 len, lqi;
@ -718,18 +707,16 @@ at86rf230_rx_read_frame_complete(void *context)
}
lqi = buf[2 + len];
memcpy(rx_local_buf, buf + 2, len);
ctx->trx.len = 2;
enable_irq(ctx->irq);
skb = dev_alloc_skb(IEEE802154_MTU);
if (!skb) {
dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
kfree(ctx);
return;
}
memcpy(skb_put(skb, len), rx_local_buf, len);
memcpy(skb_put(skb, len), buf + 2, len);
ieee802154_rx_irqsafe(lp->hw, skb, lqi);
kfree(ctx);
}
static void
@ -765,21 +752,23 @@ at86rf230_rx_trac_check(void *context)
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
ctx->trx.len = 2;
enable_irq(ctx->irq);
at86rf230_async_error(lp, ctx, rc);
}
}
static void
at86rf230_irq_trx_end(struct at86rf230_local *lp)
at86rf230_irq_trx_end(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
if (lp->is_tx) {
lp->is_tx = 0;
at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
at86rf230_tx_trac_check, true);
at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
at86rf230_tx_trac_check);
} else {
at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
at86rf230_rx_trac_check, true);
at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
at86rf230_rx_trac_check);
}
}
@ -789,32 +778,59 @@ at86rf230_irq_status(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = ctx->buf;
const u8 irq = buf[1];
u8 irq = buf[1];
enable_irq(lp->spi->irq);
if (irq & IRQ_TRX_END) {
at86rf230_irq_trx_end(lp);
at86rf230_irq_trx_end(ctx);
} else {
enable_irq(ctx->irq);
dev_err(&lp->spi->dev, "not supported irq %02x received\n",
irq);
kfree(ctx);
}
}
static void
at86rf230_setup_spi_messages(struct at86rf230_local *lp,
struct at86rf230_state_change *state)
{
state->lp = lp;
state->irq = lp->spi->irq;
spi_message_init(&state->msg);
state->msg.context = state;
state->trx.len = 2;
state->trx.tx_buf = state->buf;
state->trx.rx_buf = state->buf;
spi_message_add_tail(&state->trx, &state->msg);
hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
state->timer.function = at86rf230_async_state_timer;
}
static irqreturn_t at86rf230_isr(int irq, void *data)
{
struct at86rf230_local *lp = data;
struct at86rf230_state_change *ctx = &lp->irq;
u8 *buf = ctx->buf;
struct at86rf230_state_change *ctx;
int rc;
disable_irq_nosync(irq);
buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx) {
enable_irq(irq);
return IRQ_NONE;
}
at86rf230_setup_spi_messages(lp, ctx);
/* tell on error handling to free ctx */
ctx->free = true;
ctx->buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
ctx->msg.complete = at86rf230_irq_status;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
enable_irq(irq);
at86rf230_async_error(lp, ctx, rc);
enable_irq(irq);
return IRQ_NONE;
}
@ -826,21 +842,14 @@ at86rf230_write_frame_complete(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
u8 *buf = ctx->buf;
int rc;
ctx->trx.len = 2;
if (gpio_is_valid(lp->slp_tr)) {
if (gpio_is_valid(lp->slp_tr))
at86rf230_slp_tr_rising_edge(lp);
} else {
buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
buf[1] = STATE_BUSY_TX;
ctx->msg.complete = NULL;
rc = spi_async(lp->spi, &ctx->msg);
if (rc)
at86rf230_async_error(lp, ctx, rc);
}
else
at86rf230_async_write_reg(lp, RG_TRX_STATE, STATE_BUSY_TX, ctx,
NULL);
}
static void
@ -873,7 +882,7 @@ at86rf230_xmit_tx_on(void *context)
struct at86rf230_local *lp = ctx->lp;
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
at86rf230_write_frame, false);
at86rf230_write_frame);
}
static void
@ -886,12 +895,10 @@ at86rf230_xmit_start(void *context)
if (lp->is_tx_from_off) {
lp->is_tx_from_off = false;
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
at86rf230_write_frame,
false);
at86rf230_write_frame);
} else {
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
at86rf230_xmit_tx_on,
false);
at86rf230_xmit_tx_on);
}
}
@ -914,7 +921,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
if (time_is_before_jiffies(lp->cal_timeout)) {
lp->is_tx_from_off = true;
at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
at86rf230_xmit_start, false);
at86rf230_xmit_start);
} else {
at86rf230_xmit_start(ctx);
}
@ -1373,10 +1380,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
return rc;
irq_type = irq_get_trigger_type(lp->spi->irq);
if (irq_type == IRQ_TYPE_EDGE_RISING ||
irq_type == IRQ_TYPE_EDGE_FALLING)
dev_warn(&lp->spi->dev,
"Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n");
if (irq_type == IRQ_TYPE_EDGE_FALLING ||
irq_type == IRQ_TYPE_LEVEL_LOW)
irq_pol = IRQ_ACTIVE_LOW;
@ -1602,43 +1605,6 @@ not_supp:
return rc;
}
static void
at86rf230_setup_spi_messages(struct at86rf230_local *lp)
{
lp->state.lp = lp;
lp->state.irq = lp->spi->irq;
spi_message_init(&lp->state.msg);
lp->state.msg.context = &lp->state;
lp->state.trx.len = 2;
lp->state.trx.tx_buf = lp->state.buf;
lp->state.trx.rx_buf = lp->state.buf;
spi_message_add_tail(&lp->state.trx, &lp->state.msg);
hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
lp->state.timer.function = at86rf230_async_state_timer;
lp->irq.lp = lp;
lp->irq.irq = lp->spi->irq;
spi_message_init(&lp->irq.msg);
lp->irq.msg.context = &lp->irq;
lp->irq.trx.len = 2;
lp->irq.trx.tx_buf = lp->irq.buf;
lp->irq.trx.rx_buf = lp->irq.buf;
spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
lp->irq.timer.function = at86rf230_async_state_timer;
lp->tx.lp = lp;
lp->tx.irq = lp->spi->irq;
spi_message_init(&lp->tx.msg);
lp->tx.msg.context = &lp->tx;
lp->tx.trx.len = 2;
lp->tx.trx.tx_buf = lp->tx.buf;
lp->tx.trx.rx_buf = lp->tx.buf;
spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
lp->tx.timer.function = at86rf230_async_state_timer;
}
#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS
static struct dentry *at86rf230_debugfs_root;
@ -1760,7 +1726,8 @@ static int at86rf230_probe(struct spi_device *spi)
goto free_dev;
}
at86rf230_setup_spi_messages(lp);
at86rf230_setup_spi_messages(lp, &lp->state);
at86rf230_setup_spi_messages(lp, &lp->tx);
rc = at86rf230_detect_device(lp);
if (rc < 0)

File diff suppressed because it is too large.


@ -25,12 +25,22 @@
#include <linux/types.h>
#include <linux/random.h>
#include <asm/byteorder.h>
#define IEEE802154_MTU 127
#define IEEE802154_ACK_PSDU_LEN 5
#define IEEE802154_MIN_PSDU_LEN 9
#define IEEE802154_FCS_LEN 2
#define IEEE802154_MAX_AUTH_TAG_LEN 16
/* General MAC frame format:
* 2 bytes: Frame Control
* 1 byte: Sequence Number
* 20 bytes: Addressing fields
* 14 bytes: Auxiliary Security Header
*/
#define IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14)
#define IEEE802154_MIN_HEADER_LEN (IEEE802154_ACK_PSDU_LEN - \
IEEE802154_FCS_LEN)
#define IEEE802154_PAN_ID_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
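For quick reference, the new header-length constants above expand to the following values (all figures come from the definitions in this hunk):

IEEE802154_MAX_HEADER_LEN = 2 + 1 + 20 + 14 = 37
IEEE802154_MIN_HEADER_LEN = IEEE802154_ACK_PSDU_LEN - IEEE802154_FCS_LEN = 5 - 2 = 3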
@ -207,6 +217,7 @@ enum {
/* frame control handling */
#define IEEE802154_FCTL_FTYPE 0x0003
#define IEEE802154_FCTL_ACKREQ 0x0020
#define IEEE802154_FCTL_INTRA_PAN 0x0040
#define IEEE802154_FTYPE_DATA 0x0001
@ -221,6 +232,15 @@ static inline int ieee802154_is_data(__le16 fc)
cpu_to_le16(IEEE802154_FTYPE_DATA);
}
/**
* ieee802154_is_ackreq - check if acknowledgment request bit is set
* @fc: frame control bytes in little-endian byteorder
*/
static inline bool ieee802154_is_ackreq(__le16 fc)
{
return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ);
}
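/* A usage sketch (not part of this change; send_ack() is hypothetical and
* ieee802154_get_fc_from_skb() comes from mac802154.h): a receive path can
* combine this helper with the frame type checks above, e.g.
*
*	__le16 fc = ieee802154_get_fc_from_skb(skb);
*
*	if (ieee802154_is_data(fc) && ieee802154_is_ackreq(fc))
*		send_ack(skb);
*/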
/**
* ieee802154_is_intra_pan - check if intra pan id communication
* @fc: frame control bytes in little-endian byteorder

View File

@ -61,6 +61,16 @@
#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
#define EUI64_ADDR_LEN 8
#define LOWPAN_NHC_MAX_ID_LEN 1
/* Max IPHC header length without the IPv6 header's specific inline data.
* Useful for getting the "extra" bytes we need in the worst-case compression.
*
* LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN
*/
#define LOWPAN_IPHC_MAX_HEADER_LEN (2 + 1 + LOWPAN_NHC_MAX_ID_LEN)
/*
* ipv6 address based on mac
* second bit-flip (Universe/Local) is done according RFC2464

View File

@ -122,11 +122,14 @@ struct bt_voice {
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
void bt_warn(const char *fmt, ...);
__printf(1, 2)
void bt_err(const char *fmt, ...);
__printf(1, 2)
void bt_err_ratelimited(const char *fmt, ...);
#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
#define BT_WARN(fmt, ...) bt_warn(fmt "\n", ##__VA_ARGS__)
#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
@ -134,6 +137,8 @@ void bt_err_ratelimited(const char *fmt, ...);
#define bt_dev_info(hdev, fmt, ...) \
BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
#define bt_dev_warn(hdev, fmt, ...) \
BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
#define bt_dev_err(hdev, fmt, ...) \
BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
#define bt_dev_dbg(hdev, fmt, ...) \

View File

@ -44,6 +44,8 @@
#define HCI_DEV_DOWN 4
#define HCI_DEV_SUSPEND 5
#define HCI_DEV_RESUME 6
#define HCI_DEV_OPEN 7
#define HCI_DEV_CLOSE 8
/* HCI notify events */
#define HCI_NOTIFY_CONN_ADD 1
@ -238,6 +240,7 @@ enum {
HCI_LE_SCAN_INTERRUPTED,
HCI_DUT_MODE,
HCI_VENDOR_DIAG,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
@ -260,6 +263,7 @@ enum {
#define HCI_ACLDATA_PKT 0x02
#define HCI_SCODATA_PKT 0x03
#define HCI_EVENT_PKT 0x04
#define HCI_DIAG_PKT 0xf0
#define HCI_VENDOR_PKT 0xff
/* HCI packet types */

View File

@ -398,6 +398,7 @@ struct hci_dev {
int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
void (*hw_error)(struct hci_dev *hdev, u8 code);
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
};
@ -1066,6 +1067,7 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
void hci_init_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
@ -1349,6 +1351,9 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,

View File

@ -39,6 +39,10 @@ struct hci_mon_hdr {
#define HCI_MON_ACL_RX_PKT 5
#define HCI_MON_SCO_TX_PKT 6
#define HCI_MON_SCO_RX_PKT 7
#define HCI_MON_OPEN_INDEX 8
#define HCI_MON_CLOSE_INDEX 9
#define HCI_MON_INDEX_INFO 10
#define HCI_MON_VENDOR_DIAG 11
struct hci_mon_new_index {
__u8 type;
@ -48,4 +52,10 @@ struct hci_mon_new_index {
} __packed;
#define HCI_MON_NEW_INDEX_SIZE 16
struct hci_mon_index_info {
bdaddr_t bdaddr;
__le16 manufacturer;
} __packed;
#define HCI_MON_INDEX_INFO_SIZE 8
#endif /* __HCI_MON_H */

View File

@ -27,6 +27,16 @@
struct wpan_phy;
struct wpan_phy_cca;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
struct ieee802154_llsec_device_key;
struct ieee802154_llsec_seclevel;
struct ieee802154_llsec_params;
struct ieee802154_llsec_device;
struct ieee802154_llsec_table;
struct ieee802154_llsec_key_id;
struct ieee802154_llsec_key;
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
struct cfg802154_ops {
struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
const char *name,
@ -65,6 +75,51 @@ struct cfg802154_ops {
struct wpan_dev *wpan_dev, bool mode);
int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, bool ackreq);
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
void (*get_llsec_table)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
struct ieee802154_llsec_table **table);
void (*lock_llsec_table)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev);
void (*unlock_llsec_table)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev);
/* TODO remove the locking/get table callbacks; this is part of the
* nl802154 interface and should be accessible from the ieee802154 layer.
*/
int (*get_llsec_params)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
struct ieee802154_llsec_params *params);
int (*set_llsec_params)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_params *params,
int changed);
int (*add_llsec_key)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_key_id *id,
const struct ieee802154_llsec_key *key);
int (*del_llsec_key)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_key_id *id);
int (*add_seclevel)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_seclevel *sl);
int (*del_seclevel)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_seclevel *sl);
int (*add_device)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_device *dev);
int (*del_device)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, __le64 extended_addr);
int (*add_devkey)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
__le64 extended_addr,
const struct ieee802154_llsec_device_key *key);
int (*del_devkey)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
__le64 extended_addr,
const struct ieee802154_llsec_device_key *key);
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
};
static inline bool
@ -167,6 +222,102 @@ struct wpan_phy {
char priv[0] __aligned(NETDEV_ALIGN);
};
struct ieee802154_addr {
u8 mode;
__le16 pan_id;
union {
__le16 short_addr;
__le64 extended_addr;
};
};
struct ieee802154_llsec_key_id {
u8 mode;
u8 id;
union {
struct ieee802154_addr device_addr;
__le32 short_source;
__le64 extended_source;
};
};
#define IEEE802154_LLSEC_KEY_SIZE 16
struct ieee802154_llsec_key {
u8 frame_types;
u32 cmd_frame_ids;
/* TODO replace with NL802154_KEY_SIZE */
u8 key[IEEE802154_LLSEC_KEY_SIZE];
};
struct ieee802154_llsec_key_entry {
struct list_head list;
struct ieee802154_llsec_key_id id;
struct ieee802154_llsec_key *key;
};
struct ieee802154_llsec_params {
bool enabled;
__be32 frame_counter;
u8 out_level;
struct ieee802154_llsec_key_id out_key;
__le64 default_key_source;
__le16 pan_id;
__le64 hwaddr;
__le64 coord_hwaddr;
__le16 coord_shortaddr;
};
struct ieee802154_llsec_table {
struct list_head keys;
struct list_head devices;
struct list_head security_levels;
};
struct ieee802154_llsec_seclevel {
struct list_head list;
u8 frame_type;
u8 cmd_frame_id;
bool device_override;
u32 sec_levels;
};
struct ieee802154_llsec_device {
struct list_head list;
__le16 pan_id;
__le16 short_addr;
__le64 hwaddr;
u32 frame_counter;
bool seclevel_exempt;
u8 key_mode;
struct list_head keys;
};
struct ieee802154_llsec_device_key {
struct list_head list;
struct ieee802154_llsec_key_id key_id;
u32 frame_counter;
};
struct wpan_dev_header_ops {
/* TODO the create callback currently assumes an ieee802154_mac_cb inside
* skb->cb. This should be changed to pass this information as a
* parameter.
*/
int (*create)(struct sk_buff *skb, struct net_device *dev,
const struct ieee802154_addr *daddr,
const struct ieee802154_addr *saddr,
unsigned int len);
};
struct wpan_dev {
struct wpan_phy *wpan_phy;
int iftype;
@ -175,6 +326,8 @@ struct wpan_dev {
struct list_head list;
struct net_device *netdev;
const struct wpan_dev_header_ops *header_ops;
/* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
struct net_device *lowpan_dev;
@ -205,6 +358,17 @@ struct wpan_dev {
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
static inline int
wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
const struct ieee802154_addr *daddr,
const struct ieee802154_addr *saddr,
unsigned int len)
{
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
}
struct wpan_phy *
wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)

View File

@ -50,15 +50,6 @@ struct ieee802154_sechdr {
};
};
struct ieee802154_addr {
u8 mode;
__le16 pan_id;
union {
__le16 short_addr;
__le64 extended_addr;
};
};
struct ieee802154_hdr_fc {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u16 type:3,
@ -99,7 +90,7 @@ struct ieee802154_hdr {
* hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
* version, if SECEN is set.
*/
int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr);
int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr);
/* pulls the entire 802.15.4 header off of the skb, including the security
* header, and performs pan id decompression
@ -243,38 +234,6 @@ static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb)
return mac_cb(skb);
}
#define IEEE802154_LLSEC_KEY_SIZE 16
struct ieee802154_llsec_key_id {
u8 mode;
u8 id;
union {
struct ieee802154_addr device_addr;
__le32 short_source;
__le64 extended_source;
};
};
struct ieee802154_llsec_key {
u8 frame_types;
u32 cmd_frame_ids;
u8 key[IEEE802154_LLSEC_KEY_SIZE];
};
struct ieee802154_llsec_key_entry {
struct list_head list;
struct ieee802154_llsec_key_id id;
struct ieee802154_llsec_key *key;
};
struct ieee802154_llsec_device_key {
struct list_head list;
struct ieee802154_llsec_key_id key_id;
u32 frame_counter;
};
enum {
IEEE802154_LLSEC_DEVKEY_IGNORE,
IEEE802154_LLSEC_DEVKEY_RESTRICT,
@ -283,49 +242,6 @@ enum {
__IEEE802154_LLSEC_DEVKEY_MAX,
};
struct ieee802154_llsec_device {
struct list_head list;
__le16 pan_id;
__le16 short_addr;
__le64 hwaddr;
u32 frame_counter;
bool seclevel_exempt;
u8 key_mode;
struct list_head keys;
};
struct ieee802154_llsec_seclevel {
struct list_head list;
u8 frame_type;
u8 cmd_frame_id;
bool device_override;
u32 sec_levels;
};
struct ieee802154_llsec_params {
bool enabled;
__be32 frame_counter;
u8 out_level;
struct ieee802154_llsec_key_id out_key;
__le64 default_key_source;
__le16 pan_id;
__le64 hwaddr;
__le64 coord_hwaddr;
__le16 coord_shortaddr;
};
struct ieee802154_llsec_table {
struct list_head keys;
struct list_head devices;
struct list_head security_levels;
};
#define IEEE802154_MAC_SCAN_ED 0
#define IEEE802154_MAC_SCAN_ACTIVE 1
#define IEEE802154_MAC_SCAN_PASSIVE 2

View File

@ -23,14 +23,6 @@
#include <net/cfg802154.h>
/* General MAC frame format:
* 2 bytes: Frame Control
* 1 byte: Sequence Number
* 20 bytes: Addressing fields
* 14 bytes: Auxiliary Security Header
*/
#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14)
/**
* enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
*
@ -256,7 +248,7 @@ struct ieee802154_ops {
static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
{
/* return some invalid fc on failure */
if (unlikely(skb->mac_len < 2)) {
if (unlikely(skb->len < 2)) {
WARN_ON(1);
return cpu_to_le16(0);
}

View File

@ -1003,6 +1003,15 @@ static inline __be32 nla_get_be32(const struct nlattr *nla)
return *(__be32 *) nla_data(nla);
}
/**
* nla_get_le32 - return payload of __le32 attribute
* @nla: __le32 netlink attribute
*/
static inline __le32 nla_get_le32(const struct nlattr *nla)
{
return *(__le32 *) nla_data(nla);
}
/**
* nla_get_u16 - return payload of u16 attribute
* @nla: u16 netlink attribute
@ -1065,6 +1074,15 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
return tmp;
}
/**
* nla_get_le64 - return payload of __le64 attribute
* @nla: __le64 netlink attribute
*/
static inline __le64 nla_get_le64(const struct nlattr *nla)
{
return *(__le64 *) nla_data(nla);
}
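/* A usage sketch (not part of this change; assumes the nl802154 attribute
* enums and a populated attrs[] from nla_parse_nested()): the little-endian
* getters mirror nla_get_be32()/nla_get_be64() for attributes such as an
* extended 802.15.4 address, e.g.
*
*	__le64 extended_addr = cpu_to_le64(0);
*
*	if (attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
*		extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
*/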
/**
* nla_get_s32 - return payload of s32 attribute
* @nla: s32 netlink attribute

View File

@ -56,6 +56,22 @@ enum nl802154_commands {
/* add new commands above here */
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
NL802154_CMD_SET_SEC_PARAMS,
NL802154_CMD_GET_SEC_KEY, /* can dump */
NL802154_CMD_NEW_SEC_KEY,
NL802154_CMD_DEL_SEC_KEY,
NL802154_CMD_GET_SEC_DEV, /* can dump */
NL802154_CMD_NEW_SEC_DEV,
NL802154_CMD_DEL_SEC_DEV,
NL802154_CMD_GET_SEC_DEVKEY, /* can dump */
NL802154_CMD_NEW_SEC_DEVKEY,
NL802154_CMD_DEL_SEC_DEVKEY,
NL802154_CMD_GET_SEC_LEVEL, /* can dump */
NL802154_CMD_NEW_SEC_LEVEL,
NL802154_CMD_DEL_SEC_LEVEL,
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
/* used to define NL802154_CMD_MAX below */
__NL802154_CMD_AFTER_LAST,
NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1
@ -110,6 +126,18 @@ enum nl802154_attrs {
/* add attributes here, update the policy in nl802154.c */
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
NL802154_ATTR_SEC_ENABLED,
NL802154_ATTR_SEC_OUT_LEVEL,
NL802154_ATTR_SEC_OUT_KEY_ID,
NL802154_ATTR_SEC_FRAME_COUNTER,
NL802154_ATTR_SEC_LEVEL,
NL802154_ATTR_SEC_DEVICE,
NL802154_ATTR_SEC_DEVKEY,
NL802154_ATTR_SEC_KEY,
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
__NL802154_ATTR_AFTER_LAST,
NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
};
@ -247,4 +275,167 @@ enum nl802154_supported_bool_states {
NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
};
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
enum nl802154_dev_addr_modes {
NL802154_DEV_ADDR_NONE,
__NL802154_DEV_ADDR_INVALID,
NL802154_DEV_ADDR_SHORT,
NL802154_DEV_ADDR_EXTENDED,
/* keep last */
__NL802154_DEV_ADDR_AFTER_LAST,
NL802154_DEV_ADDR_MAX = __NL802154_DEV_ADDR_AFTER_LAST - 1
};
enum nl802154_dev_addr_attrs {
NL802154_DEV_ADDR_ATTR_UNSPEC,
NL802154_DEV_ADDR_ATTR_PAN_ID,
NL802154_DEV_ADDR_ATTR_MODE,
NL802154_DEV_ADDR_ATTR_SHORT,
NL802154_DEV_ADDR_ATTR_EXTENDED,
/* keep last */
__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1
};
enum nl802154_key_id_modes {
NL802154_KEY_ID_MODE_IMPLICIT,
NL802154_KEY_ID_MODE_INDEX,
NL802154_KEY_ID_MODE_INDEX_SHORT,
NL802154_KEY_ID_MODE_INDEX_EXTENDED,
/* keep last */
__NL802154_KEY_ID_MODE_AFTER_LAST,
NL802154_KEY_ID_MODE_MAX = __NL802154_KEY_ID_MODE_AFTER_LAST - 1
};
enum nl802154_key_id_attrs {
NL802154_KEY_ID_ATTR_UNSPEC,
NL802154_KEY_ID_ATTR_MODE,
NL802154_KEY_ID_ATTR_INDEX,
NL802154_KEY_ID_ATTR_IMPLICIT,
NL802154_KEY_ID_ATTR_SOURCE_SHORT,
NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
/* keep last */
__NL802154_KEY_ID_ATTR_AFTER_LAST,
NL802154_KEY_ID_ATTR_MAX = __NL802154_KEY_ID_ATTR_AFTER_LAST - 1
};
enum nl802154_seclevels {
NL802154_SECLEVEL_NONE,
NL802154_SECLEVEL_MIC32,
NL802154_SECLEVEL_MIC64,
NL802154_SECLEVEL_MIC128,
NL802154_SECLEVEL_ENC,
NL802154_SECLEVEL_ENC_MIC32,
NL802154_SECLEVEL_ENC_MIC64,
NL802154_SECLEVEL_ENC_MIC128,
/* keep last */
__NL802154_SECLEVEL_AFTER_LAST,
NL802154_SECLEVEL_MAX = __NL802154_SECLEVEL_AFTER_LAST - 1
};
enum nl802154_frames {
NL802154_FRAME_BEACON,
NL802154_FRAME_DATA,
NL802154_FRAME_ACK,
NL802154_FRAME_CMD,
/* keep last */
__NL802154_FRAME_AFTER_LAST,
NL802154_FRAME_MAX = __NL802154_FRAME_AFTER_LAST - 1
};
enum nl802154_cmd_frames {
__NL802154_CMD_FRAME_INVALID,
NL802154_CMD_FRAME_ASSOC_REQUEST,
NL802154_CMD_FRAME_ASSOC_RESPONSE,
NL802154_CMD_FRAME_DISASSOC_NOTIFY,
NL802154_CMD_FRAME_DATA_REQUEST,
NL802154_CMD_FRAME_PAN_ID_CONFLICT_NOTIFY,
NL802154_CMD_FRAME_ORPHAN_NOTIFY,
NL802154_CMD_FRAME_BEACON_REQUEST,
NL802154_CMD_FRAME_COORD_REALIGNMENT,
NL802154_CMD_FRAME_GTS_REQUEST,
/* keep last */
__NL802154_CMD_FRAME_AFTER_LAST,
NL802154_CMD_FRAME_MAX = __NL802154_CMD_FRAME_AFTER_LAST - 1
};
enum nl802154_seclevel_attrs {
NL802154_SECLEVEL_ATTR_UNSPEC,
NL802154_SECLEVEL_ATTR_LEVELS,
NL802154_SECLEVEL_ATTR_FRAME,
NL802154_SECLEVEL_ATTR_CMD_FRAME,
NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
/* keep last */
__NL802154_SECLEVEL_ATTR_AFTER_LAST,
NL802154_SECLEVEL_ATTR_MAX = __NL802154_SECLEVEL_ATTR_AFTER_LAST - 1
};
/* TODO what is this? couldn't find in mib */
enum {
NL802154_DEVKEY_IGNORE,
NL802154_DEVKEY_RESTRICT,
NL802154_DEVKEY_RECORD,
/* keep last */
__NL802154_DEVKEY_AFTER_LAST,
NL802154_DEVKEY_MAX = __NL802154_DEVKEY_AFTER_LAST - 1
};
enum nl802154_dev {
NL802154_DEV_ATTR_UNSPEC,
NL802154_DEV_ATTR_FRAME_COUNTER,
NL802154_DEV_ATTR_PAN_ID,
NL802154_DEV_ATTR_SHORT_ADDR,
NL802154_DEV_ATTR_EXTENDED_ADDR,
NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
NL802154_DEV_ATTR_KEY_MODE,
/* keep last */
__NL802154_DEV_ATTR_AFTER_LAST,
NL802154_DEV_ATTR_MAX = __NL802154_DEV_ATTR_AFTER_LAST - 1
};
enum nl802154_devkey {
NL802154_DEVKEY_ATTR_UNSPEC,
NL802154_DEVKEY_ATTR_FRAME_COUNTER,
NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
NL802154_DEVKEY_ATTR_ID,
/* keep last */
__NL802154_DEVKEY_ATTR_AFTER_LAST,
NL802154_DEVKEY_ATTR_MAX = __NL802154_DEVKEY_ATTR_AFTER_LAST - 1
};
enum nl802154_key {
NL802154_KEY_ATTR_UNSPEC,
NL802154_KEY_ATTR_ID,
NL802154_KEY_ATTR_USAGE_FRAMES,
NL802154_KEY_ATTR_USAGE_CMDS,
NL802154_KEY_ATTR_BYTES,
/* keep last */
__NL802154_KEY_ATTR_AFTER_LAST,
NL802154_KEY_ATTR_MAX = __NL802154_KEY_ATTR_AFTER_LAST - 1
};
#define NL802154_KEY_SIZE 16
#define NL802154_CMD_FRAME_NR_IDS 256
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
#endif /* __NL802154_H */
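/* A usage sketch for the security attributes above (not part of this
* header; msg and key_bytes are hypothetical, error handling omitted):
*
*	struct nlattr *nl_key = nla_nest_start(msg, NL802154_ATTR_SEC_KEY);
*
*	nla_put(msg, NL802154_KEY_ATTR_BYTES, NL802154_KEY_SIZE, key_bytes);
*	nla_nest_end(msg, nl_key);
*/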

View File

@ -17,6 +17,11 @@
void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
{
dev->addr_len = EUI64_ADDR_LEN;
dev->type = ARPHRD_6LOWPAN;
dev->mtu = IPV6_MIN_MTU;
dev->priv_flags |= IFF_NO_QUEUE;
lowpan_priv(dev)->lltype = lltype;
}
EXPORT_SYMBOL(lowpan_netdev_setup);

View File

@ -8,8 +8,6 @@
#include <net/6lowpan.h>
#include <net/ipv6.h>
#define LOWPAN_NHC_MAX_ID_LEN 1
/**
* LOWPAN_NHC - helper macro to generate nh id fields and lowpan_nhc struct
*

View File

@ -35,7 +35,6 @@ static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;
#define IFACE_NAME_TEMPLATE "bt%d"
#define EUI64_ADDR_LEN 8
struct skb_cb {
struct in6_addr addr;
@ -674,13 +673,8 @@ static struct header_ops header_ops = {
static void netdev_setup(struct net_device *dev)
{
dev->addr_len = EUI64_ADDR_LEN;
dev->type = ARPHRD_6LOWPAN;
dev->hard_header_len = 0;
dev->needed_tailroom = 0;
dev->mtu = IPV6_MIN_MTU;
dev->tx_queue_len = 0;
dev->flags = IFF_RUNNING | IFF_POINTOPOINT |
IFF_MULTICAST;
dev->watchdog_timeo = 0;
@ -775,24 +769,7 @@ static struct l2cap_chan *chan_create(void)
chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
chan->mode = L2CAP_MODE_LE_FLOWCTL;
chan->omtu = 65535;
chan->imtu = chan->omtu;
return chan;
}
static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
{
struct l2cap_chan *chan;
chan = chan_create();
if (!chan)
return NULL;
chan->remote_mps = chan->omtu;
chan->mps = chan->omtu;
chan->state = BT_CONNECTED;
chan->imtu = 1280;
return chan;
}
@ -919,7 +896,10 @@ static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
struct l2cap_chan *chan;
chan = chan_open(pchan);
chan = chan_create();
if (!chan)
return NULL;
chan->ops = pchan->ops;
BT_DBG("chan %p pchan %p", chan, pchan);
@ -1065,34 +1045,23 @@ static inline __u8 bdaddr_type(__u8 type)
return BDADDR_LE_RANDOM;
}
static struct l2cap_chan *chan_get(void)
{
struct l2cap_chan *pchan;
pchan = chan_create();
if (!pchan)
return NULL;
pchan->ops = &bt_6lowpan_chan_ops;
return pchan;
}
static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
struct l2cap_chan *pchan;
struct l2cap_chan *chan;
int err;
pchan = chan_get();
if (!pchan)
chan = chan_create();
if (!chan)
return -EINVAL;
err = l2cap_chan_connect(pchan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
chan->ops = &bt_6lowpan_chan_ops;
err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
addr, dst_type);
BT_DBG("chan %p err %d", pchan, err);
BT_DBG("chan %p err %d", chan, err);
if (err < 0)
l2cap_chan_put(pchan);
l2cap_chan_put(chan);
return err;
}
@ -1117,31 +1086,32 @@ static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
static struct l2cap_chan *bt_6lowpan_listen(void)
{
bdaddr_t *addr = BDADDR_ANY;
struct l2cap_chan *pchan;
struct l2cap_chan *chan;
int err;
if (!enable_6lowpan)
return NULL;
pchan = chan_get();
if (!pchan)
chan = chan_create();
if (!chan)
return NULL;
pchan->state = BT_LISTEN;
pchan->src_type = BDADDR_LE_PUBLIC;
chan->ops = &bt_6lowpan_chan_ops;
chan->state = BT_LISTEN;
chan->src_type = BDADDR_LE_PUBLIC;
atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT);
atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);
BT_DBG("chan %p src type %d", pchan, pchan->src_type);
BT_DBG("chan %p src type %d", chan, chan->src_type);
err = l2cap_add_psm(pchan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
if (err) {
l2cap_chan_put(pchan);
l2cap_chan_put(chan);
BT_ERR("psm cannot be added err %d", err);
return NULL;
}
return pchan;
return chan;
}
static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,

View File

@ -134,6 +134,66 @@ static const struct file_operations dut_mode_fops = {
.llseek = default_llseek,
};
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct hci_dev *hdev = file->private_data;
char buf[3];
buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct hci_dev *hdev = file->private_data;
char buf[32];
size_t buf_size = min(count, (sizeof(buf)-1));
bool enable;
int err;
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size] = '\0';
if (strtobool(buf, &enable))
return -EINVAL;
hci_req_lock(hdev);
err = hdev->set_diag(hdev, enable);
hci_req_unlock(hdev);
if (err < 0)
return err;
if (enable)
hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
else
hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
return count;
}
static const struct file_operations vendor_diag_fops = {
.open = simple_open,
.read = vendor_diag_read,
.write = vendor_diag_write,
.llseek = default_llseek,
};
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
&dut_mode_fops);
if (hdev->set_diag)
debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
&vendor_diag_fops);
}
/* ---- HCI requests ---- */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
@ -850,13 +910,8 @@ static int __hci_init(struct hci_dev *hdev)
if (err < 0)
return err;
/* The Device Under Test (DUT) mode is special and available for
* all controller types. So just create it early on.
*/
if (hci_dev_test_flag(hdev, HCI_SETUP)) {
debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
&dut_mode_fops);
}
if (hci_dev_test_flag(hdev, HCI_SETUP))
hci_debugfs_create_basic(hdev);
err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
if (err < 0)
@ -933,6 +988,9 @@ static int __hci_unconf_init(struct hci_dev *hdev)
if (err < 0)
return err;
if (hci_dev_test_flag(hdev, HCI_SETUP))
hci_debugfs_create_basic(hdev);
return 0;
}
@ -1385,6 +1443,9 @@ static int hci_dev_do_open(struct hci_dev *hdev)
goto done;
}
set_bit(HCI_RUNNING, &hdev->flags);
hci_notify(hdev, HCI_DEV_OPEN);
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
@ -1466,6 +1527,9 @@ static int hci_dev_do_open(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
clear_bit(HCI_RUNNING, &hdev->flags);
hci_notify(hdev, HCI_DEV_CLOSE);
hdev->close(hdev);
hdev->flags &= BIT(HCI_RAW);
}
@ -1551,6 +1615,8 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
int hci_dev_do_close(struct hci_dev *hdev)
{
bool auto_off;
BT_DBG("%s %p", hdev->name, hdev);
if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
@ -1606,10 +1672,10 @@ int hci_dev_do_close(struct hci_dev *hdev)
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
if (hdev->dev_type == HCI_BREDR)
mgmt_powered(hdev, 0);
}
auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
if (!auto_off && hdev->dev_type == HCI_BREDR)
mgmt_powered(hdev, 0);
hci_inquiry_cache_flush(hdev);
hci_pend_le_actions_clear(hdev);
@ -1626,9 +1692,8 @@ int hci_dev_do_close(struct hci_dev *hdev)
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
!auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
clear_bit(HCI_INIT, &hdev->flags);
@ -1649,6 +1714,9 @@ int hci_dev_do_close(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
clear_bit(HCI_RUNNING, &hdev->flags);
hci_notify(hdev, HCI_DEV_CLOSE);
/* After this point our queues are empty
* and no tasks are scheduled. */
hdev->close(hdev);
@ -3471,6 +3539,13 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
return -ENXIO;
}
if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
kfree_skb(skb);
return -EINVAL;
}
/* Incoming skb */
bt_cb(skb)->incoming = 1;
@ -3484,6 +3559,21 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
}
EXPORT_SYMBOL(hci_recv_frame);
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
/* Time stamp */
__net_timestamp(skb);
/* Mark as diagnostic packet and send to monitor */
bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
hci_send_to_monitor(hdev, skb);
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
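/* A driver-side sketch (not part of this patch; the data/len source is
* hypothetical): a transport driver that receives vendor diagnostic
* messages can forward them to the monitor channel like this.
*
*	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
*
*	if (!skb)
*		return -ENOMEM;
*	memcpy(skb_put(skb, len), data, len);
*	return hci_recv_diag(hdev, skb);
*/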
/* ---- Interface to upper protocols ---- */
int hci_register_cb(struct hci_cb *cb)
@ -3530,6 +3620,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
if (!test_bit(HCI_RUNNING, &hdev->flags)) {
kfree_skb(skb);
return;
}
err = hdev->send(hdev, skb);
if (err < 0) {
BT_ERR("%s sending frame failed (%d)", hdev->name, err);
@ -3580,6 +3675,25 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout)
{
struct sk_buff *skb;
if (!test_bit(HCI_UP, &hdev->flags))
return ERR_PTR(-ENETDOWN);
bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
hci_req_lock(hdev);
skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
hci_req_unlock(hdev);
return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
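/* A driver-side sketch (not part of this patch; the 0xfc05 opcode is
* hypothetical): issuing a vendor specific command with the newly
* exported helper and discarding the response.
*
*	struct sk_buff *skb;
*
*	skb = hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
*	if (IS_ERR(skb))
*		return PTR_ERR(skb);
*	kfree_skb(skb);
*/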
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{

View File

@ -279,6 +279,9 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
else
opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
break;
case HCI_DIAG_PKT:
opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
break;
default:
return;
}
@ -303,6 +306,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
struct hci_mon_hdr *hdr;
struct hci_mon_new_index *ni;
struct hci_mon_index_info *ii;
struct sk_buff *skb;
__le16 opcode;
@ -312,7 +316,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
if (!skb)
return NULL;
ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
ni->type = hdev->dev_type;
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
@ -329,6 +333,34 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
break;
case HCI_DEV_UP:
skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
if (!skb)
return NULL;
ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
bacpy(&ii->bdaddr, &hdev->bdaddr);
ii->manufacturer = cpu_to_le16(hdev->manufacturer);
opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
break;
case HCI_DEV_OPEN:
skb = bt_skb_alloc(0, GFP_ATOMIC);
if (!skb)
return NULL;
opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
break;
case HCI_DEV_CLOSE:
skb = bt_skb_alloc(0, GFP_ATOMIC);
if (!skb)
return NULL;
opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
break;
default:
return NULL;
}
@ -358,6 +390,26 @@ static void send_monitor_replay(struct sock *sk)
if (sock_queue_rcv_skb(sk, skb))
kfree_skb(skb);
if (!test_bit(HCI_RUNNING, &hdev->flags))
continue;
skb = create_monitor_event(hdev, HCI_DEV_OPEN);
if (!skb)
continue;
if (sock_queue_rcv_skb(sk, skb))
kfree_skb(skb);
if (!test_bit(HCI_UP, &hdev->flags))
continue;
skb = create_monitor_event(hdev, HCI_DEV_UP);
if (!skb)
continue;
if (sock_queue_rcv_skb(sk, skb))
kfree_skb(skb);
}
read_unlock(&hci_dev_list_lock);
@ -392,14 +444,12 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
struct hci_ev_si_device ev;
BT_DBG("hdev %s event %d", hdev->name, event);
/* Send event to monitor */
if (atomic_read(&monitor_promisc)) {
struct sk_buff *skb;
/* Send event to monitor */
skb = create_monitor_event(hdev, event);
if (skb) {
hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@ -408,10 +458,14 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
}
}
/* Send event to sockets */
ev.event = event;
ev.dev_id = hdev->id;
hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
if (event <= HCI_DEV_DOWN) {
struct hci_ev_si_device ev;
/* Send event to sockets */
ev.event = event;
ev.dev_id = hdev->id;
hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
}
if (event == HCI_DEV_UNREG) {
struct sock *sk;

View File

@ -151,6 +151,22 @@ void bt_info(const char *format, ...)
}
EXPORT_SYMBOL(bt_info);
void bt_warn(const char *format, ...)
{
struct va_format vaf;
va_list args;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
pr_warn("%pV", &vaf);
va_end(args);
}
EXPORT_SYMBOL(bt_warn);
void bt_err(const char *format, ...)
{
struct va_format vaf;

View File

@ -101,14 +101,9 @@ static const struct net_device_ops lowpan_netdev_ops = {
static void lowpan_setup(struct net_device *ldev)
{
ldev->addr_len = IEEE802154_ADDR_LEN;
memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN);
ldev->type = ARPHRD_6LOWPAN;
/* Frame Control + Sequence Number + Address fields + Security Header */
ldev->hard_header_len = 2 + 1 + 20 + 14;
ldev->needed_tailroom = 2; /* FCS */
ldev->mtu = IPV6_MIN_MTU;
ldev->priv_flags |= IFF_NO_QUEUE;
/* We need an ipv6hdr as minimum len when calling xmit */
ldev->hard_header_len = sizeof(struct ipv6hdr);
ldev->flags = IFF_BROADCAST | IFF_MULTICAST;
ldev->netdev_ops = &lowpan_netdev_ops;
@ -156,6 +151,15 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
lowpan_dev_info(ldev)->wdev = wdev;
/* Set the lowpan hardware address to the wpan hardware address. */
memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
/* We need headroom for a possible wpan_dev_hard_header call and tailroom
* for encryption/FCS handling. The lowpan interface will replace the
* IPv6 header with the 6LoWPAN header. In the worst case the 6LoWPAN
* header needs LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6
* header.
*/
ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN +
wdev->needed_headroom;
ldev->needed_tailroom = wdev->needed_tailroom;
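/* A worked example (not part of this change): with the defines from this
* series LOWPAN_IPHC_MAX_HEADER_LEN is 2 + 1 + LOWPAN_NHC_MAX_ID_LEN = 4,
* and the underlying wpan device reserves extra_tx_headroom plus
* IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14 = 37) bytes, so a transmit
* skb normally has enough headroom for the worst-case 6LoWPAN plus
* 802.15.4 header without reallocation.
*/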
lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);

View File

@ -29,6 +29,8 @@
static int lowpan_give_skb_to_device(struct sk_buff *skb)
{
skb->protocol = htons(ETH_P_IPV6);
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;
return netif_rx(skb);
}

View File

@ -10,6 +10,7 @@
#include <net/6lowpan.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include "6lowpan_i.h"
@ -36,6 +37,13 @@ lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
sizeof(struct lowpan_addr_info));
}
/* This callback will be called from the AF_PACKET and IPv6 stack; the
* AF_PACKET sockets give an 8 byte array for addresses only!
*
* TODO I think AF_PACKET DGRAM (sending/receiving) and RAW (sending) make no
* sense here. We should disable them; the right use-case would be AF_INET6
* RAW/DGRAM sockets.
*/
int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
@ -71,27 +79,33 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
const struct ieee802154_hdr *master_hdr)
const struct ieee802154_hdr *master_hdr, bool frag1)
{
struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
struct sk_buff *frag;
int rc;
frag = alloc_skb(wdev->hard_header_len + wdev->needed_tailroom + size,
frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
GFP_ATOMIC);
if (likely(frag)) {
frag->dev = wdev;
frag->priority = skb->priority;
skb_reserve(frag, wdev->hard_header_len);
skb_reserve(frag, wdev->needed_headroom);
skb_reset_network_header(frag);
*mac_cb(frag) = *mac_cb(skb);
rc = dev_hard_header(frag, wdev, 0, &master_hdr->dest,
&master_hdr->source, size);
if (rc < 0) {
kfree_skb(frag);
return ERR_PTR(rc);
if (frag1) {
memcpy(skb_put(frag, skb->mac_len),
skb_mac_header(skb), skb->mac_len);
} else {
rc = wpan_dev_hard_header(frag, wdev,
&master_hdr->dest,
&master_hdr->source, size);
if (rc < 0) {
kfree_skb(frag);
return ERR_PTR(rc);
}
}
} else {
frag = ERR_PTR(-ENOMEM);
@ -103,13 +117,13 @@ lowpan_alloc_frag(struct sk_buff *skb, int size,
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
u8 *frag_hdr, int frag_hdrlen,
int offset, int len)
int offset, int len, bool frag1)
{
struct sk_buff *frag;
raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
if (IS_ERR(frag))
return PTR_ERR(frag);
@ -148,7 +162,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
LOWPAN_FRAG1_HEAD_SIZE, 0,
frag_len + skb_network_header_len(skb));
frag_len + skb_network_header_len(skb),
true);
if (rc) {
pr_debug("%s unable to send FRAG1 packet (tag: %d)",
__func__, ntohs(frag_tag));
@ -169,7 +184,7 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
frag_len);
frag_len, false);
if (rc) {
pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
__func__, ntohs(frag_tag), skb_offset);
@ -177,6 +192,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
}
} while (skb_unprocessed > frag_cap);
ldev->stats.tx_packets++;
ldev->stats.tx_bytes += dgram_size;
consume_skb(skb);
return NET_XMIT_SUCCESS;
@ -228,8 +245,8 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
cb->ackreq = wpan_dev->ackreq;
}
return dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, ETH_P_IPV6,
(void *)&da, (void *)&sa, 0);
return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa,
0);
}
netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
@ -240,6 +257,8 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
pr_debug("package xmit\n");
WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);
/* We must take a copy of the skb before we modify/replace the ipv6
* header as the header could be used elsewhere
*/
@ -262,6 +281,8 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
skb->dev = lowpan_dev_info(ldev)->wdev;
ldev->stats.tx_packets++;
ldev->stats.tx_bytes += dgram_size;
return dev_queue_xmit(skb);
} else {
netdev_tx_t rc;

View File

@ -12,6 +12,11 @@ menuconfig IEEE802154
if IEEE802154
config IEEE802154_NL802154_EXPERIMENTAL
bool "IEEE 802.15.4 experimental netlink support"
---help---
Adds experimental netlink support for nl802154.
config IEEE802154_SOCKET
tristate "IEEE 802.15.4 socket interface"
default y

View File

@ -95,6 +95,18 @@ cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx)
return result;
}
struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx)
{
struct cfg802154_registered_device *rdev;
ASSERT_RTNL();
rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx);
if (!rdev)
return NULL;
return &rdev->wpan_phy;
}
struct wpan_phy *
wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
{

View File

@ -42,5 +42,6 @@ extern int cfg802154_rdev_list_generation;
void cfg802154_dev_free(struct cfg802154_registered_device *rdev);
struct cfg802154_registered_device *
cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx);
struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx);
#endif /* __IEEE802154_CORE_H */

View File

@ -83,35 +83,35 @@ ieee802154_hdr_push_sechdr(u8 *buf, const struct ieee802154_sechdr *hdr)
}
int
ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
u8 buf[MAC802154_FRAME_HARD_HEADER_LEN];
u8 buf[IEEE802154_MAX_HEADER_LEN];
int pos = 2;
int rc;
struct ieee802154_hdr_fc fc = hdr->fc;
struct ieee802154_hdr_fc *fc = &hdr->fc;
buf[pos++] = hdr->seq;
fc.dest_addr_mode = hdr->dest.mode;
fc->dest_addr_mode = hdr->dest.mode;
rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false);
if (rc < 0)
return -EINVAL;
pos += rc;
fc.source_addr_mode = hdr->source.mode;
fc->source_addr_mode = hdr->source.mode;
if (hdr->source.pan_id == hdr->dest.pan_id &&
hdr->dest.mode != IEEE802154_ADDR_NONE)
fc.intra_pan = true;
fc->intra_pan = true;
rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc.intra_pan);
rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc->intra_pan);
if (rc < 0)
return -EINVAL;
pos += rc;
if (fc.security_enabled) {
fc.version = 1;
if (fc->security_enabled) {
fc->version = 1;
rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec);
if (rc < 0)
@ -120,7 +120,7 @@ ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
pos += rc;
}
memcpy(buf, &fc, 2);
memcpy(buf, fc, 2);
memcpy(skb_push(skb, pos), buf, pos);

File diff suppressed because it is too large

View File

@ -208,4 +208,113 @@ rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
return ret;
}
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
/* TODO this is already a nl802154, so move into ieee802154 */
static inline void
rdev_get_llsec_table(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
struct ieee802154_llsec_table **table)
{
rdev->ops->get_llsec_table(&rdev->wpan_phy, wpan_dev, table);
}
static inline void
rdev_lock_llsec_table(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev)
{
rdev->ops->lock_llsec_table(&rdev->wpan_phy, wpan_dev);
}
static inline void
rdev_unlock_llsec_table(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev)
{
rdev->ops->unlock_llsec_table(&rdev->wpan_phy, wpan_dev);
}
static inline int
rdev_get_llsec_params(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
struct ieee802154_llsec_params *params)
{
return rdev->ops->get_llsec_params(&rdev->wpan_phy, wpan_dev, params);
}
static inline int
rdev_set_llsec_params(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_params *params,
u32 changed)
{
return rdev->ops->set_llsec_params(&rdev->wpan_phy, wpan_dev, params,
changed);
}
static inline int
rdev_add_llsec_key(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_key_id *id,
const struct ieee802154_llsec_key *key)
{
return rdev->ops->add_llsec_key(&rdev->wpan_phy, wpan_dev, id, key);
}
static inline int
rdev_del_llsec_key(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_key_id *id)
{
return rdev->ops->del_llsec_key(&rdev->wpan_phy, wpan_dev, id);
}
static inline int
rdev_add_seclevel(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_seclevel *sl)
{
return rdev->ops->add_seclevel(&rdev->wpan_phy, wpan_dev, sl);
}
static inline int
rdev_del_seclevel(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_seclevel *sl)
{
return rdev->ops->del_seclevel(&rdev->wpan_phy, wpan_dev, sl);
}
static inline int
rdev_add_device(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_device *dev_desc)
{
return rdev->ops->add_device(&rdev->wpan_phy, wpan_dev, dev_desc);
}
static inline int
rdev_del_device(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, __le64 extended_addr)
{
return rdev->ops->del_device(&rdev->wpan_phy, wpan_dev, extended_addr);
}
static inline int
rdev_add_devkey(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, __le64 extended_addr,
const struct ieee802154_llsec_device_key *devkey)
{
return rdev->ops->add_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
devkey);
}
static inline int
rdev_del_devkey(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, __le64 extended_addr,
const struct ieee802154_llsec_device_key *devkey)
{
return rdev->ops->del_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
devkey);
}
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
#endif /* __CFG802154_RDEV_OPS */

View File

@ -273,7 +273,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
goto out;
}
mtu = dev->mtu;
mtu = IEEE802154_MTU;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
if (size > mtu) {
@ -637,7 +637,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
err = -ENXIO;
goto out;
}
mtu = dev->mtu;
mtu = IEEE802154_MTU;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
if (size > mtu) {
@ -676,8 +676,8 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
cb->seclevel = ro->seclevel;
cb->seclevel_override = ro->seclevel_override;
err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
ro->bound ? &ro->src_addr : NULL, size);
err = wpan_dev_hard_header(skb, dev, &dst_addr,
ro->bound ? &ro->src_addr : NULL, size);
if (err < 0)
goto out_skb;

View File

@ -266,6 +266,195 @@ ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
return 0;
}
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
static void
ieee802154_get_llsec_table(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
struct ieee802154_llsec_table **table)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
*table = &sdata->sec.table;
}
static void
ieee802154_lock_llsec_table(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
mutex_lock(&sdata->sec_mtx);
}
static void
ieee802154_unlock_llsec_table(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
mutex_unlock(&sdata->sec_mtx);
}
static int
ieee802154_set_llsec_params(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_params *params,
int changed)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_set_params(&sdata->sec, params, changed);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_get_llsec_params(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
struct ieee802154_llsec_params *params)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_get_params(&sdata->sec, params);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_add_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_key_id *id,
const struct ieee802154_llsec_key *key)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_key_add(&sdata->sec, id, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_del_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_key_id *id)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_key_del(&sdata->sec, id);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_add_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_seclevel *sl)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_seclevel_add(&sdata->sec, sl);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_del_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_seclevel *sl)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_seclevel_del(&sdata->sec, sl);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_add_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_device *dev_desc)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_dev_add(&sdata->sec, dev_desc);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_del_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le64 extended_addr)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_dev_del(&sdata->sec, extended_addr);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_add_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le64 extended_addr,
const struct ieee802154_llsec_device_key *key)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_devkey_add(&sdata->sec, extended_addr, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_del_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le64 extended_addr,
const struct ieee802154_llsec_device_key *key)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_devkey_del(&sdata->sec, extended_addr, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
const struct cfg802154_ops mac802154_config_ops = {
.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
@ -284,4 +473,20 @@ const struct cfg802154_ops mac802154_config_ops = {
.set_max_frame_retries = ieee802154_set_max_frame_retries,
.set_lbt_mode = ieee802154_set_lbt_mode,
.set_ackreq_default = ieee802154_set_ackreq_default,
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
.get_llsec_table = ieee802154_get_llsec_table,
.lock_llsec_table = ieee802154_lock_llsec_table,
.unlock_llsec_table = ieee802154_unlock_llsec_table,
/* TODO above */
.set_llsec_params = ieee802154_set_llsec_params,
.get_llsec_params = ieee802154_get_llsec_params,
.add_llsec_key = ieee802154_add_llsec_key,
.del_llsec_key = ieee802154_del_llsec_key,
.add_seclevel = ieee802154_add_seclevel,
.del_seclevel = ieee802154_del_seclevel,
.add_device = ieee802154_add_device,
.del_device = ieee802154_del_device,
.add_devkey = ieee802154_add_devkey,
.del_devkey = ieee802154_del_devkey,
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
};

View File

@ -367,12 +367,11 @@ static int mac802154_set_header_security(struct ieee802154_sub_if_data *sdata,
return 0;
}
static int mac802154_header_create(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
const void *daddr,
const void *saddr,
unsigned len)
static int ieee802154_header_create(struct sk_buff *skb,
struct net_device *dev,
const struct ieee802154_addr *daddr,
const struct ieee802154_addr *saddr,
unsigned len)
{
struct ieee802154_hdr hdr;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
@ -423,24 +422,89 @@ static int mac802154_header_create(struct sk_buff *skb,
return hlen;
}
static const struct wpan_dev_header_ops ieee802154_header_ops = {
.create = ieee802154_header_create,
};
/* This header create functionality assumes an 8 byte array for the
* source and destination pointer at maximum. To adapt this to the
* 802.15.4 data frame header we use extended addressing only and
* assume an intra-pan connection; the fc fields are mostly fallback
* handling. This provides dev_hard_header for dgram sockets.
*/
static int mac802154_header_create(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
const void *daddr,
const void *saddr,
unsigned len)
{
struct ieee802154_hdr hdr;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct ieee802154_mac_cb cb = { };
int hlen;
if (!daddr)
return -EINVAL;
memset(&hdr.fc, 0, sizeof(hdr.fc));
hdr.fc.type = IEEE802154_FC_TYPE_DATA;
hdr.fc.ack_request = wpan_dev->ackreq;
hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
/* TODO currently a workaround to pass a zeroed cb block so that the
* security parameter defaults are set according to the MIB.
*/
if (mac802154_set_header_security(sdata, &hdr, &cb) < 0)
return -EINVAL;
hdr.dest.pan_id = wpan_dev->pan_id;
hdr.dest.mode = IEEE802154_ADDR_LONG;
ieee802154_be64_to_le64(&hdr.dest.extended_addr, daddr);
hdr.source.pan_id = hdr.dest.pan_id;
hdr.source.mode = IEEE802154_ADDR_LONG;
if (!saddr)
hdr.source.extended_addr = wpan_dev->extended_addr;
else
ieee802154_be64_to_le64(&hdr.source.extended_addr, saddr);
hlen = ieee802154_hdr_push(skb, &hdr);
if (hlen < 0)
return -EINVAL;
skb_reset_mac_header(skb);
skb->mac_len = hlen;
if (len > ieee802154_max_payload(&hdr))
return -EMSGSIZE;
return hlen;
}
static int
mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
struct ieee802154_hdr hdr;
struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
pr_debug("malformed packet\n");
return 0;
}
*addr = hdr.source;
return sizeof(*addr);
if (hdr.source.mode == IEEE802154_ADDR_LONG) {
ieee802154_le64_to_be64(haddr, &hdr.source.extended_addr);
return IEEE802154_EXTENDED_ADDR_LEN;
}
return 0;
}
static struct header_ops mac802154_header_ops = {
.create = mac802154_header_create,
.parse = mac802154_header_parse,
static const struct header_ops mac802154_header_ops = {
.create = mac802154_header_create,
.parse = mac802154_header_parse,
};
static const struct net_device_ops mac802154_wpan_ops = {
@ -471,9 +535,29 @@ static void ieee802154_if_setup(struct net_device *dev)
dev->addr_len = IEEE802154_EXTENDED_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN);
dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
dev->needed_tailroom = 2 + 16; /* FCS + MIC */
dev->mtu = IEEE802154_MTU;
/* hard_header_len would be IEEE802154_MIN_HEADER_LEN. AF_PACKET
* will not send frames without any payload, but ack frames
* have no payload, so subtract one so that we can still send a 3 byte
* frame. The xmit callback assumes at least a hard header where the two
* byte fc and the sequence field are set.
*/
dev->hard_header_len = IEEE802154_MIN_HEADER_LEN - 1;
/* The auth_tag header is for security and is placed in the private
* payload room of the mac frame, which sits between the payload and the
* FCS field.
*/
dev->needed_tailroom = IEEE802154_MAX_AUTH_TAG_LEN +
IEEE802154_FCS_LEN;
/* The mtu size is the payload without the mac header in this case.
* We have a dynamic length header with a minimum header length,
* which is hard_header_len. Here we set the mtu to the maximum
* payload size, which is IEEE802154_MTU - IEEE802154_FCS_LEN -
* hard_header_len: the FCS is set by hardware or in ndo_start_xmit,
* and the minimum mac header can be evaluated inside the driver
* layer. The rest of the mac header becomes part of the payload if it
* is greater than hard_header_len.
*/
dev->mtu = IEEE802154_MTU - IEEE802154_FCS_LEN -
dev->hard_header_len;
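/* A worked example (not part of this change): with the constants above
* hard_header_len = (IEEE802154_ACK_PSDU_LEN - IEEE802154_FCS_LEN) - 1 = 2,
* needed_tailroom = 16 + 2 = 18 and mtu = 127 - 2 - 2 = 123 bytes.
*/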
dev->tx_queue_len = 300;
dev->flags = IFF_NOARP | IFF_BROADCAST;
}
@ -513,6 +597,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
sdata->dev->netdev_ops = &mac802154_wpan_ops;
sdata->dev->ml_priv = &mac802154_mlme_wpan;
wpan_dev->promiscuous_mode = false;
wpan_dev->header_ops = &ieee802154_header_ops;
mutex_init(&sdata->sec_mtx);
@ -550,7 +635,8 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
if (!ndev)
return ERR_PTR(-ENOMEM);
ndev->needed_headroom = local->hw.extra_tx_headroom;
ndev->needed_headroom = local->hw.extra_tx_headroom +
IEEE802154_MAX_HEADER_LEN;
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0)

View File

@ -401,6 +401,7 @@ int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
hash_del_rcu(&pos->bucket_s);
hash_del_rcu(&pos->bucket_hw);
list_del_rcu(&pos->dev.list);
call_rcu(&pos->rcu, llsec_dev_free_rcu);
return 0;

View File

@ -87,6 +87,10 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
skb->dev = sdata->dev;
/* TODO this should be moved after the netif_receive_skb call; otherwise
* wireshark will show a mac header with security fields while the
* payload is already decrypted.
*/
rc = mac802154_llsec_decrypt(&sdata->sec, skb);
if (rc) {
pr_debug("decryption failed: %i\n", rc);

View File

@ -77,9 +77,6 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
put_unaligned_le16(crc, skb_put(skb, 2));
}
if (skb_cow_head(skb, local->hw.extra_tx_headroom))
goto err_tx;
/* Stop the netif queue on each sub_if_data object. */
ieee802154_stop_queue(&local->hw);
@ -121,6 +118,10 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int rc;
/* TODO we should move this into the wpan_dev_hard_header and
* dev_hard_header functions. The reason is that wireshark will show a
* mac header with security fields while the payload is not encrypted.
*/
rc = mac802154_llsec_encrypt(&sdata->sec, skb);
if (rc) {
netdev_warn(dev, "encryption failed: %i\n", rc);