Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2020-03-29

Here are a few more Bluetooth patches for the 5.7 kernel:

 - Fix assumption of encryption key size when reading fails
 - Add support for DEFER_SETUP with L2CAP Enhanced Credit Based Mode
 - Fix issue with auto-connected devices
 - Fix suspend handling when entering the state fails
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Author: David S. Miller <davem@davemloft.net>
Date:   2020-03-30 11:49:14 -07:00
Commit: 033c6f3b78

5 changed files with 174 additions and 42 deletions
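The bulk of this pull is the DEFER_SETUP support for L2CAP Enhanced Credit Based (ECRED) mode. As a rough userspace illustration of the feature, not part of the diff below, a client could open its channels along these lines so the kernel can group them into a single L2CAP_ECRED_CONN_REQ; the helper name is hypothetical, and BT_MODE/BT_MODE_EXT_FLOWCTL in the headers plus an ECRED-enabled kernel are assumed:

/* Hypothetical userspace sketch (not kernel code): open an L2CAP channel in
 * Enhanced Credit Based (extended flow control) mode, optionally deferred so
 * that the kernel can batch it with other pending channels of the same
 * process/PSM. Assumes libbluetooth headers that define BT_MODE and
 * BT_MODE_EXT_FLOWCTL, and an LE peer address for simplicity.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int open_ecred_chan(const bdaddr_t *dst, uint16_t psm, int defer)
{
	struct sockaddr_l2 addr = { 0 };
	uint8_t mode = BT_MODE_EXT_FLOWCTL;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	/* Request ECRED (extended flow control) channel mode */
	if (setsockopt(sk, SOL_BLUETOOTH, BT_MODE, &mode, sizeof(mode)) < 0)
		goto fail;

	/* Hold the connection request back so it can be grouped */
	if (setsockopt(sk, SOL_BLUETOOTH, BT_DEFER_SETUP, &defer,
		       sizeof(defer)) < 0)
		goto fail;

	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(psm);
	addr.l2_bdaddr_type = BDADDR_LE_PUBLIC;
	bacpy(&addr.l2_bdaddr, dst);

	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto fail;

	return sk;

fail:
	close(sk);
	return -1;
}

Channels opened with defer set are held back; a later non-deferred channel with the same PSM from the same process triggers the grouped request, which is what the l2cap_core.c changes below implement.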

include/net/bluetooth/l2cap.h

@@ -47,6 +47,7 @@
#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
#define L2CAP_LE_MIN_MTU 23
#define L2CAP_ECRED_CONN_SCID_MAX 5
#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@@ -660,6 +661,7 @@ struct l2cap_ops {
void (*suspend) (struct l2cap_chan *chan);
void (*set_shutdown) (struct l2cap_chan *chan);
long (*get_sndtimeo) (struct l2cap_chan *chan);
struct pid *(*get_peer_pid) (struct l2cap_chan *chan);
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
@@ -983,6 +985,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan);
int l2cap_ertm_init(struct l2cap_chan *chan);
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
typedef void (*l2cap_chan_func_t)(struct l2cap_chan *chan, void *data);
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
void *data);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
void l2cap_send_conn_req(struct l2cap_chan *chan);
void l2cap_move_start(struct l2cap_chan *chan);
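The l2cap_chan_func_t/l2cap_chan_list pair added above is a generic, locked iterator over a connection's channels. A minimal hypothetical use (illustrative only; the names below are not from this patch set) would look like:

/* Hypothetical callback matching the new l2cap_chan_func_t signature:
 * count how many channels are attached to a connection.
 */
static void count_chan_cb(struct l2cap_chan *chan, void *data)
{
	int *count = data;

	(*count)++;
}

static int count_channels(struct l2cap_conn *conn)
{
	int count = 0;

	/* Iterates under conn->chan_lock via the new helper */
	l2cap_chan_list(conn, count_chan_cb, &count);

	return count;
}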

net/bluetooth/hci_core.c

@@ -3305,6 +3305,15 @@ static void hci_prepare_suspend(struct work_struct *work)
hci_dev_unlock(hdev);
}
static int hci_change_suspend_state(struct hci_dev *hdev,
enum suspended_state next)
{
hdev->suspend_state_next = next;
set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
return hci_suspend_wait_event(hdev);
}
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
@@ -3330,32 +3339,24 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
* connectable (disabling scanning)
* - Second, program event filter/whitelist and enable scan
*/
hdev->suspend_state_next = BT_SUSPEND_DISCONNECT;
set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
ret = hci_suspend_wait_event(hdev);
ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
/* If the disconnect portion failed, don't attempt to complete
* by configuring the whitelist. The suspend notifier will
* follow a cancelled suspend with a PM_POST_SUSPEND
* notification.
*/
if (!ret) {
hdev->suspend_state_next = BT_SUSPEND_COMPLETE;
set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
ret = hci_suspend_wait_event(hdev);
}
/* Only configure whitelist if disconnect succeeded */
if (!ret)
ret = hci_change_suspend_state(hdev,
BT_SUSPEND_COMPLETE);
} else if (action == PM_POST_SUSPEND) {
hdev->suspend_state_next = BT_RUNNING;
set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
ret = hci_suspend_wait_event(hdev);
ret = hci_change_suspend_state(hdev, BT_RUNNING);
}
/* If suspend failed, restore it to running */
if (ret && action == PM_SUSPEND_PREPARE)
hci_change_suspend_state(hdev, BT_RUNNING);
done:
return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
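For reference, hci_suspend_notifier() sits on the standard power-management notifier chain. A minimal hypothetical notifier (not part of this patch) shows the contract the return value above follows:

/* Hypothetical PM notifier sketch showing the calling convention that
 * hci_suspend_notifier() follows: PM_SUSPEND_PREPARE runs before the system
 * suspends and PM_POST_SUSPEND after resume (or after a cancelled suspend).
 * On PM_SUSPEND_PREPARE, returning notifier_from_errno(-EBUSY) vetoes the
 * suspend, while NOTIFY_STOP/NOTIFY_DONE lets it proceed.
 */
#include <linux/notifier.h>
#include <linux/suspend.h>

static int example_pm_notifier(struct notifier_block *nb, unsigned long action,
			       void *data)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
		/* Quiesce the device; an error return aborts the suspend */
		return NOTIFY_DONE;
	case PM_POST_SUSPEND:
		/* Undo whatever PM_SUSPEND_PREPARE set up */
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_notifier,
};

/* Paired register_pm_notifier(&example_pm_nb) / unregister_pm_notifier()
 * calls would go in driver init/exit, much as hci_core.c does around device
 * registration.
 */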

net/bluetooth/hci_event.c

@@ -2539,17 +2539,18 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
bt_dev_err(hdev, "no memory for new conn");
goto unlock;
}
}
} else {
if (ev->link_type != SCO_LINK)
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
&ev->bdaddr);
if (!conn)
goto unlock;
conn->type = SCO_LINK;
}
}
if (!ev->status) {
conn->handle = __le16_to_cpu(ev->handle);
@@ -2962,14 +2963,14 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
if (!conn)
goto unlock;
/* If we fail to read the encryption key size, assume maximum
* (which is the same we do also when this HCI command isn't
* supported.
/* While unexpected, the read_enc_key_size command may fail. The most
* secure approach is to then assume the key size is 0 to force a
* disconnection.
*/
if (rp->status) {
bt_dev_err(hdev, "failed to read key size for handle %u",
handle);
conn->enc_key_size = HCI_LINK_KEY_SIZE;
conn->enc_key_size = 0;
} else {
conn->enc_key_size = rp->key_size;
}
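For context, and not part of this hunk: the L2CAP core already refuses encrypted links whose key size is below hdev->min_enc_key_size, so forcing enc_key_size to 0 here guarantees the link gets torn down instead of being treated as fully encrypted. A simplified paraphrase of that existing gate (l2cap_check_enc_key_size() in l2cap_core.c, reproduced from memory rather than verbatim):

/* Simplified paraphrase of the existing key-size gate in l2cap_core.c: an
 * encrypted link whose enc_key_size was forced to 0 can never satisfy the
 * configured minimum, so channel setup fails and the connection is dropped.
 */
static bool check_enc_key_size(struct hci_conn *hcon)
{
	return !test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
	       hcon->enc_key_size >= hcon->hdev->min_enc_key_size;
}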

net/bluetooth/l2cap_core.c

@@ -678,6 +678,29 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
void *data)
{
struct l2cap_chan *chan;
list_for_each_entry(chan, &conn->chan_l, list) {
func(chan, data);
}
}
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
void *data)
{
if (!conn)
return;
mutex_lock(&conn->chan_lock);
__l2cap_chan_list(conn, func, data);
mutex_unlock(&conn->chan_lock);
}
EXPORT_SYMBOL_GPL(l2cap_chan_list);
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
@@ -1356,29 +1379,79 @@ static void l2cap_le_connect(struct l2cap_chan *chan)
sizeof(req), &req);
}
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
struct l2cap_ecred_conn_data {
struct {
struct l2cap_ecred_conn_req req;
__le16 scid;
__le16 scid[5];
} __packed pdu;
struct l2cap_chan *chan;
struct pid *pid;
int count;
};
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
struct l2cap_ecred_conn_data *conn = data;
struct pid *pid;
if (chan == conn->chan)
return;
if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
return;
pid = chan->ops->get_peer_pid(chan);
/* Only add deferred channels with the same PID/PSM */
if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
return;
if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
return;
l2cap_ecred_init(chan, 0);
pdu.req.psm = chan->psm;
pdu.req.mtu = cpu_to_le16(chan->imtu);
pdu.req.mps = cpu_to_le16(chan->mps);
pdu.req.credits = cpu_to_le16(chan->rx_credits);
pdu.scid = cpu_to_le16(chan->scid);
/* Set the same ident so we can match on the rsp */
chan->ident = conn->chan->ident;
/* Include all channels deferred */
conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
conn->count++;
}
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
struct l2cap_ecred_conn_data data;
if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
return;
if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
return;
l2cap_ecred_init(chan, 0);
data.pdu.req.psm = chan->psm;
data.pdu.req.mtu = cpu_to_le16(chan->imtu);
data.pdu.req.mps = cpu_to_le16(chan->mps);
data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
data.pdu.scid[0] = cpu_to_le16(chan->scid);
chan->ident = l2cap_get_ident(conn);
data.pid = chan->ops->get_peer_pid(chan);
data.count = 1;
data.chan = chan;
data.pid = chan->ops->get_peer_pid(chan);
__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
sizeof(pdu), &pdu);
sizeof(data.pdu.req) + data.count * sizeof(__le16),
&data.pdu);
}
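As a concrete size check (assuming the usual four __le16 fields psm/mtu/mps/credits in struct l2cap_ecred_conn_req): with three channels grouped, data.count is 3, so the payload sent above is sizeof(data.pdu.req) + 3 * sizeof(__le16) = 8 + 6 = 14 bytes, i.e. the fixed request header followed by three source CIDs. The scid[5] bound on the pdu matches the new L2CAP_ECRED_CONN_SCID_MAX limit enforced in l2cap_chan_connect() further down.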
static void l2cap_le_start(struct l2cap_chan *chan)
@@ -7693,6 +7766,33 @@ static bool is_valid_psm(u16 psm, u8 dst_type) {
return ((psm & 0x0101) == 0x0001);
}
struct l2cap_chan_data {
struct l2cap_chan *chan;
struct pid *pid;
int count;
};
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
struct l2cap_chan_data *d = data;
struct pid *pid;
if (chan == d->chan)
return;
if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
return;
pid = chan->ops->get_peer_pid(chan);
/* Only count deferred channels with the same PID/PSM */
if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
return;
d->count++;
}
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
bdaddr_t *dst, u8 dst_type)
{
@@ -7812,6 +7912,23 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
goto done;
}
if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
struct l2cap_chan_data data;
data.chan = chan;
data.pid = chan->ops->get_peer_pid(chan);
data.count = 1;
l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
/* Check if there isn't too many channels being connected */
if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
hci_conn_drop(hcon);
err = -EPROTO;
goto done;
}
}
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(chan);

net/bluetooth/l2cap_sock.c

@@ -1504,6 +1504,13 @@ static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
return sk->sk_sndtimeo;
}
static struct pid *l2cap_sock_get_peer_pid_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
return sk->sk_peer_pid;
}
static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
@@ -1525,6 +1532,7 @@ static const struct l2cap_ops l2cap_chan_ops = {
.suspend = l2cap_sock_suspend_cb,
.set_shutdown = l2cap_sock_set_shutdown_cb,
.get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
.get_peer_pid = l2cap_sock_get_peer_pid_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
};