rpmsg updates for v6.3

rpmsg ctrl and char driver locking is improved, to ensure ordering in cases
 where the communication link is being torn down in parallel with calls to
 open(2) or poll(2).
 
 The glink driver is refactored, to move rpm/smem-specifics out of the
 common logic and better suit further improvements, such as transports
 without a mailbox controller. The handling of remoteproc shutdown is
 improved, to fail clients immediately instead of having them wait for
 timeouts. A driver_override memory leak is corrected and a few spelling
 improvements are introduced.
 
 glink_ssr is transitioned off strlcpy() and "gpr" is added as a valid
 child node of the glink-edge DT binding.
 -----BEGIN PGP SIGNATURE-----
 
 iQJJBAABCAAzFiEEBd4DzF816k8JZtUlCx85Pw2ZrcUFAmP7jLkVHGFuZGVyc3Nv
 bkBrZXJuZWwub3JnAAoJEAsfOT8Nma3FKy4P/RBK8WFS9FJHgP3KZrN8XawruWco
 W/23uiW5tKmtzXO4RB6ZMnHR6iURgqzN6cQnguH9neq4ze1rBbLUkSeKtYK8FwJg
 XApzFMDCxtnjZwCAMdtT14C6R0mDt21DyeGCwRRaQg4eqiaPpuQBNX9ijgh3bons
 GKK0UJuZ1o/E+/pf18+vls1PRKXDqmFr8OUpIPEdM0EA8Mr+bFGbA9TbBkHWr7uq
 5xo7GSZU8u+mA53HGNctIVWAFEM0v9xHLgbjSXcHSiuS0xAC99CHw6TlA26SY6WS
 gh/ovF31m7tC7lz4nFSZfeLHbKb02dRmjIDxsCMFp5pLq/jbMgbkoJ97qtQlSQhW
 VjQemUzGvaENgwZr5ZcHEJ75SoGNtudHnfnwZeybRGjfiMLocVDcTZ81qIvxr8y7
 jZkrQw2uUpGu8qrV9URuIHb5xfo5ixdZ8llgKWq6/PT8pcYWcwBybby55jJ/vhG0
 7Fbv37VeEyq9xxpNfwuMELA2RztT5zrbPZVSfN348PIgMW3nbGzryw//r6HxmUUr
 +FoSvuSBwrnEHlsdgg3ydzIhzgY1+k3jQcS7xW+nZ3PG7eHBhIVBPyNZ9xydtfaL
 TBtjsCRigoLMxp8Fpdz0xANF5PICXcGfqX4591yZ9Q7fAYYQr/WKtiWS+cNd5t6A
 nXDMGwVYjEA46n/1
 =0w8J
 -----END PGP SIGNATURE-----

Merge tag 'rpmsg-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/remoteproc/linux

Pull rpmsg updates from Bjorn Andersson:

 - rpmsg ctrl and char driver locking is improved, to ensure ordering in
   cases where the communication link is being torn down in parallel with
   calls to open(2) or poll(2) (a condensed model of this pattern follows
   the list below)

 - The glink driver is refactored, to move rpm/smem-specifics out of the
   common logic and better suit further improvements, such as
   transports without a mailbox controller. The handling of remoteproc
   shutdown is improved, to fail clients immediately instead of having
   them wait for timeouts. A driver_override memory leak is corrected
   and a few spelling improvements are introduced

 - glink_ssr is transitioned off strlcpy() and "gpr" is added as a valid
   child node of the glink-edge DT binding
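
To make the first item above concrete, here is a condensed userspace model of
the ordering it describes: open(2) proceeds only while the backing rpmsg device
is still present, and teardown clears that pointer under the same lock, so a
racing open() fails cleanly with -ENETRESET instead of touching a half-released
device. This is only a sketch with invented names (fake_eptdev, chrdev_open,
link_teardown), not the rpmsg_char.c code itself:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fake_eptdev {
    pthread_mutex_t ept_lock;       /* stands in for eptdev->ept_lock */
    void *rpdev;                    /* non-NULL while the link is up */
};

/* open(2) path: succeed only while the backing device is still there */
static int chrdev_open(struct fake_eptdev *ept)
{
    int ret = 0;

    pthread_mutex_lock(&ept->ept_lock);
    if (!ept->rpdev)
        ret = -ENETRESET;           /* link already torn down */
    /* otherwise the endpoint would be set up here, still under the lock */
    pthread_mutex_unlock(&ept->ept_lock);
    return ret;
}

/* teardown path: clear the pointer under the same lock, so a racing
 * open()/poll() either completes first or sees the NULL and bails out */
static void link_teardown(struct fake_eptdev *ept)
{
    pthread_mutex_lock(&ept->ept_lock);
    ept->rpdev = NULL;
    pthread_mutex_unlock(&ept->ept_lock);
}

int main(void)
{
    static int dummy;
    static struct fake_eptdev ept = {
        .ept_lock = PTHREAD_MUTEX_INITIALIZER,
        .rpdev = &dummy,
    };

    printf("open before teardown: %d\n", chrdev_open(&ept));  /* 0 */
    link_teardown(&ept);
    printf("open after teardown:  %d\n", chrdev_open(&ept));  /* -ENETRESET */
    return 0;
}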

* tag 'rpmsg-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/remoteproc/linux:
  rpmsg: glink: Release driver_override
  rpmsg: glink: Avoid infinite loop on intent for missing channel
  rpmsg: glink: Fix GLINK command prefix
  rpmsg: glink: Fix spelling of peek
  rpmsg: glink: Cancel pending intent requests at removal
  rpmsg: glink: Fail qcom_glink_tx() once remove has been initiated
  rpmsg: glink: Move irq and mbox handling to transports
  rpmsg: glink: rpm: Wrap driver context
  rpmsg: glink: smem: Wrap driver context
  rpmsg: glink: Extract tx kick operation
  rpmsg: glink: Include types in qcom_glink_native.h
  rpmsg: ctrl: Add lock to rpmsg_ctrldev_remove
  rpmsg: char: Add lock to avoid race when rpmsg device is released
  rpmsg: move from strlcpy with unused retval to strscpy
  dt-bindings: remoteproc: qcom,glink-edge: add GPR node
Linus Torvalds 2023-02-26 12:10:28 -08:00
commit cc38a46de7
10 changed files with 315 additions and 156 deletions

Documentation/devicetree/bindings/remoteproc/qcom,glink-edge.yaml

@ -22,7 +22,7 @@ properties:
required:
- qcom,glink-channels
description:
Qualcomm APR/GPR (Asynchronous/Generic Packet Router)
Qualcomm APR (Asynchronous Packet Router)
fastrpc:
$ref: /schemas/misc/qcom,fastrpc.yaml#
@ -31,6 +31,13 @@ properties:
description:
Qualcomm FastRPC
gpr:
$ref: /schemas/soc/qcom/qcom,apr.yaml#
required:
- qcom,glink-channels
description:
Qualcomm GPR (Generic Packet Router)
interrupts:
maxItems: 1
@ -52,6 +59,21 @@ required:
- mboxes
- qcom,remote-pid
allOf:
- if:
required:
- apr
then:
properties:
gpr: false
- if:
required:
- gpr
then:
properties:
apr: false
additionalProperties: false
examples:

drivers/remoteproc/qcom_common.h

@ -6,6 +6,7 @@
#include "remoteproc_internal.h"
#include <linux/soc/qcom/qmi.h>
struct qcom_glink_smem;
struct qcom_sysmon;
struct qcom_rproc_glink {
@ -15,7 +16,7 @@ struct qcom_rproc_glink {
struct device *dev;
struct device_node *node;
struct qcom_glink *edge;
struct qcom_glink_smem *edge;
};
struct qcom_rproc_subdev {

drivers/rpmsg/qcom_glink_native.c

@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rpmsg.h>
@ -78,11 +77,8 @@ struct glink_core_rx_intent {
/**
* struct qcom_glink - driver context, relates to one remote subsystem
* @dev: reference to the associated struct device
* @mbox_client: mailbox client
* @mbox_chan: mailbox channel
* @rx_pipe: pipe object for receive FIFO
* @tx_pipe: pipe object for transmit FIFO
* @irq: IRQ for signaling incoming events
* @rx_work: worker for handling received control messages
* @rx_lock: protects the @rx_queue
* @rx_queue: queue of received control messages to be processed in @rx_work
@ -94,18 +90,14 @@ struct glink_core_rx_intent {
* @intentless: flag to indicate that there is no intent
* @tx_avail_notify: Waitqueue for pending tx tasks
* @sent_read_notify: flag to check cmd sent or not
* @abort_tx: flag indicating that all tx attempts should fail
*/
struct qcom_glink {
struct device *dev;
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
struct qcom_glink_pipe *rx_pipe;
struct qcom_glink_pipe *tx_pipe;
int irq;
struct work_struct rx_work;
spinlock_t rx_lock;
struct list_head rx_queue;
@ -120,6 +112,8 @@ struct qcom_glink {
bool intentless;
wait_queue_head_t tx_avail_notify;
bool sent_read_notify;
bool abort_tx;
};
enum {
@ -189,20 +183,20 @@ struct glink_channel {
static const struct rpmsg_endpoint_ops glink_endpoint_ops;
#define RPM_CMD_VERSION 0
#define RPM_CMD_VERSION_ACK 1
#define RPM_CMD_OPEN 2
#define RPM_CMD_CLOSE 3
#define RPM_CMD_OPEN_ACK 4
#define RPM_CMD_INTENT 5
#define RPM_CMD_RX_DONE 6
#define RPM_CMD_RX_INTENT_REQ 7
#define RPM_CMD_RX_INTENT_REQ_ACK 8
#define RPM_CMD_TX_DATA 9
#define RPM_CMD_CLOSE_ACK 11
#define RPM_CMD_TX_DATA_CONT 12
#define RPM_CMD_READ_NOTIF 13
#define RPM_CMD_RX_DONE_W_REUSE 14
#define GLINK_CMD_VERSION 0
#define GLINK_CMD_VERSION_ACK 1
#define GLINK_CMD_OPEN 2
#define GLINK_CMD_CLOSE 3
#define GLINK_CMD_OPEN_ACK 4
#define GLINK_CMD_INTENT 5
#define GLINK_CMD_RX_DONE 6
#define GLINK_CMD_RX_INTENT_REQ 7
#define GLINK_CMD_RX_INTENT_REQ_ACK 8
#define GLINK_CMD_TX_DATA 9
#define GLINK_CMD_CLOSE_ACK 11
#define GLINK_CMD_TX_DATA_CONT 12
#define GLINK_CMD_READ_NOTIF 13
#define GLINK_CMD_RX_DONE_W_REUSE 14
#define GLINK_FEATURE_INTENTLESS BIT(1)
@ -280,10 +274,10 @@ static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
return glink->rx_pipe->avail(glink->rx_pipe);
}
static void qcom_glink_rx_peak(struct qcom_glink *glink,
static void qcom_glink_rx_peek(struct qcom_glink *glink,
void *data, unsigned int offset, size_t count)
{
glink->rx_pipe->peak(glink->rx_pipe, data, offset, count);
glink->rx_pipe->peek(glink->rx_pipe, data, offset, count);
}
static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
@ -303,18 +297,22 @@ static void qcom_glink_tx_write(struct qcom_glink *glink,
glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
}
static void qcom_glink_tx_kick(struct qcom_glink *glink)
{
glink->tx_pipe->kick(glink->tx_pipe);
}
static void qcom_glink_send_read_notify(struct qcom_glink *glink)
{
struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_READ_NOTIF);
msg.cmd = cpu_to_le16(GLINK_CMD_READ_NOTIF);
msg.param1 = 0;
msg.param2 = 0;
qcom_glink_tx_write(glink, &msg, sizeof(msg), NULL, 0);
mbox_send_message(glink->mbox_chan, NULL);
mbox_client_txdone(glink->mbox_chan, 0);
qcom_glink_tx_kick(glink);
}
static int qcom_glink_tx(struct qcom_glink *glink,
@ -331,12 +329,22 @@ static int qcom_glink_tx(struct qcom_glink *glink,
spin_lock_irqsave(&glink->tx_lock, flags);
if (glink->abort_tx) {
ret = -EIO;
goto out;
}
while (qcom_glink_tx_avail(glink) < tlen) {
if (!wait) {
ret = -EAGAIN;
goto out;
}
if (glink->abort_tx) {
ret = -EIO;
goto out;
}
if (!glink->sent_read_notify) {
glink->sent_read_notify = true;
qcom_glink_send_read_notify(glink);
@ -355,9 +363,7 @@ static int qcom_glink_tx(struct qcom_glink *glink,
}
qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
mbox_send_message(glink->mbox_chan, NULL);
mbox_client_txdone(glink->mbox_chan, 0);
qcom_glink_tx_kick(glink);
out:
spin_unlock_irqrestore(&glink->tx_lock, flags);
@ -369,7 +375,7 @@ static int qcom_glink_send_version(struct qcom_glink *glink)
{
struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
msg.cmd = cpu_to_le16(GLINK_CMD_VERSION);
msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features);
@ -380,7 +386,7 @@ static void qcom_glink_send_version_ack(struct qcom_glink *glink)
{
struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
msg.cmd = cpu_to_le16(GLINK_CMD_VERSION_ACK);
msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features);
@ -392,7 +398,7 @@ static void qcom_glink_send_open_ack(struct qcom_glink *glink,
{
struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
msg.cmd = cpu_to_le16(GLINK_CMD_OPEN_ACK);
msg.param1 = cpu_to_le16(channel->rcid);
msg.param2 = cpu_to_le32(0);
@ -417,12 +423,18 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
complete(&channel->intent_req_comp);
}
static void qcom_glink_intent_req_abort(struct glink_channel *channel)
{
channel->intent_req_result = 0;
complete(&channel->intent_req_comp);
}
/**
* qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote
* qcom_glink_send_open_req() - send a GLINK_CMD_OPEN request to the remote
* @glink: Ptr to the glink edge
* @channel: Ptr to the channel that the open req is sent
*
* Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
* Allocates a local channel id and sends a GLINK_CMD_OPEN message to the remote.
* Will return with refcount held, regardless of outcome.
*
* Return: 0 on success, negative errno otherwise.
@ -451,7 +463,7 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink,
channel->lcid = ret;
req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN);
req.msg.param1 = cpu_to_le16(channel->lcid);
req.msg.param2 = cpu_to_le32(name_len);
strcpy(req.name, channel->name);
@ -476,7 +488,7 @@ static void qcom_glink_send_close_req(struct qcom_glink *glink,
{
struct glink_msg req;
req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
req.cmd = cpu_to_le16(GLINK_CMD_CLOSE);
req.param1 = cpu_to_le16(channel->lcid);
req.param2 = 0;
@ -488,7 +500,7 @@ static void qcom_glink_send_close_ack(struct qcom_glink *glink,
{
struct glink_msg req;
req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK);
req.param1 = cpu_to_le16(rcid);
req.param2 = 0;
@ -519,7 +531,7 @@ static void qcom_glink_rx_done_work(struct work_struct *work)
iid = intent->id;
reuse = intent->reuse;
cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE;
cmd.id = reuse ? GLINK_CMD_RX_DONE_W_REUSE : GLINK_CMD_RX_DONE;
cmd.lcid = cid;
cmd.liid = iid;
@ -631,7 +643,7 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
{
struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK);
msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK);
msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(granted);
@ -662,7 +674,7 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink,
} __packed;
struct command cmd;
cmd.id = cpu_to_le16(RPM_CMD_INTENT);
cmd.id = cpu_to_le16(GLINK_CMD_INTENT);
cmd.lcid = cpu_to_le16(channel->lcid);
cmd.count = cpu_to_le32(1);
cmd.size = cpu_to_le32(intent->size);
@ -796,7 +808,7 @@ static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
INIT_LIST_HEAD(&dcmd->node);
qcom_glink_rx_peak(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);
qcom_glink_rx_peek(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);
spin_lock(&glink->rx_lock);
list_add_tail(&dcmd->node, &glink->rx_queue);
@ -829,7 +841,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
return -EAGAIN;
}
qcom_glink_rx_peak(glink, &hdr, 0, sizeof(hdr));
qcom_glink_rx_peek(glink, &hdr, 0, sizeof(hdr));
chunk_size = le32_to_cpu(hdr.chunk_size);
left_size = le32_to_cpu(hdr.left_size);
@ -894,7 +906,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
goto advance_rx;
}
qcom_glink_rx_peak(glink, intent->data + intent->offset,
qcom_glink_rx_peek(glink, intent->data + intent->offset,
sizeof(hdr), chunk_size);
intent->offset += chunk_size;
@ -954,6 +966,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
spin_unlock_irqrestore(&glink->idr_lock, flags);
if (!channel) {
dev_err(glink->dev, "intents for non-existing channel\n");
qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
return;
}
@ -961,7 +974,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
if (!msg)
return;
qcom_glink_rx_peak(glink, msg, 0, msglen);
qcom_glink_rx_peek(glink, msg, 0, msglen);
for (i = 0; i < count; ++i) {
intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
@ -1001,9 +1014,8 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
return 0;
}
static irqreturn_t qcom_glink_native_intr(int irq, void *data)
void qcom_glink_native_rx(struct qcom_glink *glink)
{
struct qcom_glink *glink = data;
struct glink_msg msg;
unsigned int param1;
unsigned int param2;
@ -1019,49 +1031,47 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data)
if (avail < sizeof(msg))
break;
qcom_glink_rx_peak(glink, &msg, 0, sizeof(msg));
qcom_glink_rx_peek(glink, &msg, 0, sizeof(msg));
cmd = le16_to_cpu(msg.cmd);
param1 = le16_to_cpu(msg.param1);
param2 = le32_to_cpu(msg.param2);
switch (cmd) {
case RPM_CMD_VERSION:
case RPM_CMD_VERSION_ACK:
case RPM_CMD_CLOSE:
case RPM_CMD_CLOSE_ACK:
case RPM_CMD_RX_INTENT_REQ:
case GLINK_CMD_VERSION:
case GLINK_CMD_VERSION_ACK:
case GLINK_CMD_CLOSE:
case GLINK_CMD_CLOSE_ACK:
case GLINK_CMD_RX_INTENT_REQ:
ret = qcom_glink_rx_defer(glink, 0);
break;
case RPM_CMD_OPEN_ACK:
case GLINK_CMD_OPEN_ACK:
ret = qcom_glink_rx_open_ack(glink, param1);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case RPM_CMD_OPEN:
case GLINK_CMD_OPEN:
ret = qcom_glink_rx_defer(glink, param2);
break;
case RPM_CMD_TX_DATA:
case RPM_CMD_TX_DATA_CONT:
case GLINK_CMD_TX_DATA:
case GLINK_CMD_TX_DATA_CONT:
ret = qcom_glink_rx_data(glink, avail);
break;
case RPM_CMD_READ_NOTIF:
case GLINK_CMD_READ_NOTIF:
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
mbox_send_message(glink->mbox_chan, NULL);
mbox_client_txdone(glink->mbox_chan, 0);
qcom_glink_tx_kick(glink);
break;
case RPM_CMD_INTENT:
case GLINK_CMD_INTENT:
qcom_glink_handle_intent(glink, param1, param2, avail);
break;
case RPM_CMD_RX_DONE:
case GLINK_CMD_RX_DONE:
qcom_glink_handle_rx_done(glink, param1, param2, false);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case RPM_CMD_RX_DONE_W_REUSE:
case GLINK_CMD_RX_DONE_W_REUSE:
qcom_glink_handle_rx_done(glink, param1, param2, true);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case RPM_CMD_RX_INTENT_REQ_ACK:
case GLINK_CMD_RX_INTENT_REQ_ACK:
qcom_glink_handle_intent_req_ack(glink, param1, param2);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
@ -1074,9 +1084,8 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data)
if (ret)
break;
}
return IRQ_HANDLED;
}
EXPORT_SYMBOL(qcom_glink_native_rx);
/* Locally initiated rpmsg_create_ept */
static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
@ -1264,7 +1273,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
reinit_completion(&channel->intent_req_comp);
cmd.id = RPM_CMD_RX_INTENT_REQ;
cmd.id = GLINK_CMD_RX_INTENT_REQ;
cmd.cid = channel->lcid;
cmd.size = size;
@ -1338,7 +1347,7 @@ static int __qcom_glink_send(struct glink_channel *channel,
chunk_size = SZ_8K;
left_size = len - chunk_size;
}
req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
req.msg.cmd = cpu_to_le16(GLINK_CMD_TX_DATA);
req.msg.param1 = cpu_to_le16(channel->lcid);
req.msg.param2 = cpu_to_le32(iid);
req.chunk_size = cpu_to_le32(chunk_size);
@ -1359,7 +1368,7 @@ static int __qcom_glink_send(struct glink_channel *channel,
chunk_size = SZ_8K;
left_size -= chunk_size;
req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA_CONT);
req.msg.cmd = cpu_to_le16(GLINK_CMD_TX_DATA_CONT);
req.msg.param1 = cpu_to_le16(channel->lcid);
req.msg.param2 = cpu_to_le32(iid);
req.chunk_size = cpu_to_le32(chunk_size);
@ -1446,6 +1455,7 @@ static void qcom_glink_rpdev_release(struct device *dev)
{
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
kfree(rpdev->driver_override);
kfree(rpdev);
}
@ -1623,22 +1633,22 @@ static void qcom_glink_work(struct work_struct *work)
param2 = le32_to_cpu(msg->param2);
switch (cmd) {
case RPM_CMD_VERSION:
case GLINK_CMD_VERSION:
qcom_glink_receive_version(glink, param1, param2);
break;
case RPM_CMD_VERSION_ACK:
case GLINK_CMD_VERSION_ACK:
qcom_glink_receive_version_ack(glink, param1, param2);
break;
case RPM_CMD_OPEN:
case GLINK_CMD_OPEN:
qcom_glink_rx_open(glink, param1, msg->data);
break;
case RPM_CMD_CLOSE:
case GLINK_CMD_CLOSE:
qcom_glink_rx_close(glink, param1);
break;
case RPM_CMD_CLOSE_ACK:
case GLINK_CMD_CLOSE_ACK:
qcom_glink_rx_close_ack(glink, param1);
break;
case RPM_CMD_RX_INTENT_REQ:
case GLINK_CMD_RX_INTENT_REQ:
qcom_glink_handle_intent_req(glink, param1, param2);
break;
default:
@ -1689,6 +1699,7 @@ static void qcom_glink_device_release(struct device *dev)
/* Release qcom_glink_alloc_channel() reference */
kref_put(&channel->refcount, qcom_glink_channel_release);
kfree(rpdev->driver_override);
kfree(rpdev);
}
@ -1722,7 +1733,6 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
struct qcom_glink_pipe *tx,
bool intentless)
{
int irq;
int ret;
struct qcom_glink *glink;
@ -1753,27 +1763,6 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
if (ret)
dev_err(dev, "failed to add groups\n");
glink->mbox_client.dev = dev;
glink->mbox_client.knows_txdone = true;
glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
if (IS_ERR(glink->mbox_chan)) {
if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
dev_err(dev, "failed to acquire IPC channel\n");
return ERR_CAST(glink->mbox_chan);
}
irq = of_irq_get(dev->of_node, 0);
ret = devm_request_irq(dev, irq,
qcom_glink_native_intr,
IRQF_NO_SUSPEND | IRQF_SHARED,
"glink-native", glink);
if (ret) {
dev_err(dev, "failed to request IRQ\n");
return ERR_PTR(ret);
}
glink->irq = irq;
ret = qcom_glink_send_version(glink);
if (ret)
return ERR_PTR(ret);
@ -1796,12 +1785,24 @@ static int qcom_glink_remove_device(struct device *dev, void *data)
void qcom_glink_native_remove(struct qcom_glink *glink)
{
struct glink_channel *channel;
unsigned long flags;
int cid;
int ret;
disable_irq(glink->irq);
qcom_glink_cancel_rx_work(glink);
/* Fail all attempts at sending messages */
spin_lock_irqsave(&glink->tx_lock, flags);
glink->abort_tx = true;
wake_up_all(&glink->tx_avail_notify);
spin_unlock_irqrestore(&glink->tx_lock, flags);
/* Abort any senders waiting for intent requests */
spin_lock_irqsave(&glink->idr_lock, flags);
idr_for_each_entry(&glink->lcids, channel, cid)
qcom_glink_intent_req_abort(channel);
spin_unlock_irqrestore(&glink->idr_lock, flags);
ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
if (ret)
dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
@ -1816,15 +1817,8 @@ void qcom_glink_native_remove(struct qcom_glink *glink)
idr_destroy(&glink->lcids);
idr_destroy(&glink->rcids);
mbox_free_channel(glink->mbox_chan);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
void qcom_glink_native_unregister(struct qcom_glink *glink)
{
device_unregister(glink->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_unregister);
MODULE_DESCRIPTION("Qualcomm GLINK driver");
MODULE_LICENSE("GPL v2");
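
The abort_tx hunks above are the core of the "fail clients immediately" change:
qcom_glink_native_remove() sets abort_tx under tx_lock, wakes everything
sleeping on tx_avail_notify, and completes pending intent requests as failed,
while qcom_glink_tx() re-checks the flag before and inside its wait loop so a
woken sender returns -EIO at once instead of waiting out a timeout. The fragment
below is a self-contained userspace model of that shape; tx_ring, ring_send and
ring_shutdown are invented names for the example, not driver code:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the glink tx path: space is freed by a remote processor we
 * do not control, so without an abort flag a sender could sit in the wait
 * loop until some timeout fires. */
struct tx_ring {
    pthread_mutex_t lock;           /* plays the role of tx_lock */
    pthread_cond_t space_avail;     /* plays the role of tx_avail_notify */
    size_t avail;
    bool abort_tx;                  /* set once teardown has started */
};

static int ring_send(struct tx_ring *r, size_t len)
{
    int ret = 0;

    pthread_mutex_lock(&r->lock);
    while (!r->abort_tx && r->avail < len)
        pthread_cond_wait(&r->space_avail, &r->lock);

    if (r->abort_tx)
        ret = -EIO;                 /* fail right away, no timeout needed */
    else
        r->avail -= len;            /* "write" the message */
    pthread_mutex_unlock(&r->lock);
    return ret;
}

/* Teardown: flip the flag and wake every waiter, mirroring
 * abort_tx = true + wake_up_all(&glink->tx_avail_notify) above. */
static void ring_shutdown(struct tx_ring *r)
{
    pthread_mutex_lock(&r->lock);
    r->abort_tx = true;
    pthread_cond_broadcast(&r->space_avail);
    pthread_mutex_unlock(&r->lock);
}

static void *sender(void *arg)
{
    /* blocks until shutdown, then prints -EIO as a negative errno */
    printf("send returned %d\n", ring_send(arg, 64));
    return NULL;
}

int main(void)
{
    static struct tx_ring r = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .space_avail = PTHREAD_COND_INITIALIZER,
    };
    pthread_t t;

    pthread_create(&t, NULL, sender, &r);
    sleep(1);                       /* let the sender reach the wait loop */
    ring_shutdown(&r);
    pthread_join(t, NULL);
    return 0;
}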

drivers/rpmsg/qcom_glink_native.h

@ -6,6 +6,8 @@
#ifndef __QCOM_GLINK_NATIVE_H__
#define __QCOM_GLINK_NATIVE_H__
#include <linux/types.h>
#define GLINK_FEATURE_INTENT_REUSE BIT(0)
#define GLINK_FEATURE_MIGRATION BIT(1)
#define GLINK_FEATURE_TRACER_PKT BIT(2)
@ -15,15 +17,17 @@ struct qcom_glink_pipe {
size_t (*avail)(struct qcom_glink_pipe *glink_pipe);
void (*peak)(struct qcom_glink_pipe *glink_pipe, void *data,
void (*peek)(struct qcom_glink_pipe *glink_pipe, void *data,
unsigned int offset, size_t count);
void (*advance)(struct qcom_glink_pipe *glink_pipe, size_t count);
void (*write)(struct qcom_glink_pipe *glink_pipe,
const void *hdr, size_t hlen,
const void *data, size_t dlen);
void (*kick)(struct qcom_glink_pipe *glink_pipe);
};
struct device;
struct qcom_glink;
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
@ -32,6 +36,6 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
struct qcom_glink_pipe *tx,
bool intentless);
void qcom_glink_native_remove(struct qcom_glink *glink);
void qcom_glink_native_rx(struct qcom_glink *glink);
void qcom_glink_native_unregister(struct qcom_glink *glink);
#endif

drivers/rpmsg/qcom_glink_rpm.c

@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rpmsg.h>
@ -53,6 +54,18 @@ struct glink_rpm_pipe {
void __iomem *fifo;
};
struct glink_rpm {
struct qcom_glink *glink;
int irq;
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
struct glink_rpm_pipe rx_pipe;
struct glink_rpm_pipe tx_pipe;
};
static size_t glink_rpm_rx_avail(struct qcom_glink_pipe *glink_pipe)
{
struct glink_rpm_pipe *pipe = to_rpm_pipe(glink_pipe);
@ -68,7 +81,7 @@ static size_t glink_rpm_rx_avail(struct qcom_glink_pipe *glink_pipe)
return head - tail;
}
static void glink_rpm_rx_peak(struct qcom_glink_pipe *glink_pipe,
static void glink_rpm_rx_peek(struct qcom_glink_pipe *glink_pipe,
void *data, unsigned int offset, size_t count)
{
struct glink_rpm_pipe *pipe = to_rpm_pipe(glink_pipe);
@ -179,6 +192,24 @@ static void glink_rpm_tx_write(struct qcom_glink_pipe *glink_pipe,
writel(head, pipe->head);
}
static void glink_rpm_tx_kick(struct qcom_glink_pipe *glink_pipe)
{
struct glink_rpm_pipe *pipe = to_rpm_pipe(glink_pipe);
struct glink_rpm *rpm = container_of(pipe, struct glink_rpm, tx_pipe);
mbox_send_message(rpm->mbox_chan, NULL);
mbox_client_txdone(rpm->mbox_chan, 0);
}
static irqreturn_t qcom_glink_rpm_intr(int irq, void *data)
{
struct glink_rpm *rpm = data;
qcom_glink_native_rx(rpm->glink);
return IRQ_HANDLED;
}
static int glink_rpm_parse_toc(struct device *dev,
void __iomem *msg_ram,
size_t msg_ram_size,
@ -257,8 +288,7 @@ err_inval:
static int glink_rpm_probe(struct platform_device *pdev)
{
struct qcom_glink *glink;
struct glink_rpm_pipe *rx_pipe;
struct glink_rpm_pipe *tx_pipe;
struct glink_rpm *rpm;
struct device_node *np;
void __iomem *msg_ram;
size_t msg_ram_size;
@ -266,9 +296,8 @@ static int glink_rpm_probe(struct platform_device *pdev)
struct resource r;
int ret;
rx_pipe = devm_kzalloc(&pdev->dev, sizeof(*rx_pipe), GFP_KERNEL);
tx_pipe = devm_kzalloc(&pdev->dev, sizeof(*tx_pipe), GFP_KERNEL);
if (!rx_pipe || !tx_pipe)
rpm = devm_kzalloc(&pdev->dev, sizeof(*rpm), GFP_KERNEL);
if (!rpm)
return -ENOMEM;
np = of_parse_phandle(dev->of_node, "qcom,rpm-msg-ram", 0);
@ -283,39 +312,66 @@ static int glink_rpm_probe(struct platform_device *pdev)
return -ENOMEM;
ret = glink_rpm_parse_toc(dev, msg_ram, msg_ram_size,
rx_pipe, tx_pipe);
&rpm->rx_pipe, &rpm->tx_pipe);
if (ret)
return ret;
rpm->irq = of_irq_get(dev->of_node, 0);
ret = devm_request_irq(dev, rpm->irq, qcom_glink_rpm_intr,
IRQF_NO_SUSPEND | IRQF_NO_AUTOEN,
"glink-rpm", rpm);
if (ret) {
dev_err(dev, "failed to request IRQ\n");
return ret;
}
rpm->mbox_client.dev = dev;
rpm->mbox_client.knows_txdone = true;
rpm->mbox_chan = mbox_request_channel(&rpm->mbox_client, 0);
if (IS_ERR(rpm->mbox_chan))
return dev_err_probe(dev, PTR_ERR(rpm->mbox_chan), "failed to acquire IPC channel\n");
/* Pipe specific accessors */
rx_pipe->native.avail = glink_rpm_rx_avail;
rx_pipe->native.peak = glink_rpm_rx_peak;
rx_pipe->native.advance = glink_rpm_rx_advance;
tx_pipe->native.avail = glink_rpm_tx_avail;
tx_pipe->native.write = glink_rpm_tx_write;
rpm->rx_pipe.native.avail = glink_rpm_rx_avail;
rpm->rx_pipe.native.peek = glink_rpm_rx_peek;
rpm->rx_pipe.native.advance = glink_rpm_rx_advance;
rpm->tx_pipe.native.avail = glink_rpm_tx_avail;
rpm->tx_pipe.native.write = glink_rpm_tx_write;
rpm->tx_pipe.native.kick = glink_rpm_tx_kick;
writel(0, tx_pipe->head);
writel(0, rx_pipe->tail);
writel(0, rpm->tx_pipe.head);
writel(0, rpm->rx_pipe.tail);
glink = qcom_glink_native_probe(&pdev->dev,
glink = qcom_glink_native_probe(dev,
0,
&rx_pipe->native,
&tx_pipe->native,
&rpm->rx_pipe.native,
&rpm->tx_pipe.native,
true);
if (IS_ERR(glink))
if (IS_ERR(glink)) {
mbox_free_channel(rpm->mbox_chan);
return PTR_ERR(glink);
}
platform_set_drvdata(pdev, glink);
rpm->glink = glink;
platform_set_drvdata(pdev, rpm);
enable_irq(rpm->irq);
return 0;
}
static int glink_rpm_remove(struct platform_device *pdev)
{
struct qcom_glink *glink = platform_get_drvdata(pdev);
struct glink_rpm *rpm = platform_get_drvdata(pdev);
struct qcom_glink *glink = rpm->glink;
disable_irq(rpm->irq);
qcom_glink_native_remove(glink);
mbox_free_channel(rpm->mbox_chan);
return 0;
}
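
With the changes above, the IRQ and the mailbox client now live entirely in the
RPM transport: glink_rpm_tx_kick() rings the doorbell via the mailbox and is
wired up as tx_pipe.native.kick, and the common code just invokes the kick op
after writing to the FIFO. That indirection is what allows transports that have
no mailbox controller at all. Below is a toy illustration of the split, with
invented names (demo_pipe, core_tx), purely to show the shape of the callback:

#include <stdio.h>

/* Hypothetical, stripped-down version of the pipe ops indirection: the
 * common code only knows about a pipe with a kick callback; how the remote
 * is actually signalled is a per-transport detail. */
struct demo_pipe {
    void (*kick)(struct demo_pipe *pipe);
};

/* Transport A: signal the remote through a mailbox doorbell */
static void mbox_kick(struct demo_pipe *pipe)
{
    (void)pipe;
    printf("ring doorbell via mailbox controller\n");
}

/* Transport B: signal the remote by poking an IPC register directly,
 * with no mailbox controller involved at all */
static void mmio_kick(struct demo_pipe *pipe)
{
    (void)pipe;
    printf("write IPC bit in an MMIO register\n");
}

/* "Core" side: transport-agnostic, mirrors qcom_glink_tx_kick() calling
 * glink->tx_pipe->kick() after the message has been written to the FIFO */
static void core_tx(struct demo_pipe *tx_pipe)
{
    /* ... message already written to the transmit FIFO ... */
    tx_pipe->kick(tx_pipe);
}

int main(void)
{
    struct demo_pipe rpm_style  = { .kick = mbox_kick };
    struct demo_pipe mmio_style = { .kick = mmio_kick };

    core_tx(&rpm_style);
    core_tx(&mmio_style);
    return 0;
}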

drivers/rpmsg/qcom_glink_smem.c

@ -7,8 +7,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <linux/rpmsg.h>
@ -33,6 +35,18 @@
#define SMEM_GLINK_NATIVE_XPRT_FIFO_0 479
#define SMEM_GLINK_NATIVE_XPRT_FIFO_1 480
struct qcom_glink_smem {
struct device dev;
int irq;
struct qcom_glink *glink;
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
u32 remote_pid;
};
struct glink_smem_pipe {
struct qcom_glink_pipe native;
@ -41,7 +55,7 @@ struct glink_smem_pipe {
void *fifo;
int remote_pid;
struct qcom_glink_smem *smem;
};
#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)
@ -49,13 +63,14 @@ struct glink_smem_pipe {
static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
{
struct glink_smem_pipe *pipe = to_smem_pipe(np);
struct qcom_glink_smem *smem = pipe->smem;
size_t len;
void *fifo;
u32 head;
u32 tail;
if (!pipe->fifo) {
fifo = qcom_smem_get(pipe->remote_pid,
fifo = qcom_smem_get(smem->remote_pid,
SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
if (IS_ERR(fifo)) {
pr_err("failed to acquire RX fifo handle: %ld\n",
@ -76,7 +91,7 @@ static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
return head - tail;
}
static void glink_smem_rx_peak(struct qcom_glink_pipe *np,
static void glink_smem_rx_peek(struct qcom_glink_pipe *np,
void *data, unsigned int offset, size_t count)
{
struct glink_smem_pipe *pipe = to_smem_pipe(np);
@ -177,16 +192,37 @@ static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe,
*pipe->head = cpu_to_le32(head);
}
static void qcom_glink_smem_release(struct device *dev)
static void glink_smem_tx_kick(struct qcom_glink_pipe *glink_pipe)
{
kfree(dev);
struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
struct qcom_glink_smem *smem = pipe->smem;
mbox_send_message(smem->mbox_chan, NULL);
mbox_client_txdone(smem->mbox_chan, 0);
}
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
static irqreturn_t qcom_glink_smem_intr(int irq, void *data)
{
struct qcom_glink_smem *smem = data;
qcom_glink_native_rx(smem->glink);
return IRQ_HANDLED;
}
static void qcom_glink_smem_release(struct device *dev)
{
struct qcom_glink_smem *smem = container_of(dev, struct qcom_glink_smem, dev);
kfree(smem);
}
struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
struct device_node *node)
{
struct glink_smem_pipe *rx_pipe;
struct glink_smem_pipe *tx_pipe;
struct qcom_glink_smem *smem;
struct qcom_glink *glink;
struct device *dev;
u32 remote_pid;
@ -194,10 +230,12 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
size_t size;
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
smem = kzalloc(sizeof(*smem), GFP_KERNEL);
if (!smem)
return ERR_PTR(-ENOMEM);
dev = &smem->dev;
dev->parent = parent;
dev->of_node = node;
dev->release = qcom_glink_smem_release;
@ -216,6 +254,8 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
goto err_put_dev;
}
smem->remote_pid = remote_pid;
rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
if (!rx_pipe || !tx_pipe) {
@ -264,14 +304,33 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
goto err_put_dev;
}
rx_pipe->native.avail = glink_smem_rx_avail;
rx_pipe->native.peak = glink_smem_rx_peak;
rx_pipe->native.advance = glink_smem_rx_advance;
rx_pipe->remote_pid = remote_pid;
smem->irq = of_irq_get(smem->dev.of_node, 0);
ret = devm_request_irq(&smem->dev, smem->irq, qcom_glink_smem_intr,
IRQF_NO_SUSPEND | IRQF_NO_AUTOEN,
"glink-smem", smem);
if (ret) {
dev_err(&smem->dev, "failed to request IRQ\n");
goto err_put_dev;
}
smem->mbox_client.dev = &smem->dev;
smem->mbox_client.knows_txdone = true;
smem->mbox_chan = mbox_request_channel(&smem->mbox_client, 0);
if (IS_ERR(smem->mbox_chan)) {
ret = dev_err_probe(&smem->dev, PTR_ERR(smem->mbox_chan),
"failed to acquire IPC channel\n");
goto err_put_dev;
}
rx_pipe->smem = smem;
rx_pipe->native.avail = glink_smem_rx_avail;
rx_pipe->native.peek = glink_smem_rx_peek;
rx_pipe->native.advance = glink_smem_rx_advance;
tx_pipe->smem = smem;
tx_pipe->native.avail = glink_smem_tx_avail;
tx_pipe->native.write = glink_smem_tx_write;
tx_pipe->remote_pid = remote_pid;
tx_pipe->native.kick = glink_smem_tx_kick;
*rx_pipe->tail = 0;
*tx_pipe->head = 0;
@ -282,10 +341,17 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
false);
if (IS_ERR(glink)) {
ret = PTR_ERR(glink);
goto err_put_dev;
goto err_free_mbox;
}
return glink;
smem->glink = glink;
enable_irq(smem->irq);
return smem;
err_free_mbox:
mbox_free_channel(smem->mbox_chan);
err_put_dev:
device_unregister(dev);
@ -294,10 +360,16 @@ err_put_dev:
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_register);
void qcom_glink_smem_unregister(struct qcom_glink *glink)
void qcom_glink_smem_unregister(struct qcom_glink_smem *smem)
{
struct qcom_glink *glink = smem->glink;
disable_irq(smem->irq);
qcom_glink_native_remove(glink);
qcom_glink_native_unregister(glink);
mbox_free_channel(smem->mbox_chan);
device_unregister(&smem->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister);
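
A lifetime detail worth noting in the smem conversion above: the struct device
is now embedded in struct qcom_glink_smem, so the release callback recovers the
wrapper with container_of() and frees the whole context, rather than kfree()ing
a standalone device as before. A minimal userspace model of that
embed-and-release idiom follows; demo_device, demo_ctx and demo_release are
invented for the example:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct device: the refcounted object handed back to the
 * release callback once the last reference is dropped. */
struct demo_device {
    void (*release)(struct demo_device *dev);
};

/* Driver context with the "device" embedded, like struct qcom_glink_smem */
struct demo_ctx {
    struct demo_device dev;
    int remote_pid;
};

/* Release callback: recover the wrapper from the embedded member and free
 * the whole allocation, mirroring qcom_glink_smem_release() above. */
static void demo_release(struct demo_device *dev)
{
    struct demo_ctx *ctx = container_of(dev, struct demo_ctx, dev);

    printf("releasing context for remote pid %d\n", ctx->remote_pid);
    free(ctx);
}

int main(void)
{
    struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

    if (!ctx)
        return 1;
    ctx->dev.release = demo_release;
    ctx->remote_pid = 5;

    /* the core would call this when the last reference goes away */
    ctx->dev.release(&ctx->dev);
    return 0;
}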

drivers/rpmsg/qcom_glink_ssr.c

@ -111,7 +111,7 @@ static int qcom_glink_ssr_notifier_call(struct notifier_block *nb,
msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP);
msg.seq_num = cpu_to_le32(ssr->seq_num);
msg.name_len = cpu_to_le32(strlen(ssr_name));
strlcpy(msg.name, ssr_name, sizeof(msg.name));
strscpy(msg.name, ssr_name, sizeof(msg.name));
ret = rpmsg_send(ssr->ept, &msg, sizeof(msg));
if (ret < 0)
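
The one-line change above is part of the tree-wide move away from strlcpy(): the
return value is unused here anyway (as the commit subject notes), and strscpy()
has the friendlier contract of returning the number of bytes copied, or -E2BIG
on truncation, without walking the entire source string just to compute a
strlen() for the return value. A userspace model of those semantics is sketched
below; demo_strscpy is an invented helper, not the kernel implementation:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Userspace model of the kernel semantics: copy at most size-1 bytes,
 * always NUL-terminate, return bytes copied or -E2BIG on truncation. */
static ssize_t demo_strscpy(char *dst, const char *src, size_t size)
{
    size_t i;

    if (size == 0)
        return -E2BIG;

    for (i = 0; i < size - 1 && src[i]; i++)
        dst[i] = src[i];
    dst[i] = '\0';

    return src[i] ? -E2BIG : (ssize_t)i;
}

int main(void)
{
    char buf[8];

    printf("%zd\n", demo_strscpy(buf, "short", sizeof(buf)));    /* 5 */
    printf("%zd\n", demo_strscpy(buf, "far-too-long-for-buf",
                                 sizeof(buf)));                  /* -E2BIG */
    return 0;
}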

drivers/rpmsg/rpmsg_char.c

@ -75,6 +75,7 @@ int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);
mutex_lock(&eptdev->ept_lock);
eptdev->rpdev = NULL;
if (eptdev->ept) {
/* The default endpoint is released by the rpmsg core */
if (!eptdev->default_ept)
@ -128,6 +129,11 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
return -EBUSY;
}
if (!eptdev->rpdev) {
mutex_unlock(&eptdev->ept_lock);
return -ENETRESET;
}
get_device(dev);
/*
@ -279,7 +285,9 @@ static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
if (!skb_queue_empty(&eptdev->queue))
mask |= EPOLLIN | EPOLLRDNORM;
mutex_lock(&eptdev->ept_lock);
mask |= rpmsg_poll(eptdev->ept, filp, wait);
mutex_unlock(&eptdev->ept_lock);
return mask;
}

drivers/rpmsg/rpmsg_ctrl.c

@ -194,10 +194,12 @@ static void rpmsg_ctrldev_remove(struct rpmsg_device *rpdev)
struct rpmsg_ctrldev *ctrldev = dev_get_drvdata(&rpdev->dev);
int ret;
mutex_lock(&ctrldev->ctrl_lock);
/* Destroy all endpoints */
ret = device_for_each_child(&ctrldev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
if (ret)
dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
mutex_unlock(&ctrldev->ctrl_lock);
cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
put_device(&ctrldev->dev);

include/linux/rpmsg/qcom_glink.h

@ -5,7 +5,7 @@
#include <linux/device.h>
struct qcom_glink;
struct qcom_glink_smem;
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK)
void qcom_glink_ssr_notify(const char *ssr_name);
@ -15,20 +15,20 @@ static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
struct device_node *node);
void qcom_glink_smem_unregister(struct qcom_glink *glink);
void qcom_glink_smem_unregister(struct qcom_glink_smem *glink);
#else
static inline struct qcom_glink *
static inline struct qcom_glink_smem *
qcom_glink_smem_register(struct device *parent,
struct device_node *node)
{
return NULL;
}
static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
static inline void qcom_glink_smem_unregister(struct qcom_glink_smem *glink) {}
#endif
#endif