diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig index d0322b41eca5..c1b34cdc2054 100644 --- a/drivers/rpmsg/Kconfig +++ b/drivers/rpmsg/Kconfig @@ -39,6 +39,28 @@ config RPMSG_QCOM_GLINK_SMEM which provides support for using the GLINK communication protocol over SMEM. +config RPMSG_QCOM_GLINK_SPSS + tristate "QTI SPSS Glink driver" + select RPMSG_QCOM_GLINK_NATIVE + depends on MAILBOX + depends on QCOM_SMEM + select QSEE_IPC_IRQ + help + Say y here to enable support for the GLINK SPSS communication driver, + which provides support for using the GLINK communication protocol + over SMEM. This protocol maps the smem and then shares the mapped + region with the remote proc by writing the smem descriptor location + and size into shared registers. + +config RPMSG_QCOM_GLINK_SPI + tristate "QTI SPI Glink driver" + help + Say y here to enable support for the GLINK SPI communication driver, + which provides support for using the GLINK communication protocol + over SPI. This transport performs marshaling of GLINK commands and + data to the appropriate SPI bus wire format and allows for GLINK + communication with remote subsystems that are external to the SoC. + config RPMSG_QCOM_SMD tristate "Qualcomm Shared Memory Driver (SMD)" depends on MAILBOX diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile index 9aa859502d27..1680763bb8b5 100644 --- a/drivers/rpmsg/Makefile +++ b/drivers/rpmsg/Makefile @@ -4,5 +4,7 @@ obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o +obj-$(CONFIG_RPMSG_QCOM_GLINK_SPSS) += qcom_glink_spss.o +obj-$(CONFIG_RPMSG_QCOM_GLINK_SPI) += qcom_glink_spi.o obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index e2ce4e638258..9ab4de92d630 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -18,11 +18,35 @@ #include #include #include +#include #include +#include #include "rpmsg_internal.h" #include "qcom_glink_native.h" +#define GLINK_LOG_PAGE_CNT 2 +#define GLINK_INFO(ctxt, x, ...) \ +do { \ + if (ctxt) \ + ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ +} while (0) + +#define CH_INFO(ch, x, ...) \ +do { \ + if (ch->glink && ch->glink->ilc) \ + ipc_log_string(ch->glink->ilc, "%s[%d:%d] %s: "x, ch->name, \ + ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \ +} while (0) + + +#define GLINK_ERR(ctxt, x, ...) 
\ +do { \ + pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \ + if (ctxt) \ + ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ +} while (0) + #define GLINK_NAME_SIZE 32 #define GLINK_VERSION_1 1 @@ -83,6 +107,8 @@ struct glink_core_rx_intent { * @rx_pipe: pipe object for receive FIFO * @tx_pipe: pipe object for transmit FIFO * @irq: IRQ for signaling incoming events + * @kworker: kworker to handle rx_done work + * @task: kthread running @kworker * @rx_work: worker for handling received control messages * @rx_lock: protects the @rx_queue * @rx_queue: queue of received control messages to be processed in @rx_work @@ -90,8 +116,10 @@ struct glink_core_rx_intent { * @idr_lock: synchronizes @lcids and @rcids modifications * @lcids: idr of all channels with a known local channel id * @rcids: idr of all channels with a known remote channel id + * @in_reset: reset status of this edge * @features: remote features * @intentless: flag to indicate that there is no intent + * @ilc: ipc logging context reference */ struct qcom_glink { struct device *dev; @@ -106,6 +134,9 @@ struct qcom_glink { int irq; + struct kthread_worker kworker; + struct task_struct *task; + struct work_struct rx_work; spinlock_t rx_lock; struct list_head rx_queue; @@ -115,9 +146,13 @@ struct qcom_glink { spinlock_t idr_lock; struct idr lcids; struct idr rcids; + + atomic_t in_reset; unsigned long features; bool intentless; + + void *ilc; }; enum { @@ -149,7 +184,8 @@ enum { * @open_req: completed once open-request has been received * @intent_req_lock: Synchronises multiple intent requests * @intent_req_result: Result of intent request - * @intent_req_comp: Completion for intent_req signalling + * @intent_req_comp: Status of intent request completion + * @intent_req_event: Waitqueue for @intent_req_comp */ struct glink_channel { struct rpmsg_endpoint ept; @@ -168,19 +204,23 @@ struct glink_channel { spinlock_t intent_lock; struct idr liids; struct idr riids; - struct work_struct intent_work; + struct kthread_work intent_work; struct list_head done_intents; struct glink_core_rx_intent *buf; int buf_offset; int buf_size; + unsigned int lsigs; + unsigned int rsigs; + struct completion open_ack; struct completion open_req; struct mutex intent_req_lock; bool intent_req_result; - struct completion intent_req_comp; + atomic_t intent_req_comp; + wait_queue_head_t intent_req_event; }; #define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept) @@ -201,10 +241,11 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops; #define RPM_CMD_TX_DATA_CONT 12 #define RPM_CMD_READ_NOTIF 13 #define RPM_CMD_RX_DONE_W_REUSE 14 +#define RPM_CMD_SIGNALS 15 #define GLINK_FEATURE_INTENTLESS BIT(1) -static void qcom_glink_rx_done_work(struct work_struct *work); +static void qcom_glink_rx_done_work(struct kthread_work *work); static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, const char *name) @@ -225,10 +266,11 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, init_completion(&channel->open_req); init_completion(&channel->open_ack); - init_completion(&channel->intent_req_comp); + atomic_set(&channel->intent_req_comp, 0); + init_waitqueue_head(&channel->intent_req_event); INIT_LIST_HEAD(&channel->done_intents); - INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work); + kthread_init_work(&channel->intent_work, qcom_glink_rx_done_work); idr_init(&channel->liids); idr_init(&channel->riids); @@ -243,6 +285,9 @@ static void qcom_glink_channel_release(struct 
kref *ref) refcount); unsigned long flags; + CH_INFO(channel, "\n"); + wake_up(&channel->intent_req_event); + spin_lock_irqsave(&channel->intent_lock, flags); idr_destroy(&channel->liids); idr_destroy(&channel->riids); @@ -280,6 +325,15 @@ static void qcom_glink_tx_write(struct qcom_glink *glink, glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen); } +static void qcom_glink_pipe_reset(struct qcom_glink *glink) +{ + if (glink->tx_pipe->reset) + glink->tx_pipe->reset(glink->tx_pipe); + + if (glink->rx_pipe->reset) + glink->rx_pipe->reset(glink->rx_pipe); +} + static int qcom_glink_tx(struct qcom_glink *glink, const void *hdr, size_t hlen, const void *data, size_t dlen, bool wait) @@ -300,6 +354,11 @@ static int qcom_glink_tx(struct qcom_glink *glink, goto out; } + if (atomic_read(&glink->in_reset)) { + ret = -ECONNRESET; + goto out; + } + /* Wait without holding the tx_lock */ spin_unlock_irqrestore(&glink->tx_lock, flags); @@ -327,6 +386,7 @@ static int qcom_glink_send_version(struct qcom_glink *glink) msg.param1 = cpu_to_le16(GLINK_VERSION_1); msg.param2 = cpu_to_le32(glink->features); + GLINK_INFO(glink->ilc, "vers:%d features:%d\n", msg.param1, msg.param2); return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); } @@ -338,6 +398,7 @@ static void qcom_glink_send_version_ack(struct qcom_glink *glink) msg.param1 = cpu_to_le16(GLINK_VERSION_1); msg.param2 = cpu_to_le32(glink->features); + GLINK_INFO(glink->ilc, "vers:%d features:%d\n", msg.param1, msg.param2); qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); } @@ -350,6 +411,7 @@ static void qcom_glink_send_open_ack(struct qcom_glink *glink, msg.param1 = cpu_to_le16(channel->rcid); msg.param2 = cpu_to_le32(0); + CH_INFO(channel, "\n"); qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); } @@ -368,7 +430,9 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink, } channel->intent_req_result = granted; - complete(&channel->intent_req_comp); + atomic_inc(&channel->intent_req_comp); + wake_up(&channel->intent_req_event); + CH_INFO(channel, "\n"); } /** @@ -404,11 +468,12 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink, return ret; channel->lcid = ret; + CH_INFO(channel, "\n"); req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN); req.msg.param1 = cpu_to_le16(channel->lcid); req.msg.param2 = cpu_to_le32(name_len); - strcpy(req.name, channel->name); + strlcpy(req.name, channel->name, GLINK_NAME_SIZE); ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true); if (ret) @@ -417,6 +482,8 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink, return 0; remove_idr: + CH_INFO(channel, "remote_idr\n"); + spin_lock_irqsave(&glink->idr_lock, flags); idr_remove(&glink->lcids, channel->lcid); channel->lcid = 0; @@ -434,6 +501,7 @@ static void qcom_glink_send_close_req(struct qcom_glink *glink, req.param1 = cpu_to_le16(channel->lcid); req.param2 = 0; + CH_INFO(channel, "\n"); qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); } @@ -446,42 +514,58 @@ static void qcom_glink_send_close_ack(struct qcom_glink *glink, req.param1 = cpu_to_le16(rcid); req.param2 = 0; + GLINK_INFO(glink->ilc, "rcid:%d\n", rcid); qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); } -static void qcom_glink_rx_done_work(struct work_struct *work) + +static int __qcom_glink_rx_done(struct qcom_glink *glink, + struct glink_channel *channel, + struct glink_core_rx_intent *intent, + bool wait) { - struct glink_channel *channel = container_of(work, struct glink_channel, - intent_work); - struct qcom_glink *glink = 
channel->glink; - struct glink_core_rx_intent *intent, *tmp; struct { u16 id; u16 lcid; u32 liid; } __packed cmd; - unsigned int cid = channel->lcid; - unsigned int iid; - bool reuse; + unsigned int cid = channel->lcid; + unsigned int iid = intent->id; + bool reuse = intent->reuse; + int ret; + + cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE; + cmd.lcid = cid; + cmd.liid = iid; + + ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, wait); + if (ret) + return ret; + + if (!reuse) { + kfree(intent->data); + kfree(intent); + } + + CH_INFO(channel, "reuse:%d liid:%d", reuse, iid); + return 0; +} + +static void qcom_glink_rx_done_work(struct kthread_work *work) +{ + struct glink_channel *channel = container_of(work, struct glink_channel, + intent_work); + struct qcom_glink *glink = channel->glink; + struct glink_core_rx_intent *intent, *tmp; unsigned long flags; spin_lock_irqsave(&channel->intent_lock, flags); list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) { list_del(&intent->node); spin_unlock_irqrestore(&channel->intent_lock, flags); - iid = intent->id; - reuse = intent->reuse; - cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE; - cmd.lcid = cid; - cmd.liid = iid; + __qcom_glink_rx_done(glink, channel, intent, true); - qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); - if (!reuse) { - kfree(intent->data); - kfree(intent); - } spin_lock_irqsave(&channel->intent_lock, flags); } spin_unlock_irqrestore(&channel->intent_lock, flags); @@ -491,6 +575,8 @@ static void qcom_glink_rx_done(struct qcom_glink *glink, struct glink_channel *channel, struct glink_core_rx_intent *intent) { + int ret = -EAGAIN; + /* We don't send RX_DONE to intentless systems */ if (glink->intentless) { kfree(intent->data); @@ -507,10 +593,14 @@ static void qcom_glink_rx_done(struct qcom_glink *glink, /* Schedule the sending of a rx_done indication */ spin_lock(&channel->intent_lock); - list_add_tail(&intent->node, &channel->done_intents); - spin_unlock(&channel->intent_lock); + if (list_empty(&channel->done_intents)) + ret = __qcom_glink_rx_done(glink, channel, intent, false); - schedule_work(&channel->intent_work); + if (ret) { + list_add_tail(&intent->node, &channel->done_intents); + kthread_queue_work(&glink->kworker, &channel->intent_work); + } + spin_unlock(&channel->intent_lock); } /** @@ -527,6 +617,8 @@ static void qcom_glink_receive_version(struct qcom_glink *glink, u32 version, u32 features) { + GLINK_INFO(glink->ilc, "vers:%d features:%d\n", version, features); + switch (version) { case 0: break; @@ -554,6 +646,8 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink, u32 version, u32 features) { + GLINK_INFO(glink->ilc, "vers:%d features:%d\n", version, features); + switch (version) { case 0: /* Version negotiation failed */ @@ -589,6 +683,7 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink, msg.param1 = cpu_to_le16(channel->lcid); msg.param2 = cpu_to_le32(granted); + CH_INFO(channel, "\n"); qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); return 0; @@ -622,6 +717,9 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink, cmd.size = cpu_to_le32(intent->size); cmd.liid = cpu_to_le32(intent->id); + CH_INFO(channel, "count:%d size:%d liid:%d\n", 1, + intent->size, intent->id); + qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); return 0; @@ -692,6 +790,7 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink, } intent->in_use = false; + CH_INFO(channel, "reuse:%d iid:%d\n", reuse, intent->id); if (!reuse) {
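+ /* single-use intent: the remote has consumed it, so drop our local bookkeeping for this riid */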
idr_remove(&channel->riids, intent->id); @@ -792,9 +891,6 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) return -EAGAIN; } - if (WARN(chunk_size % 4, "Incoming data must be word aligned\n")) - return -EINVAL; - rcid = le16_to_cpu(hdr.msg.param1); spin_lock_irqsave(&glink->idr_lock, flags); channel = idr_find(&glink->rcids, rcid); @@ -805,6 +901,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) /* Drop the message */ goto advance_rx; } + CH_INFO(channel, "chunk_size:%d left_size:%d\n", chunk_size, left_size); if (glink->intentless) { /* Might have an ongoing, fragmented, message to append */ @@ -927,6 +1024,8 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink, intent->id = le32_to_cpu(msg->intents[i].iid); intent->size = le32_to_cpu(msg->intents[i].size); + CH_INFO(channel, "riid:%d size:%d\n", intent->id, intent->size); + spin_lock_irqsave(&channel->intent_lock, flags); ret = idr_alloc(&channel->riids, intent, intent->id, intent->id + 1, GFP_ATOMIC); @@ -934,6 +1033,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink, if (ret < 0) dev_err(glink->dev, "failed to store remote intent\n"); + } kfree(msg); @@ -952,7 +1052,56 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid) return -EINVAL; } - complete(&channel->open_ack); + CH_INFO(channel, "\n"); + complete_all(&channel->open_ack); + + return 0; +} + +/** + * qcom_glink_send_signals() - convert a signal cmd to wire format and transmit + * @glink: The transport to transmit on. + * @channel: The glink channel + * @sigs: The signals to encode. + * + * Return: 0 on success or standard Linux error code. + */ +static int qcom_glink_send_signals(struct qcom_glink *glink, + struct glink_channel *channel, + u32 sigs) +{ + struct glink_msg msg; + + msg.cmd = cpu_to_le16(RPM_CMD_SIGNALS); + msg.param1 = cpu_to_le16(channel->lcid); + msg.param2 = cpu_to_le32(sigs); + + GLINK_INFO(glink->ilc, "sigs:%d\n", sigs); + return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); +} + +static int qcom_glink_handle_signals(struct qcom_glink *glink, + unsigned int rcid, unsigned int signals) +{ + struct glink_channel *channel; + unsigned long flags; + u32 old; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, rcid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_err(glink->dev, "signal for non-existing channel\n"); + return -EINVAL; + } + + old = channel->rsigs; + channel->rsigs = signals; + + if (channel->ept.sig_cb) + channel->ept.sig_cb(channel->ept.rpdev, old, channel->rsigs); + + CH_INFO(channel, "old:%d new:%d\n", old, channel->rsigs); return 0; } @@ -1018,6 +1167,10 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data) qcom_glink_handle_intent_req_ack(glink, param1, param2); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; + case RPM_CMD_SIGNALS: + qcom_glink_handle_signals(glink, param1, param2); + qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); + break; default: dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd); ret = -EINVAL; @@ -1043,6 +1196,7 @@ static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink, if (IS_ERR(channel)) return ERR_CAST(channel); + CH_INFO(channel, "\n"); ret = qcom_glink_send_open_req(glink, channel); if (ret) goto release_channel; @@ -1060,12 +1214,15 @@ static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink, return channel; err_timeout: + CH_INFO(channel, "err_timeout\n"); + /* 
qcom_glink_send_open_req() did register the channel in lcids*/ spin_lock_irqsave(&glink->idr_lock, flags); idr_remove(&glink->lcids, channel->lcid); spin_unlock_irqrestore(&glink->idr_lock, flags); release_channel: + CH_INFO(channel, "release_channel\n"); /* Release qcom_glink_send_open_req() reference */ kref_put(&channel->refcount, qcom_glink_channel_release); /* Release qcom_glink_alloc_channel() reference */ @@ -1080,6 +1237,8 @@ static int qcom_glink_create_remote(struct qcom_glink *glink, { int ret; + CH_INFO(channel, "\n"); + qcom_glink_send_open_ack(glink, channel); ret = qcom_glink_send_open_req(glink, channel); @@ -1095,15 +1254,16 @@ static int qcom_glink_create_remote(struct qcom_glink *glink, return 0; close_link: + CH_INFO(channel, "close_link %d\n", ret); + /* * Send a close request to "undo" our open-ack. The close-ack will - * release the last reference. + * release the qcom_glink_send_open_req() reference; the last reference + * will be released on receiving the remote close or on transport + * unregister via qcom_glink_native_remove(). */ qcom_glink_send_close_req(glink, channel); - /* Release qcom_glink_send_open_req() reference */ - kref_put(&channel->refcount, qcom_glink_channel_release); - return ret; } @@ -1161,7 +1321,7 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev) __be32 *val = defaults; int size; - if (glink->intentless) + if (glink->intentless || !completion_done(&channel->open_ack)) return 0; prop = of_find_property(np, "qcom,intents", NULL); @@ -1216,20 +1376,27 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, mutex_lock(&channel->intent_req_lock); - reinit_completion(&channel->intent_req_comp); + atomic_set(&channel->intent_req_comp, 0); cmd.id = RPM_CMD_RX_INTENT_REQ; cmd.cid = channel->lcid; cmd.size = size; + CH_INFO(channel, "size:%d\n", size); + ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); if (ret) goto unlock; - ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); + ret = wait_event_timeout(channel->intent_req_event, + atomic_read(&channel->intent_req_comp) || + atomic_read(&glink->in_reset), 10 * HZ); if (!ret) { dev_err(glink->dev, "intent request timed out\n"); ret = -ETIMEDOUT; + } else if (atomic_read(&glink->in_reset)) { + CH_INFO(channel, "ssr detected\n"); + ret = -ECONNRESET; } else { ret = channel->intent_req_result ? 0 : -ECANCELED; } @@ -1315,6 +1482,27 @@ static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len) return __qcom_glink_send(channel, data, len, false); } +static int qcom_glink_get_sigs(struct rpmsg_endpoint *ept, + u32 *lsigs, u32 *rsigs) +{ + struct glink_channel *channel = to_glink_channel(ept); + + *lsigs = channel->lsigs; + *rsigs = channel->rsigs; + + return 0; +} + +static int qcom_glink_set_sigs(struct rpmsg_endpoint *ept, u32 sigs) +{ + struct glink_channel *channel = to_glink_channel(ept); + struct qcom_glink *glink = channel->glink; + + channel->lsigs = sigs; + + return qcom_glink_send_signals(glink, channel, sigs); +} + /* * Finds the device_node for the glink child interested in this channel.
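+ * Matches the child node whose "qcom,glink-channels" property (as in the + * upstream G-Link devicetree binding) names this channel.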
*/ @@ -1348,6 +1536,8 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = { .destroy_ept = qcom_glink_destroy_ept, .send = qcom_glink_send, .trysend = qcom_glink_trysend, + .get_sigs = qcom_glink_get_sigs, + .set_sigs = qcom_glink_set_sigs, }; static void qcom_glink_rpdev_release(struct device *dev) @@ -1396,7 +1586,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, channel->rcid = ret; spin_unlock_irqrestore(&glink->idr_lock, flags); - complete(&channel->open_req); + complete_all(&channel->open_req); if (create_device) { rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL); @@ -1406,7 +1596,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, } rpdev->ept = &channel->ept; - strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE); + strlcpy(rpdev->id.name, name, RPMSG_NAME_SIZE); rpdev->src = RPMSG_ADDR_ANY; rpdev->dst = RPMSG_ADDR_ANY; rpdev->ops = &glink_device_ops; @@ -1422,17 +1612,21 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, channel->rpdev = rpdev; } + CH_INFO(channel, "\n"); return 0; free_rpdev: + CH_INFO(channel, "free_rpdev\n"); kfree(rpdev); rcid_remove: + CH_INFO(channel, "rcid_remove\n"); spin_lock_irqsave(&glink->idr_lock, flags); idr_remove(&glink->rcids, channel->rcid); channel->rcid = 0; spin_unlock_irqrestore(&glink->idr_lock, flags); free_channel: + CH_INFO(channel, "free_channel\n"); /* Release the reference, iff we took it */ if (create_device) kref_put(&channel->refcount, qcom_glink_channel_release); @@ -1451,12 +1645,13 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid) spin_unlock_irqrestore(&glink->idr_lock, flags); if (WARN(!channel, "close request on unknown channel\n")) return; + CH_INFO(channel, "\n"); /* cancel pending rx_done work */ - cancel_work_sync(&channel->intent_work); + kthread_cancel_work_sync(&channel->intent_work); if (channel->rpdev) { - strncpy(chinfo.name, channel->name, sizeof(chinfo.name)); + strlcpy(chinfo.name, channel->name, sizeof(chinfo.name)); chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = RPMSG_ADDR_ANY; @@ -1484,6 +1679,7 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid) spin_unlock_irqrestore(&glink->idr_lock, flags); return; } + CH_INFO(channel, "\n"); idr_remove(&glink->lcids, channel->lcid); channel->lcid = 0; @@ -1547,21 +1743,93 @@ static void qcom_glink_work(struct work_struct *work) } } +static ssize_t rpmsg_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmsg_device *rpdev = to_rpmsg_device(dev); + struct glink_channel *channel = to_glink_channel(rpdev->ept); + + return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", channel->glink->name); +} +static DEVICE_ATTR_RO(rpmsg_name); + +static struct attribute *qcom_glink_attrs[] = { + &dev_attr_rpmsg_name.attr, + NULL +}; +ATTRIBUTE_GROUPS(qcom_glink); + +static void qcom_glink_device_release(struct device *dev) +{ + struct rpmsg_device *rpdev = to_rpmsg_device(dev); + struct glink_channel *channel = to_glink_channel(rpdev->ept); + + /* Release qcom_glink_alloc_channel() reference */ + kref_put(&channel->refcount, qcom_glink_channel_release); + kfree(rpdev); +} + +static int qcom_glink_create_chrdev(struct qcom_glink *glink) +{ + struct rpmsg_device *rpdev; + struct glink_channel *channel; + + rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL); + if (!rpdev) + return -ENOMEM; + + channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev"); + if (IS_ERR(channel)) { + kfree(rpdev); + return PTR_ERR(channel); + } + 
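/* Note: the "rpmsg_chrdev" channel is never opened towards the remote; it only backs the rpmsg character device for this edge, so no open request is sent for it. */ +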
channel->rpdev = rpdev; + + rpdev->ept = &channel->ept; + rpdev->ops = &glink_device_ops; + rpdev->dev.parent = glink->dev; + rpdev->dev.release = qcom_glink_device_release; + + return rpmsg_chrdev_register_device(rpdev); +} + +static void qcom_glink_set_affinity(struct qcom_glink *glink, u32 *arr, + size_t size) +{ + struct cpumask cpumask; + int i; + + cpumask_clear(&cpumask); + for (i = 0; i < size; i++) { + if (arr[i] < num_possible_cpus()) + cpumask_set_cpu(arr[i], &cpumask); + } + if (irq_set_affinity(glink->irq, &cpumask)) + dev_err(glink->dev, "failed to set irq affinity\n"); + if (sched_setaffinity(glink->task->pid, &cpumask)) + dev_err(glink->dev, "failed to set task affinity\n"); +} + + struct qcom_glink *qcom_glink_native_probe(struct device *dev, unsigned long features, struct qcom_glink_pipe *rx, struct qcom_glink_pipe *tx, bool intentless) { + struct qcom_glink *glink; + u32 *arr; + int size; int irq; int ret; - struct qcom_glink *glink; glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL); if (!glink) return ERR_PTR(-ENOMEM); glink->dev = dev; + glink->dev->groups = qcom_glink_groups; + glink->tx_pipe = tx; glink->rx_pipe = rx; @@ -1576,6 +1844,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, spin_lock_init(&glink->idr_lock); idr_init(&glink->lcids); idr_init(&glink->rcids); + atomic_set(&glink->in_reset, 0); ret = of_property_read_string(dev->of_node, "label", &glink->name); if (ret < 0) @@ -1590,6 +1859,15 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, return ERR_CAST(glink->mbox_chan); } + kthread_init_worker(&glink->kworker); + glink->task = kthread_run(kthread_worker_fn, &glink->kworker, + "glink_%s", glink->name); + if (IS_ERR(glink->task)) { + dev_err(dev, "failed to spawn intent kthread %ld\n", + PTR_ERR(glink->task)); + return ERR_CAST(glink->task); + } + irq = of_irq_get(dev->of_node, 0); ret = devm_request_irq(dev, irq, qcom_glink_native_intr, @@ -1602,9 +1880,29 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, glink->irq = irq; + size = of_property_count_u32_elems(dev->of_node, "cpu-affinity"); + if (size > 0) { + arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL); + if (!arr) + return ERR_PTR(-ENOMEM); + ret = of_property_read_u32_array(dev->of_node, "cpu-affinity", + arr, size); + if (!ret) + qcom_glink_set_affinity(glink, arr, size); + kfree(arr); + } + ret = qcom_glink_send_version(glink); - if (ret) + if (ret) { + dev_err(dev, "failed to send version %d\n", ret); return ERR_PTR(ret); + } + + ret = qcom_glink_create_chrdev(glink); + if (ret) + dev_err(glink->dev, "failed to register chrdev\n"); + + glink->ilc = ipc_log_context_create(GLINK_LOG_PAGE_CNT, glink->name, 0); return glink; } @@ -1624,21 +1922,51 @@ void qcom_glink_native_remove(struct qcom_glink *glink) int ret; unsigned long flags; + atomic_inc(&glink->in_reset); disable_irq(glink->irq); cancel_work_sync(&glink->rx_work); + /* Signal all threads to cancel tx */ + spin_lock_irqsave(&glink->idr_lock, flags); + idr_for_each_entry(&glink->lcids, channel, cid) { + wake_up(&channel->intent_req_event); + } + spin_unlock_irqrestore(&glink->idr_lock, flags); + ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device); if (ret) dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret); spin_lock_irqsave(&glink->idr_lock, flags); + idr_for_each_entry(&glink->lcids, channel, cid) { + spin_unlock_irqrestore(&glink->idr_lock, flags); + /* cancel pending rx_done work for each channel */ + kthread_cancel_work_sync(&channel->intent_work);
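+ /* kthread_cancel_work_sync() may sleep, so the idr_lock is dropped around it and re-taken before the walk continues */ +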
spin_lock_irqsave(&glink->idr_lock, flags); + } + spin_unlock_irqrestore(&glink->idr_lock, flags); + + spin_lock_irqsave(&glink->idr_lock, flags); + /* Release any defunct local channels, waiting for close-ack */ - idr_for_each_entry(&glink->lcids, channel, cid) + idr_for_each_entry(&glink->lcids, channel, cid) { kref_put(&channel->refcount, qcom_glink_channel_release); + idr_remove(&glink->lcids, cid); + } + + /* Release any defunct local channels, waiting for close-req */ + idr_for_each_entry(&glink->rcids, channel, cid) { + kref_put(&channel->refcount, qcom_glink_channel_release); + idr_remove(&glink->rcids, cid); + } idr_destroy(&glink->lcids); idr_destroy(&glink->rcids); spin_unlock_irqrestore(&glink->idr_lock, flags); + + kthread_flush_worker(&glink->kworker); + kthread_stop(glink->task); + qcom_glink_pipe_reset(glink); mbox_free_channel(glink->mbox_chan); } EXPORT_SYMBOL_GPL(qcom_glink_native_remove); diff --git a/drivers/rpmsg/qcom_glink_native.h b/drivers/rpmsg/qcom_glink_native.h index 624184fc458e..51c8e0d2c3f6 100644 --- a/drivers/rpmsg/qcom_glink_native.h +++ b/drivers/rpmsg/qcom_glink_native.h @@ -22,6 +22,8 @@ struct qcom_glink_pipe { void (*write)(struct qcom_glink_pipe *glink_pipe, const void *hdr, size_t hlen, const void *data, size_t dlen); + + void (*reset)(struct qcom_glink_pipe *glink_pipe); }; struct qcom_glink; diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c index 2b5cf2790954..47c098bfe6de 100644 --- a/drivers/rpmsg/qcom_glink_smem.c +++ b/drivers/rpmsg/qcom_glink_smem.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016, Linaro Ltd + * Copyright (c) 2018, The Linux Foundation, All rights reserved. */ #include @@ -90,13 +91,11 @@ static void glink_smem_rx_peak(struct qcom_glink_pipe *np, len = min_t(size_t, count, pipe->native.length - tail); if (len) { - __ioread32_copy(data, pipe->fifo + tail, - len / sizeof(u32)); + memcpy_fromio(data, pipe->fifo + tail, len); } if (len != count) { - __ioread32_copy(data + len, pipe->fifo, - (count - len) / sizeof(u32)); + memcpy_fromio(data + len, pipe->fifo, (count - len)); } } @@ -109,7 +108,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np, tail = le32_to_cpu(*pipe->tail); tail += count; - if (tail > pipe->native.length) + if (tail >= pipe->native.length) tail -= pipe->native.length; *pipe->tail = cpu_to_le32(tail); diff --git a/drivers/rpmsg/qcom_glink_spi.c b/drivers/rpmsg/qcom_glink_spi.c new file mode 100644 index 000000000000..53e19c7a0882 --- /dev/null +++ b/drivers/rpmsg/qcom_glink_spi.c @@ -0,0 +1,2501 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "rpmsg_internal.h" +#include "qcom_glink_native.h" + +#define GLINK_LOG_PAGE_CNT 2 +#define GLINK_INFO(ctxt, x, ...) \ +do { \ + if (ctxt->ilc) \ + ipc_log_string(ctxt->ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \ +} while (0) + +#define CH_INFO(ch, x, ...) \ +do { \ + if (ch->glink && ch->glink->ilc) \ + ipc_log_string(ch->glink->ilc, "%s[%d:%d] %s: "x, ch->name, \ + ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \ +} while (0) + + +#define GLINK_ERR(ctxt, x, ...) 
\ +do { \ + pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \ + if (ctxt->ilc) \ + ipc_log_string(ctxt->ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \ +} while (0) + +#define SPI_ALIGNMENT 16 +#define FIFO_FULL_RESERVE 8 +#define TX_BLOCKED_CMD_RESERVE 16 +#define DEFAULT_FIFO_SIZE 1024 +#define SHORT_SIZE 16 +#define XPRT_ALIGNMENT 4 + +#define MAX_INACTIVE_CYCLES 50 +#define POLL_INTERVAL_US 500 + +#define ACTIVE_TX BIT(0) +#define ACTIVE_RX BIT(1) + +#define ID_MASK 0xFFFFFF + +#define GLINK_NAME_SIZE 32 +#define GLINK_VERSION_1 1 + +#define SPI_GLINK_CID_MIN 1 +#define SPI_GLINK_CID_MAX 65536 + +struct glink_msg { + __le16 cmd; + __le16 param1; + __le32 param2; + __le32 param3; + __le32 param4; + u8 data[]; +} __packed; + +/** + * struct glink_defer_cmd - deferred incoming control message + * @node: list node + * @msg: message header + * @data: payload of the message + * + * Copy of a received control message, to be added to @rx_queue and processed + * by @rx_work of @glink_spi. + */ +struct glink_defer_cmd { + struct list_head node; + + struct glink_msg msg; + u8 data[]; +}; + +/** + * struct glink_core_rx_intent - RX intent + * + * @data: pointer to the data (may be NULL for zero-copy) + * @id: remote or local intent ID + * @size: size of the original intent (do not modify) + * @addr: addr to read/write the data from + * @reuse: To mark if the intent can be reused after first use + * @in_use: To mark if intent is already in use for the channel + * @offset: next write offset (initially 0) + */ +struct glink_core_rx_intent { + void *data; + u32 id; + size_t size; + u32 addr; + bool reuse; + bool in_use; + u32 offset; + + struct list_head node; +}; + +/** + * struct glink_spi_pipe - FIFO access descriptor for one direction + * @fifo_base: Base address of the FIFO in the remote's memory. + * @length: Length of the FIFO in bytes. + * @tail_addr: Address of the FIFO Read Index Register. + * @head_addr: Address of the FIFO Write Index Register. + * @local_addr: Cached value of the index this side owns (tail for the RX + * pipe, head for the TX pipe). + */ +struct glink_spi_pipe { + u32 fifo_base; + u32 length; + + u32 tail_addr; + u32 head_addr; + + u32 local_addr; +}; + +/** + * struct glink_cmpnt - Component to cache spi component and its operations + * @master_dev: Device structure corresponding to spi device. + * @master_ops: Operations supported by the spi device.
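+ * + * spi_resume() votes through @master_ops->resume so the SPI master is awake + * before any FIFO access (assumption: @master_dev and @master_ops are filled + * in by the WDSP component-manager glue outside this section).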
+ */ +struct glink_cmpnt { + struct device *master_dev; + struct wdsp_mgr_ops *master_ops; +}; + +/** + * struct glink_spi - driver context, relates to one remote subsystem + * @dev: reference to the associated struct device + * @name: name of this edge + * @rx_pipe: pipe object for receive FIFO + * @tx_pipe: pipe object for transmit FIFO + * @rx_work: worker for handling received control messages + * @rx_worker: worker struct for handling received control messages + * @rx_task: task that runs the rx_worker + * @rx_lock: protects the @rx_queue + * @rx_queue: queue of received control messages to be processed in @rx_work + * @tx_lock: synchronizes operations on the tx fifo + * @idr_lock: synchronizes @lcids and @rcids modifications + * @lcids: idr of all channels with a known local channel id + * @rcids: idr of all channels with a known remote channel id + * @spi_ops: spi ops for sending data to the remote + * @cmpnt: component to be registered with the wdsp component manager + * @in_reset indicates that remote processor is in reset + * @ilc: ipc logging context reference + */ +struct glink_spi { + struct device dev; + + const char *name; + + struct glink_spi_pipe rx_pipe; + struct glink_spi_pipe tx_pipe; + + struct kthread_work rx_work; + struct kthread_worker rx_worker; + struct task_struct *rx_task; + + spinlock_t rx_lock; + struct list_head rx_queue; + struct work_struct rx_defer_work; + + struct mutex tx_lock; + + spinlock_t idr_lock; + struct idr lcids; + struct idr rcids; + u32 features; + + bool intentless; + + struct wcd_spi_ops spi_ops; + struct glink_cmpnt cmpnt; + atomic_t activity_cnt; + atomic_t in_reset; + + void *ilc; +}; + +enum { + GLINK_STATE_CLOSED, + GLINK_STATE_OPENING, + GLINK_STATE_OPEN, + GLINK_STATE_CLOSING, +}; + +/** + * struct glink_channel - internal representation of a channel + * @rpdev: rpdev reference, only used for primary endpoints + * @ept: rpmsg endpoint this channel is associated with + * @glink: glink_spi context handle + * @refcount: refcount for the channel object + * @recv_lock: guard for @ept.cb + * @name: unique channel name/identifier + * @lcid: channel id, in local space + * @rcid: channel id, in remote space + * @intent_lock: lock for protection of @liids, @riids + * @liids: idr of all local intents + * @riids: idr of all remote intents + * @intent_work: worker responsible for transmitting rx_done packets + * @done_intents: list of intents that needs to be announced rx_done + * @buf: receive buffer, for gathering fragments + * @buf_offset: write offset in @buf + * @buf_size: size of current @buf + * @open_ack: completed once remote has acked the open-request + * @open_req: completed once open-request has been received + * @intent_req_lock: Synchronises multiple intent requests + * @intent_req_result: Result of intent request + * @intent_req_comp: Completion for intent_req signalling + */ +struct glink_channel { + struct rpmsg_endpoint ept; + + struct rpmsg_device *rpdev; + struct glink_spi *glink; + + struct kref refcount; + + spinlock_t recv_lock; + + char *name; + unsigned int lcid; + unsigned int rcid; + + spinlock_t intent_lock; + struct idr liids; + struct idr riids; + struct work_struct intent_work; + struct list_head done_intents; + + struct glink_core_rx_intent *buf; + int buf_offset; + int buf_size; + + unsigned int lsigs; + unsigned int rsigs; + + struct completion open_ack; + struct completion open_req; + + struct mutex intent_req_lock; + bool intent_req_result; + struct completion intent_req_comp; +}; + +#define 
to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept) + +static const struct rpmsg_endpoint_ops glink_endpoint_ops; + +#define SPI_CMD_VERSION 0 +#define SPI_CMD_VERSION_ACK 1 +#define SPI_CMD_OPEN 2 +#define SPI_CMD_CLOSE 3 +#define SPI_CMD_OPEN_ACK 4 +#define SPI_CMD_CLOSE_ACK 5 +#define SPI_CMD_INTENT 6 +#define SPI_CMD_RX_DONE 7 +#define SPI_CMD_RX_DONE_W_REUSE 8 +#define SPI_CMD_RX_INTENT_REQ 9 +#define SPI_CMD_RX_INTENT_REQ_ACK 10 +#define SPI_CMD_TX_DATA 11 +#define SPI_CMD_TX_DATA_CONT 12 +#define SPI_CMD_READ_NOTIF 13 +#define SPI_CMD_SIGNALS 14 +#define SPI_CMD_TX_SHORT_DATA 17 + +static void glink_spi_rx_done_work(struct work_struct *work); +static void glink_spi_remove(struct glink_spi *glink); + +/** + * spi_resume() - Vote for the spi device resume + * @cmpnt: Component to identify the spi device. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int spi_resume(struct glink_cmpnt *cmpnt) +{ + if (!cmpnt || !cmpnt->master_dev || !cmpnt->master_ops || + !cmpnt->master_ops->resume) + return 0; + + return cmpnt->master_ops->resume(cmpnt->master_dev); +} + +/** + * glink_spi_xprt_set_poll_mode() - Set the transport to polling mode + * @glink: Edge information corresponding to the transport. + * + * This helper function indicates the start of RX polling. This will + * prevent the system from suspending and keeps polling for RX for a + * pre-defined duration. + */ +static void glink_spi_xprt_set_poll_mode(struct glink_spi *glink) +{ + atomic_inc(&glink->activity_cnt); + spi_resume(&glink->cmpnt); +} + +/** + * glink_spi_xprt_set_irq_mode() - Set the transport to IRQ mode + * @glink: Edge information corresponding to the transport. + * + * This helper indicates the end of RX polling. This will allow the + * system to suspend and new RX data can be handled only through an IRQ. + */ +static void glink_spi_xprt_set_irq_mode(struct glink_spi *glink) +{ + atomic_dec(&glink->activity_cnt); +} + +static struct glink_channel *glink_spi_alloc_channel(struct glink_spi *glink, + const char *name) +{ + struct glink_channel *channel; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return ERR_PTR(-ENOMEM); + + /* Setup glink internal glink_channel data */ + spin_lock_init(&channel->recv_lock); + spin_lock_init(&channel->intent_lock); + mutex_init(&channel->intent_req_lock); + + channel->glink = glink; + channel->name = kstrdup(name, GFP_KERNEL); + + init_completion(&channel->open_req); + init_completion(&channel->open_ack); + init_completion(&channel->intent_req_comp); + + INIT_LIST_HEAD(&channel->done_intents); + INIT_WORK(&channel->intent_work, glink_spi_rx_done_work); + + idr_init(&channel->liids); + idr_init(&channel->riids); + kref_init(&channel->refcount); + + return channel; +} + +static void glink_spi_channel_release(struct kref *ref) +{ + struct glink_channel *channel = container_of(ref, struct glink_channel, + refcount); + unsigned long flags; + + CH_INFO(channel, "\n"); + spin_lock_irqsave(&channel->intent_lock, flags); + idr_destroy(&channel->liids); + idr_destroy(&channel->riids); + spin_unlock_irqrestore(&channel->intent_lock, flags); + + kfree(channel->name); + kfree(channel); +} + +/** + * glink_spi_read() - Receive data over SPI bus + * @glink: Edge from which the data has to be received. + * @src: Source Address of the RX data. + * @dst: Address of the destination RX buffer. + * @size: Size of the RX data. 
+ * + * This function is used to receive data or command as a byte stream from + * the remote subsystem over the SPI bus. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_spi_read(struct glink_spi *glink, u32 src, void *dst, + size_t size) +{ + struct wcd_spi_msg spi_msg = { 0 }; + + if (unlikely(!glink->spi_ops.read_dev)) + return -EINVAL; + + spi_msg.data = dst; + spi_msg.remote_addr = src; + spi_msg.len = size; + return glink->spi_ops.read_dev(glink->spi_ops.spi_dev, &spi_msg); +} + +/** + * glink_spi_write() - Transmit data over SPI bus + * @glink: Edge to which the data has to be transmitted. + * @src: Address of the TX buffer. + * @dst: Destination Address of the TX Data. + * @size: Size of the TX data. + * + * This function is used to transmit data or command as a byte stream to + * the remote subsystem over the SPI bus. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_spi_write(struct glink_spi *glink, void *src, u32 dst, + size_t size) +{ + struct wcd_spi_msg spi_msg = { 0 }; + + if (unlikely(!glink->spi_ops.write_dev)) + return -EINVAL; + + spi_msg.data = src; + spi_msg.remote_addr = dst; + spi_msg.len = size; + return glink->spi_ops.write_dev(glink->spi_ops.spi_dev, &spi_msg); +} + +/** + * glink_spi_reg_read() - Read the TX/RX FIFO Read/Write Index registers + * @glink: Edge from which the registers have to be read. + * @reg_addr: Address of the register to be read. + * @data: Buffer into which the register data has to be read. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +static int glink_spi_reg_read(struct glink_spi *glink, u32 reg_addr, u32 *data) +{ + int ret; + + ret = glink_spi_read(glink, reg_addr, data, sizeof(*data)); + if (ret) + return ret; + + /* SPI register reads need to be masked */ + *data = *data & ID_MASK; + return 0; +} + +/** + * glink_spi_reg_write() - Write the TX/RX FIFO Read/Write Index registers + * @glink: Edge to which the registers have to be written. + * @reg_addr: Address of the registers to be written. + * @data: Data to be written to the registers. + * + * Return: 0 on success, standard Linux error codes on failure.
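+ * + * Note that, unlike glink_spi_reg_read(), no ID_MASK masking is applied to + * the value written.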
+ */ +static int glink_spi_reg_write(struct glink_spi *glink, u32 reg_addr, u32 data) +{ + return glink_spi_write(glink, &data, reg_addr, sizeof(data)); +} + +static size_t glink_spi_rx_avail(struct glink_spi *glink) +{ + struct glink_spi_pipe *pipe = &glink->rx_pipe; + u32 head; + u32 tail; + int ret; + + if (atomic_read(&glink->in_reset)) + return 0; + + if (unlikely(!pipe->fifo_base)) { + ret = glink_spi_reg_read(glink, pipe->tail_addr, + &pipe->local_addr); + if (ret < 0) { + GLINK_ERR(glink, "Error %d reading rx tail\n", ret); + return 0; + } + pipe->fifo_base = pipe->local_addr; + } + + tail = pipe->local_addr; + ret = glink_spi_reg_read(glink, pipe->head_addr, &head); + if (ret < 0) { + GLINK_ERR(glink, "Error %d reading rx head\n", ret); + return 0; + } + + if (head < tail) + return pipe->length - (tail - head); + else + return head - tail; +} + +static void glink_spi_rx_peak(struct glink_spi *glink, + void *data, unsigned int offset, size_t count) +{ + struct glink_spi_pipe *pipe = &glink->rx_pipe; + u32 fifo_end; + size_t len; + u32 tail; + + fifo_end = pipe->fifo_base + pipe->length; + tail = pipe->local_addr; + tail += offset; + if (tail >= fifo_end) + tail -= pipe->length; + + len = min_t(size_t, count, fifo_end - tail); + if (len) + glink_spi_read(glink, tail, data, len); + + if (len != count) + glink_spi_read(glink, pipe->fifo_base, data + len, count - len); +} + +static void glink_spi_rx_advance(struct glink_spi *glink, size_t count) +{ + struct glink_spi_pipe *pipe = &glink->rx_pipe; + u32 tail; + int ret; + + tail = pipe->local_addr; + tail += count; + + if (tail >= pipe->fifo_base + pipe->length) + tail -= pipe->length; + + pipe->local_addr = tail; + ret = glink_spi_reg_write(glink, pipe->tail_addr, tail); + if (ret) + GLINK_ERR(glink, "Error %d writing rx tail\n", ret); +} + +static size_t glink_spi_tx_avail(struct glink_spi *glink) +{ + struct glink_spi_pipe *pipe = &glink->tx_pipe; + u32 avail; + u32 head; + u32 tail; + int ret; + + if (atomic_read(&glink->in_reset)) + return 0; + + if (unlikely(!pipe->fifo_base)) { + ret = glink_spi_reg_read(glink, pipe->head_addr, + &pipe->local_addr); + if (ret < 0) { + GLINK_ERR(glink, "Error %d reading tx head\n", ret); + return 0; + } + pipe->fifo_base = pipe->local_addr; + } + + head = pipe->local_addr; + ret = glink_spi_reg_read(glink, pipe->tail_addr, &tail); + if (ret < 0) { + GLINK_ERR(glink, "Error %d reading tx tail\n", ret); + return 0; + } + + if (tail <= head) + avail = pipe->fifo_base + pipe->length - head + tail; + else + avail = tail - head; + + if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)) + avail = 0; + else + avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE; + + return avail; +} + +static unsigned int glink_spi_tx_write_one(struct glink_spi *glink, u32 head, + void *data, size_t count) +{ + struct glink_spi_pipe *pipe = &glink->tx_pipe; + size_t len; + int ret; + + len = min_t(size_t, count, pipe->fifo_base + pipe->length - head); + if (len) { + ret = glink_spi_write(glink, data, head, len); + if (ret) + GLINK_ERR(glink, "Error %d writing tx data\n", ret); + } + + if (len != count) { + ret = glink_spi_write(glink, data + len, pipe->fifo_base, + count - len); + if (ret) + GLINK_ERR(glink, "Error %d writing tx data\n", ret); + } + + head += count; + if (head >= pipe->fifo_base + pipe->length) + head -= pipe->length; + + return head; +} + +static void glink_spi_tx_write(struct glink_spi *glink, void *hdr, size_t hlen, + void *data, size_t dlen) +{ + struct glink_spi_pipe *pipe = &glink->tx_pipe; + u32
head; + int ret; + + head = pipe->local_addr; + + if (hlen) + head = glink_spi_tx_write_one(glink, head, hdr, hlen); + if (dlen) + head = glink_spi_tx_write_one(glink, head, data, dlen); + + /* Ensure head is always aligned to SPI_ALIGNMENT (16) bytes */ + head = ALIGN(head, SPI_ALIGNMENT); + if (head >= pipe->fifo_base + pipe->length) + head -= pipe->length; + + pipe->local_addr = head; + ret = glink_spi_reg_write(glink, pipe->head_addr, head); + if (ret) + GLINK_ERR(glink, "Error %d writing tx head\n", ret); + +} + +static int glink_spi_tx(struct glink_spi *glink, void *hdr, size_t hlen, + void *data, size_t dlen, bool wait) +{ + unsigned int tlen = hlen + dlen; + int ret = 0; + + if (tlen >= glink->tx_pipe.length) + return -EINVAL; + + mutex_lock(&glink->tx_lock); + + while (glink_spi_tx_avail(glink) < tlen) { + if (!wait) { + ret = -EAGAIN; + goto out; + } + + if (atomic_read(&glink->in_reset)) { + ret = -ENXIO; + goto out; + } + + /* Wait without holding the tx_lock */ + mutex_unlock(&glink->tx_lock); + + usleep_range(10000, 15000); + + mutex_lock(&glink->tx_lock); + } + + glink_spi_tx_write(glink, hdr, hlen, data, dlen); + +out: + mutex_unlock(&glink->tx_lock); + + + return ret; +} + +static int glink_spi_send_version(struct glink_spi *glink) +{ + struct glink_msg msg = { 0 }; + + msg.cmd = cpu_to_le16(SPI_CMD_VERSION); + msg.param1 = cpu_to_le16(GLINK_VERSION_1); + msg.param2 = cpu_to_le32(glink->features); + + GLINK_INFO(glink, "vers:%d features:%d\n", msg.param1, msg.param2); + return glink_spi_tx(glink, &msg, sizeof(msg), NULL, 0, true); +} + +static void glink_spi_send_version_ack(struct glink_spi *glink) +{ + struct glink_msg msg = { 0 }; + + msg.cmd = cpu_to_le16(SPI_CMD_VERSION_ACK); + msg.param1 = cpu_to_le16(GLINK_VERSION_1); + msg.param2 = cpu_to_le32(glink->features); + + GLINK_INFO(glink, "vers:%d features:%d\n", msg.param1, msg.param2); + glink_spi_tx(glink, &msg, sizeof(msg), NULL, 0, true); +} + +/** + * glink_spi_receive_version() - receive version/features from remote system + * + * @glink: pointer to transport interface + * @version: remote version + * @features: remote features + * + * This function is called in response to a remote-initiated version/feature + * negotiation sequence. + */ +static void glink_spi_receive_version(struct glink_spi *glink, + u32 version, + u32 features) +{ + GLINK_INFO(glink, "vers:%d features:%d\n", version, features); + + switch (version) { + case 0: + break; + case GLINK_VERSION_1: + glink->features &= features; + /* FALLTHROUGH */ + default: + glink_spi_send_version_ack(glink); + break; + } +} + +/** + * glink_spi_receive_version_ack() - receive negotiation ack from remote system + * + * @glink: pointer to transport interface + * @version: remote version response + * @features: remote features response + * + * This function is called in response to a local-initiated version/feature + * negotiation sequence and is the counter-offer from the remote side based + * upon the initial version and feature set requested.
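+ * If the ack carries a feature subset, the local feature set is narrowed to + * the intersection and the version is re-sent so both sides converge.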
+ */ +static void glink_spi_receive_version_ack(struct glink_spi *glink, + u32 version, + u32 features) +{ + GLINK_INFO(glink, "vers:%d features:%d\n", version, features); + + switch (version) { + case 0: + /* Version negotiation failed */ + break; + case GLINK_VERSION_1: + if (features == glink->features) + break; + + glink->features &= features; + /* FALLTHROUGH */ + default: + glink_spi_send_version(glink); + break; + } +} + +/** + * glink_spi_send_open_req() - send a SPI_CMD_OPEN request to the remote + * @glink: Ptr to the glink edge + * @channel: Ptr to the channel that the open req is sent + * + * Allocates a local channel id and sends a SPI_CMD_OPEN message to the remote. + * Will return with refcount held, regardless of outcome. + * + * Returns 0 on success, negative errno otherwise. + */ +static int glink_spi_send_open_req(struct glink_spi *glink, + struct glink_channel *channel) +{ + + struct cmd_msg { + __le16 cmd; + __le16 lcid; + __le16 length; + __le16 req_xprt; + __le64 reserved; + }; + struct { + struct cmd_msg msg; + u8 name[GLINK_NAME_SIZE]; + } __packed req; + int name_len = strlen(channel->name) + 1; + int req_len = ALIGN(sizeof(req.msg) + name_len, SPI_ALIGNMENT); + int ret; + unsigned long flags; + + kref_get(&channel->refcount); + + spin_lock_irqsave(&glink->idr_lock, flags); + ret = idr_alloc_cyclic(&glink->lcids, channel, + SPI_GLINK_CID_MIN, SPI_GLINK_CID_MAX, + GFP_ATOMIC); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (ret < 0) + return ret; + + channel->lcid = ret; + CH_INFO(channel, "\n"); + + memset(&req, 0, sizeof(req)); + req.msg.cmd = cpu_to_le16(SPI_CMD_OPEN); + req.msg.lcid = cpu_to_le16(channel->lcid); + req.msg.length = cpu_to_le16(name_len); + strlcpy(req.name, channel->name, GLINK_NAME_SIZE); + + ret = glink_spi_tx(glink, &req, req_len, NULL, 0, true); + if (ret) + goto remove_idr; + + return 0; + +remove_idr: + CH_INFO(channel, "remove_idr\n"); + + spin_lock_irqsave(&glink->idr_lock, flags); + idr_remove(&glink->lcids, channel->lcid); + channel->lcid = 0; + spin_unlock_irqrestore(&glink->idr_lock, flags); + + return ret; +} + +static void glink_spi_send_open_ack(struct glink_spi *glink, + struct glink_channel *channel) +{ + struct glink_msg msg = { 0 }; + + msg.cmd = cpu_to_le16(SPI_CMD_OPEN_ACK); + msg.param1 = cpu_to_le16(channel->rcid); + + CH_INFO(channel, "\n"); + glink_spi_tx(glink, &msg, sizeof(msg), NULL, 0, true); +} + +static int glink_spi_rx_open_ack(struct glink_spi *glink, unsigned int lcid) +{ + struct glink_channel *channel; + + spin_lock(&glink->idr_lock); + channel = idr_find(&glink->lcids, lcid); + spin_unlock(&glink->idr_lock); + if (!channel) { + GLINK_ERR(glink, "Invalid open ack packet %d\n", lcid); + return -EINVAL; + } + + CH_INFO(channel, "\n"); + complete_all(&channel->open_ack); + + return 0; +} + +static void glink_spi_send_close_req(struct glink_spi *glink, + struct glink_channel *channel) +{ + struct glink_msg req = { 0 }; + + req.cmd = cpu_to_le16(SPI_CMD_CLOSE); + req.param1 = cpu_to_le16(channel->lcid); + + CH_INFO(channel, "\n"); + glink_spi_tx(glink, &req, sizeof(req), NULL, 0, true); +} + +static void glink_spi_send_close_ack(struct glink_spi *glink, + unsigned int rcid) +{ + struct glink_msg req = { 0 }; + + req.cmd = cpu_to_le16(SPI_CMD_CLOSE_ACK); + req.param1 = cpu_to_le16(rcid); + + GLINK_INFO(glink, "rcid:%d\n", rcid); + glink_spi_tx(glink, &req, sizeof(req), NULL, 0, true); +} + +static int glink_spi_request_intent(struct glink_spi *glink, + struct glink_channel *channel, + size_t size) +{ + 
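/* Ask the remote to queue an rx buffer of @size and wait up to 10s for the SPI_CMD_RX_INTENT_REQ_ACK; mirrors qcom_glink_request_intent() on the SMEM transport. */ +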
struct glink_msg req = { 0 }; + int ret; + + mutex_lock(&channel->intent_req_lock); + + reinit_completion(&channel->intent_req_comp); + + req.cmd = cpu_to_le16(SPI_CMD_RX_INTENT_REQ); + req.param1 = cpu_to_le16(channel->lcid); + req.param2 = cpu_to_le32(size); + + CH_INFO(channel, "size:%d\n", size); + + ret = glink_spi_tx(glink, &req, sizeof(req), NULL, 0, true); + if (ret) + goto unlock; + + ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); + if (!ret) { + dev_err(&glink->dev, "intent request timed out\n"); + ret = -ETIMEDOUT; + } else { + ret = channel->intent_req_result ? 0 : -ECANCELED; + } + +unlock: + mutex_unlock(&channel->intent_req_lock); + return ret; +} + +static int glink_spi_handle_intent(struct glink_spi *glink, + unsigned int cid, + unsigned int count, + void *rx_data, + size_t avail) +{ + struct glink_core_rx_intent *intent; + struct glink_channel *channel; + struct intent_pair { + __le32 size; + __le32 iid; + __le64 addr; + }; + struct intent_pair *intents; + const size_t msglen = sizeof(struct intent_pair) * count; + int ret; + int i; + unsigned long flags; + + if (avail < msglen) { + dev_err(&glink->dev, "Not enough data in buf\n"); + return avail; + } + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, cid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_err(&glink->dev, "intents for non-existing channel\n"); + return msglen; + } + + intents = (struct intent_pair *)rx_data; + for (i = 0; i < count; ++i) { + intent = kzalloc(sizeof(*intent), GFP_ATOMIC); + if (!intent) + break; + + intent->id = le32_to_cpu(intents[i].iid); + intent->size = le32_to_cpu(intents[i].size); + intent->addr = (u32)le64_to_cpu(intents[i].addr); + + CH_INFO(channel, "riid:%d size:%d\n", intent->id, intent->size); + + spin_lock_irqsave(&channel->intent_lock, flags); + ret = idr_alloc(&channel->riids, intent, + intent->id, intent->id + 1, GFP_ATOMIC); + spin_unlock_irqrestore(&channel->intent_lock, flags); + + if (ret < 0) + dev_err(&glink->dev, "failed to store remote intent\n"); + } + + return msglen; +} + +static void glink_spi_handle_intent_req_ack(struct glink_spi *glink, + unsigned int cid, bool granted) +{ + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, cid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_err(&glink->dev, "unable to find channel\n"); + return; + } + + channel->intent_req_result = granted; + complete(&channel->intent_req_comp); + CH_INFO(channel, "\n"); +} + +/** + * glink_spi_send_intent_req_ack() - convert an rx intent request ack cmd to + wire format and transmit + * @glink: The transport to transmit on. + * @channel: The glink channel + * @granted: The request response to encode. + * + * Return: 0 on success or standard Linux error code. 
+ */ +static int glink_spi_send_intent_req_ack(struct glink_spi *glink, + struct glink_channel *channel, + bool granted) +{ + struct glink_msg msg = { 0 }; + + msg.cmd = cpu_to_le16(SPI_CMD_RX_INTENT_REQ_ACK); + msg.param1 = cpu_to_le16(channel->lcid); + msg.param2 = cpu_to_le32(granted); + + CH_INFO(channel, "\n"); + glink_spi_tx(glink, &msg, sizeof(msg), NULL, 0, true); + + return 0; +} + +static struct glink_core_rx_intent * +glink_spi_alloc_intent(struct glink_spi *glink, + struct glink_channel *channel, + size_t size, + bool reuseable) +{ + struct glink_core_rx_intent *intent; + int ret; + unsigned long flags; + + intent = kzalloc(sizeof(*intent), GFP_KERNEL); + if (!intent) + return NULL; + + intent->data = kzalloc(size, GFP_KERNEL); + if (!intent->data) + goto free_intent; + + spin_lock_irqsave(&channel->intent_lock, flags); + ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); + if (ret < 0) { + spin_unlock_irqrestore(&channel->intent_lock, flags); + goto free_data; + } + spin_unlock_irqrestore(&channel->intent_lock, flags); + + intent->id = ret; + intent->size = size; + intent->reuse = reuseable; + + return intent; + +free_data: + kfree(intent->data); +free_intent: + kfree(intent); + return NULL; +} + +/** + * glink_spi_advertise_intent - convert an rx intent cmd to wire format and + * transmit + * @glink: The transport to transmit on. + * @channel: The local channel + * @intent: The intent to pass on to the remote. + * + * Return: 0 on success or standard Linux error code. + */ +static int glink_spi_advertise_intent(struct glink_spi *glink, + struct glink_channel *channel, + struct glink_core_rx_intent *intent) +{ + struct command { + struct glink_msg msg; + __le32 size; + __le32 liid; + __le64 addr; + } __packed; + struct command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.msg.cmd = cpu_to_le16(SPI_CMD_INTENT); + cmd.msg.param1 = cpu_to_le16(channel->lcid); + cmd.msg.param2 = cpu_to_le32(1); + cmd.size = cpu_to_le32(intent->size); + cmd.liid = cpu_to_le32(intent->id); + + CH_INFO(channel, "count:%d size:%d liid:%d\n", 1, + intent->size, intent->id); + + glink_spi_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); + + return 0; +} + +/** + * glink_spi_handle_intent_req() - Receive a request for rx_intent + * from remote side + * @glink: Pointer to the transport interface + * @cid: Remote channel ID + * @size: size of the intent + * + * The function searches for the local channel to which the request for + * rx_intent has arrived and allocates and notifies the remote back + */ +static void glink_spi_handle_intent_req(struct glink_spi *glink, + u32 cid, size_t size) +{ + struct glink_core_rx_intent *intent; + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, cid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + + if (!channel) { + pr_err("%s channel not found for cid %d\n", __func__, cid); + return; + } + + intent = glink_spi_alloc_intent(glink, channel, size, false); + if (intent) + glink_spi_advertise_intent(glink, channel, intent); + + glink_spi_send_intent_req_ack(glink, channel, !!intent); +} + +static int glink_spi_send_short(struct glink_channel *channel, + void *data, int len, + struct glink_core_rx_intent *intent, bool wait) +{ + struct glink_spi *glink = channel->glink; + struct { + struct glink_msg msg; + u8 data[SHORT_SIZE]; + } __packed req = { 0 }; + + CH_INFO(channel, "intent offset:%d len:%d\n", intent->offset, len); + + req.msg.cmd = cpu_to_le16(SPI_CMD_TX_SHORT_DATA);
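+ /* Payloads of up to SHORT_SIZE (16) bytes ride inline in the command itself instead of being written into the remote intent; req is zero-initialised above so the unused tail of data[] never leaks stack bytes onto the wire. */ +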
+	req.msg.param1 = cpu_to_le16(channel->lcid);
+	req.msg.param2 = cpu_to_le32(intent->id);
+	req.msg.param3 = cpu_to_le32(len);
+	req.msg.param4 = cpu_to_le32(0);
+	memcpy(req.data, data, len);
+
+	mutex_lock(&glink->tx_lock);
+	while (glink_spi_tx_avail(glink) < sizeof(req)) {
+		if (!wait) {
+			mutex_unlock(&glink->tx_lock);
+			return -EAGAIN;
+		}
+
+		if (atomic_read(&glink->in_reset)) {
+			mutex_unlock(&glink->tx_lock);
+			return -EINVAL;
+		}
+
+		/* Wait without holding the tx_lock */
+		mutex_unlock(&glink->tx_lock);
+
+		usleep_range(10000, 15000);
+
+		mutex_lock(&glink->tx_lock);
+	}
+	glink_spi_tx_write(glink, &req, sizeof(req), NULL, 0);
+
+	mutex_unlock(&glink->tx_lock);
+	return 0;
+}
+
+static int glink_spi_send_data(struct glink_channel *channel,
+			       void *data, int chunk_size, int left_size,
+			       struct glink_core_rx_intent *intent, bool wait)
+{
+	struct glink_spi *glink = channel->glink;
+	struct {
+		struct glink_msg msg;
+		__le32 chunk_size;
+		__le32 left_size;
+	} __packed req;
+
+	CH_INFO(channel, "chunk:%d, left:%d\n", chunk_size, left_size);
+
+	memset(&req, 0, sizeof(req));
+	if (intent->offset)
+		req.msg.cmd = cpu_to_le16(SPI_CMD_TX_DATA_CONT);
+	else
+		req.msg.cmd = cpu_to_le16(SPI_CMD_TX_DATA);
+
+	req.msg.param1 = cpu_to_le16(channel->lcid);
+	req.msg.param2 = cpu_to_le32(intent->id);
+	req.chunk_size = cpu_to_le32(chunk_size);
+	req.left_size = cpu_to_le32(left_size);
+
+	mutex_lock(&glink->tx_lock);
+	while (glink_spi_tx_avail(glink) < sizeof(req)) {
+		if (!wait) {
+			mutex_unlock(&glink->tx_lock);
+			return -EAGAIN;
+		}
+
+		if (atomic_read(&glink->in_reset)) {
+			mutex_unlock(&glink->tx_lock);
+			return -EINVAL;
+		}
+
+		/* Wait without holding the tx_lock */
+		mutex_unlock(&glink->tx_lock);
+
+		usleep_range(10000, 15000);
+
+		mutex_lock(&glink->tx_lock);
+	}
+	glink_spi_write(glink, data, intent->addr + intent->offset, chunk_size);
+	intent->offset += chunk_size;
+	glink_spi_tx_write(glink, &req, sizeof(req), NULL, 0);
+
+	mutex_unlock(&glink->tx_lock);
+	return 0;
+}
+
+static int __glink_spi_send(struct glink_channel *channel,
+			    void *data, int len, bool wait)
+{
+	struct glink_spi *glink = channel->glink;
+	struct glink_core_rx_intent *intent = NULL;
+	struct glink_core_rx_intent *tmp;
+	int size = len;
+	int iid = 0;
+	int ret = 0;
+	unsigned long flags;
+
+	CH_INFO(channel, "size:%d, wait:%d\n", len, wait);
+
+	atomic_inc(&glink->activity_cnt);
+	spi_resume(&glink->cmpnt);
+	while (!intent) {
+		spin_lock_irqsave(&channel->intent_lock, flags);
+		idr_for_each_entry(&channel->riids, tmp, iid) {
+			if (tmp->size >= len && !tmp->in_use) {
+				if (!intent)
+					intent = tmp;
+				else if (intent->size > tmp->size)
+					intent = tmp;
+				if (intent->size == len)
+					break;
+			}
+		}
+		if (intent)
+			intent->in_use = true;
+		spin_unlock_irqrestore(&channel->intent_lock, flags);
+
+		/* We found an available intent */
+		if (intent)
+			break;
+
+		if (!wait) {
+			ret = -EBUSY;
+			goto tx_exit;
+		}
+
+		ret = glink_spi_request_intent(glink, channel, len);
+		if (ret < 0)
+			goto tx_exit;
+	}
+
+	if (len <= SHORT_SIZE)
+		size = 0;
+	else if (size & (XPRT_ALIGNMENT - 1))
+		size = ALIGN(len - SHORT_SIZE, XPRT_ALIGNMENT);
+
+	if (size) {
+		ret = glink_spi_send_data(channel, data, size, len - size,
+					  intent, wait);
+		if (ret)
+			goto tx_exit;
+	}
+
+	data = (char *)data + size;
+	size = len - size;
+	if (size)
+		ret = glink_spi_send_short(channel, data, size, intent, wait);
+
+tx_exit:
+	/* Mark intent available if we failed */
+	if (ret && intent)
+		intent->in_use = false;
+
+	atomic_dec(&glink->activity_cnt);
+
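+	/*
+	 * Any XPRT_ALIGNMENT-sized portion of the payload has been written
+	 * into the remote intent via TX_DATA/TX_DATA_CONT above, and the
+	 * remaining tail (at most SHORT_SIZE bytes) went inline via
+	 * TX_SHORT_DATA; ret holds the first error encountered, if any.
+	 */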
+	return ret;
+}
+
+static void glink_spi_handle_rx_done(struct glink_spi *glink,
+				     u32 cid, u32 iid,
+				     bool reuse)
+{
+	struct glink_core_rx_intent *intent;
+	struct glink_channel *channel;
+	unsigned long flags;
+
+	spin_lock_irqsave(&glink->idr_lock, flags);
+	channel = idr_find(&glink->rcids, cid);
+	spin_unlock_irqrestore(&glink->idr_lock, flags);
+	if (!channel) {
+		dev_err(&glink->dev, "invalid channel id received\n");
+		return;
+	}
+
+	spin_lock_irqsave(&channel->intent_lock, flags);
+	intent = idr_find(&channel->riids, iid);
+
+	if (!intent) {
+		spin_unlock_irqrestore(&channel->intent_lock, flags);
+		dev_err(&glink->dev, "invalid intent id received\n");
+		return;
+	}
+
+	intent->offset = 0;
+	intent->in_use = false;
+	CH_INFO(channel, "reuse:%d iid:%d\n", reuse, intent->id);
+
+	if (!reuse) {
+		idr_remove(&channel->riids, intent->id);
+		kfree(intent);
+	}
+	spin_unlock_irqrestore(&channel->intent_lock, flags);
+}
+
+static int __glink_spi_rx_done(struct glink_spi *glink,
+			       struct glink_channel *channel,
+			       struct glink_core_rx_intent *intent,
+			       bool wait)
+{
+	struct {
+		u16 id;
+		u16 lcid;
+		u32 liid;
+		u64 reserved;
+	} __packed cmd = { 0 };
+	unsigned int cid = channel->lcid;
+	unsigned int iid = intent->id;
+	bool reuse = intent->reuse;
+	int ret;
+
+	cmd.id = reuse ? SPI_CMD_RX_DONE_W_REUSE : SPI_CMD_RX_DONE;
+	cmd.lcid = cid;
+	cmd.liid = iid;
+
+	ret = glink_spi_tx(glink, &cmd, sizeof(cmd), NULL, 0, wait);
+	if (ret)
+		return ret;
+
+	intent->offset = 0;
+	if (!reuse) {
+		kfree(intent->data);
+		kfree(intent);
+	}
+
+	CH_INFO(channel, "reuse:%d liid:%d\n", reuse, iid);
+	return 0;
+}
+
+static void glink_spi_rx_done_work(struct work_struct *work)
+{
+	struct glink_channel *channel = container_of(work, struct glink_channel,
+						     intent_work);
+	struct glink_spi *glink = channel->glink;
+	struct glink_core_rx_intent *intent, *tmp;
+	unsigned long flags;
+
+	atomic_inc(&glink->activity_cnt);
+	spi_resume(&glink->cmpnt);
+
+	spin_lock_irqsave(&channel->intent_lock, flags);
+	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
+		list_del(&intent->node);
+		spin_unlock_irqrestore(&channel->intent_lock, flags);
+
+		__glink_spi_rx_done(glink, channel, intent, true);
+
+		spin_lock_irqsave(&channel->intent_lock, flags);
+	}
+	spin_unlock_irqrestore(&channel->intent_lock, flags);
+
+	atomic_dec(&glink->activity_cnt);
+}
+
+static void glink_spi_rx_done(struct glink_spi *glink,
+			      struct glink_channel *channel,
+			      struct glink_core_rx_intent *intent)
+{
+	unsigned long flags;
+	int ret = -EAGAIN;
+
+	/* We don't send RX_DONE to intentless systems */
+	if (glink->intentless) {
+		kfree(intent->data);
+		kfree(intent);
+		return;
+	}
+
+	/* Take it off the tree of receive intents */
+	if (!intent->reuse) {
+		spin_lock_irqsave(&channel->intent_lock, flags);
+		idr_remove(&channel->liids, intent->id);
+		spin_unlock_irqrestore(&channel->intent_lock, flags);
+	}
+
+	/* Schedule the sending of a rx_done indication */
+	if (list_empty(&channel->done_intents))
+		ret = __glink_spi_rx_done(glink, channel, intent, false);
+
+	if (ret) {
+		spin_lock_irqsave(&channel->intent_lock, flags);
+		list_add_tail(&intent->node, &channel->done_intents);
+		schedule_work(&channel->intent_work);
+		spin_unlock_irqrestore(&channel->intent_lock, flags);
+	}
+}
+
+/* Locally initiated rpmsg_create_ept */
+static struct glink_channel *glink_spi_create_local(struct glink_spi *glink,
+						    const char *name)
+{
+	struct glink_channel *channel;
+	int ret;
+	unsigned long flags;
+
+	channel = glink_spi_alloc_channel(glink,
name); + if (IS_ERR(channel)) + return ERR_CAST(channel); + + CH_INFO(channel, "\n"); + ret = glink_spi_send_open_req(glink, channel); + if (ret) + goto release_channel; + + ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ); + if (!ret) + goto err_timeout; + + ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ); + if (!ret) + goto err_timeout; + + glink_spi_send_open_ack(glink, channel); + + return channel; + +err_timeout: + CH_INFO(channel, "err_timeout\n"); + + /* glink_spi_send_open_req() did register the channel in lcids*/ + spin_lock_irqsave(&glink->idr_lock, flags); + idr_remove(&glink->lcids, channel->lcid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + +release_channel: + CH_INFO(channel, "release_channel\n"); + /* Release glink_spi_send_open_req() reference */ + kref_put(&channel->refcount, glink_spi_channel_release); + /* Release glink_spi_alloc_channel() reference */ + kref_put(&channel->refcount, glink_spi_channel_release); + + return ERR_PTR(-ETIMEDOUT); +} + +/* Remote initiated rpmsg_create_ept */ +static int glink_spi_create_remote(struct glink_spi *glink, + struct glink_channel *channel) +{ + int ret; + + CH_INFO(channel, "\n"); + + glink_spi_send_open_ack(glink, channel); + + ret = glink_spi_send_open_req(glink, channel); + if (ret) + goto close_link; + + ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ); + if (!ret) { + ret = -ETIMEDOUT; + goto close_link; + } + + return 0; + +close_link: + CH_INFO(channel, "close_link %d\n", ret); + + /* + * Send a close request to "undo" our open-ack. The close-ack will + * release glink_spi_send_open_req() reference and the last reference + * will be release after rx_close or transport unregister by calling + * glink_spi_remove(). + */ + glink_spi_send_close_req(glink, channel); + + return ret; +} + +static struct rpmsg_endpoint *glink_spi_create_ept(struct rpmsg_device *rpdev, + rpmsg_rx_cb_t cb, + void *priv, + struct rpmsg_channel_info + chinfo) +{ + struct glink_channel *parent = to_glink_channel(rpdev->ept); + struct glink_channel *channel; + struct glink_spi *glink = parent->glink; + struct rpmsg_endpoint *ept; + const char *name = chinfo.name; + int cid; + int ret; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + idr_for_each_entry(&glink->rcids, channel, cid) { + if (!strcmp(channel->name, name)) + break; + } + spin_unlock_irqrestore(&glink->idr_lock, flags); + + if (!channel) { + channel = glink_spi_create_local(glink, name); + if (IS_ERR(channel)) + return NULL; + } else { + ret = glink_spi_create_remote(glink, channel); + if (ret) + return NULL; + } + + ept = &channel->ept; + ept->rpdev = rpdev; + ept->cb = cb; + ept->priv = priv; + ept->ops = &glink_endpoint_ops; + + return ept; +} + +static int glink_spi_announce_create(struct rpmsg_device *rpdev) +{ + struct glink_channel *channel = to_glink_channel(rpdev->ept); + struct device_node *np = rpdev->dev.of_node; + struct glink_spi *glink = channel->glink; + struct glink_core_rx_intent *intent; + const struct property *prop = NULL; + __be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) }; + int num_intents; + int num_groups = 1; + __be32 *val = defaults; + int size; + + if (glink->intentless || !completion_done(&channel->open_ack)) + return 0; + + prop = of_find_property(np, "qcom,intents", NULL); + if (prop) { + val = prop->value; + num_groups = prop->length / sizeof(u32) / 2; + } + + /* Channel is now open, advertise base set of intents */ + while (num_groups--) { + size = be32_to_cpup(val++); + 
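+		/*
+		 * "qcom,intents" is parsed as <size count> pairs: the cell
+		 * read above is the intent size, the one read below is how
+		 * many intents of that size to advertise. With the property
+		 * absent, the defaults advertise five 1K intents. A node
+		 * could specify, for example:
+		 *	qcom,intents = <0x400 5 0x2000 2>;
+		 */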
num_intents = be32_to_cpup(val++); + while (num_intents--) { + intent = glink_spi_alloc_intent(glink, channel, size, + true); + if (!intent) + break; + + glink_spi_advertise_intent(glink, channel, intent); + } + } + return 0; +} + +static void glink_spi_destroy_ept(struct rpmsg_endpoint *ept) +{ + struct glink_channel *channel = to_glink_channel(ept); + struct glink_spi *glink = channel->glink; + unsigned long flags; + + spin_lock_irqsave(&channel->recv_lock, flags); + channel->ept.cb = NULL; + spin_unlock_irqrestore(&channel->recv_lock, flags); + + /* Decouple the potential rpdev from the channel */ + channel->rpdev = NULL; + + glink_spi_send_close_req(glink, channel); +} + +static void glink_spi_rx_close(struct glink_spi *glink, unsigned int rcid) +{ + struct rpmsg_channel_info chinfo; + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, rcid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (WARN(!channel, "close request on unknown channel\n")) + return; + CH_INFO(channel, "\n"); + + /* cancel pending rx_done work */ + cancel_work_sync(&channel->intent_work); + + if (channel->rpdev) { + strlcpy(chinfo.name, channel->name, sizeof(chinfo.name)); + chinfo.src = RPMSG_ADDR_ANY; + chinfo.dst = RPMSG_ADDR_ANY; + + rpmsg_unregister_device(&glink->dev, &chinfo); + } + + glink_spi_send_close_ack(glink, channel->rcid); + + spin_lock_irqsave(&glink->idr_lock, flags); + idr_remove(&glink->rcids, channel->rcid); + channel->rcid = 0; + spin_unlock_irqrestore(&glink->idr_lock, flags); + + kref_put(&channel->refcount, glink_spi_channel_release); +} + +static void glink_spi_rx_close_ack(struct glink_spi *glink, unsigned int lcid) +{ + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->lcids, lcid); + if (WARN(!channel, "close ack on unknown channel\n")) { + spin_unlock_irqrestore(&glink->idr_lock, flags); + return; + } + CH_INFO(channel, "\n"); + + idr_remove(&glink->lcids, channel->lcid); + channel->lcid = 0; + spin_unlock_irqrestore(&glink->idr_lock, flags); + + kref_put(&channel->refcount, glink_spi_channel_release); +} + +static int glink_spi_send(struct rpmsg_endpoint *ept, void *data, int len) +{ + struct glink_channel *channel = to_glink_channel(ept); + + return __glink_spi_send(channel, data, len, true); +} + +static int glink_spi_trysend(struct rpmsg_endpoint *ept, void *data, int len) +{ + struct glink_channel *channel = to_glink_channel(ept); + + return __glink_spi_send(channel, data, len, false); +} + +/** + * glink_spi_send_signals() - convert a signal cmd to wire format and transmit + * @glink: The transport to transmit on. + * @channel: The glink channel + * @sigs: The signals to encode. + * + * Return: 0 on success or standard Linux error code. 
+ */
+static int glink_spi_send_signals(struct glink_spi *glink,
+				  struct glink_channel *channel,
+				  u32 sigs)
+{
+	struct glink_msg msg = { 0 };
+
+	msg.cmd = cpu_to_le16(SPI_CMD_SIGNALS);
+	msg.param1 = cpu_to_le16(channel->lcid);
+	msg.param2 = cpu_to_le32(sigs);
+
+	GLINK_INFO(glink, "sigs:%d\n", sigs);
+	return glink_spi_tx(glink, &msg, sizeof(msg), NULL, 0, true);
+}
+
+static int glink_spi_handle_signals(struct glink_spi *glink,
+				    unsigned int rcid, unsigned int signals)
+{
+	struct glink_channel *channel;
+	unsigned long flags;
+	u32 old;
+
+	spin_lock_irqsave(&glink->idr_lock, flags);
+	channel = idr_find(&glink->rcids, rcid);
+	spin_unlock_irqrestore(&glink->idr_lock, flags);
+	if (!channel) {
+		dev_err(&glink->dev, "signal for non-existing channel\n");
+		return -EINVAL;
+	}
+
+	old = channel->rsigs;
+	channel->rsigs = signals;
+
+	if (channel->ept.sig_cb)
+		channel->ept.sig_cb(channel->ept.rpdev, old, channel->rsigs);
+
+	CH_INFO(channel, "old:%d new:%d\n", old, channel->rsigs);
+
+	return 0;
+}
+
+static int glink_spi_get_sigs(struct rpmsg_endpoint *ept,
+			      u32 *lsigs, u32 *rsigs)
+{
+	struct glink_channel *channel = to_glink_channel(ept);
+
+	*lsigs = channel->lsigs;
+	*rsigs = channel->rsigs;
+
+	return 0;
+}
+
+static int glink_spi_set_sigs(struct rpmsg_endpoint *ept, u32 sigs)
+{
+	struct glink_channel *channel = to_glink_channel(ept);
+	struct glink_spi *glink = channel->glink;
+
+	channel->lsigs = sigs;
+
+	return glink_spi_send_signals(glink, channel, sigs);
+}
+
+/*
+ * Finds the device_node for the glink child interested in this channel.
+ */
+static struct device_node *glink_spi_match_channel(struct device_node *node,
+						   const char *channel)
+{
+	struct device_node *child;
+	const char *name;
+	const char *key;
+	int ret;
+
+	for_each_available_child_of_node(node, child) {
+		key = "qcom,glink-channels";
+		ret = of_property_read_string(child, key, &name);
+		if (ret)
+			continue;
+
+		if (strcmp(name, channel) == 0)
+			return child;
+	}
+
+	return NULL;
+}
+
+static const struct rpmsg_device_ops glink_device_ops = {
+	.create_ept = glink_spi_create_ept,
+	.announce_create = glink_spi_announce_create,
+};
+
+static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
+	.destroy_ept = glink_spi_destroy_ept,
+	.send = glink_spi_send,
+	.trysend = glink_spi_trysend,
+	.get_sigs = glink_spi_get_sigs,
+	.set_sigs = glink_spi_set_sigs,
+};
+
+static void glink_spi_rpdev_release(struct device *dev)
+{
+	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+	struct glink_channel *channel = to_glink_channel(rpdev->ept);
+
+	channel->rpdev = NULL;
+	kfree(rpdev);
+}
+
+static int glink_spi_rx_open(struct glink_spi *glink, unsigned int rcid,
+			     char *name)
+{
+	struct glink_channel *channel;
+	struct rpmsg_device *rpdev;
+	bool create_device = false;
+	struct device_node *node;
+	int lcid;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&glink->idr_lock, flags);
+	idr_for_each_entry(&glink->lcids, channel, lcid) {
+		if (!strcmp(channel->name, name))
+			break;
+	}
+	spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+	if (!channel) {
+		channel = glink_spi_alloc_channel(glink, name);
+		if (IS_ERR(channel))
+			return PTR_ERR(channel);
+
+		/* The opening dance was initiated by the remote */
+		create_device = true;
+	}
+
+	spin_lock_irqsave(&glink->idr_lock, flags);
+	ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
+	if (ret < 0) {
+		dev_err(&glink->dev, "Unable to insert channel into rcid list\n");
+		spin_unlock_irqrestore(&glink->idr_lock, flags);
+		goto free_channel;
+	}
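+	/* idr_alloc() was constrained to [rcid, rcid + 1), so ret == rcid */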
+ channel->rcid = ret; + spin_unlock_irqrestore(&glink->idr_lock, flags); + + complete_all(&channel->open_req); + + if (create_device) { + rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL); + if (!rpdev) { + ret = -ENOMEM; + goto rcid_remove; + } + + rpdev->ept = &channel->ept; + strlcpy(rpdev->id.name, name, RPMSG_NAME_SIZE); + rpdev->src = RPMSG_ADDR_ANY; + rpdev->dst = RPMSG_ADDR_ANY; + rpdev->ops = &glink_device_ops; + + node = glink_spi_match_channel(glink->dev.of_node, name); + rpdev->dev.of_node = node; + rpdev->dev.parent = &glink->dev; + rpdev->dev.release = glink_spi_rpdev_release; + + ret = rpmsg_register_device(rpdev); + if (ret) + goto free_rpdev; + + channel->rpdev = rpdev; + } + CH_INFO(channel, "\n"); + + return 0; + +free_rpdev: + CH_INFO(channel, "free_rpdev\n"); + kfree(rpdev); +rcid_remove: + CH_INFO(channel, "rcid_remove\n"); + spin_lock_irqsave(&glink->idr_lock, flags); + idr_remove(&glink->rcids, channel->rcid); + channel->rcid = 0; + spin_unlock_irqrestore(&glink->idr_lock, flags); +free_channel: + CH_INFO(channel, "free_channel\n"); + /* Release the reference, iff we took it */ + if (create_device) + kref_put(&channel->refcount, glink_spi_channel_release); + + return ret; +} + +static int glink_spi_rx_data(struct glink_spi *glink, + unsigned int rcid, unsigned int liid, + void *rx_data, size_t avail) +{ + struct glink_core_rx_intent *intent; + struct glink_channel *channel; + struct data_desc { + __le32 chunk_size; + __le32 left_size; + __le64 addr; + }; + struct data_desc *hdr; + unsigned int chunk_size; + unsigned int left_size; + u32 addr; + size_t msglen; + unsigned long flags; + + msglen = sizeof(*hdr); + if (avail < msglen) { + dev_dbg(&glink->dev, "Not enough data in fifo\n"); + return avail; + } + hdr = (struct data_desc *)rx_data; + + chunk_size = le32_to_cpu(hdr->chunk_size); + left_size = le32_to_cpu(hdr->left_size); + addr = (u32)le64_to_cpu(hdr->addr); + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, rcid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_dbg(&glink->dev, "Data on non-existing channel\n"); + return msglen; + } + CH_INFO(channel, "chunk_size:%d left_size:%d\n", chunk_size, left_size); + + spin_lock_irqsave(&channel->intent_lock, flags); + intent = idr_find(&channel->liids, liid); + spin_unlock_irqrestore(&channel->intent_lock, flags); + + if (!intent) { + dev_err(&glink->dev, + "no intent found for channel %s intent %d\n", + channel->name, liid); + return msglen; + } + + if (intent->size - intent->offset < chunk_size) { + dev_err(&glink->dev, "Insufficient space in intent\n"); + + /* The packet header lied, drop payload */ + return msglen; + } + + /* Read message from addr sent by WDSP */ + glink_spi_read(glink, addr, intent->data + intent->offset, chunk_size); + intent->offset += chunk_size; + + /* Handle message when no fragments remain to be received */ + if (!left_size) { + spin_lock_irqsave(&channel->recv_lock, flags); + if (channel->ept.cb) { + channel->ept.cb(channel->ept.rpdev, + intent->data, + intent->offset, + channel->ept.priv, + RPMSG_ADDR_ANY); + } + spin_unlock_irqrestore(&channel->recv_lock, flags); + + intent->offset = 0; + channel->buf = NULL; + + glink_spi_rx_done(glink, channel, intent); + } + return msglen; +} + +static int glink_spi_rx_short_data(struct glink_spi *glink, + unsigned int rcid, unsigned int liid, + unsigned int chunk_size, + unsigned int left_size, + void *src, size_t avail) +{ + struct glink_core_rx_intent *intent; + struct glink_channel 
*channel; + size_t msglen = SHORT_SIZE; + unsigned long flags; + + if (avail < msglen) { + dev_dbg(&glink->dev, "Not enough data in fifo\n"); + return avail; + } + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, rcid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_dbg(&glink->dev, "Data on non-existing channel\n"); + return msglen; + } + CH_INFO(channel, "chunk_size:%d left_size:%d\n", chunk_size, left_size); + + spin_lock_irqsave(&channel->intent_lock, flags); + intent = idr_find(&channel->liids, liid); + spin_unlock_irqrestore(&channel->intent_lock, flags); + + if (!intent) { + dev_err(&glink->dev, + "no intent found for channel %s intent %d\n", + channel->name, liid); + return msglen; + } + + if (intent->size - intent->offset < chunk_size) { + dev_err(&glink->dev, "Insufficient space in intent\n"); + + /* The packet header lied, drop payload */ + return msglen; + } + + /* Read message from addr sent by WDSP */ + memcpy(intent->data + intent->offset, src, chunk_size); + intent->offset += chunk_size; + + /* Handle message when no fragments remain to be received */ + if (!left_size) { + spin_lock_irqsave(&channel->recv_lock, flags); + if (channel->ept.cb) { + channel->ept.cb(channel->ept.rpdev, + intent->data, + intent->offset, + channel->ept.priv, + RPMSG_ADDR_ANY); + } + spin_unlock_irqrestore(&channel->recv_lock, flags); + + intent->offset = 0; + channel->buf = NULL; + + glink_spi_rx_done(glink, channel, intent); + } + return msglen; +} + +static void glink_spi_defer_work(struct work_struct *work) +{ + struct glink_spi *glink = container_of(work, struct glink_spi, + rx_defer_work); + + struct glink_defer_cmd *dcmd; + struct glink_msg *msg; + unsigned long flags; + unsigned int param1; + unsigned int param2; + unsigned int param3; + unsigned int param4; + unsigned int cmd; + + atomic_inc(&glink->activity_cnt); + spi_resume(&glink->cmpnt); + for (;;) { + spin_lock_irqsave(&glink->rx_lock, flags); + if (list_empty(&glink->rx_queue)) { + spin_unlock_irqrestore(&glink->rx_lock, flags); + break; + } + dcmd = list_first_entry(&glink->rx_queue, + struct glink_defer_cmd, node); + list_del(&dcmd->node); + spin_unlock_irqrestore(&glink->rx_lock, flags); + + msg = &dcmd->msg; + cmd = le16_to_cpu(msg->cmd); + param1 = le16_to_cpu(msg->param1); + param2 = le32_to_cpu(msg->param2); + param3 = le32_to_cpu(msg->param3); + param4 = le32_to_cpu(msg->param4); + + switch (cmd) { + case SPI_CMD_OPEN: + glink_spi_rx_open(glink, param1, msg->data); + break; + case SPI_CMD_CLOSE: + glink_spi_rx_close(glink, param1); + break; + case SPI_CMD_CLOSE_ACK: + glink_spi_rx_close_ack(glink, param1); + break; + default: + WARN(1, "Unknown defer object %d\n", cmd); + break; + } + + kfree(dcmd); + } + atomic_dec(&glink->activity_cnt); +} + +static int glink_spi_rx_defer(struct glink_spi *glink, + void *rx_data, u32 rx_avail, size_t extra) +{ + struct glink_defer_cmd *dcmd; + + extra = ALIGN(extra, SPI_ALIGNMENT); + + if (rx_avail < sizeof(struct glink_msg) + extra) { + dev_dbg(&glink->dev, "Insufficient data in rx fifo"); + return -ENXIO; + } + + dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_KERNEL); + if (!dcmd) + return -ENOMEM; + + INIT_LIST_HEAD(&dcmd->node); + + memcpy(&dcmd->msg, rx_data, sizeof(dcmd->msg) + extra); + + spin_lock(&glink->rx_lock); + list_add_tail(&dcmd->node, &glink->rx_queue); + spin_unlock(&glink->rx_lock); + + schedule_work(&glink->rx_defer_work); + + return 0; +} + +static void glink_spi_process_cmd(struct glink_spi *glink, void *rx_data, + 
u32 rx_size)
+{
+	struct glink_msg *msg;
+	unsigned int param1;
+	unsigned int param2;
+	unsigned int param3;
+	unsigned int param4;
+	unsigned int cmd;
+	int offset = 0;
+	int ret;
+	u16 name_len;
+	char *name;
+
+	while (offset < rx_size) {
+		msg = (struct glink_msg *)(rx_data + offset);
+		offset += sizeof(*msg);
+
+		cmd = le16_to_cpu(msg->cmd);
+		param1 = le16_to_cpu(msg->param1);
+		param2 = le32_to_cpu(msg->param2);
+		param3 = le32_to_cpu(msg->param3);
+		param4 = le32_to_cpu(msg->param4);
+
+		switch (cmd) {
+		case SPI_CMD_VERSION:
+			if (param3) {
+				glink->rx_pipe.length = param3;
+				glink->tx_pipe.length = param3;
+			}
+			glink_spi_receive_version(glink, param1, param2);
+			break;
+		case SPI_CMD_VERSION_ACK:
+			glink_spi_receive_version_ack(glink, param1, param2);
+			break;
+		case SPI_CMD_CLOSE:
+		case SPI_CMD_CLOSE_ACK:
+			glink_spi_rx_defer(glink,
+					   rx_data + offset - sizeof(*msg),
+					   rx_size - offset + sizeof(*msg), 0);
+			break;
+		case SPI_CMD_RX_INTENT_REQ:
+			glink_spi_handle_intent_req(glink, param1, param2);
+			break;
+		case SPI_CMD_OPEN_ACK:
+			ret = glink_spi_rx_open_ack(glink, param1);
+			break;
+		case SPI_CMD_OPEN:
+			name_len = (u16)(param2 & 0xFFFF);
+			name = rx_data + offset;
+			glink_spi_rx_defer(glink,
+					   rx_data + offset - sizeof(*msg),
+					   rx_size - offset + sizeof(*msg),
+					   ALIGN(name_len, SPI_ALIGNMENT));
+
+			offset += ALIGN(name_len, SPI_ALIGNMENT);
+			break;
+		case SPI_CMD_TX_DATA:
+		case SPI_CMD_TX_DATA_CONT:
+			ret = glink_spi_rx_data(glink, param1, param2,
+						rx_data + offset,
+						rx_size - offset);
+			offset += ALIGN(ret, SPI_ALIGNMENT);
+			break;
+		case SPI_CMD_TX_SHORT_DATA:
+			ret = glink_spi_rx_short_data(glink,
+						      param1, param2,
+						      param3, param4,
+						      rx_data + offset,
+						      rx_size - offset);
+			offset += ALIGN(ret, SPI_ALIGNMENT);
+			break;
+		case SPI_CMD_READ_NOTIF:
+			break;
+		case SPI_CMD_INTENT:
+			ret = glink_spi_handle_intent(glink,
+						      param1, param2,
+						      rx_data + offset,
+						      rx_size - offset);
+			offset += ALIGN(ret, SPI_ALIGNMENT);
+			break;
+		case SPI_CMD_RX_DONE:
+			glink_spi_handle_rx_done(glink, param1, param2, false);
+			break;
+		case SPI_CMD_RX_DONE_W_REUSE:
+			glink_spi_handle_rx_done(glink, param1, param2, true);
+			break;
+		case SPI_CMD_RX_INTENT_REQ_ACK:
+			glink_spi_handle_intent_req_ack(glink, param1, param2);
+			break;
+		case SPI_CMD_SIGNALS:
+			glink_spi_handle_signals(glink, param1, param2);
+			break;
+		default:
+			dev_err(&glink->dev, "unhandled rx cmd: %d\n", cmd);
+			break;
+		}
+	}
+}
+
+static void glink_spi_work(struct kthread_work *work)
+{
+	struct glink_spi *glink = container_of(work, struct glink_spi,
+					       rx_work);
+	u32 inactive_cycles = 0;
+	u32 rx_avail;
+	void *rx_data;
+
+	glink_spi_xprt_set_poll_mode(glink);
+	do {
+		rx_avail = glink_spi_rx_avail(glink);
+		if (!rx_avail) {
+			usleep_range(POLL_INTERVAL_US, POLL_INTERVAL_US + 50);
+			inactive_cycles++;
+			continue;
+		}
+		inactive_cycles = 0;
+
+		rx_data = kzalloc(rx_avail, GFP_KERNEL);
+		if (!rx_data)
+			break;
+
+		glink_spi_rx_peak(glink, rx_data, 0, rx_avail);
+		glink_spi_process_cmd(glink, rx_data, rx_avail);
+		kfree(rx_data);
+		glink_spi_rx_advance(glink, rx_avail);
+
+	} while (inactive_cycles < MAX_INACTIVE_CYCLES &&
+		 !atomic_read(&glink->in_reset));
+	glink_spi_xprt_set_irq_mode(glink);
+}
+
+static int glink_spi_cmpnt_init(struct device *dev, void *priv)
+{
+	return 0;
+}
+
+static int glink_spi_cmpnt_deinit(struct device *dev, void *priv)
+{
+	return 0;
+}
+
+static int glink_spi_cmpnt_event_handler(struct device *dev, void *priv,
+					 enum wdsp_event_type event,
+					 void *data)
+{
+	struct glink_spi *glink = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &glink->cmpnt;
+	int ret = 0;
+
+	switch (event) {
+	case WDSP_EVENT_PRE_BOOTUP:
+		if (!cmpnt || !cmpnt->master_dev || !cmpnt->master_ops ||
+		    !cmpnt->master_ops->get_devops_for_cmpnt)
+			break;
+
+		ret = cmpnt->master_ops->get_devops_for_cmpnt(cmpnt->master_dev,
+				WDSP_CMPNT_TRANSPORT, &glink->spi_ops);
+		if (ret)
+			GLINK_ERR(glink, "Failed to get transport device\n");
+		break;
+	case WDSP_EVENT_POST_BOOTUP:
+		atomic_set(&glink->in_reset, 0);
+		ret = glink_spi_send_version(glink);
+		if (ret)
+			GLINK_ERR(glink, "failed to send version %d\n", ret);
+
+		/* FALLTHROUGH */
+	case WDSP_EVENT_IPC1_INTR:
+		kthread_queue_work(&glink->rx_worker, &glink->rx_work);
+		break;
+	case WDSP_EVENT_PRE_SHUTDOWN:
+		glink_spi_remove(glink);
+		break;
+	case WDSP_EVENT_RESUME:
+		break;
+	case WDSP_EVENT_SUSPEND:
+		if (atomic_read(&glink->activity_cnt))
+			ret = -EBUSY;
+		break;
+	default:
+		GLINK_INFO(glink, "unhandled event %d", event);
+		break;
+	}
+
+	return ret;
+}
+
+/* glink_spi_cmpnt_ops - Callback operations registered with wdsp framework */
+static struct wdsp_cmpnt_ops glink_spi_cmpnt_ops = {
+	.init = glink_spi_cmpnt_init,
+	.deinit = glink_spi_cmpnt_deinit,
+	.event_handler = glink_spi_cmpnt_event_handler,
+};
+
+static int glink_component_bind(struct device *dev, struct device *master,
+				void *data)
+{
+	struct glink_spi *glink = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &glink->cmpnt;
+	int ret = 0;
+
+	cmpnt->master_dev = master;
+	cmpnt->master_ops = data;
+
+	if (cmpnt->master_ops && cmpnt->master_ops->register_cmpnt_ops)
+		ret = cmpnt->master_ops->register_cmpnt_ops(master, dev, glink,
+							    &glink_spi_cmpnt_ops);
+	else
+		ret = -EINVAL;
+
+	if (ret)
+		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
+			__func__, ret);
+	return ret;
+}
+
+static void glink_component_unbind(struct device *dev, struct device *master,
+				   void *data)
+{
+	struct glink_spi *glink = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &glink->cmpnt;
+
+	cmpnt->master_dev = NULL;
+	cmpnt->master_ops = NULL;
+}
+
+static const struct component_ops glink_component_ops = {
+	.bind = glink_component_bind,
+	.unbind = glink_component_unbind,
+};
+
+static int glink_spi_init_pipe(const char *key, struct device_node *node,
+			       struct glink_spi_pipe *pipe)
+{
+	const struct property *prop = NULL;
+	__be32 *addrs;
+
+	prop = of_find_property(node, key, NULL);
+	if (!prop) {
+		pr_err("%s failed to find prop %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	if ((prop->length / sizeof(u32)) != 2) {
+		pr_err("%s %s wrong length %d\n", __func__, key, prop->length);
+		return -EINVAL;
+	}
+	addrs = prop->value;
+
+	pipe->tail_addr = be32_to_cpup(addrs++);
+	pipe->head_addr = be32_to_cpup(addrs++);
+	pipe->length = DEFAULT_FIFO_SIZE;
+
+	return 0;
+}
+
+static void glink_spi_release(struct device *dev)
+{
+	struct glink_spi *glink = container_of(dev, struct glink_spi, dev);
+
+	kfree(glink);
+}
+
+struct glink_spi *qcom_glink_spi_register(struct device *parent,
+					  struct device_node *node)
+{
+	struct glink_spi *glink;
+	struct device *dev;
+	int ret;
+
+	glink = kzalloc(sizeof(*glink), GFP_KERNEL);
+	if (!glink)
+		return ERR_PTR(-ENOMEM);
+
+	dev = &glink->dev;
+	dev->parent = parent;
+	dev->of_node = node;
+	dev->release = glink_spi_release;
+	dev_set_name(dev, "%s:%s", node->parent->name, node->name);
+	ret = device_register(dev);
+	if (ret) {
+		pr_err("failed to register glink edge\n");
+		return ERR_PTR(ret);
+	}
+	dev_set_drvdata(dev, glink);
+
+	ret = of_property_read_string(dev->of_node, "label", &glink->name);
+	if (ret < 0)
+		glink->name = dev->of_node->name;
+
+	glink->features = GLINK_FEATURE_INTENT_REUSE;
+	glink->intentless = false;
+
+	mutex_init(&glink->tx_lock);
+	spin_lock_init(&glink->rx_lock);
+	INIT_LIST_HEAD(&glink->rx_queue);
+	INIT_WORK(&glink->rx_defer_work, glink_spi_defer_work);
+
+	kthread_init_work(&glink->rx_work, glink_spi_work);
+	kthread_init_worker(&glink->rx_worker);
+
+	spin_lock_init(&glink->idr_lock);
+	idr_init(&glink->lcids);
+	idr_init(&glink->rcids);
+
+	atomic_set(&glink->in_reset, 1);
+	atomic_set(&glink->activity_cnt, 0);
+
+	ret = glink_spi_init_pipe("tx-descriptors", node, &glink->tx_pipe);
+	if (ret)
+		goto err_put_dev;
+
+	ret = glink_spi_init_pipe("rx-descriptors", node, &glink->rx_pipe);
+	if (ret)
+		goto err_put_dev;
+
+	ret = component_add(dev, &glink_component_ops);
+	if (ret) {
+		dev_err(dev, "component_add failed, err = %d\n", ret);
+		goto err_put_dev;
+	}
+
+	glink->ilc = ipc_log_context_create(GLINK_LOG_PAGE_CNT, glink->name, 0);
+
+	glink->rx_task = kthread_run(kthread_worker_fn, &glink->rx_worker,
+				     "spi_%s", glink->name);
+	if (IS_ERR(glink->rx_task)) {
+		ret = PTR_ERR(glink->rx_task);
+		dev_err(dev, "kthread run failed %d\n", ret);
+		goto err_put_dev;
+	}
+
+	return glink;
+
+err_put_dev:
+	dev_set_drvdata(dev, NULL);
+	put_device(dev);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(qcom_glink_spi_register);
+
+static int glink_spi_remove_device(struct device *dev, void *data)
+{
+	device_unregister(dev);
+
+	return 0;
+}
+
+static void glink_spi_remove(struct glink_spi *glink)
+{
+	struct glink_spi_pipe *rx_pipe = &glink->rx_pipe;
+	struct glink_spi_pipe *tx_pipe = &glink->tx_pipe;
+	struct glink_channel *channel;
+	int cid;
+	int ret;
+	unsigned long flags;
+
+	GLINK_INFO(glink, "\n");
+
+	atomic_set(&glink->in_reset, 1);
+	kthread_cancel_work_sync(&glink->rx_work);
+	cancel_work_sync(&glink->rx_defer_work);
+
+	ret = device_for_each_child(&glink->dev, NULL, glink_spi_remove_device);
+	if (ret)
+		dev_warn(&glink->dev, "Can't remove GLINK devices: %d\n", ret);
+
+	spin_lock_irqsave(&glink->idr_lock, flags);
+	idr_for_each_entry(&glink->lcids, channel, cid) {
+		spin_unlock_irqrestore(&glink->idr_lock, flags);
+		/* cancel_work_sync may sleep */
+		cancel_work_sync(&channel->intent_work);
+		spin_lock_irqsave(&glink->idr_lock, flags);
+	}
+	spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+	spin_lock_irqsave(&glink->idr_lock, flags);
+	/* Release any defunct local channels, waiting for close-ack */
+	idr_for_each_entry(&glink->lcids, channel, cid) {
+		kref_put(&channel->refcount, glink_spi_channel_release);
+		idr_remove(&glink->lcids, cid);
+	}
+
+	/* Release any defunct local channels, waiting for close-req */
+	idr_for_each_entry(&glink->rcids, channel, cid) {
+		kref_put(&channel->refcount, glink_spi_channel_release);
+		idr_remove(&glink->rcids, cid);
+	}
+
+	idr_destroy(&glink->lcids);
+	idr_destroy(&glink->rcids);
+	spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+	tx_pipe->fifo_base = 0;
+	tx_pipe->local_addr = 0;
+	tx_pipe->length = DEFAULT_FIFO_SIZE;
+
+	rx_pipe->fifo_base = 0;
+	rx_pipe->local_addr = 0;
+	rx_pipe->length = DEFAULT_FIFO_SIZE;
+}
+
+void qcom_glink_spi_unregister(struct glink_spi *glink)
+{
+	device_unregister(&glink->dev);
+}
+EXPORT_SYMBOL(qcom_glink_spi_unregister);
+
+MODULE_DESCRIPTION("QTI GLINK SPI Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/qcom_glink_spss.c b/drivers/rpmsg/qcom_glink_spss.c
new file mode 100644
index
000000000000..6112c77cd7c8 --- /dev/null +++ b/drivers/rpmsg/qcom_glink_spss.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qcom_glink_native.h" + +#define FIFO_FULL_RESERVE 8 +#define FIFO_ALIGNMENT 8 +#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */ + +#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR 478 +#define SPSS_TX_FIFO_SIZE SZ_2K +#define SPSS_RX_FIFO_SIZE SZ_2K + +struct glink_spss_cfg { + __le32 tx_tail; + __le32 tx_head; + __le32 tx_fifo_size; + __le32 rx_tail; + __le32 rx_head; + __le32 rx_fifo_size; +}; + +struct glink_spss_pipe { + struct qcom_glink_pipe native; + + __le32 *tail; + __le32 *head; + + void *fifo; + + int remote_pid; +}; + +#define to_spss_pipe(p) container_of(p, struct glink_spss_pipe, native) + +static void glink_spss_reset(struct qcom_glink_pipe *np) +{ + struct glink_spss_pipe *pipe = to_spss_pipe(np); + + *pipe->head = cpu_to_le32(0); + *pipe->tail = cpu_to_le32(0); +} + +static size_t glink_spss_rx_avail(struct qcom_glink_pipe *np) +{ + struct glink_spss_pipe *pipe = to_spss_pipe(np); + u32 head; + u32 tail; + + head = le32_to_cpu(*pipe->head); + tail = le32_to_cpu(*pipe->tail); + + if (head < tail) + return pipe->native.length - tail + head; + else + return head - tail; +} + +static void glink_spss_rx_peak(struct qcom_glink_pipe *np, + void *data, unsigned int offset, size_t count) +{ + struct glink_spss_pipe *pipe = to_spss_pipe(np); + size_t len; + u32 tail; + + tail = le32_to_cpu(*pipe->tail); + tail += offset; + if (tail >= pipe->native.length) + tail -= pipe->native.length; + + len = min_t(size_t, count, pipe->native.length - tail); + if (len) + memcpy_fromio(data, pipe->fifo + tail, len); + + if (len != count) + memcpy_fromio(data + len, pipe->fifo, count - len); +} + +static void glink_spss_rx_advance(struct qcom_glink_pipe *np, + size_t count) +{ + struct glink_spss_pipe *pipe = to_spss_pipe(np); + u32 tail; + + tail = le32_to_cpu(*pipe->tail); + + tail += count; + if (tail >= pipe->native.length) + tail -= pipe->native.length; + + *pipe->tail = cpu_to_le32(tail); +} + +static size_t glink_spss_tx_avail(struct qcom_glink_pipe *np) +{ + struct glink_spss_pipe *pipe = to_spss_pipe(np); + u32 head; + u32 tail; + u32 avail; + + head = le32_to_cpu(*pipe->head); + tail = le32_to_cpu(*pipe->tail); + + if (tail <= head) + avail = pipe->native.length - head + tail; + else + avail = tail - head; + + if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)) + avail = 0; + else + avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE; + + return avail; +} + +static unsigned int glink_spss_tx_write_one(struct glink_spss_pipe *pipe, + unsigned int head, + const void *data, size_t count) +{ + size_t len; + + len = min_t(size_t, count, pipe->native.length - head); + if (len) + memcpy(pipe->fifo + head, data, len); + + if (len != count) + memcpy(pipe->fifo, data + len, count - len); + + head += count; + if (head >= pipe->native.length) + head -= pipe->native.length; + + return head; +} + +static void glink_spss_tx_write(struct qcom_glink_pipe *glink_pipe, + const void *hdr, size_t hlen, + const void *data, size_t dlen) +{ + struct glink_spss_pipe *pipe = to_spss_pipe(glink_pipe); + unsigned int head; + + head = le32_to_cpu(*pipe->head); + + head = glink_spss_tx_write_one(pipe, head, hdr, hlen); + head = glink_spss_tx_write_one(pipe, head, data, dlen); + + /* 
Ensure head is always aligned to 8 bytes */ + head = ALIGN(head, 8); + if (head >= pipe->native.length) + head -= pipe->native.length; + + /* Ensure ordering of fifo and head update */ + wmb(); + + *pipe->head = cpu_to_le32(head); +} + +static void qcom_glink_spss_release(struct device *dev) +{ + kfree(dev); +} + +static int glink_spss_advertise_cfg(struct device *dev, + u32 size, phys_addr_t addr) +{ + struct device_node *np = dev->of_node; + __le64 __iomem *spss_addr; + __le32 __iomem *spss_size; + struct resource addr_r; + struct resource size_r; + int addr_idx; + int size_idx; + + addr_idx = of_property_match_string(np, "reg-names", "qcom,spss-addr"); + size_idx = of_property_match_string(np, "reg-names", "qcom,spss-size"); + if (addr_idx < 0 || size_idx < 0) { + dev_err(dev, "failed to find location registers\n"); + return -EINVAL; + } + + if (of_address_to_resource(np, addr_idx, &addr_r)) + return -ENOMEM; + spss_addr = devm_ioremap(dev, addr_r.start, resource_size(&addr_r)); + if (IS_ERR_OR_NULL(spss_addr)) { + dev_err(dev, "failed to map spss addr resource\n"); + return -ENOMEM; + } + + if (of_address_to_resource(np, size_idx, &size_r)) + return -ENOMEM; + spss_size = devm_ioremap(dev, size_r.start, resource_size(&size_r)); + if (IS_ERR_OR_NULL(spss_size)) { + dev_err(dev, "failed to map spss size resource\n"); + return -ENOMEM; + } + + *spss_addr = cpu_to_le64(addr); + *spss_size = cpu_to_le32(size); + devm_iounmap(dev, spss_addr); + devm_iounmap(dev, spss_size); + + return 0; +} + +struct qcom_glink *qcom_glink_spss_register(struct device *parent, + struct device_node *node) +{ + struct glink_spss_pipe *rx_pipe; + struct glink_spss_pipe *tx_pipe; + struct glink_spss_cfg *cfg; + struct qcom_glink *glink; + struct device *dev; + u32 remote_pid; + size_t tx_size; + size_t rx_size; + size_t size; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + dev->parent = parent; + dev->of_node = node; + dev->release = qcom_glink_spss_release; + dev_set_name(dev, "%s:%s", node->parent->name, node->name); + ret = device_register(dev); + if (ret) { + pr_err("failed to register glink edge %s\n", node->name); + return ERR_PTR(ret); + } + + ret = of_property_read_u32(dev->of_node, "qcom,remote-pid", + &remote_pid); + if (ret) { + dev_err(dev, "failed to parse qcom,remote-pid\n"); + goto err_put_dev; + } + + rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL); + tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL); + if (!rx_pipe || !tx_pipe) { + ret = -ENOMEM; + goto err_put_dev; + } + + tx_size = SPSS_TX_FIFO_SIZE; + rx_size = SPSS_RX_FIFO_SIZE; + size = tx_size + rx_size + sizeof(*cfg); + ret = qcom_smem_alloc(remote_pid, + SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, size); + if (ret && ret != -EEXIST) { + dev_err(dev, "failed to allocate glink descriptors\n"); + goto err_put_dev; + } + + cfg = qcom_smem_get(remote_pid, + SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size); + if (IS_ERR(cfg)) { + dev_err(dev, "failed to acquire xprt descriptor\n"); + ret = PTR_ERR(cfg); + goto err_put_dev; + } + if (size != tx_size + rx_size + sizeof(*cfg)) { + dev_err(dev, "glink descriptor of invalid size\n"); + ret = -EINVAL; + goto err_put_dev; + } + cfg->tx_fifo_size = cpu_to_le32(tx_size); + cfg->rx_fifo_size = cpu_to_le32(rx_size); + + tx_pipe->tail = &cfg->tx_tail; + tx_pipe->head = &cfg->tx_head; + tx_pipe->native.length = tx_size; + tx_pipe->fifo = (u8 *)cfg + sizeof(*cfg); + + rx_pipe->tail = &cfg->rx_tail; + rx_pipe->head = &cfg->rx_head; + rx_pipe->native.length 
= rx_size;
+	rx_pipe->fifo = (u8 *)cfg + sizeof(*cfg) + tx_size;
+
+	rx_pipe->native.avail = glink_spss_rx_avail;
+	rx_pipe->native.peak = glink_spss_rx_peak;
+	rx_pipe->native.advance = glink_spss_rx_advance;
+	rx_pipe->native.reset = glink_spss_reset;
+	rx_pipe->remote_pid = remote_pid;
+
+	tx_pipe->native.avail = glink_spss_tx_avail;
+	tx_pipe->native.write = glink_spss_tx_write;
+	tx_pipe->native.reset = glink_spss_reset;
+	tx_pipe->remote_pid = remote_pid;
+
+	*rx_pipe->tail = 0;
+	*tx_pipe->head = 0;
+
+	ret = glink_spss_advertise_cfg(dev, size, qcom_smem_virt_to_phys(cfg));
+	if (ret)
+		goto err_put_dev;
+
+	glink = qcom_glink_native_probe(dev,
+					GLINK_FEATURE_INTENT_REUSE,
+					&rx_pipe->native, &tx_pipe->native,
+					false);
+	if (IS_ERR(glink)) {
+		ret = PTR_ERR(glink);
+		goto err_put_dev;
+	}
+
+	return glink;
+
+err_put_dev:
+	put_device(dev);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(qcom_glink_spss_register);
+
+void qcom_glink_spss_unregister(struct qcom_glink *glink)
+{
+	qcom_glink_native_remove(glink);
+	qcom_glink_native_unregister(glink);
+}
+EXPORT_SYMBOL(qcom_glink_spss_unregister);
+
+MODULE_DESCRIPTION("QTI GLINK SPSS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 8122807db380..23513769ffcf 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -4,6 +4,7 @@
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Ohad Ben-Cohen
 * Brian Swetland
@@ -81,7 +82,7 @@ EXPORT_SYMBOL(rpmsg_create_ept);
 */
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
-	if (ept)
+	if (ept && ept->ops)
		ept->ops->destroy_ept(ept);
}
EXPORT_SYMBOL(rpmsg_destroy_ept);
@@ -283,6 +284,43 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
}
EXPORT_SYMBOL(rpmsg_trysend_offchannel);

+/**
+ * rpmsg_get_sigs() - get the signals for this endpoint
+ * @ept: the rpmsg endpoint
+ * @lsigs: location to return the local serial signals bitmask
+ * @rsigs: location to return the remote serial signals bitmask
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_get_sigs(struct rpmsg_endpoint *ept, u32 *lsigs, u32 *rsigs)
+{
+	if (WARN_ON(!ept))
+		return -EINVAL;
+	if (!ept->ops->get_sigs)
+		return -ENXIO;
+
+	return ept->ops->get_sigs(ept, lsigs, rsigs);
+}
+EXPORT_SYMBOL(rpmsg_get_sigs);
+
+/**
+ * rpmsg_set_sigs() - set the remote signals for this endpoint
+ * @ept: the rpmsg endpoint
+ * @sigs: serial signals bitmask
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_set_sigs(struct rpmsg_endpoint *ept, u32 sigs)
+{
+	if (WARN_ON(!ept))
+		return -EINVAL;
+	if (!ept->ops->set_sigs)
+		return -ENXIO;
+
+	return ept->ops->set_sigs(ept, sigs);
+}
+EXPORT_SYMBOL(rpmsg_set_sigs);
+
 /*
 * match an rpmsg channel with a channel info struct.
 * this is used to make sure we're not creating rpmsg devices for channels
@@ -468,6 +505,10 @@ static int rpmsg_dev_probe(struct device *dev)
 		rpdev->ept = ept;
 		rpdev->src = ept->addr;
+
+		if (rpdrv->signals)
+			ept->sig_cb = rpdrv->signals;
+
 	}

 	err = rpdrv->probe(rpdev);
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 0d791c30b7ea..195c3c55a0b1 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -4,6 +4,7 @@
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
* * Ohad Ben-Cohen * Brian Swetland @@ -46,6 +47,8 @@ struct rpmsg_device_ops { * @trysend: see @rpmsg_trysend(), required * @trysendto: see @rpmsg_trysendto(), optional * @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional + * @get_sigs: see @rpmsg_get_sigs(), optional + * @set_sigs: see @rpmsg_set_sigs(), optional * * Indirection table for the operations that a rpmsg backend should implement. * In addition to @destroy_ept, the backend must at least implement @send and @@ -65,6 +68,8 @@ struct rpmsg_endpoint_ops { void *data, int len); __poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait); + int (*get_sigs)(struct rpmsg_endpoint *ept, u32 *lsigs, u32 *rsigs); + int (*set_sigs)(struct rpmsg_endpoint *ept, u32 sigs); }; int rpmsg_register_device(struct rpmsg_device *rpdev); diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 9fe156d1c018..397af4615407 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -4,6 +4,7 @@ * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * All rights reserved. */ @@ -60,12 +61,14 @@ struct rpmsg_device { }; typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32); +typedef int (*rpmsg_rx_sig_t)(struct rpmsg_device *, u32, u32); /** * struct rpmsg_endpoint - binds a local rpmsg address to its user * @rpdev: rpmsg channel device * @refcount: when this drops to zero, the ept is deallocated * @cb: rx callback handler + * @sig_cb: rx serial signal handler * @cb_lock: must be taken before accessing/changing @cb * @addr: local rpmsg address * @priv: private data for the driver's use @@ -88,6 +91,7 @@ struct rpmsg_endpoint { struct rpmsg_device *rpdev; struct kref refcount; rpmsg_rx_cb_t cb; + rpmsg_rx_sig_t sig_cb; struct mutex cb_lock; u32 addr; void *priv; @@ -102,6 +106,7 @@ struct rpmsg_endpoint { * @probe: invoked when a matching rpmsg channel (i.e. 
device) is found * @remove: invoked when the rpmsg channel is removed * @callback: invoked when an inbound message is received on the channel + * @signals: invoked when a serial signal change is received on the channel */ struct rpmsg_driver { struct device_driver drv; @@ -109,6 +114,7 @@ struct rpmsg_driver { int (*probe)(struct rpmsg_device *dev); void (*remove)(struct rpmsg_device *dev); int (*callback)(struct rpmsg_device *, void *, int, void *, u32); + int (*signals)(struct rpmsg_device *rpdev, u32 old, u32 new); }; #if IS_ENABLED(CONFIG_RPMSG) @@ -135,6 +141,9 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait); +int rpmsg_get_sigs(struct rpmsg_endpoint *ept, u32 *lsigs, u32 *rsigs); +int rpmsg_set_sigs(struct rpmsg_endpoint *ept, u32 sigs); + #else static inline int register_rpmsg_device(struct rpmsg_device *dev) @@ -242,6 +251,23 @@ static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, return 0; } +static inline int rpmsg_get_sigs(struct rpmsg_endpoint *ept, u32 *lsigs, + u32 *rsigs) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_set_sigs(struct rpmsg_endpoint *ept, u32 sigs) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + #endif /* IS_ENABLED(CONFIG_RPMSG) */ /* use a macro to avoid include chaining to get THIS_MODULE */ diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h index 96e26d94719f..0912adf7761e 100644 --- a/include/linux/rpmsg/qcom_glink.h +++ b/include/linux/rpmsg/qcom_glink.h @@ -6,6 +6,7 @@ #include struct qcom_glink; +struct glink_spi; #if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM) @@ -26,4 +27,44 @@ static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {} #endif + +#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SPSS) + +struct qcom_glink *qcom_glink_spss_register(struct device *parent, + struct device_node *node); +void qcom_glink_spss_unregister(struct qcom_glink *glink); + +#else + +static inline struct qcom_glink * +qcom_glink_spss_register(struct device *parent, + struct device_node *node) +{ + return NULL; +} + +static inline void qcom_glink_spss_unregister(struct qcom_glink *glink) {} + +#endif + + +#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SPI) + +struct glink_spi *qcom_glink_spi_register(struct device *parent, + struct device_node *node); +void qcom_glink_spi_unregister(struct glink_spi *glink); + +#else + +static inline struct glink_spi * +qcom_glink_spi_register(struct device *parent, struct device_node *node) +{ + return NULL; +} + +static inline void qcom_glink_spi_unregister(struct glink_spi *glink) {} + +#endif + + #endif
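
For reviewers, a minimal sketch of how a client driver could exercise the signal
interface added above. Everything here is hypothetical except rpmsg_get_sigs(),
rpmsg_set_sigs() and the .signals callback introduced by this series: the driver
name, the channel name and the MY_SIG_DTR bit are invented for illustration.

	#include <linux/module.h>
	#include <linux/rpmsg.h>

	#define MY_SIG_DTR	0x001	/* hypothetical signal bit */

	/* Invoked by the transport whenever the remote's signals change */
	static int my_signals_cb(struct rpmsg_device *rpdev, u32 old, u32 new)
	{
		dev_info(&rpdev->dev, "signals 0x%x -> 0x%x\n", old, new);
		return 0;
	}

	static int my_probe(struct rpmsg_device *rpdev)
	{
		u32 lsigs, rsigs;
		int ret;

		/* Assert a local signal towards the remote side */
		ret = rpmsg_set_sigs(rpdev->ept, MY_SIG_DTR);
		if (ret)
			return ret;

		/* Read back both the local and remote signal state */
		ret = rpmsg_get_sigs(rpdev->ept, &lsigs, &rsigs);
		if (!ret)
			dev_info(&rpdev->dev, "lsigs 0x%x rsigs 0x%x\n",
				 lsigs, rsigs);
		return 0;
	}

	static void my_remove(struct rpmsg_device *rpdev)
	{
	}

	static const struct rpmsg_device_id my_id_table[] = {
		{ .name = "my_sig_channel" },	/* hypothetical channel */
		{ },
	};

	static struct rpmsg_driver my_rpmsg_driver = {
		.probe = my_probe,
		.remove = my_remove,
		.signals = my_signals_cb,
		.id_table = my_id_table,
		.drv = {
			.name = "my_sig_client",
		},
	};
	module_rpmsg_driver(my_rpmsg_driver);

	MODULE_LICENSE("GPL v2");

Because rpmsg_dev_probe() wires rpdrv->signals into ept->sig_cb before probe
runs, the callback is live for the life of the endpoint; the GLINK SPI transport
delivers it from glink_spi_handle_signals() with the previous and new remote
signal masks.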