// SPDX-License-Identifier: GPL-2.0
/*
 * Remote Processor Procedure Call Driver
 *
 * Copyright (C) 2012-2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Erik Rainey <erik.rainey@ti.com>
 *	Suman Anna <s-anna@ti.com>
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/fdtable.h>
#include <linux/remoteproc.h>
#include <linux/rpmsg.h>
#include <linux/rpmsg_rpc.h>
#include <linux/rpmsg/virtio_rpmsg.h>
#include <linux/sched/signal.h>

#include "rpmsg_rpc_internal.h"

#define RPPC_MAX_DEVICES	(8)
#define RPPC_MAX_REG_FDS	(10)

#define RPPC_SIG_NUM_PARAM(sig) ((sig).num_param - 1)

/* TODO: remove these fields */
#define RPPC_JOBID_DISCRETE	(0)
#define RPPC_POOLID_DEFAULT	(0x8000)

static struct class *rppc_class;
static dev_t rppc_dev;

/* store all remote rpc connection services (usually one per remoteproc) */
static DEFINE_IDR(rppc_devices);
static DEFINE_MUTEX(rppc_devices_lock);

/*
 * Retrieve the rproc instance so that it can be used for performing
 * address translations
 */
static inline struct rproc *rpdev_to_rproc(struct rpmsg_device *rpdev)
{
	return rproc_get_by_child(&rpdev->dev);
}

/*
 * A wrapper function to translate local physical addresses to the remote core
 * device addresses (virtual addresses that code on the remote processor can
 * use directly).
 *
 * XXX: Fix this to return negative values on errors to follow normal kernel
 * conventions, and since 0 can also be a valid remote processor address
 *
 * Returns a remote processor device address on success, 0 otherwise
 */
dev_addr_t rppc_local_to_remote_da(struct rppc_instance *rpc, phys_addr_t pa)
{
	int ret;
	struct rproc *rproc;
	u64 da = 0;
	dev_addr_t rda;
	struct device *dev = rpc->dev;

	if (mutex_lock_interruptible(&rpc->rppcdev->lock))
		return 0;

	rproc = rpdev_to_rproc(rpc->rppcdev->rpdev);
	if (!rproc) {
		dev_err(dev, "error getting rproc for rpdev %p\n",
			rpc->rppcdev->rpdev);
	} else {
		ret = rproc_pa_to_da(rproc, pa, &da);
		if (ret) {
			dev_err(dev, "error from rproc_pa_to_da, rproc = %p, pa = %pa ret = %d\n",
				rproc, &pa, ret);
		}
	}
	rda = (dev_addr_t)da;

	mutex_unlock(&rpc->rppcdev->lock);

	return rda;
}
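
/*
 * debug helper: decode a raw message buffer and dump its header and,
 * depending on the message type, the instance handle or function-call
 * packet payload (including each parameter) at debug log level
 */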
static void rppc_print_msg(struct rppc_instance *rpc, char *prefix,
			   char buffer[512])
{
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)buffer;
	struct rppc_instance_handle *hdl = NULL;
	struct rppc_query_function *info = NULL;
	struct rppc_packet *packet = NULL;
	struct rppc_param_data *param = NULL;
	struct device *dev = rpc->dev;
	u32 i = 0, paramsz = sizeof(*param);

	dev_dbg(dev, "%s HDR: msg_type = %d msg_len = %d\n",
		prefix, hdr->msg_type, hdr->msg_len);

	switch (hdr->msg_type) {
	case RPPC_MSGTYPE_CREATE_RESP:
	case RPPC_MSGTYPE_DELETE_RESP:
		hdl = RPPC_PAYLOAD(buffer, rppc_instance_handle);
		dev_dbg(dev, "%s endpoint = %d status = %d\n",
			prefix, hdl->endpoint_address, hdl->status);
		break;
	case RPPC_MSGTYPE_FUNCTION_INFO:
		info = RPPC_PAYLOAD(buffer, rppc_query_function);
		dev_dbg(dev, "%s (info not yet implemented)\n", prefix);
		break;
	case RPPC_MSGTYPE_FUNCTION_CALL:
		packet = RPPC_PAYLOAD(buffer, rppc_packet);
		dev_dbg(dev, "%s PACKET: desc = %04x msg_id = %04x flags = %08x func = 0x%08x result = %d size = %u\n",
			prefix, packet->desc, packet->msg_id,
			packet->flags, packet->fxn_id,
			packet->result, packet->data_size);
		param = (struct rppc_param_data *)packet->data;
		for (i = 0; i < (packet->data_size / paramsz); i++) {
			dev_dbg(dev, "%s param[%u] size = %zu data = %zu (0x%08zx)\n",
				prefix, i, param[i].size, param[i].data,
				param[i].data);
		}
		break;
	default:
		break;
	}
}

/* free any outstanding function calls */
static void rppc_delete_fxns(struct rppc_instance *rpc)
{
	struct rppc_function_list *pos, *n;

	if (!list_empty(&rpc->fxn_list)) {
		mutex_lock(&rpc->lock);
		list_for_each_entry_safe(pos, n, &rpc->fxn_list, list) {
			list_del(&pos->list);
			kfree(pos->function);
			kfree(pos);
		}
		mutex_unlock(&rpc->lock);
	}
}
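
/*
 * find and remove the function-call record matching msg_id from the list
 * of outstanding calls; returns the stored rppc_function (now owned by the
 * caller) or NULL if no matching call is pending
 */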
static
struct rppc_function *rppc_find_fxn(struct rppc_instance *rpc, u16 msg_id)
{
	struct rppc_function *function = NULL;
	struct rppc_function_list *pos, *n;
	struct device *dev = rpc->dev;

	mutex_lock(&rpc->lock);
	list_for_each_entry_safe(pos, n, &rpc->fxn_list, list) {
		dev_dbg(dev, "looking for msg %u, found msg %u\n",
			msg_id, pos->msg_id);
		if (pos->msg_id == msg_id) {
			function = pos->function;
			list_del(&pos->list);
			kfree(pos);
			break;
		}
	}
	mutex_unlock(&rpc->lock);

	return function;
}
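
/*
 * track an outstanding function call by its message id until the response
 * arrives and rppc_read() claims it again through rppc_find_fxn()
 */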
static int rppc_add_fxn(struct rppc_instance *rpc,
			struct rppc_function *function, u16 msg_id)
{
	struct rppc_function_list *fxn = NULL;
	struct device *dev = rpc->dev;

	fxn = kzalloc(sizeof(*fxn), GFP_KERNEL);
	if (!fxn)
		return -ENOMEM;

	fxn->function = function;
	fxn->msg_id = msg_id;
	mutex_lock(&rpc->lock);
	list_add(&fxn->list, &rpc->fxn_list);
	mutex_unlock(&rpc->lock);
	dev_dbg(dev, "added msg id %u to list\n", msg_id);

	return 0;
}
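
/*
 * process a connection (create) response: validate the message length and,
 * on success, record the remote endpoint address and mark the instance as
 * connected before waking up the caller blocked in rppc_connect()
 */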
static
void rppc_handle_create_resp(struct rppc_instance *rpc, char *data, int len)
{
	struct device *dev = rpc->dev;
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)data;
	struct rppc_instance_handle *hdl;
	u32 exp_len = sizeof(*hdl) + sizeof(*hdr);

	if (len != exp_len) {
		dev_err(dev, "invalid response message length %d (expected %d bytes)",
			len, exp_len);
		rpc->state = RPPC_STATE_STALE;
		return;
	}

	hdl = RPPC_PAYLOAD(data, rppc_instance_handle);

	mutex_lock(&rpc->lock);
	if (rpc->state != RPPC_STATE_STALE && hdl->status == 0) {
		rpc->dst = hdl->endpoint_address;
		rpc->state = RPPC_STATE_CONNECTED;
	} else {
		rpc->state = RPPC_STATE_STALE;
	}
	rpc->in_transition = 0;
	dev_dbg(dev, "creation response: status %d addr 0x%x\n",
		hdl->status, hdl->endpoint_address);

	complete(&rpc->reply_arrived);
	mutex_unlock(&rpc->lock);
}
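
/*
 * process a disconnection (delete) response: validate the message and move
 * the instance back to the disconnected state, waking up the caller blocked
 * in rppc_disconnect()
 */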
static
void rppc_handle_delete_resp(struct rppc_instance *rpc, char *data, int len)
{
	struct device *dev = rpc->dev;
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)data;
	struct rppc_instance_handle *hdl;
	u32 exp_len = sizeof(*hdl) + sizeof(*hdr);

	if (len != exp_len) {
		dev_err(dev, "invalid response message length %d (expected %d bytes)",
			len, exp_len);
		rpc->state = RPPC_STATE_STALE;
		return;
	}
	if (hdr->msg_len != sizeof(*hdl)) {
		dev_err(dev, "disconnect message was incorrect size!\n");
		rpc->state = RPPC_STATE_STALE;
		return;
	}

	hdl = RPPC_PAYLOAD(data, rppc_instance_handle);
	dev_dbg(dev, "deletion response: status %d addr 0x%x\n",
		hdl->status, hdl->endpoint_address);
	mutex_lock(&rpc->lock);
	rpc->dst = 0;
	rpc->state = RPPC_STATE_DISCONNECTED;
	rpc->in_transition = 0;
	complete(&rpc->reply_arrived);
	mutex_unlock(&rpc->lock);
}

/*
 * store the received message and wake up any blocked processes waiting
 * for new data. The allocated buffer is freed once user space reads the
 * packet.
 */
static void rppc_handle_fxn_resp(struct rppc_instance *rpc, char *data, int len)
{
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)data;
	struct sk_buff *skb;
	char *skbdata;

	/* TODO: need to check the response length? */
	skb = alloc_skb(hdr->msg_len, GFP_KERNEL);
	if (!skb)
		return;
	skbdata = skb_put(skb, hdr->msg_len);
	memcpy(skbdata, hdr->msg_data, hdr->msg_len);

	mutex_lock(&rpc->lock);
	skb_queue_tail(&rpc->queue, skb);
	mutex_unlock(&rpc->lock);

	wake_up_interruptible(&rpc->readq);
}

/*
 * callback function for processing the different responses
 * from the remote processor on a particular rpmsg channel
 * instance.
 */
static int rppc_cb(struct rpmsg_device *rpdev,
		   void *data, int len, void *priv, u32 src)
{
	struct rppc_msg_header *hdr = data;
	struct rppc_instance *rpc = priv;
	struct device *dev = rpc->dev;
	char *buf = (char *)data;

	dev_dbg(dev, "<== incoming msg src %d len %d msg_type %d msg_len %d\n",
		src, len, hdr->msg_type, hdr->msg_len);
	rppc_print_msg(rpc, "RX:", buf);

	if (len <= sizeof(*hdr)) {
		dev_err(dev, "message truncated\n");
		rpc->state = RPPC_STATE_STALE;
		return -EINVAL;
	}

	switch (hdr->msg_type) {
	case RPPC_MSGTYPE_CREATE_RESP:
		rppc_handle_create_resp(rpc, data, len);
		break;
	case RPPC_MSGTYPE_DELETE_RESP:
		rppc_handle_delete_resp(rpc, data, len);
		break;
	case RPPC_MSGTYPE_FUNCTION_CALL:
	case RPPC_MSGTYPE_FUNCTION_RET:
		rppc_handle_fxn_resp(rpc, data, len);
		break;
	default:
		dev_warn(dev, "unexpected msg type: %d\n", hdr->msg_type);
		break;
	}

	return 0;
}

/*
 * send a connection request to the remote rpc connection service. Use
 * the new local address created during .open for this instance as the
 * source address to complete the connection.
 */
static int rppc_connect(struct rppc_instance *rpc,
			struct rppc_create_instance *connect)
{
	int ret = 0;
	u32 len = 0;
	char kbuf[512];
	struct rppc_device *rppcdev = rpc->rppcdev;
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)&kbuf[0];

	if (rpc->state == RPPC_STATE_CONNECTED) {
		dev_dbg(rpc->dev, "endpoint already connected\n");
		return -EISCONN;
	}

	hdr->msg_type = RPPC_MSGTYPE_CREATE_REQ;
	hdr->msg_len = sizeof(*connect);
	memcpy(hdr->msg_data, connect, hdr->msg_len);
	len = sizeof(struct rppc_msg_header) + hdr->msg_len;

	init_completion(&rpc->reply_arrived);
	rpc->in_transition = 1;
	ret = rpmsg_send_offchannel(rppcdev->rpdev->ept, rpc->ept->addr,
				    rppcdev->rpdev->dst, (char *)kbuf, len);
	if (ret) {
		dev_err(rpc->dev, "rpmsg_send failed: %d\n", ret);
		return ret;
	}

	ret = wait_for_completion_interruptible_timeout(&rpc->reply_arrived,
							msecs_to_jiffies(5000));
	if (rpc->state == RPPC_STATE_CONNECTED)
		return 0;

	if (rpc->state == RPPC_STATE_STALE)
		return -ENXIO;

	if (ret > 0) {
		dev_err(rpc->dev, "premature wakeup: %d\n", ret);
		return -EIO;
	}

	return -ETIMEDOUT;
}

static void rppc_disconnect(struct rppc_instance *rpc)
{
	int ret;
	size_t len;
	char kbuf[512];
	struct rppc_device *rppcdev = rpc->rppcdev;
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)&kbuf[0];
	struct rppc_instance_handle *handle =
				RPPC_PAYLOAD(kbuf, rppc_instance_handle);

	if (rpc->state != RPPC_STATE_CONNECTED)
		return;

	hdr->msg_type = RPPC_MSGTYPE_DELETE_REQ;
	hdr->msg_len = sizeof(u32);
	handle->endpoint_address = rpc->dst;
	handle->status = 0;
	len = sizeof(struct rppc_msg_header) + hdr->msg_len;

	dev_dbg(rpc->dev, "disconnecting from RPC service at %d\n",
		rpc->dst);
	ret = rpmsg_send_offchannel(rppcdev->rpdev->ept, rpc->ept->addr,
				    rppcdev->rpdev->dst, kbuf, len);
	if (ret)
		dev_err(rpc->dev, "rpmsg_send failed: %d\n", ret);

	/*
	 * wait (interruptibly) for the delete response so the caller does
	 * not tear down the local endpoint while the remote side is still
	 * replying
	 */
	wait_for_completion_interruptible(&rpc->reply_arrived);
}
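
/*
 * handle the RPPC_IOC_BUFREGISTER ioctl: register the dma-buf fds supplied
 * by user space (up to RPPC_MAX_REG_FDS at a time) with this rpc instance
 * so their addresses can be translated in later function calls
 */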
static int rppc_register_buffers(struct rppc_instance *rpc,
				 unsigned long arg)
{
	struct rppc_buf_fds data;
	int *fds = NULL;
	struct rppc_dma_buf **bufs = NULL;
	struct rppc_dma_buf *tmp;
	int i = 0, ret = 0;

	if (copy_from_user(&data, (char __user *)arg, sizeof(data)))
		return -EFAULT;

	/* impose a maximum number of buffers for now */
	if (data.num > RPPC_MAX_REG_FDS)
		return -EINVAL;

	fds = kcalloc(data.num, sizeof(*fds), GFP_KERNEL);
	if (!fds)
		return -ENOMEM;

	if (copy_from_user(fds, (char __user *)data.fds,
			   sizeof(*fds) * data.num)) {
		ret = -EFAULT;
		goto free_fds;
	}

	for (i = 0; i < data.num; i++) {
		rcu_read_lock();
		if (!fcheck(fds[i])) {
			rcu_read_unlock();
			ret = -EBADF;
			goto free_fds;
		}
		rcu_read_unlock();

		tmp = rppc_find_dmabuf(rpc, fds[i]);
		if (!IS_ERR_OR_NULL(tmp)) {
			ret = -EEXIST;
			goto free_fds;
		}
	}

	bufs = kcalloc(data.num, sizeof(*bufs), GFP_KERNEL);
	if (!bufs) {
		ret = -ENOMEM;
		goto free_fds;
	}

	for (i = 0; i < data.num; i++) {
		bufs[i] = rppc_alloc_dmabuf(rpc, fds[i], false);
		if (IS_ERR(bufs[i])) {
			ret = PTR_ERR(bufs[i]);
			break;
		}
	}
	if (i == data.num)
		goto free_bufs;

	for (i -= 1; i >= 0; i--)
		rppc_free_dmabuf(bufs[i]->id, bufs[i], rpc);

free_bufs:
	kfree(bufs);
free_fds:
	kfree(fds);
	return ret;
}
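
/*
 * handle the RPPC_IOC_BUFUNREGISTER ioctl: look up the previously
 * registered dma-buf fds and release them from this rpc instance
 */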
static int rppc_unregister_buffers(struct rppc_instance *rpc,
				   unsigned long arg)
{
	struct rppc_buf_fds data;
	int *fds = NULL;
	struct rppc_dma_buf **bufs = NULL;
	int i = 0, ret = 0;

	if (copy_from_user(&data, (char __user *)arg, sizeof(data)))
		return -EFAULT;

	/* impose a maximum number of buffers for now */
	if (data.num > RPPC_MAX_REG_FDS)
		return -EINVAL;

	fds = kcalloc(data.num, sizeof(*fds), GFP_KERNEL);
	if (!fds)
		return -ENOMEM;

	if (copy_from_user(fds, (char __user *)data.fds,
			   sizeof(*fds) * data.num)) {
		ret = -EFAULT;
		goto free_fds;
	}

	bufs = kcalloc(data.num, sizeof(*bufs), GFP_KERNEL);
	if (!bufs) {
		ret = -ENOMEM;
		goto free_fds;
	}

	for (i = 0; i < data.num; i++) {
		rcu_read_lock();
		if (!fcheck(fds[i])) {
			rcu_read_unlock();
			ret = -EBADF;
			goto free_bufs;
		}
		rcu_read_unlock();

		bufs[i] = rppc_find_dmabuf(rpc, fds[i]);
		if (IS_ERR_OR_NULL(bufs[i])) {
			ret = -ENOENT;
			goto free_bufs;
		}
	}

	for (i = 0; i < data.num; i++)
		rppc_free_dmabuf(bufs[i]->id, bufs[i], rpc);

free_bufs:
	kfree(bufs);
free_fds:
	kfree(fds);
	return ret;
}

/*
 * create a new rpc instance that a user-space client can use to invoke
 * remote functions. A new local address is created and tied to this
 * instance to uniquely identify the messages communicated by this
 * instance with the remote side.
 *
 * The function blocks if there is no underlying connection manager channel,
 * unless the device was opened with the O_NONBLOCK flag.
 */
static int rppc_open(struct inode *inode, struct file *filp)
{
	struct rppc_device *rppcdev;
	struct rppc_instance *rpc;
	struct rpmsg_channel_info chinfo = {};

	rppcdev = container_of(inode->i_cdev, struct rppc_device, cdev);

	if (!rppcdev->rpdev)
		if ((filp->f_flags & O_NONBLOCK) ||
		    wait_for_completion_interruptible(&rppcdev->comp))
			return -EBUSY;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return -ENOMEM;

	mutex_init(&rpc->lock);
	skb_queue_head_init(&rpc->queue);
	init_waitqueue_head(&rpc->readq);
	INIT_LIST_HEAD(&rpc->fxn_list);
	idr_init(&rpc->dma_idr);
	rpc->in_transition = 0;
	rpc->msg_id = 0;
	rpc->state = RPPC_STATE_DISCONNECTED;
	rpc->rppcdev = rppcdev;

	rpc->dev = get_device(rppcdev->dev);
	chinfo.src = RPMSG_ADDR_ANY;
	chinfo.dst = RPMSG_ADDR_ANY;
	rpc->ept = rpmsg_create_ept(rppcdev->rpdev, rppc_cb, rpc, chinfo);
	if (!rpc->ept) {
		dev_err(rpc->dev, "create ept failed\n");
		put_device(rpc->dev);
		kfree(rpc);
		return -ENOMEM;
	}
	filp->private_data = rpc;

	mutex_lock(&rppcdev->lock);
	list_add(&rpc->list, &rppcdev->instances);
	mutex_unlock(&rppcdev->lock);

	dev_dbg(rpc->dev, "local addr assigned: 0x%x\n", rpc->ept->addr);

	return 0;
}

/*
 * release and free all the resources associated with a particular rpc
 * instance. This includes the data structures maintaining the current
 * outstanding function invocations, and all the buffers registered for
 * use with this instance. Send a disconnect message and clean up the
 * local endpoint only if the instance is in a normal state, with the
 * remote connection manager functional.
 */
static int rppc_release(struct inode *inode, struct file *filp)
{
	struct rppc_instance *rpc = filp->private_data;
	struct rppc_device *rppcdev = rpc->rppcdev;
	struct sk_buff *skb = NULL;

	dev_dbg(rpc->dev, "releasing instance %p, in state %d\n", rpc,
		rpc->state);

	if (rpc->state != RPPC_STATE_STALE) {
		if (rpc->ept) {
			rppc_disconnect(rpc);
			rpmsg_destroy_ept(rpc->ept);
			rpc->ept = NULL;
		}
	}

	rppc_delete_fxns(rpc);

	while (!skb_queue_empty(&rpc->queue)) {
		skb = skb_dequeue(&rpc->queue);
		kfree_skb(skb);
	}

	mutex_lock(&rpc->lock);
	idr_for_each(&rpc->dma_idr, rppc_free_dmabuf, rpc);
	idr_destroy(&rpc->dma_idr);
	mutex_unlock(&rpc->lock);

	mutex_lock(&rppcdev->lock);
	list_del(&rpc->list);
	mutex_unlock(&rppcdev->lock);

	dev_dbg(rpc->dev, "instance %p has been deleted!\n", rpc);
	if (list_empty(&rppcdev->instances))
		dev_dbg(rpc->dev, "all instances have been removed!\n");

	put_device(rpc->dev);
	kfree(rpc);
	return 0;
}
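
/*
 * ioctl entry point: RPPC_IOC_CREATE connects this instance to the remote
 * service, while RPPC_IOC_BUFREGISTER/RPPC_IOC_BUFUNREGISTER manage the
 * dma-buf fds whose addresses need translation during remote calls
 */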
static long rppc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct rppc_instance *rpc = filp->private_data;
	struct rppc_create_instance connect;
	int ret = 0;

	dev_dbg(rpc->dev, "%s: cmd %d, arg 0x%lx\n", __func__, cmd, arg);

	if (_IOC_TYPE(cmd) != RPPC_IOC_MAGIC)
		return -ENOTTY;

	if (_IOC_NR(cmd) > RPPC_IOC_MAXNR)
		return -ENOTTY;

	switch (cmd) {
	case RPPC_IOC_CREATE:
		ret = copy_from_user(&connect, (char __user *)arg,
				     sizeof(connect));
		if (ret) {
			dev_err(rpc->dev, "%s: %d: copy_from_user fail: %d\n",
				__func__, _IOC_NR(cmd), ret);
			ret = -EFAULT;
		} else {
			connect.name[sizeof(connect.name) - 1] = '\0';
			ret = rppc_connect(rpc, &connect);
		}
		break;
	case RPPC_IOC_BUFREGISTER:
		ret = rppc_register_buffers(rpc, arg);
		break;
	case RPPC_IOC_BUFUNREGISTER:
		ret = rppc_unregister_buffers(rpc, arg);
		break;
	default:
		dev_err(rpc->dev, "unhandled ioctl cmd: %d\n", cmd);
		break;
	}

	return ret;
}
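
/*
 * read returns exactly one rppc_function_return record per completed remote
 * call: block (unless O_NONBLOCK) until a response is queued, translate any
 * remote pointers back for the caller, and copy out the function id and
 * result status
 */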
static ssize_t rppc_read(struct file *filp, char __user *buf, size_t len,
			 loff_t *offp)
{
	struct rppc_instance *rpc = filp->private_data;
	struct rppc_packet *packet = NULL;
	struct rppc_param_data *parameters = NULL;
	struct rppc_function *function = NULL;
	struct rppc_function_return returned;
	struct sk_buff *skb = NULL;
	int ret = 0;
	int use = sizeof(returned);
	DEFINE_WAIT(wait);

	if (mutex_lock_interruptible(&rpc->lock))
		return -ERESTARTSYS;

	/* instance is invalid */
	if (rpc->state == RPPC_STATE_STALE) {
		mutex_unlock(&rpc->lock);
		return -ENXIO;
	}

	/* not yet connected to the remote side */
	if (rpc->state == RPPC_STATE_DISCONNECTED) {
		mutex_unlock(&rpc->lock);
		return -ENOTCONN;
	}

	if (len > use) {
		mutex_unlock(&rpc->lock);
		return -EOVERFLOW;
	}
	if (len < use) {
		mutex_unlock(&rpc->lock);
		return -EINVAL;
	}

	/* TODO: Use the much simpler wait_event_interruptible API */
	while (skb_queue_empty(&rpc->queue)) {
		mutex_unlock(&rpc->lock);
		/* non-blocking requested ? return now */
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		prepare_to_wait_exclusive(&rpc->readq, &wait,
					  TASK_INTERRUPTIBLE);
		if (skb_queue_empty(&rpc->queue) &&
		    rpc->state != RPPC_STATE_STALE)
			schedule();
		finish_wait(&rpc->readq, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;

		ret = mutex_lock_interruptible(&rpc->lock);
		if (ret < 0)
			return -ERESTARTSYS;

		if (rpc->state == RPPC_STATE_STALE) {
			mutex_unlock(&rpc->lock);
			return -ENXIO;
		}

		/* make sure state is sane while we waited */
		if (rpc->state != RPPC_STATE_CONNECTED) {
			mutex_unlock(&rpc->lock);
			ret = -EIO;
			goto out;
		}
	}

	skb = skb_dequeue(&rpc->queue);
	if (WARN_ON(!skb)) {
		mutex_unlock(&rpc->lock);
		ret = -EIO;
		goto out;
	}

	mutex_unlock(&rpc->lock);

	packet = (struct rppc_packet *)skb->data;
	parameters = (struct rppc_param_data *)packet->data;

	/*
	 * pull the function memory from the list and untranslate
	 * the remote device address pointers in the packet back
	 * to MPU pointers.
	 */
	function = rppc_find_fxn(rpc, packet->msg_id);
	if (function && function->num_translations > 0) {
		ret = rppc_xlate_buffers(rpc, function, RPPC_RPA_TO_UVA);
		if (ret < 0) {
			dev_err(rpc->dev, "failed to translate back pointers from remote core!\n");
			goto failure;
		}
	}
	returned.fxn_id = RPPC_FXN_MASK(packet->fxn_id);
	returned.status = packet->result;

	if (copy_to_user(buf, &returned, use)) {
		dev_err(rpc->dev, "%s: copy_to_user fail\n", __func__);
		ret = -EFAULT;
	} else {
		ret = use;
	}

failure:
	kfree(function);
	kfree_skb(skb);
out:
	return ret;
}
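
/*
 * write marshals one rppc_function descriptor from user space into an rpmsg
 * packet: validate it against the signature published by the remote side,
 * translate pointer parameters into remote device addresses, queue the call
 * on the outstanding list and send it to the connected remote endpoint
 */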
static ssize_t rppc_write(struct file *filp, const char __user *ubuf,
			  size_t len, loff_t *offp)
{
	struct rppc_instance *rpc = filp->private_data;
	struct rppc_device *rppcdev = rpc->rppcdev;
	struct device *dev = rpc->dev;
	struct rppc_msg_header *hdr = NULL;
	struct rppc_function *function = NULL;
	struct rppc_packet *packet = NULL;
	struct rppc_param_data *parameters = NULL;
	char kbuf[512];
	int use = 0, ret = 0, param = 0;
	u32 sig_idx = 0;
	u32 sig_prm = 0;
	static u32 rppc_atomic_size[RPPC_PARAM_ATOMIC_MAX] = {
		0, /* RPPC_PARAM_VOID */
		1, /* RPPC_PARAM_S08 */
		1, /* RPPC_PARAM_U08 */
		2, /* RPPC_PARAM_S16 */
		2, /* RPPC_PARAM_U16 */
		4, /* RPPC_PARAM_S32 */
		4, /* RPPC_PARAM_U32 */
		8, /* RPPC_PARAM_S64 */
		8  /* RPPC_PARAM_U64 */
	};

	if (len < sizeof(*function)) {
		ret = -ENOTSUPP;
		goto failure;
	}

	if (len > (sizeof(*function) + RPPC_MAX_TRANSLATIONS *
		   sizeof(struct rppc_param_translation))) {
		ret = -ENOTSUPP;
		goto failure;
	}

	if (rpc->state != RPPC_STATE_CONNECTED) {
		ret = -ENOTCONN;
		goto failure;
	}

	function = kzalloc(len, GFP_KERNEL);
	if (!function) {
		ret = -ENOMEM;
		goto failure;
	}

	if (copy_from_user(function, ubuf, len)) {
		ret = -EFAULT;
		goto failure;
	}

	if (function->fxn_id >= rppcdev->num_funcs - 1) {
		ret = -EINVAL;
		goto failure;
	}

	/* increment the message id and wrap if needed */
	rpc->msg_id = (rpc->msg_id + 1) & 0xFFFF;

	memset(kbuf, 0, sizeof(kbuf));
	sig_idx = function->fxn_id + 1;
	hdr = (struct rppc_msg_header *)kbuf;
	hdr->msg_type = RPPC_MSGTYPE_FUNCTION_CALL;
	hdr->msg_len = sizeof(*packet);
	packet = RPPC_PAYLOAD(kbuf, rppc_packet);
	packet->desc = RPPC_DESC_EXEC_SYNC;
	packet->msg_id = rpc->msg_id;
	packet->flags = (RPPC_JOBID_DISCRETE << 16) | RPPC_POOLID_DEFAULT;
	packet->fxn_id = RPPC_SET_FXN_IDX(function->fxn_id);
	packet->result = 0;
	packet->data_size = sizeof(*parameters) * function->num_params;

	/* check the signatures against what were published */
	if (RPPC_SIG_NUM_PARAM(rppcdev->signatures[sig_idx]) !=
	    function->num_params) {
		dev_err(dev, "number of parameters mismatch! params = %u expected = %u\n",
			function->num_params,
			RPPC_SIG_NUM_PARAM(rppcdev->signatures[sig_idx]));
		ret = -EINVAL;
		goto failure;
	}

	/*
	 * compute the parameter pointer translations last, since these
	 * trigger the cache maintenance operations
	 */
	parameters = (struct rppc_param_data *)packet->data;
	for (param = 0; param < function->num_params; param++) {
		u32 param_type;

		sig_prm = param + 1;
		param_type = rppcdev->signatures[sig_idx].params[sig_prm].type;
		/*
		 * check to make sure the parameter description matches the
		 * signature published from the other side.
		 */
		if (function->params[param].type == RPPC_PARAM_TYPE_PTR &&
		    !RPPC_IS_PTR(param_type)) {
			dev_err(dev, "parameter %u Pointer Type Mismatch sig type:%x func %u\n",
				param, param_type, sig_idx);
			ret = -EINVAL;
			goto failure;
		} else if (param > 0 && function->params[param].type ==
			   RPPC_PARAM_TYPE_ATOMIC) {
			if (!RPPC_IS_ATOMIC(param_type)) {
				dev_err(dev, "parameter Atomic Type Mismatch\n");
				ret = -EINVAL;
				goto failure;
			} else {
				if (rppc_atomic_size[param_type] !=
				    function->params[param].size) {
					dev_err(dev, "size mismatch! u:%u sig:%u\n",
						function->params[param].size,
						rppc_atomic_size[param_type]);
					ret = -EINVAL;
					goto failure;
				}
			}
		}

		parameters[param].size = function->params[param].size;

		/* check the type and lookup if it's a pointer */
		if (function->params[param].type == RPPC_PARAM_TYPE_PTR) {
			/*
			 * internally the buffer translation takes care of
			 * the offsets.
			 */
			int fd = function->params[param].fd;

			parameters[param].data = (size_t)rppc_buffer_lookup(rpc,
					(virt_addr_t)function->params[param].data,
					(virt_addr_t)function->params[param].base, fd);
		} else if (function->params[param].type ==
			   RPPC_PARAM_TYPE_ATOMIC) {
			parameters[param].data = function->params[param].data;
		} else {
			ret = -ENOTSUPP;
			goto failure;
		}
	}

	/* compute the size of the rpmsg packet */
	use = sizeof(*hdr) + hdr->msg_len + packet->data_size;

	/* failed to provide the translation data */
	if (function->num_translations > 0 &&
	    len < (sizeof(*function) + (function->num_translations *
					sizeof(struct rppc_param_translation)))) {
		ret = -EINVAL;
		goto failure;
	}

	/*
	 * if there are pointers to translate for the user, do so now.
	 * Alter our copy of function and the user's parameters so that
	 * the proper pointers can be sent to the remote core.
	 */
	if (function->num_translations > 0) {
		ret = rppc_xlate_buffers(rpc, function, RPPC_UVA_TO_RPA);
		if (ret < 0) {
			dev_err(dev, "failed to translate all pointers for remote core!\n");
			goto failure;
		}
	}

	ret = rppc_add_fxn(rpc, function, rpc->msg_id);
	if (ret < 0) {
		rppc_xlate_buffers(rpc, function, RPPC_RPA_TO_UVA);
		goto failure;
	}

	rppc_print_msg(rpc, "TX:", kbuf);

	ret = rpmsg_send_offchannel(rppcdev->rpdev->ept, rpc->ept->addr,
				    rpc->dst, kbuf, use);
	if (ret) {
		dev_err(dev, "rpmsg_send failed: %d\n", ret);
		rppc_find_fxn(rpc, rpc->msg_id);
		rppc_xlate_buffers(rpc, function, RPPC_RPA_TO_UVA);
		goto failure;
	}
	dev_dbg(dev, "==> sent msg to remote endpoint %u\n", rpc->dst);

failure:
	if (ret >= 0)
		ret = len;
	else
		kfree(function);

	return ret;
}
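
/*
 * poll support: EPOLLIN/EPOLLRDNORM when a function response is queued,
 * EPOLLERR once the instance has gone stale (e.g. after remoteproc recovery)
 */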
static __poll_t rppc_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rppc_instance *rpc = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &rpc->readq, wait);
	if (rpc->state == RPPC_STATE_STALE) {
		mask = EPOLLERR;
		goto out;
	}

	/* if the queue is not empty set the poll bit correctly */
	if (!skb_queue_empty(&rpc->queue))
		mask |= (EPOLLIN | EPOLLRDNORM);

	/* TODO: writes are deemed to be successful always, fix this later */
	if (true)
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}

static const struct file_operations rppc_fops = {
	.owner = THIS_MODULE,
	.open = rppc_open,
	.release = rppc_release,
	.unlocked_ioctl = rppc_ioctl,
	.read = rppc_read,
	.write = rppc_write,
	.poll = rppc_poll,
};

/*
 * send a function query message; the sysfs entry will be created
 * during the processing of the response message
 */
static int rppc_query_function(struct rpmsg_device *rpdev)
{
	int ret = 0;
	u32 len = 0;
	char kbuf[512];
	struct rppc_device *rppcdev = dev_get_drvdata(&rpdev->dev);
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)&kbuf[0];
	struct rppc_query_function *fxn_info =
				(struct rppc_query_function *)hdr->msg_data;

	if (rppcdev->cur_func >= rppcdev->num_funcs)
		return -EINVAL;

	hdr->msg_type = RPPC_MSGTYPE_FUNCTION_QUERY;
	hdr->msg_len = sizeof(*fxn_info);
	len = sizeof(*hdr) + hdr->msg_len;
	fxn_info->info_type = RPPC_INFOTYPE_FUNC_SIGNATURE;
	fxn_info->fxn_id = rppcdev->cur_func++;

	dev_dbg(&rpdev->dev, "sending function query type %u for function %u\n",
		fxn_info->info_type, fxn_info->fxn_id);
	ret = rpmsg_send(rpdev->ept, (char *)kbuf, len);
	if (ret) {
		dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
		return ret;
	}

	return 0;
}
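
/*
 * process the device-info response from the connection manager: record how
 * many functions the remote side publishes, allocate the signature table
 * and kick off the per-function signature queries
 */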
static void
rppc_handle_devinfo_resp(struct rpmsg_device *rpdev, char *data, int len)
{
	struct rppc_device *rppcdev = dev_get_drvdata(&rpdev->dev);
	struct rppc_device_info *info;
	u32 exp_len = sizeof(*info) + sizeof(struct rppc_msg_header);

	if (len != exp_len) {
		dev_err(&rpdev->dev, "invalid message length %d (expected %d bytes)",
			len, exp_len);
		return;
	}

	info = RPPC_PAYLOAD(data, rppc_device_info);
	if (info->num_funcs > RPPC_MAX_NUM_FUNCS) {
		rppcdev->num_funcs = 0;
		dev_err(&rpdev->dev, "number of functions (%d) exceeds the limit supported(%d)\n",
			info->num_funcs, RPPC_MAX_NUM_FUNCS);
		return;
	}

	rppcdev->num_funcs = info->num_funcs;
	rppcdev->signatures = kcalloc(rppcdev->num_funcs,
				      sizeof(struct rppc_func_signature),
				      GFP_KERNEL);
	if (!rppcdev->signatures)
		return;

	dev_info(&rpdev->dev, "published functions = %u\n", info->num_funcs);

	/* send the function query for the first function */
	if (rppc_query_function(rpdev) == -EINVAL)
		dev_err(&rpdev->dev, "failed to get a reasonable number of functions!\n");
}
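
/*
 * process a function signature response: store the published signature for
 * later validation of user-space calls, and query the next function until
 * the whole table is retrieved, at which point the sysfs entries are created
 */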
static void
rppc_handle_fxninfo_resp(struct rpmsg_device *rpdev, char *data, int len)
{
	struct rppc_device *rppcdev = dev_get_drvdata(&rpdev->dev);
	struct rppc_query_function *fxn_info;
	struct rppc_func_signature *signature;
	u32 exp_len = sizeof(*fxn_info) + sizeof(struct rppc_msg_header);
	int i;

	if (len != exp_len) {
		dev_err(&rpdev->dev, "invalid message length %d (expected %d bytes)",
			len, exp_len);
		return;
	}

	fxn_info = RPPC_PAYLOAD(data, rppc_query_function);
	dev_dbg(&rpdev->dev, "response for function query of type %u\n",
		fxn_info->info_type);

	switch (fxn_info->info_type) {
	case RPPC_INFOTYPE_FUNC_SIGNATURE:
		if (fxn_info->fxn_id >= rppcdev->num_funcs) {
			dev_err(&rpdev->dev, "function(%d) is out of range!\n",
				fxn_info->fxn_id);
			break;
		}

		memcpy(&rppcdev->signatures[fxn_info->fxn_id],
		       &fxn_info->info.signature, sizeof(*signature));

		/* TODO: delete these debug prints later */
		dev_dbg(&rpdev->dev, "received info for func(%d); name = %s #params = %u\n",
			fxn_info->fxn_id, fxn_info->info.signature.name,
			fxn_info->info.signature.num_param);
		signature = &rppcdev->signatures[fxn_info->fxn_id];
		for (i = 0; i < signature->num_param; i++) {
			dev_dbg(&rpdev->dev, "param[%u] type = %x dir = %u\n",
				i, signature->params[i].type,
				signature->params[i].direction);
		}

		/* query again until we've hit our limit */
		if (rppc_query_function(rpdev) == -EINVAL) {
			dev_dbg(&rpdev->dev, "reached end of function list!\n");
			rppc_create_sysfs(rppcdev);
		}
		break;
	default:
		dev_err(&rpdev->dev, "unrecognized fxn query response %u\n",
			fxn_info->info_type);
		break;
	}
}
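
/*
 * driver-level rpmsg callback for messages arriving on the connection
 * manager channel itself (device-info and function-info responses)
 */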
static int rppc_driver_cb(struct rpmsg_device *rpdev, void *data, int len,
			  void *priv, u32 src)
{
	struct rppc_msg_header *hdr = data;
	char *buf = (char *)data;

	dev_dbg(&rpdev->dev, "<== incoming drv msg src %d len %d msg_type %d msg_len %d\n",
		src, len, hdr->msg_type, hdr->msg_len);

	if (len <= sizeof(*hdr)) {
		dev_err(&rpdev->dev, "message truncated\n");
		return -EINVAL;
	}

	switch (hdr->msg_type) {
	case RPPC_MSGTYPE_DEVINFO_RESP:
		rppc_handle_devinfo_resp(rpdev, buf, len);
		break;
	case RPPC_MSGTYPE_FUNCTION_INFO:
		rppc_handle_fxninfo_resp(rpdev, buf, len);
		break;
	default:
		dev_err(&rpdev->dev, "unrecognized message type %u\n",
			hdr->msg_type);
		break;
	}

	return 0;
}
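
/*
 * idr_for_each() callback to match an existing rppc device by its channel
 * description; returns the device minor + 1 (a non-zero cookie) on a match
 * so the iteration stops, or 0 to keep looking
 */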
static int find_rpccdev_by_name(int id, void *p, void *data)
{
	struct rppc_device *rppcdev = p;

	return strcmp(rppcdev->desc, data) ? 0 : id + 1;
}

/*
 * send a device info query message; the device will be created
 * during the processing of the response message
 */
static int rppc_device_create(struct rpmsg_device *rpdev)
{
	int ret;
	u32 len;
	char kbuf[512];
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)&kbuf[0];

	hdr->msg_type = RPPC_MSGTYPE_DEVINFO_REQ;
	hdr->msg_len = 0;
	len = sizeof(*hdr);
	ret = rpmsg_send(rpdev->ept, (char *)kbuf, len);
	if (ret) {
		dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
		return ret;
	}

	return 0;
}
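
/*
 * probe a new rpmsg-rpc channel: reuse an existing rppc device of the same
 * name if one survived a remoteproc recovery, otherwise allocate a minor,
 * register the character device, and query the remote side for its
 * published function table
 */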
static int rppc_probe(struct rpmsg_device *rpdev)
{
	int ret, minor;
	int major = MAJOR(rppc_dev);
	struct rppc_device *rppcdev = NULL;
	dev_t dev;
	char namedesc[RPMSG_NAME_SIZE];

	dev_info(&rpdev->dev, "probing service %s with src %u dst %u\n",
		 rpdev->desc, rpdev->src, rpdev->dst);

	mutex_lock(&rppc_devices_lock);
	snprintf(namedesc, sizeof(namedesc), "%s", rpdev->desc);
	minor = idr_for_each(&rppc_devices, find_rpccdev_by_name, namedesc);
	rppcdev = minor ? idr_find(&rppc_devices, minor - 1) : NULL;
	if (rppcdev) {
		rppcdev->rpdev = rpdev;
		dev_set_drvdata(&rpdev->dev, rppcdev);
		goto serv_up;
	}

	rppcdev = kzalloc(sizeof(*rppcdev), GFP_KERNEL);
	if (!rppcdev) {
		ret = -ENOMEM;
		goto exit;
	}

	minor = idr_alloc(&rppc_devices, rppcdev, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		dev_err(&rpdev->dev, "failed to get a minor number: %d\n", ret);
		goto free_rppcdev;
	}

	INIT_LIST_HEAD(&rppcdev->instances);
	mutex_init(&rppcdev->lock);
	init_completion(&rppcdev->comp);

	rppcdev->minor = minor;
	rppcdev->rpdev = rpdev;
	strncpy(rppcdev->desc, namedesc, RPMSG_NAME_SIZE);
	dev_set_drvdata(&rpdev->dev, rppcdev);

	cdev_init(&rppcdev->cdev, &rppc_fops);
	rppcdev->cdev.owner = THIS_MODULE;
	dev = MKDEV(major, minor);
	ret = cdev_add(&rppcdev->cdev, dev, 1);
	if (ret) {
		dev_err(&rpdev->dev, "cdev_add failed: %d\n", ret);
		goto free_id;
	}

serv_up:
	rppcdev->dev = device_create(rppc_class, &rpdev->dev,
				     MKDEV(major, rppcdev->minor), NULL,
				     namedesc);
	if (IS_ERR(rppcdev->dev)) {
		ret = PTR_ERR(rppcdev->dev);
		dev_err(&rpdev->dev, "device_create failed: %d\n", ret);
		goto free_cdev;
	}
	dev_set_drvdata(rppcdev->dev, rppcdev);

	ret = rppc_device_create(rpdev);
	if (ret) {
		dev_err(&rpdev->dev, "failed to query channel info: %d\n", ret);
		dev = MKDEV(MAJOR(rppc_dev), rppcdev->minor);
		goto free_dev;
	}

	complete_all(&rppcdev->comp);

	dev_dbg(&rpdev->dev, "new RPPC connection srv channel: %u -> %u!\n",
		rpdev->src, rpdev->dst);

	mutex_unlock(&rppc_devices_lock);
	return 0;

free_dev:
	device_destroy(rppc_class, dev);
free_cdev:
	cdev_del(&rppcdev->cdev);
free_id:
	idr_remove(&rppc_devices, rppcdev->minor);
free_rppcdev:
	kfree(rppcdev);
exit:
	mutex_unlock(&rppc_devices_lock);
	return ret;
}
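
/*
 * remove the rpmsg-rpc channel: tear down the character device if no user
 * instances are open; otherwise retain the rppc device for recovery, mark
 * every open instance stale and wake up its waiters
 */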
static void rppc_remove(struct rpmsg_device *rpdev)
{
	struct rppc_device *rppcdev = dev_get_drvdata(&rpdev->dev);
	struct rppc_instance *rpc = NULL;
	int major = MAJOR(rppc_dev);

	dev_dbg(&rpdev->dev, "removing rpmsg-rpc device %u.%u\n",
		major, rppcdev->minor);

	mutex_lock(&rppc_devices_lock);

	rppc_remove_sysfs(rppcdev);
	rppcdev->cur_func = 0;
	kfree(rppcdev->signatures);

	/* if there are no instances in the list, just tear down */
	if (list_empty(&rppcdev->instances)) {
		dev_dbg(&rpdev->dev, "no instances, removing device!\n");
		device_destroy(rppc_class, MKDEV(major, rppcdev->minor));
		cdev_del(&rppcdev->cdev);
		idr_remove(&rppc_devices, rppcdev->minor);
		kfree(rppcdev);
		mutex_unlock(&rppc_devices_lock);
		return;
	}

	/*
	 * if there are rpc instances, this is a recovery operation. Don't
	 * clean up the rppcdev, but retain it for reuse. Mark each instance
	 * as invalid, and complete any on-going transactions.
	 */
	init_completion(&rppcdev->comp);
	mutex_lock(&rppcdev->lock);
	list_for_each_entry(rpc, &rppcdev->instances, list) {
		dev_dbg(&rpdev->dev, "instance %p in state %d\n",
			rpc, rpc->state);
		if (rpc->state == RPPC_STATE_CONNECTED && rpc->in_transition)
			complete_all(&rpc->reply_arrived);
		rpc->state = RPPC_STATE_STALE;
		if (rpc->ept) {
			rpmsg_destroy_ept(rpc->ept);
			rpc->ept = NULL;
		}
		wake_up_interruptible(&rpc->readq);
	}
	device_destroy(rppc_class, MKDEV(major, rppcdev->minor));
	rppcdev->dev = NULL;
	rppcdev->rpdev = NULL;
	mutex_unlock(&rppcdev->lock);
	mutex_unlock(&rppc_devices_lock);
	dev_dbg(&rpdev->dev, "removed rpmsg-rpc service %s\n",
		rpdev->desc);
}

static struct rpmsg_device_id rppc_id_table[] = {
	{.name = "rpmsg-rpc"},
	{},
};

static struct rpmsg_driver rppc_driver = {
	.drv.name = KBUILD_MODNAME,
	.id_table = rppc_id_table,
	.probe = rppc_probe,
	.remove = rppc_remove,
	.callback = rppc_driver_cb,
};
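
/*
 * module init: reserve the character device region and class, then register
 * the rpmsg driver that binds to "rpmsg-rpc" channels
 */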
static int __init rppc_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&rppc_dev, 0, RPPC_MAX_DEVICES,
				  KBUILD_MODNAME);
	if (ret) {
		pr_err("alloc_chrdev_region failed: %d\n", ret);
		goto out;
	}

	rppc_class = class_create(THIS_MODULE, KBUILD_MODNAME);
	if (IS_ERR(rppc_class)) {
		ret = PTR_ERR(rppc_class);
		pr_err("class_create failed: %d\n", ret);
		goto unreg_region;
	}

	ret = register_rpmsg_driver(&rppc_driver);
	if (ret) {
		pr_err("register_rpmsg_driver failed: %d\n", ret);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(rppc_class);
unreg_region:
	unregister_chrdev_region(rppc_dev, RPPC_MAX_DEVICES);
out:
	return ret;
}

static void __exit rppc_exit(void)
{
	unregister_rpmsg_driver(&rppc_driver);
	class_destroy(rppc_class);
	unregister_chrdev_region(rppc_dev, RPPC_MAX_DEVICES);
}

module_init(rppc_init);
module_exit(rppc_exit);
MODULE_DEVICE_TABLE(rpmsg, rppc_id_table);

MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
MODULE_AUTHOR("Erik Rainey <erik.rainey@ti.com>");
MODULE_DESCRIPTION("Remote Processor Procedure Call Driver");
MODULE_ALIAS("rpmsg:rpmsg-rpc");
MODULE_LICENSE("GPL v2");