/*
 * @file rpmsg-rpc.c
 *
 * @brief devctl handler for RPC component.
 *
 * ============================================================================
 *
 * Copyright (c) 2013, Texas Instruments Incorporated
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * *  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * *  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * *  Neither the name of Texas Instruments Incorporated nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * Contact information for paper mail:
 * Texas Instruments
 * Post Office Box 655303
 * Dallas, Texas 75265
 * Contact information:
 * http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
 * DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
 * ============================================================================
 *
 */
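
/*
 * Illustrative client-side usage (a minimal sketch, not taken from a test
 * suite; the exact rppc_create_instance layout and ioctl direction come
 * from ti/ipc/rpmsg_rpc.h):
 *
 *     int fd = open("/dev/rpmsg-rpc", O_RDWR);
 *     struct rppc_create_instance create;
 *     strncpy(create.name, "my_service", sizeof(create.name));
 *     devctl(fd, RPPC_IOC_CREATE, &create, sizeof(create), NULL);
 *     // write() a struct rppc_function to invoke a remote function;
 *     // read() then blocks until the function-return packet arrives.
 *     close(fd);
 */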

/* Standard headers */
#include <ti/syslink/Std.h>

/* OSAL & Utils headers */
#include <ti/syslink/utils/List.h>
#include <ti/syslink/utils/String.h>
#include <ti/syslink/utils/Trace.h>
#include <ti/syslink/utils/Memory.h>
#include <ti/syslink/utils/IGateProvider.h>
#include <ti/syslink/utils/GateSpinlock.h>
#include <_MultiProc.h>

/* QNX-specific header includes */
#include <errno.h>
#include <unistd.h>
#include <sys/iofunc.h>
#include <sys/dispatch.h>
#include <sys/netmgr.h>
#include <sys/mman.h>
#include <devctl.h>

/* Module headers */
#include <ti/ipc/rpmsg_rpc.h>
#include <ti/ipc/MessageQCopy.h>
#include <_MessageQCopy.h>
#include <_MessageQCopyDefs.h>
#include "OsalSemaphore.h"
#include "std_qnx.h"
#include <pthread.h>

#include <memmgr/tilermem.h>
#include <memmgr/tiler.h>

#include "rpmsg-rpc.h"
#include <rpmsg.h>

#define PRIORITY_REALTIME_LOW 29

extern int mem_offset64_peer(pid_t pid, const uintptr_t addr, size_t len,
                             off64_t *offset, size_t *contig_len);

static MsgList_t *nl_cache;
static int num_nl = 0;
static WaitingReaders_t *wr_cache;
static int num_wr = 0;

/*
 * Instead of constantly allocating and freeing the notifier structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in rpmsg-rpc.h.
 */

static MsgList_t *get_nl()
{
    MsgList_t *item;
    item = nl_cache;
    if (item != NULL) {
        nl_cache = nl_cache->next;
        num_nl--;
    } else {
        item = Memory_alloc(NULL, sizeof(MsgList_t), 0, NULL);
    }
    return(item);
}

static void put_nl(MsgList_t *item)
{
    if (num_nl >= CACHE_NUM) {
        Memory_free(NULL, item, sizeof(*item));
    } else {
        item->next = nl_cache;
        nl_cache = item;
        num_nl++;
    }
    return;
}

static WaitingReaders_t *get_wr()
{
    WaitingReaders_t *item;
    item = wr_cache;
    if (item != NULL) {
        wr_cache = wr_cache->next;
        num_wr--;
    } else {
        item = Memory_alloc(NULL, sizeof(WaitingReaders_t), 0, NULL);
    }
    return(item);
}

static void put_wr(WaitingReaders_t *item)
{
    if (num_wr >= CACHE_NUM) {
        Memory_free(NULL, item, sizeof(*item));
    } else {
        item->next = wr_cache;
        wr_cache = item;
        num_wr++;
    }
    return;
}

/* structure to hold rpmsg-rpc device information */
typedef struct named_device {
    iofunc_mount_t          mattr;
    iofunc_attr_t           cattr;
    int                     resmgr_id;
    pthread_mutex_t         mutex;
    iofunc_funcs_t          mfuncs;
    resmgr_connect_funcs_t  cfuncs;
    resmgr_io_funcs_t       iofuncs;
    char                    device_name[_POSIX_PATH_MAX];
} named_device_t;

/* rpmsg-rpc device structure */
typedef struct rpmsg_rpc_dev {
    dispatch_t      * dpp;
    thread_pool_t   * tpool;
    named_device_t    rpmsg_rpc;
} rpmsg_rpc_dev_t;

/*!
 *  @brief  Remote connection object
 */
typedef struct rpmsg_rpc_conn_object {
    rpmsg_rpc_dev_t *   dev;
    MessageQCopy_Handle mq;
    UInt32              addr;
    UInt16              procId;
    ProcMgr_Handle      procH;
    UInt32              numFuncs;
} rpmsg_rpc_conn_object;

/*!
 *  @brief  rpc instance object
 */
typedef struct rpmsg_rpc_object_tag {
    MessageQCopy_Handle     mq;
    rpmsg_rpc_conn_object * conn;
    UInt32                  addr;
    UInt32                  remoteAddr;
    UInt16                  procId;
    pid_t                   pid;
    Bool                    created;
    iofunc_notify_t         notify[3];
} rpmsg_rpc_object;

/*!
 *  @brief  Structure of Event callback argument passed to register function.
 */
typedef struct rpmsg_rpc_EventCbck_tag {
    List_Elem          element;
    /*!< List element header */
    rpmsg_rpc_object * rpc;
    /*!< User rpc info pointer. Passed back to user callback function */
    UInt32             pid;
    /*!< Process Identifier for user process. */
} rpmsg_rpc_EventCbck;

/*!
 *  @brief  Structure of Fxn Info for reverse translations.
 */
typedef struct rpmsg_rpc_FxnInfo_tag {
    List_Elem            element;
    /*!< List element header */
    UInt16               msgId;
    /*!< Unique msgId of the rpc fxn call */
    struct rppc_function func;
    /*!< rpc function information. */
} rpmsg_rpc_FxnInfo;

/*!
 *  @brief  Keeps the information related to Event.
 */
typedef struct rpmsg_rpc_EventState_tag {
    List_Handle        bufList;
    /*!< Head of received event list. */
    List_Handle        fxnList;
    /*!< Head of received msg list. */
    UInt32             pid;
    /*!< User process ID. */
    rpmsg_rpc_object * rpc;
    /*!< User rpc comp. */
    UInt32             refCount;
    /*!< Reference count, used when multiple Notify_registerEvent are called
         from same process space (multi threads/processes). */
    WaitingReaders_t * head;
    /*!< Waiting readers head. */
    WaitingReaders_t * tail;
    /*!< Waiting readers tail. */
} rpmsg_rpc_EventState;

/*!
 *  @brief  Per-connection information
 */
typedef struct rpmsg_rpc_ocb {
    iofunc_ocb_t       hdr;
    pid_t              pid;
    rpmsg_rpc_object * rpc;
} rpmsg_rpc_ocb_t;

typedef struct rpmsg_rpc_name {
    char name[RPMSG_NAME_SIZE];
} rpmsg_rpc_name_t;

static struct rpmsg_rpc_name rpmsg_rpc_names[] = {
    {.name = "rpmsg-rpc"},
};

#define NUM_RPMSG_RPC_QUEUES (sizeof(rpmsg_rpc_names)/sizeof(*rpmsg_rpc_names))

/*!
 *  @brief  rpmsg-rpc Module state object
 */
typedef struct rpmsg_rpc_ModuleObject_tag {
    Bool                 isSetup;
    /*!< Indicates whether the module has been already setup */
    Bool                 openRefCount;
    /*!< Open reference count. */
    IGateProvider_Handle gateHandle;
    /*!< Handle of gate to be used for local thread safety */
    rpmsg_rpc_EventState eventState [MAX_PROCESSES];
    /*!< List for all user processes registered. */
    rpmsg_rpc_conn_object * objects [MAX_CONNS];
    /*!< List of all remote connections. */
    MessageQCopy_Handle  mqHandle[NUM_RPMSG_RPC_QUEUES];
    /*!< Local mq handle associated with this module */
    UInt32               endpoint[NUM_RPMSG_RPC_QUEUES];
    /*!< Local endpoint associated with the mq handle */
    OsalSemaphore_Handle sem;
    /*!< Handle to semaphore used for rpc instance connection notifications */
    pthread_t            nt;
    /*!< notifier thread */
    pthread_mutex_t      lock;
    /*!< protection between notifier and event */
    pthread_cond_t       cond;
    /*!< protection between notifier and event */
    MsgList_t *          head;
    /*!< list head */
    MsgList_t *          tail;
    /*!< list tail */
    int                  run;
    /*!< notifier thread must keep running */
} rpmsg_rpc_ModuleObject;

/*!
 *  @brief  Structure of Event Packet read from notify kernel-side.
 */
typedef struct rpmsg_rpc_EventPacket_tag {
    List_Elem          element;
    /*!< List element header */
    UInt32             pid;
    /*!< Process identifier */
    rpmsg_rpc_object * obj;
    /*!< Pointer to the channel associated with this callback */
    UInt8              data[MessageQCopy_BUFSIZE];
    /*!< Data associated with event. */
    UInt32             len;
    /*!< Length of the data associated with event. */
    UInt32             src;
    /*!< Src endpoint associated with event. */
    struct rpmsg_rpc_EventPacket * next;
    struct rpmsg_rpc_EventPacket * prev;
} rpmsg_rpc_EventPacket;

/*
 * Instead of constantly allocating and freeing the uBuf structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in rpmsg-rpc.h.
 */
static rpmsg_rpc_EventPacket *uBuf_cache;
static int num_uBuf = 0;
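
/* Free every cached event packet and empty the uBuf cache. */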
static void flush_uBuf()
{
    rpmsg_rpc_EventPacket *uBuf = NULL;

    while (uBuf_cache) {
        num_uBuf--;
        uBuf = uBuf_cache;
        uBuf_cache = (rpmsg_rpc_EventPacket *)uBuf_cache->next;
        Memory_free(NULL, uBuf, sizeof(*uBuf));
    }
}

static rpmsg_rpc_EventPacket *get_uBuf()
{
    rpmsg_rpc_EventPacket *uBuf;
    uBuf = uBuf_cache;
    if (uBuf != NULL) {
        uBuf_cache = (rpmsg_rpc_EventPacket *)uBuf_cache->next;
        num_uBuf--;
    } else {
        uBuf = Memory_alloc(NULL, sizeof(rpmsg_rpc_EventPacket), 0, NULL);
    }
    return(uBuf);
}

static void put_uBuf(rpmsg_rpc_EventPacket * uBuf)
{
    if (num_uBuf >= CACHE_NUM) {
        Memory_free(NULL, uBuf, sizeof(*uBuf));
    } else {
        uBuf->next = (struct rpmsg_rpc_EventPacket *)uBuf_cache;
        uBuf_cache = uBuf;
        num_uBuf++;
    }
    return;
}

/** ============================================================================
 *  Function Prototypes
 *  ============================================================================
 */
int
_rpmsg_rpc_translate (ProcMgr_Handle handle, char *data, pid_t pid,
                      bool reverse);

/** ============================================================================
 *  Globals
 *  ============================================================================
 */
/*!
 *  @var    rpmsg_rpc_state
 *
 *  @brief  rpmsg-rpc state object variable
 */
static rpmsg_rpc_ModuleObject rpmsg_rpc_state =
{
    .gateHandle = NULL,
    .isSetup = FALSE,
    .openRefCount = 0,
    .nt = 0,
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .cond = PTHREAD_COND_INITIALIZER,
    .head = NULL,
    .tail = NULL,
    .run = 0
};
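
/* Rolling 16-bit id assigned to outgoing RPPC_MSG_CALL_FUNCTION packets;
 * rpmsg_rpc_write() wraps it from 0xFFFF back to 1, so 0 is never used as
 * a live id. */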
static uint16_t msg_id = 0xFFFF;

extern dispatch_t * syslink_dpp;
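
/* Find the queued notify-list entry for client slot 'index', or NULL.
 * Callers must hold rpmsg_rpc_state.lock. */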
static MsgList_t *find_nl(int index)
{
    MsgList_t *item = NULL;
    item = rpmsg_rpc_state.head;
    while (item) {
        if (item->index == index)
            return(item);
        item = item->next;
    }
    return(item);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to enqueue a notify list item.
 *
 *  @param   index    Index of the client process associated with the item.
 *
 *  @sa      find_nl
 *           get_nl
 */
static int enqueue_notify_list(int index)
{
    MsgList_t *item;
    item = find_nl(index);
    if (item == NULL) {
        item = get_nl();
        if (item == NULL) {
            return(-1);
        }
        item->next = NULL;
        item->index = index;
        item->num_events = 1;
        if (rpmsg_rpc_state.head == NULL) {
            rpmsg_rpc_state.head = item;
            rpmsg_rpc_state.tail = item;
            item->prev = NULL;
        }
        else {
            item->prev = rpmsg_rpc_state.tail;
            rpmsg_rpc_state.tail->next = item;
            rpmsg_rpc_state.tail = item;
        }
    }
    else {
        item->num_events++;
    }
    return(0);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to dequeue a notify list item.
 *
 *  @param   item     The item to remove.
 *
 *  @sa      put_nl
 */
static inline int dequeue_notify_list_item(MsgList_t *item)
{
    int index;
    if (item == NULL) {
        return(-1);
    }
    index = item->index;
    item->num_events--;
    if (item->num_events > 0) {
        return(index);
    }
    if (rpmsg_rpc_state.head == item) {
        // removing head
        rpmsg_rpc_state.head = item->next;
        if (rpmsg_rpc_state.head != NULL) {
            rpmsg_rpc_state.head->prev = NULL;
        }
        else {
            // removing head and tail
            rpmsg_rpc_state.tail = NULL;
        }
    }
    else {
        item->prev->next = item->next;
        if (item->next != NULL) {
            item->next->prev = item->prev;
        }
        else {
            // removing tail
            rpmsg_rpc_state.tail = item->prev;
        }
    }
    put_nl(item);
    return(index);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to add a waiting reader to the list.
 *
 *  @param   index    Index of the client process waiting reader to add.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      None
 */
static int enqueue_waiting_reader(int index, int rcvid)
{
    WaitingReaders_t *item;
    item = get_wr();
    if (item == NULL) {
        return(-1);
    }
    item->rcvid = rcvid;
    item->next = NULL;
    if (rpmsg_rpc_state.eventState [index].head == NULL) {
        rpmsg_rpc_state.eventState [index].head = item;
        rpmsg_rpc_state.eventState [index].tail = item;
    }
    else {
        rpmsg_rpc_state.eventState [index].tail->next = item;
        rpmsg_rpc_state.eventState [index].tail = item;
    }
    return(EOK);
}

/* we have the right locks when calling this function */
/* caller frees item */
/*!
 *  @brief   Function to remove a waiting reader from the list.
 *
 *  @param   index    Index of the client process waiting reader to dequeue.
 *
 *  @sa      None
 */
static WaitingReaders_t *dequeue_waiting_reader(int index)
{
    WaitingReaders_t *item = NULL;
    if (rpmsg_rpc_state.eventState [index].head) {
        item = rpmsg_rpc_state.eventState [index].head;
        rpmsg_rpc_state.eventState [index].head =
                rpmsg_rpc_state.eventState [index].head->next;
        if (rpmsg_rpc_state.eventState [index].head == NULL) {
            rpmsg_rpc_state.eventState [index].tail = NULL;
        }
    }
    return(item);
}

/*!
 *  @brief   Function to find and remove a specified waiting reader.
 *
 *  @param   index    Index of the client process waiting for the message.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      None
 */
static WaitingReaders_t *find_waiting_reader(int index, int rcvid)
{
    WaitingReaders_t *item = NULL;
    WaitingReaders_t *prev = NULL;
    if (rpmsg_rpc_state.eventState [index].head) {
        item = rpmsg_rpc_state.eventState [index].head;
        while (item) {
            if (item->rcvid == rcvid) {
                /* remove item from list */
                if (prev)
                    prev->next = item->next;
                if (item == rpmsg_rpc_state.eventState [index].head)
                    rpmsg_rpc_state.eventState [index].head = item->next;
                /* keep the tail pointer consistent when removing the tail */
                if (item == rpmsg_rpc_state.eventState [index].tail)
                    rpmsg_rpc_state.eventState [index].tail = prev;
                break;
            }
            else {
                prev = item;
                item = item->next;
            }
        }
    }
    return item;
}

/*!
 *  @brief   Function used to check if there is a waiting reader with an
 *           event (message) ready to be delivered.
 *
 *  @param   index    Index of the client process waiting for the message.
 *  @param   item     Pointer to the waiting reader.
 *
 *  @sa      dequeue_notify_list_item
 *           dequeue_waiting_reader
 */
static int find_available_reader_and_event(int *index, WaitingReaders_t **item)
{
    MsgList_t *temp;
    if (rpmsg_rpc_state.head == NULL) {
        return(0);
    }
    temp = rpmsg_rpc_state.head;
    while (temp) {
        if (rpmsg_rpc_state.eventState [temp->index].head) {
            // event and reader found
            if (dequeue_notify_list_item(temp) >= 0) {
                *index = temp->index;
                *item = dequeue_waiting_reader(temp->index);
            }
            else {
                /* error occurred, return 0 as item has not been set */
                return(0);
            }
            return(1);
        }
        temp = temp->next;
    }
    return(0);
}

/*!
 *  @brief   Function used to deliver the notification to the client that
 *           it has received a message.
 *
 *  @param   index    Index of the client process receiving the message.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      put_uBuf
 */
static void deliver_notification(int index, int rcvid)
{
    int err = EOK;
    rpmsg_rpc_EventPacket * uBuf = NULL;

    uBuf = (rpmsg_rpc_EventPacket *) List_get (rpmsg_rpc_state.eventState [index].bufList);

    /* Let the check remain at run-time. */
    if (uBuf != NULL) {
        err = MsgReply(rcvid, uBuf->len, uBuf->data, uBuf->len);
        if (err == -1)
            perror("deliver_notification: MsgReply");
        /* Free the processed event callback packet. */
        put_uBuf(uBuf);
    }
    else {
        MsgReply(rcvid, EOK, NULL, 0);
    }
    return;
}

/*!
 *  @brief   Thread used for notifying waiting readers of messages.
 *
 *  @param   arg     Thread-specific private arg.
 *
 *  @sa      find_available_reader_and_event
 *           deliver_notification
 *           put_wr
 */
static void *notifier_thread(void *arg)
{
    int status;
    int index;
    WaitingReaders_t *item = NULL;
    pthread_mutex_lock(&rpmsg_rpc_state.lock);
    while (rpmsg_rpc_state.run) {
        status = find_available_reader_and_event(&index, &item);
        if ( (status == 0) || (item == NULL) ) {
            status = pthread_cond_wait(&rpmsg_rpc_state.cond, &rpmsg_rpc_state.lock);
            if ((status != EOK) && (status != EINTR)) {
                // unexpected wait error, bail out
                break;
            }
            status = find_available_reader_and_event(&index, &item);
            if ( (status == 0) || (item == NULL) ) {
                continue;
            }
        }
        pthread_mutex_unlock(&rpmsg_rpc_state.lock);
        // we have unlocked, and now we have an event to deliver
        // we deliver one event at a time, relock, check and continue
        deliver_notification(index, item->rcvid);
        pthread_mutex_lock(&rpmsg_rpc_state.lock);
        put_wr(item);
    }
    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
    return(NULL);
}
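
/*!
 *  @brief   Handler for the RPPC_IOC_CREATE devctl: sends an
 *           RPPC_MSG_CREATE_INSTANCE message to the remote server and waits
 *           (up to 5 seconds) for _rpmsg_rpc_cb to confirm that the remote
 *           instance was created.
 */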
static
Int
_rpmsg_rpc_create(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_rpc_ocb_t *ocb)
{
    Int status = EOK;
    struct rppc_create_instance * cargs =
            (struct rppc_create_instance *)(_DEVCTL_DATA (msg->i));
    struct rppc_msg_header * msg_hdr = NULL;
    rpmsg_rpc_object * rpc = ocb->rpc;
    Char * msg_data = NULL;
    UInt8 buf[sizeof(struct rppc_create_instance) + sizeof(struct rppc_msg_header)];

    if (rpc->created == TRUE) {
        GT_0trace(curTrace, GT_4CLASS, "Already created.");
        status = (EINVAL);
    }
    else if ((ctp->info.msglen - sizeof(msg->i)) <
             sizeof (struct rppc_create_instance)) {
        status = (EINVAL);
    }
    else if (String_nlen(cargs->name, 47) == -1) {
        /* name must be NUL-terminated within the 48-byte name field */
        status = (EINVAL);
    }
    else {
        msg_hdr = (struct rppc_msg_header *)buf;
        msg_hdr->msg_type = RPPC_MSG_CREATE_INSTANCE;
        msg_hdr->msg_len = sizeof(struct rppc_create_instance);
        msg_data = (Char *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
        Memory_copy(msg_data, cargs, sizeof(struct rppc_create_instance));

        status = MessageQCopy_send (rpc->conn->procId, // remote procid
                                    MultiProc_self(),  // local procid
                                    rpc->conn->addr,   // remote server
                                    rpc->addr,         // local address
                                    buf,               // connect msg
                                    sizeof(buf),       // msg size
                                    TRUE);             // wait for available bufs
        if (status != MessageQCopy_S_SUCCESS) {
            GT_0trace(curTrace, GT_4CLASS, "Failed to send create message.");
            status = (EIO);
        }
        else {
            status = OsalSemaphore_pend(rpmsg_rpc_state.sem, 5000);
            if (rpc->created == TRUE) {
                msg->o.ret_val = EOK;
                status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o)));
            }
            else if (status < 0) {
                GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
                status = (EIO);
            }
            else {
                status = (ETIMEDOUT);
            }
        }
    }

    return status;
}
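
/*!
 *  @brief   Sends an RPPC_MSG_DESTROY_INSTANCE message to the remote server
 *           and waits (up to 5 seconds) for _rpmsg_rpc_cb to confirm that
 *           the remote instance was torn down. Also called from
 *           rpmsg_rpc_ocb_free() with ctp and msg set to NULL.
 */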
static
Int
_rpmsg_rpc_destroy(resmgr_context_t *ctp, io_devctl_t *msg,
                   rpmsg_rpc_ocb_t *ocb)
{
    Int status = EOK;
    struct rppc_msg_header * hdr = NULL;
    rpmsg_rpc_object * rpc = ocb->rpc;
    UInt8 buf[sizeof(struct rppc_instance_handle) + sizeof(struct rppc_msg_header)];
    struct rppc_instance_handle * instance = NULL;

    if (rpc->created != TRUE) {
        GT_0trace(curTrace, GT_4CLASS, "Already destroyed.");
        status = (EINVAL);
    }
    else {
        hdr = (struct rppc_msg_header *)buf;
        hdr->msg_type = RPPC_MSG_DESTROY_INSTANCE;
        hdr->msg_len = sizeof(struct rppc_instance_handle);
        instance = (struct rppc_instance_handle *)((UInt32)hdr + sizeof(struct rppc_msg_header));
        instance->endpoint_address = rpc->remoteAddr;
        instance->status = 0;

        status = MessageQCopy_send (rpc->conn->procId, // remote procid
                                    MultiProc_self(),  // local procid
                                    rpc->conn->addr,   // remote server
                                    rpc->addr,         // local address
                                    buf,               // disconnect msg
                                    sizeof(buf),       // msg size
                                    TRUE);             // wait for available bufs
        if (status != MessageQCopy_S_SUCCESS) {
            GT_0trace(curTrace, GT_4CLASS, "Failed to send disconnect message.");
            status = (EIO);
        }
        else {
            status = OsalSemaphore_pend(rpmsg_rpc_state.sem, 5000);
            if (rpc->created == FALSE) {
                /* remote instance confirmed destroyed */
                status = (EOK);
            }
            else if (status < 0) {
                GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
                status = (EIO);
            }
            else {
                status = (ETIMEDOUT);
            }
        }
    }

    return status;
}
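
/*!
 *  @brief   devctl() handler for the rpmsg-rpc device. Dispatches
 *           RPPC_IOC_CREATE to _rpmsg_rpc_create; all other commands fall
 *           through to the default handler or return ENOSYS.
 */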
Int
rpmsg_rpc_devctl(resmgr_context_t *ctp, io_devctl_t *msg, IOFUNC_OCB_T *i_ocb)
{
    Int status = 0;
    rpmsg_rpc_ocb_t *ocb = (rpmsg_rpc_ocb_t *)i_ocb;

    if ((status = iofunc_devctl_default(ctp, msg, &ocb->hdr)) != _RESMGR_DEFAULT)
        return(_RESMGR_ERRNO(status));
    status = 0;

    switch (msg->i.dcmd)
    {
        case RPPC_IOC_CREATE:
            status = _rpmsg_rpc_create (ctp, msg, ocb);
            break;
#if 0
        case RPPC_IOC_DESTROY:
            status = _rpmsg_rpc_destroy (ctp, msg, ocb);
            break;
#endif
        default:
            status = (ENOSYS);
            break;
    }

    return status;
}

/*!
 *  @brief   Attach a process to the rpmsg-rpc user support framework.
 *
 *  @param   rpc    rpc instance object for the attaching client process
 *
 *  @sa      _rpmsg_rpc_detach
 */
static
Int
_rpmsg_rpc_attach (rpmsg_rpc_object * rpc)
{
    Int32         status  = EOK;
    Bool          flag    = FALSE;
    Bool          isInit  = FALSE;
    List_Object * bufList = NULL;
    List_Object * fxnList = NULL;
    IArg          key     = 0;
    List_Params   listparams;
    UInt32        i;

    GT_1trace (curTrace, GT_ENTER, "_rpmsg_rpc_attach", rpc);

    key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
    for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            rpmsg_rpc_state.eventState [i].refCount++;
            isInit = TRUE;
            status = EOK;
            break;
        }
    }

    if (isInit == FALSE) {
        List_Params_init (&listparams);
        bufList = List_create (&listparams) ;
        fxnList = List_create (&listparams) ;
        /* Search for an available slot for user process. */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            if (rpmsg_rpc_state.eventState [i].rpc == NULL) {
                rpmsg_rpc_state.eventState [i].rpc = rpc;
                rpmsg_rpc_state.eventState [i].refCount = 1;
                rpmsg_rpc_state.eventState [i].bufList = bufList;
                rpmsg_rpc_state.eventState [i].fxnList = fxnList;
                flag = TRUE;
                break;
            }
        }

        /* No free slots found. Let this check remain at run-time,
         * since it is dependent on user environment.
         */
        if (flag != TRUE) {
            /*! @retval Notify_E_RESOURCE Maximum number of
             supported user clients have already been registered. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "rpmsgDrv_attach",
                                 status,
                                 "Maximum number of supported user"
                                 " clients have already been "
                                 "registered.");
            if (bufList != NULL) {
                List_delete (&bufList);
            }
            if (fxnList != NULL) {
                List_delete (&fxnList);
            }
        }
    }
    IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

    GT_1trace (curTrace, GT_LEAVE, "_rpmsg_rpc_attach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed. */
    return status ;
}

/*!
 *  @brief   This function adds data for a registered process.
 *
 *  @param   rpc      RPC object associated with the client
 *  @param   src      Source address (endpoint) sending the data
 *  @param   pid      Process ID associated with the client
 *  @param   msgId    Unique id of the rpc function call message
 *  @param   data     Data to be added
 *  @param   len      Length of data to be added
 *
 *  @sa
 */
Int
_rpmsg_rpc_addBufByPid (rpmsg_rpc_object *rpc,
                        UInt32            src,
                        UInt32            pid,
                        UInt16            msgId,
                        void *            data,
                        UInt32            len)
{
    Int32                   status = EOK;
    Bool                    flag   = FALSE;
    rpmsg_rpc_EventPacket * uBuf   = NULL;
    IArg                    key;
    UInt32                  i;
    WaitingReaders_t      * item;
    MsgList_t             * msgItem;
    List_Elem             * elem = NULL;
    List_Elem             * temp = NULL;

    GT_5trace (curTrace,
               GT_ENTER,
               "_rpmsg_rpc_addBufByPid",
               rpc,
               src,
               pid,
               data,
               len);

    GT_assert (curTrace, (rpmsg_rpc_state.isSetup == TRUE));

    key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
    /* Find the registration for this callback */
    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            flag = TRUE;
            break;
        }
    }
    IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (flag != TRUE) {
        /*! @retval ENOMEM Could not find a registered handler
         for this process. */
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "_rpmsgDrv_addBufByPid",
                             status,
                             "Could not find a registered handler "
                             "for this process.!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        /* Allocate memory for the buf */
        pthread_mutex_lock(&rpmsg_rpc_state.lock);
        uBuf = get_uBuf();
        pthread_mutex_unlock(&rpmsg_rpc_state.lock);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (uBuf == NULL) {
            /*! @retval Notify_E_MEMORY Failed to allocate memory for event
             packet for received callback. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_rpmsgDrv_addBufByPid",
                                 status,
                                 "Failed to allocate memory for event"
                                 " packet for received callback.!");
        }
        else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
            List_traverse_safe(elem, temp, rpmsg_rpc_state.eventState [i].fxnList) {
                if (((rpmsg_rpc_FxnInfo *)elem)->msgId == msgId) {
                    List_remove(rpmsg_rpc_state.eventState [i].fxnList, elem);
                    break;
                }
            }
            IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

            if (elem != (List_Elem *)rpmsg_rpc_state.eventState [i].fxnList) {
                struct rppc_function * function;
                function = &(((rpmsg_rpc_FxnInfo *)elem)->func);
                _rpmsg_rpc_translate(NULL, (char *)function, pid, true);
                Memory_free(NULL, elem, sizeof(rpmsg_rpc_FxnInfo) +
                            RPPC_TRANS_SIZE(function->num_translations));
            }

            List_elemClear (&(uBuf->element));
            GT_assert (curTrace,
                       (rpmsg_rpc_state.eventState [i].bufList != NULL));

            if (data) {
                Memory_copy(uBuf->data, data, len);
            }
            uBuf->len = len;

            List_put (rpmsg_rpc_state.eventState [i].bufList,
                      &(uBuf->element));
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            item = dequeue_waiting_reader(i);
            if (item) {
                // there is a waiting reader
                deliver_notification(i, item->rcvid);
                put_wr(item);
                pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                status = EOK;
            }
            else {
                if (enqueue_notify_list(i) < 0) {
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "_rpmsgDrv_addBufByPid",
                                         status,
                                         "Failed to allocate memory for notifier");
                }
                else {
                    msgItem = find_nl(i);
                    /* TODO: rpc could be NULL in some cases */
                    if (rpc && msgItem) {
                        if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, msgItem->num_events, 0)) {
                            iofunc_notify_trigger(rpc->notify, msgItem->num_events, IOFUNC_NOTIFY_INPUT);
                        }
                    }
                    status = EOK;
                    pthread_cond_signal(&rpmsg_rpc_state.cond);
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                }
            }
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        }
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_1trace (curTrace, GT_LEAVE, "_rpmsgDrv_addBufByPid", status);

    return status;
}

/*!
 *  @brief   This function implements the callback registered with
 *           MessageQCopy_create for each client. This function
 *           adds the message from the remote proc to a list
 *           where it is routed to the appropriate waiting reader.
 *
 *  @param   handle     local MessageQCopy handle on which the message
 *                      was received
 *  @param   data       message payload
 *  @param   len        length of the message payload
 *  @param   priv       private argument registered at create time (the
 *                      rpmsg_rpc_object for this client)
 *  @param   src        remote endpoint address the message was sent from
 *  @param   srcProc    remote processor id the message was sent from
 *
 *  @sa
 */
Void
_rpmsg_rpc_cb (MessageQCopy_Handle handle, void * data, int len, void * priv,
               UInt32 src, UInt16 srcProc)
{
    Int32 status = 0;
    rpmsg_rpc_object * rpc = NULL;
    struct rppc_msg_header * msg_hdr = NULL;
    struct rppc_instance_handle * instance;
    struct rppc_packet * packet = NULL;

    GT_6trace (curTrace,
               GT_ENTER,
               "_rpmsg_rpc_cb",
               handle,
               data,
               len,
               priv,
               src,
               srcProc);

    if (len < sizeof(struct rppc_msg_header)) {
        status = EINVAL;
        GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb", status,
                            "len is smaller than sizeof rppc_msg_header");
        return;
    }

    rpc = (rpmsg_rpc_object *) priv;
    msg_hdr = (struct rppc_msg_header *)data;

    switch (msg_hdr->msg_type) {
        case RPPC_MSG_INSTANCE_CREATED:
            if (msg_hdr->msg_len != sizeof(struct rppc_instance_handle)) {
                status = EINVAL;
                rpc->created = FALSE;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "msg_len is invalid");
            }
            else {
                instance = (struct rppc_instance_handle *)
                          ((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
                rpc->remoteAddr = instance->endpoint_address;
                if (instance->status != 0) {
                    rpc->created = FALSE;
                }
                else {
                    rpc->created = TRUE;
                }
            }
            /* post the semaphore to have the ioctl reply */
            OsalSemaphore_post(rpmsg_rpc_state.sem);
            break;
        case RPPC_MSG_INSTANCE_DESTROYED:
            if (msg_hdr->msg_len != sizeof(struct rppc_instance_handle)) {
                status = EINVAL;
                rpc->created = FALSE;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "msg_len is invalid");
            }
            else {
                instance = (struct rppc_instance_handle *)
                          ((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
                rpc->remoteAddr = instance->endpoint_address;
                if (instance->status != 0) {
                    rpc->created = TRUE;
                }
                else {
                    rpc->created = FALSE;
                }
            }
            /* post the semaphore to have the ioctl reply */
            OsalSemaphore_post(rpmsg_rpc_state.sem);
            break;

        case RPPC_MSG_CALL_FUNCTION:
            if ((len != sizeof(struct rppc_msg_header) + msg_hdr->msg_len) ||
                (msg_hdr->msg_len < sizeof(struct rppc_packet))) {
                status = EINVAL;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "msg_len is invalid");
                /* drop the malformed message instead of reading past it */
                break;
            }
            packet = (struct rppc_packet *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
            if (len != sizeof(struct rppc_msg_header) + sizeof(struct rppc_packet) + packet->data_size) {
                status = EINVAL;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "msg_len is invalid");
                break;
            }
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            status =
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
                     _rpmsg_rpc_addBufByPid (rpc,
                                             src,
                                             rpc->pid,
                                             packet->msg_id,
                                             &packet->fxn_id,
                                             sizeof(packet->fxn_id) + sizeof(packet->result));
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            if (status < 0) {
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "_rpmsg_rpc_cb",
                                     status,
                                     "Failed to add callback packet for pid");
            }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            break;
        default:
            break;
    }

    GT_0trace (curTrace, GT_LEAVE, "_rpmsg_rpc_cb");
}

/**
 * Handler for ocb_calloc() requests.
 *
 * Special handler for ocb_calloc() requests that we export for control. An
 * open request from the client will result in a call to our special
 * ocb_calloc handler. This function attaches the client's pid using
 * _rpmsg_rpc_attach and allocates client-specific information. This function
 * also creates an endpoint for the client to communicate with the RPC server
 * on the remote core.
 *
 * \param ctp       Thread's associated context information.
 * \param device    Device attributes structure.
 *
 * \return Pointer to an iofunc_ocb_t OCB structure.
 */

IOFUNC_OCB_T *
rpmsg_rpc_ocb_calloc (resmgr_context_t * ctp, IOFUNC_ATTR_T * device)
{
    rpmsg_rpc_ocb_t *ocb = NULL;
    rpmsg_rpc_object *obj = NULL;
    struct _msg_info cl_info;
    rpmsg_rpc_dev_t * dev = NULL;
    int i = 0;
    Bool found = FALSE;
    char path1[20];
    char path2[20];

    GT_2trace (curTrace, GT_ENTER, "rpmsg_rpc_ocb_calloc",
               ctp, device);

    /* Allocate the OCB */
    ocb = (rpmsg_rpc_ocb_t *) calloc (1, sizeof (rpmsg_rpc_ocb_t));
    if (ocb == NULL){
        errno = ENOMEM;
        return (NULL);
    }

    ocb->pid = ctp->info.pid;

    /* Allocate memory for the rpmsg object. */
    obj = Memory_calloc (NULL, sizeof (rpmsg_rpc_object), 0u, NULL);
    if (obj == NULL) {
        errno = ENOMEM;
        free(ocb);
        return (NULL);
    }
    else {
        ocb->rpc = obj;
        IOFUNC_NOTIFY_INIT(obj->notify);
        obj->created = FALSE;
        /* determine conn and procId for communication based on which device
         * was opened */
        MsgInfo(ctp->rcvid, &cl_info);
        resmgr_pathname(ctp->id, 0, path1, sizeof(path1));
        for (i = 0; i < MAX_CONNS; i++) {
            if (rpmsg_rpc_state.objects[i] != NULL) {
                dev = rpmsg_rpc_state.objects[i]->dev;
                resmgr_pathname(dev->rpmsg_rpc.resmgr_id, 0, path2,
                                sizeof(path2));
                if (!strcmp(path1, path2)) {
                    found = TRUE;
                    break;
                }
            }
        }
        if (found) {
            obj->conn = rpmsg_rpc_state.objects[i];
            obj->procId = obj->conn->procId;
            obj->pid = ctp->info.pid;
            obj->mq = MessageQCopy_create (MessageQCopy_ADDRANY, NULL,
                                           _rpmsg_rpc_cb, obj, &obj->addr);
            if (obj->mq == NULL) {
                errno = ENOMEM;
                free(obj);
                free(ocb);
                return (NULL);
            }
            else {
                if (_rpmsg_rpc_attach (ocb->rpc) < 0) {
                    errno = ENOMEM;
                    MessageQCopy_delete (&obj->mq);
                    free(obj);
                    free(ocb);
                    return (NULL);
                }
            }
        }
    }

    GT_1trace (curTrace, GT_LEAVE, "rpmsg_rpc_ocb_calloc", ocb);

    return (IOFUNC_OCB_T *)(ocb);
}

/*!
 *  @brief   Detach a process from the rpmsg-rpc user support framework.
 *
 *  @param   rpc    rpc instance object for the detaching client process
 *
 *  @sa      _rpmsg_rpc_attach
 */
static
Int
_rpmsg_rpc_detach (rpmsg_rpc_object * rpc)
{
    Int32              status    = EOK;
    Int32              tmpStatus = EOK;
    Bool               flag      = FALSE;
    List_Object *      bufList   = NULL;
    List_Object *      fxnList   = NULL;
    UInt32             i;
    IArg               key;
    MsgList_t        * item;
    WaitingReaders_t * wr        = NULL;
    struct _msg_info   info;

    GT_1trace (curTrace, GT_ENTER, "rpmsg_rpc_detach", rpc);

    key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            if (rpmsg_rpc_state.eventState [i].refCount == 1) {
                rpmsg_rpc_state.eventState [i].refCount = 0;

                flag = TRUE;
                break;
            }
            else {
                rpmsg_rpc_state.eventState [i].refCount--;
                status = EOK;
                break;
            }
        }
    }
    IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

    if (flag == TRUE) {
        key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
        /* Last client being unregistered for this process. */
        rpmsg_rpc_state.eventState [i].rpc = NULL;

        /* Store in local variable to delete outside lock. */
        bufList = rpmsg_rpc_state.eventState [i].bufList;

        rpmsg_rpc_state.eventState [i].bufList = NULL;

        /* Store in local variable to delete outside lock. */
        fxnList = rpmsg_rpc_state.eventState [i].fxnList;

        rpmsg_rpc_state.eventState [i].fxnList = NULL;

        IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
    }

    if (flag != TRUE) {
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (i == MAX_PROCESSES) {
            /*! @retval Notify_E_NOTFOUND The specified user process was
             not found registered with Notify Driver module. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "rpmsg_rpc_detach",
                                 status,
                                 "The specified user process was not found"
                                 " registered with rpmsg Driver module.");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }
    else {
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                int rcvid = wr->rcvid;
                /* Recycle the reader entry before replying, so it is not
                 * leaked when the rcvid is no longer valid. */
                put_wr(wr);
                /* Check if rcvid is still valid */
                if (MsgInfo(rcvid, &info) != -1) {
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                    MsgError(rcvid, EINTR);
                    pthread_mutex_lock(&rpmsg_rpc_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (rpc) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, 1, 0)) {
                    iofunc_notify_trigger(rpc->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }

            /* Free event packets for any received but unprocessed events. */
            while ((item = find_nl(i)) != NULL) {
                if (dequeue_notify_list_item(item) >= 0) {
                    rpmsg_rpc_EventPacket * uBuf = NULL;

                    uBuf = (rpmsg_rpc_EventPacket *) List_get (bufList);

                    /* Let the check remain at run-time. */
                    if (uBuf != NULL) {
                        put_uBuf(uBuf);
                    }
                }
            }
            pthread_mutex_unlock(&rpmsg_rpc_state.lock);

            /* Last client being unregistered with Notify module. */
            List_delete (&bufList);
        }
        if (fxnList != NULL) {
            rpmsg_rpc_FxnInfo * fxnInfo = NULL;
            key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
            while ((fxnInfo = (rpmsg_rpc_FxnInfo *)List_dequeue (fxnList))) {
                Memory_free (NULL, fxnInfo, sizeof(rpmsg_rpc_FxnInfo) +
                             RPPC_TRANS_SIZE(fxnInfo->func.num_translations));
            }
            IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
            List_delete (&fxnList);
        }

#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if ((tmpStatus < 0) && (status >= 0)) {
            status = tmpStatus;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "rpmsg_rpc_detach",
                                 status,
                                 "Failed to delete termination semaphore!");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }

    GT_1trace (curTrace, GT_LEAVE, "rpmsg_rpc_detach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed */
    return status;
}

/**
 * Handler for ocb_free() requests.
 *
 * Special handler for ocb_free() requests that we export for control. A
 * close request from the client will result in a call to our special
 * ocb_free handler. This function detaches the client's pid using
 * _rpmsg_rpc_detach and frees any client-specific information that was
 * allocated.
 *
 * \param i_ocb     OCB associated with client's session.
 *
 * \return None.
 */

void
rpmsg_rpc_ocb_free (IOFUNC_OCB_T * i_ocb)
{
    rpmsg_rpc_ocb_t * ocb = (rpmsg_rpc_ocb_t *)i_ocb;
    rpmsg_rpc_object *obj;

    if (ocb && ocb->rpc) {
        obj = ocb->rpc;
        if (obj->created == TRUE) {
            /* Need to disconnect this device */
            _rpmsg_rpc_destroy(NULL, NULL, ocb);
        }
        _rpmsg_rpc_detach(ocb->rpc);
        if (obj->mq) {
            MessageQCopy_delete (&obj->mq);
            obj->mq = NULL;
        }
        free (obj);
        free (ocb);
    }
}

/**
 * Handler for close_ocb() requests.
 *
 * This function removes the notification entries associated with the current
 * client.
 *
 * \param ctp       Thread's associated context information.
 * \param reserved  This argument must be NULL.
 * \param ocb       OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 */

Int
rpmsg_rpc_close_ocb (resmgr_context_t *ctp, void *reserved, RESMGR_OCB_T *ocb)
{
    rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
    iofunc_notify_remove(ctp, rpc_ocb->rpc->notify);
    return (iofunc_close_ocb_default(ctp, reserved, ocb));
}

/**
 * Handler for read() requests.
 *
 * Handles special read() requests that we export for control. A read
 * request will get a message from the remote processor that is associated
 * with the client that is calling read().
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual read() message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 * \retval EAGAIN   Call is non-blocking and no messages available.
 * \retval ENOMEM   Not enough memory to perform the read.
 */

int rpmsg_rpc_read(resmgr_context_t *ctp, io_read_t *msg, RESMGR_OCB_T *ocb)
{
    Int status;
    rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
    rpmsg_rpc_object * rpc = rpc_ocb->rpc;
    Bool flag = FALSE;
    Int retVal = EOK;
    UInt32 i;
    MsgList_t * item;
    Int nonblock;

    if ((status = iofunc_read_verify(ctp, msg, ocb, &nonblock)) != EOK)
        return (status);

    if (rpc->created != TRUE) {
        return (ENOTCONN);
    }

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            item = find_nl(i);
            if (dequeue_notify_list_item(item) < 0) {
                if (nonblock) {
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                    return EAGAIN;
                }
                else {
                    retVal = enqueue_waiting_reader(i, ctp->rcvid);
                    if (retVal == EOK) {
                        pthread_cond_signal(&rpmsg_rpc_state.cond);
                        pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                        return(_RESMGR_NOREPLY);
                    }
                    retVal = ENOMEM;
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                }
            }
            else {
                deliver_notification(i, ctp->rcvid);
                pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                return(_RESMGR_NOREPLY);
            }
        }
    }

    /*! @retval Number-of-bytes-read Number of bytes read. */
    return retVal;
}

/**
 * Unblock read calls
 *
 * This function checks if the client is blocked on a read call and if so,
 * unblocks the client.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The pulse message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EINTR    The client has been unblocked.
 * \retval other    The client has not been unblocked or the client was not
 *                  blocked.
 */

int rpmsg_rpc_read_unblock(resmgr_context_t *ctp, io_pulse_t *msg, iofunc_ocb_t *ocb)
{
    UInt32 i;
    Bool flag = FALSE;
    WaitingReaders_t * wr;
    rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
    rpmsg_rpc_object * rpc = rpc_ocb->rpc;

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            wr = find_waiting_reader(i, ctp->rcvid);
            if (wr) {
                put_wr(wr);
                pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                return (EINTR);
            }
            pthread_mutex_unlock(&rpmsg_rpc_state.lock);
        }
    }

    return _RESMGR_NOREPLY;
}

/**
 * Handler for unblock() requests.
 *
 * Handles unblock request for the client which is requesting to no longer be
 * blocked on the rpmsg-rpc driver.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The pulse message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EINTR    The rcvid has been unblocked.
 */

int rpmsg_rpc_unblock(resmgr_context_t *ctp, io_pulse_t *msg, RESMGR_OCB_T *ocb)
{
    int status = _RESMGR_NOREPLY;
    struct _msg_info info;

    /*
     * Try to run the default unblock for this message.
     */
    if ((status = iofunc_unblock_default(ctp, msg, ocb)) != _RESMGR_DEFAULT) {
        return status;
    }

    /*
     * Check if rcvid is still valid and still has an unblock
     * request pending.
     */
    if (MsgInfo(ctp->rcvid, &info) == -1 ||
        !(info.flags & _NTO_MI_UNBLOCK_REQ)) {
        return _RESMGR_NOREPLY;
    }

    if (rpmsg_rpc_read_unblock(ctp, msg, ocb) != _RESMGR_NOREPLY) {
        return _RESMGR_ERRNO(EINTR);
    }

    return _RESMGR_ERRNO(EINTR);
}
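
/*!
 *  @brief   Translate a master-side physical address to the remote
 *           processor's device address. Addresses in the TILER range are
 *           passed through unchanged; everything else goes through
 *           ProcMgr_translateAddr, with 0 returned on failure.
 */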
uint32_t
_rpmsg_rpc_pa2da(ProcMgr_Handle handle, uint32_t pa)
{
    Int status = 0;
    uint32_t da;

    if (pa >= TILER_MEM_8BIT && pa < TILER_MEM_END) {
        return pa;
    }
    else {
        status = ProcMgr_translateAddr(handle, (Ptr *)&da,
                                       ProcMgr_AddrType_SlaveVirt,
                                       (Ptr)pa, ProcMgr_AddrType_MasterPhys);
        if (status >= 0)
            return da;
        else
            return 0;
    }
}
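
/*!
 *  @brief   Walk a function's translation table and patch every embedded
 *           pointer: each parameter buffer is located in the client's
 *           address space with mem_offset64_peer(), mapped into this process
 *           with mmap64(), and the pointer stored at the given offset is
 *           rewritten to a device address (forward) or restored to the
 *           original user base (reverse). Pointer-type parameters themselves
 *           are also translated to device addresses on the forward path.
 */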
int
_rpmsg_rpc_translate(ProcMgr_Handle handle, char *data, pid_t pid, bool reverse)
{
    int status = EOK;
    struct rppc_function * function = NULL;
    struct rppc_param_translation * translation = NULL;
    int i = 0;
    off64_t phys_addr;
    off64_t paddr[RPPC_MAX_PARAMETERS];
    uint32_t ipu_addr;
    size_t phys_len = 0;
    uintptr_t ptr;
    void * vptr[RPPC_MAX_PARAMETERS];
    uint32_t idx = 0;
    uint32_t param_offset = 0;

    function = (struct rppc_function *)data;
    memset(vptr, 0, sizeof(void *) * RPPC_MAX_PARAMETERS);
    memset(paddr, 0, sizeof(off64_t) * RPPC_MAX_PARAMETERS);

    translation = (struct rppc_param_translation *)function->translations;
    for (i = 0; i < function->num_translations; i++) {
        idx = translation[i].index;
        if (idx >= function->num_params) {
            status = -EINVAL;
            break;
        }
        param_offset = function->params[idx].data - function->params[idx].base;
        if (translation[i].offset - param_offset + sizeof(uint32_t) > function->params[idx].size) {
            status = -EINVAL;
            break;
        }
        if (!vptr[idx]) {
            /* get the physical address of ptr */
            status = mem_offset64_peer(pid,
                                       function->params[idx].data,
                                       function->params[idx].size,
                                       &paddr[idx], &phys_len);
            if (status >= 0 && phys_len == function->params[idx].size) {
                /* map into my process space */
                vptr[idx] = mmap64(NULL, function->params[idx].size,
                                   PROT_NOCACHE | PROT_READ | PROT_WRITE,
                                   MAP_PHYS, NOFD, paddr[idx]);
                if (vptr[idx] == MAP_FAILED) {
                    vptr[idx] = NULL;
                    status = -ENOMEM;
                    break;
                }
            }
            else {
                status = -EINVAL;
                break;
            }
        }
        /* Patch the pointer stored within the mapped parameter buffer */
        ptr = (uint32_t)vptr[idx] + translation[i].offset - param_offset;
        if (reverse) {
            *(uint32_t *)ptr = translation[i].base;
        }
        else {
            translation[i].base = *(uint32_t *)ptr;
            status = mem_offset64_peer(pid, *(uint32_t *)ptr, sizeof(uint32_t),
                                       &phys_addr, &phys_len);
            if (status >= 0 && phys_len == sizeof(uint32_t)) {
                /* translate pa2da */
                if ((ipu_addr =
                        _rpmsg_rpc_pa2da(handle, (uint32_t)phys_addr)) != 0)
                    /* update vptr contents */
                    *(uint32_t *)ptr = ipu_addr;
                else {
                    status = -EINVAL;
                    break;
                }
            }
            else {
                status = -EINVAL;
                break;
            }
        }
    }

    /* No need to do this for reverse translations */
    for (i = 0; i < function->num_params && status >= 0 && !reverse; i++) {
        if (function->params[i].type == RPPC_PARAM_TYPE_PTR) {
            if (paddr[i]) {
                phys_addr = paddr[i];
            }
            else {
                /* translate the param pointer */
                status = mem_offset64_peer(pid,
                                           (uintptr_t)(function->params[i].data),
                                           function->params[i].size, &phys_addr,
                                           &phys_len);
            }
            if (status >= 0) {
                if ((ipu_addr =
                        _rpmsg_rpc_pa2da(handle, (uint32_t)phys_addr)) != 0)
                    function->params[i].data = ipu_addr;
                else {
                    status = -EINVAL;
                    break;
                }
            }
            else {
                status = -EINVAL;
                break;
            }
        }
    }

    for (i = 0; i < function->num_params; i++) {
        if (vptr[i])
            munmap(vptr[i], function->params[i].size);
    }

    return status;
}

/**
 * Handler for write() requests.
 *
 * Handles special write() requests that we export for control. A write()
 * request will send a message to the remote processor which is associated
 * with the client.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual write() message.
 * \param io_ocb  OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 * \retval ENOMEM   Not enough memory to perform the write.
 * \retval EIO      MessageQCopy_send failed.
 * \retval EINVAL   msg->i.bytes is negative.
 */

int
rpmsg_rpc_write(resmgr_context_t *ctp, io_write_t *msg, RESMGR_OCB_T *io_ocb)
{
    int status;
    char buf[MessageQCopy_BUFSIZE];
    int bytes;
    rpmsg_rpc_ocb_t * ocb = (rpmsg_rpc_ocb_t *)io_ocb;
    rpmsg_rpc_object * rpc = ocb->rpc;
    struct rppc_msg_header * msg_hdr = NULL;
    struct rppc_packet *packet = NULL;
    struct rppc_function *function = NULL;
    char usr_msg[MessageQCopy_BUFSIZE];
    int i = 0;
    rpmsg_rpc_EventState * event_state = NULL;
    rpmsg_rpc_FxnInfo * fxn_info = NULL;
    IArg key = 0;

    if ((status = iofunc_write_verify(ctp, msg, io_ocb, NULL)) != EOK) {
        return (status);
    }

    bytes = ((int64_t) msg->i.nbytes) + sizeof(struct rppc_msg_header) > MessageQCopy_BUFSIZE ?
            MessageQCopy_BUFSIZE - sizeof(struct rppc_msg_header) : msg->i.nbytes;
    if (bytes < 0) {
        return EINVAL;
    }
    _IO_SET_WRITE_NBYTES (ctp, bytes);

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            break;
        }
    }
    if (i == MAX_PROCESSES) {
        return EINVAL;
    }
    event_state = &rpmsg_rpc_state.eventState[i];

    msg_hdr = (struct rppc_msg_header *)buf;
    packet = (struct rppc_packet *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));

    status = resmgr_msgread(ctp, usr_msg, bytes, sizeof(msg->i));
    if (status != bytes) {
        return (errno);
    }
    else if (bytes < sizeof(struct rppc_function)) {
        return (EINVAL);
    }
    function = (struct rppc_function *)usr_msg;

    if (bytes < RPPC_PARAM_SIZE(function->num_translations)) {
        return (EINVAL);
    }

    /* check that we're in the correct state */
    if (rpc->created != TRUE) {
        return (EINVAL);
    }

    /* store the fxn info for use with reverse translation */
    fxn_info = Memory_alloc (NULL, sizeof(rpmsg_rpc_FxnInfo) +
                             RPPC_TRANS_SIZE(function->num_translations),
                             0, NULL);
    if (fxn_info == NULL) {
        return (ENOMEM);
    }
    List_elemClear(&(fxn_info->element));
    Memory_copy (&(fxn_info->func), function,
                 RPPC_PARAM_SIZE(function->num_translations));

    status = _rpmsg_rpc_translate(rpc->conn->procH, (char *)function,
                                  ctp->info.pid, false);
    if (status < 0) {
        Memory_free(NULL, fxn_info, sizeof(rpmsg_rpc_FxnInfo) +
                    RPPC_TRANS_SIZE(function->num_translations));
        return -status;
    }

    msg_hdr->msg_type = RPPC_MSG_CALL_FUNCTION;
    msg_hdr->msg_len = sizeof(struct rppc_packet);

    /* initialize the packet structure */
    packet->desc = RPPC_DESC_EXEC_SYNC;
    packet->msg_id = msg_id == 0xFFFF ? msg_id = 1 : ++(msg_id);
    packet->flags = (0x8000);//OMAPRPC_POOLID_DEFAULT;
    packet->fxn_id = RPPC_SET_FXN_IDX(function->fxn_id);
    packet->result = 0;
    packet->data_size = 0;
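
    /* Marshal each parameter as a (size, data) word pair appended after the
     * packet header; data_size counts these trailing words. */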
    for (i = 0; i < function->num_params; i++) {
        ((UInt32 *)(packet->data))[i*2] = function->params[i].size;
        ((UInt32 *)(packet->data))[(i*2)+1] = function->params[i].data;
        packet->data_size += (sizeof(UInt32) * 2);
    }
    msg_hdr->msg_len += packet->data_size;

    fxn_info->msgId = packet->msg_id;
    key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
    List_enqueue(event_state->fxnList, &(fxn_info->element));
    IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

    status = MessageQCopy_send(rpc->conn->procId, MultiProc_self(),
                               rpc->remoteAddr, rpc->addr, buf,
                               msg_hdr->msg_len + sizeof(struct rppc_msg_header),
                               TRUE);
    if (status < 0) {
        key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
        List_remove(event_state->fxnList, &(fxn_info->element));
        IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
        Memory_free(NULL, fxn_info, sizeof(rpmsg_rpc_FxnInfo) +
                    RPPC_TRANS_SIZE(function->num_translations));
        return (EIO);
    }

    return(EOK);
}

/**
 * Handler for notify() requests.
 *
 * Handles special notify() requests that we export for control. A notify
 * request results from the client calling select().
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual notify() message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 */

Int rpmsg_rpc_notify( resmgr_context_t *ctp, io_notify_t *msg, RESMGR_OCB_T *ocb)
{
    rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
    rpmsg_rpc_object * rpc = rpc_ocb->rpc;
    int trig;
    int i = 0;
    Bool flag = FALSE;
    MsgList_t * item = NULL;
    int status = EOK;

    trig = _NOTIFY_COND_OUTPUT; /* clients can give us data */

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            flag = TRUE;
            break;
        }
    }

    pthread_mutex_lock(&rpmsg_rpc_state.lock);
    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
            item = find_nl(i);
            if (item && item->num_events > 0) {
                trig |= _NOTIFY_COND_INPUT;
            }
        }
    }
    status = iofunc_notify(ctp, msg, rpc_ocb->rpc->notify, trig, NULL, NULL);
    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
    return status;
}

/**
 * Detaches an rpmsg-rpc resource manager device name.
 *
 * \param dev     The device to detach.
 *
 * \return None.
 */

static
Void
_deinit_rpmsg_rpc_device (rpmsg_rpc_dev_t * dev)
{
    resmgr_detach(syslink_dpp, dev->rpmsg_rpc.resmgr_id, _RESMGR_DETACH_CLOSE);

    pthread_mutex_destroy(&dev->rpmsg_rpc.mutex);

    free (dev);

    return;
}

/**
 * Initializes and attaches rpmsg-rpc resource manager functions to an
 * rpmsg-rpc device name.
 *
 * \param name    The name of the device to create under /dev.
 *
 * \return Pointer to the created rpmsg_rpc_dev_t device.
 */
2009 static
2010 rpmsg_rpc_dev_t *
2011 _init_rpmsg_rpc_device (char * name)
2012 {
2013 iofunc_attr_t * attr;
2014 resmgr_attr_t resmgr_attr;
2015 rpmsg_rpc_dev_t * dev = NULL;
2017 dev = malloc(sizeof(*dev));
2018 if (dev == NULL) {
2019 return NULL;
2020 }
2022 memset(&resmgr_attr, 0, sizeof resmgr_attr);
2023 resmgr_attr.nparts_max = 10;
2024 resmgr_attr.msg_max_size = 2048;
2025 memset(&dev->rpmsg_rpc.mattr, 0, sizeof(iofunc_mount_t));
2026 dev->rpmsg_rpc.mattr.flags = ST_NOSUID | ST_NOEXEC;
2027 dev->rpmsg_rpc.mattr.conf = IOFUNC_PC_CHOWN_RESTRICTED |
2028 IOFUNC_PC_NO_TRUNC |
2029 IOFUNC_PC_SYNC_IO;
2030 dev->rpmsg_rpc.mattr.funcs = &dev->rpmsg_rpc.mfuncs;
2031 memset(&dev->rpmsg_rpc.mfuncs, 0, sizeof(iofunc_funcs_t));
2032 dev->rpmsg_rpc.mfuncs.nfuncs = _IOFUNC_NFUNCS;
2033 dev->rpmsg_rpc.mfuncs.ocb_calloc = rpmsg_rpc_ocb_calloc;
2034 dev->rpmsg_rpc.mfuncs.ocb_free = rpmsg_rpc_ocb_free;
2035 iofunc_func_init(_RESMGR_CONNECT_NFUNCS, &dev->rpmsg_rpc.cfuncs,
2036 _RESMGR_IO_NFUNCS, &dev->rpmsg_rpc.iofuncs);
2037 iofunc_attr_init(attr = &dev->rpmsg_rpc.cattr, S_IFCHR | 0777, NULL, NULL);
2038 dev->rpmsg_rpc.iofuncs.devctl = rpmsg_rpc_devctl;
2039 dev->rpmsg_rpc.iofuncs.notify = rpmsg_rpc_notify;
2040 dev->rpmsg_rpc.iofuncs.close_ocb = rpmsg_rpc_close_ocb;
2041 dev->rpmsg_rpc.iofuncs.read = rpmsg_rpc_read;
2042 dev->rpmsg_rpc.iofuncs.write = rpmsg_rpc_write;
2043 dev->rpmsg_rpc.iofuncs.unblock = rpmsg_rpc_read_unblock;
2044 attr->mount = &dev->rpmsg_rpc.mattr;
2045 iofunc_time_update(attr);
2046 pthread_mutex_init(&dev->rpmsg_rpc.mutex, NULL);
2048 snprintf (dev->rpmsg_rpc.device_name, _POSIX_PATH_MAX, "/dev/%s", name);
2049 if (-1 == (dev->rpmsg_rpc.resmgr_id =
2050 resmgr_attach(syslink_dpp, &resmgr_attr,
2051 dev->rpmsg_rpc.device_name, _FTYPE_ANY, 0,
2052 &dev->rpmsg_rpc.cfuncs,
2053 &dev->rpmsg_rpc.iofuncs, attr))) {
2054 pthread_mutex_destroy(&dev->rpmsg_rpc.mutex);
2055 free(dev);
2056 return(NULL);
2057 }
2059 return(dev);
2060 }
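
/*
 * Illustrative pairing (a sketch; the name is hypothetical): each
 * remote-created channel gets its own device node, torn down with the
 * matching _deinit call.
 *
 *     rpmsg_rpc_dev_t *dev = _init_rpmsg_rpc_device("rpmsg-rpc-1");
 *     // ... /dev/rpmsg-rpc-1 now services open/read/write/devctl/notify ...
 *     if (dev != NULL) {
 *         _deinit_rpmsg_rpc_device(dev);
 *     }
 */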
/**
 * Callback passed to MessageQCopy_registerNotify.
 *
 * This callback is called when a remote processor creates a MessageQCopy
 * handle with the same name as the local MessageQCopy handle and then
 * calls NameMap_register to notify the HOST of the handle.
 *
 * \param handle   The remote handle.
 * \param procId   The remote proc ID of the remote handle.
 * \param endpoint The endpoint address of the remote handle.
 * \param desc     The channel name, used to name the device node.
 * \param create   Create (TRUE) or delete (FALSE) notification.
 *
 * \return None.
 */
static
Void
_rpmsg_rpc_notify_cb (MessageQCopy_Handle handle, UInt16 procId,
                      UInt32 endpoint, Char * desc, Bool create)
{
    Int status = 0, i = 0, j = 0;
    Bool found = FALSE;
    rpmsg_rpc_conn_object * obj = NULL;
    char msg[512];
    struct rppc_msg_header * msg_hdr = (struct rppc_msg_header *)msg;

    /* find a free slot in the connection table */
    for (i = 0; i < MAX_CONNS; i++) {
        if (rpmsg_rpc_state.objects[i] == NULL) {
            found = TRUE;
            break;
        }
    }

    /* find the local queue this notification arrived on */
    for (j = 0; j < NUM_RPMSG_RPC_QUEUES; j++) {
        if (rpmsg_rpc_state.mqHandle[j] == handle) {
            break;
        }
    }

    if (found && j < NUM_RPMSG_RPC_QUEUES) {
        /* found a space to save this mq handle, allocate memory */
        obj = Memory_calloc (NULL, sizeof (rpmsg_rpc_conn_object), 0x0, NULL);
        if (obj) {
            /* store the object in the module info */
            rpmsg_rpc_state.objects[i] = obj;

            /* store the mq info in the object */
            obj->mq = handle;
            obj->procId = procId;
            status = ProcMgr_open(&obj->procH, obj->procId);
            if (status < 0) {
                Osal_printf("Failed to open handle to proc %d", procId);
                Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
                rpmsg_rpc_state.objects[i] = NULL;
            }
            else {
                obj->addr = endpoint;

                /* create a /dev/rpmsg-rpc instance for users to open */
                obj->dev = _init_rpmsg_rpc_device(desc);
                if (obj->dev == NULL) {
                    Osal_printf("Failed to create %s", desc);
                    ProcMgr_close(&obj->procH);
                    Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
                    rpmsg_rpc_state.objects[i] = NULL;
                }
            }
        }

        /* Send a message to query the chan info. Handle creating of the conn
         * in the callback */
        msg_hdr->msg_type = RPPC_MSG_QUERY_CHAN_INFO;
        msg_hdr->msg_len = 0;
        status = MessageQCopy_send(procId, MultiProc_self(), endpoint,
                                   rpmsg_rpc_state.endpoint[j],
                                   msg, sizeof(struct rppc_msg_header),
                                   TRUE);
    }
}
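
/*
 * Hedged sketch of the resulting handshake (message names are the ones used
 * in this file; the remote side's behavior is inferred from the callbacks):
 *
 *     remote                             host (this driver)
 *       | --- NameMap_register ----------> | _rpmsg_rpc_notify_cb()
 *       | <-- RPPC_MSG_QUERY_CHAN_INFO --- | (sent above)
 *       | --- RPPC_MSG_CHAN_INFO --------> | _rpmsg_rpc_module_cb()
 */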
/**
 * Callback passed to MessageQCopy_create for the module.
 *
 * This callback is called when a message is received for the rpmsg-rpc
 * module. Per-client traffic never arrives here, since each client
 * connection gets its own endpoint for message passing; only module-level
 * messages such as the channel-info reply are handled.
 *
 * \param handle  The local MessageQCopy handle.
 * \param data    Data message
 * \param len     Length of data message
 * \param priv    Private information for the endpoint
 * \param src     Remote endpoint sending this message
 * \param srcProc Remote proc ID sending this message
 *
 * \return None.
 */
static
Void
_rpmsg_rpc_module_cb (MessageQCopy_Handle handle, void * data, int len,
                      void * priv, UInt32 src, UInt16 srcProc)
{
    Int status = 0, i = 0, j = 0;
    rpmsg_rpc_conn_object * obj = NULL;
    struct rppc_msg_header * msg_hdr = (struct rppc_msg_header *)data;

    Osal_printf ("_rpmsg_rpc_module_cb callback");

    /* match the sender's endpoint to a known connection */
    for (i = 0; i < MAX_CONNS; i++) {
        if (rpmsg_rpc_state.objects[i] != NULL &&
            rpmsg_rpc_state.objects[i]->addr == src) {
            obj = rpmsg_rpc_state.objects[i];
            break;
        }
    }

    /* match the local handle to one of the module queues */
    for (j = 0; j < NUM_RPMSG_RPC_QUEUES; j++) {
        if (rpmsg_rpc_state.mqHandle[j] == handle) {
            break;
        }
    }

    if (obj && j < NUM_RPMSG_RPC_QUEUES) {
        switch (msg_hdr->msg_type) {
            case RPPC_MSG_CHAN_INFO:
            {
                struct rppc_channel_info * chan_info =
                                     (struct rppc_channel_info *)(msg_hdr + 1);
                obj->numFuncs = chan_info->num_funcs;
                /* TODO: Query the info about each function */
                break;
            }
            default:
                status = EINVAL;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_module_cb",
                                    status, "invalid msg_type received");
                break;
        }
    }
}
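
/*
 * For reference, a hedged sketch of the reply parsed above (field names are
 * from the structs used in this file; the value is illustrative):
 *
 *     offset 0                        : struct rppc_msg_header
 *                                           msg_type = RPPC_MSG_CHAN_INFO
 *                                           msg_len  = sizeof(struct rppc_channel_info)
 *     offset sizeof(rppc_msg_header)  : struct rppc_channel_info
 *                                           num_funcs = N  // functions exported
 */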
/*!
 * @brief Module setup function.
 *
 * @sa rpmsg_rpc_destroy
 */
Int
rpmsg_rpc_setup (Void)
{
    UInt16 i;
    List_Params listparams;
    Int status = 0;
    Error_Block eb;
    pthread_attr_t thread_attr;
    struct sched_param sched_param;

    GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_setup");

    Error_init(&eb);

    List_Params_init (&listparams);
    rpmsg_rpc_state.gateHandle = (IGateProvider_Handle)
                GateSpinlock_create ((GateSpinlock_Handle) NULL, &eb);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (rpmsg_rpc_state.gateHandle == NULL) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "rpmsg_rpc_setup",
                             status,
                             "Failed to create spinlock gate!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            rpmsg_rpc_state.eventState [i].bufList = NULL;
            rpmsg_rpc_state.eventState [i].rpc = NULL;
            rpmsg_rpc_state.eventState [i].refCount = 0;
            rpmsg_rpc_state.eventState [i].head = NULL;
            rpmsg_rpc_state.eventState [i].tail = NULL;
        }

        /* run the notifier thread with explicit round-robin scheduling at
         * low real-time priority */
        pthread_attr_init(&thread_attr);
        sched_param.sched_priority = PRIORITY_REALTIME_LOW;
        pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(&thread_attr, SCHED_RR);
        pthread_attr_setschedparam(&thread_attr, &sched_param);

        rpmsg_rpc_state.run = TRUE;
        if (pthread_create(&rpmsg_rpc_state.nt,
                           &thread_attr, notifier_thread, NULL) == EOK) {
            pthread_setname_np(rpmsg_rpc_state.nt, "rpmsg-rpc-notifier");
            /* Initialize the driver mapping array. */
            Memory_set (&rpmsg_rpc_state.objects,
                        0,
                        (sizeof (rpmsg_rpc_conn_object *)
                         * MAX_CONNS));
            for (i = 0; i < NUM_RPMSG_RPC_QUEUES; i++) {
                /* create a local handle and register for notifications with
                 * MessageQCopy */
                rpmsg_rpc_state.mqHandle[i] = MessageQCopy_create (
                                                   MessageQCopy_ADDRANY,
                                                   rpmsg_rpc_names[i].name,
                                                   _rpmsg_rpc_module_cb,
                                                   NULL,
                                                   &rpmsg_rpc_state.endpoint[i]);
                if (rpmsg_rpc_state.mqHandle[i] == NULL) {
                    /*! @retval RPC_FAIL Failed to create MessageQCopy handle! */
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "rpmsg_rpc_setup",
                                         status,
                                         "Failed to create MessageQCopy handle!");
                    break;
                }
                else {
                    /* TBD: This could be replaced with a messageqcopy_open type
                     * call, one for each core */
                    status = MessageQCopy_registerNotify (
                                                  rpmsg_rpc_state.mqHandle[i],
                                                  _rpmsg_rpc_notify_cb);
                    if (status < 0) {
                        MessageQCopy_delete (&rpmsg_rpc_state.mqHandle[i]);
                        /*! @retval RPC_FAIL Failed to register MQCopy handle! */
                        status = -ENOMEM;
                        GT_setFailureReason (curTrace,
                                             GT_4CLASS,
                                             "rpmsg_rpc_setup",
                                             status,
                                             "Failed to register MQCopy handle!");
                        break;
                    }
                }
            }
            if (status >= 0){
                rpmsg_rpc_state.sem = OsalSemaphore_create(OsalSemaphore_Type_Binary);
                if (rpmsg_rpc_state.sem == NULL) {
                    //MessageQCopy_unregisterNotify();
                    /*! @retval RPC_FAIL Failed to create semaphore! */
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "rpmsg_rpc_setup",
                                         status,
                                         "Failed to create semaphore!");
                }
            }
            if (status >= 0) {
                rpmsg_rpc_state.isSetup = TRUE;
            }
            else {
                /* unwind the handles created so far; on failure at index i,
                 * handle i is already NULL or deleted */
                for (; i > 0; --i) {
                    MessageQCopy_delete (&rpmsg_rpc_state.mqHandle[i - 1]);
                }
                rpmsg_rpc_state.run = FALSE;
            }
        }
        else {
            rpmsg_rpc_state.run = FALSE;
        }
        pthread_attr_destroy(&thread_attr);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_0trace (curTrace, GT_LEAVE, "rpmsg_rpc_setup");
    return status;
}
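
/*
 * Illustrative usage (a sketch; the surrounding resource-manager main loop
 * is driver-specific and not shown):
 *
 *     if (rpmsg_rpc_setup() < 0) {
 *         // bail out: no gate, notifier thread, or MessageQCopy endpoints
 *     }
 *     // ... service requests until shutdown or recovery ...
 *     rpmsg_rpc_destroy();
 */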
/*!
 * @brief Module destroy function.
 *
 * @sa rpmsg_rpc_setup
 */
Void
rpmsg_rpc_destroy (Void)
{
    rpmsg_rpc_EventPacket * packet;
    UInt32 i;
    List_Handle bufList;
    rpmsg_rpc_object * rpc = NULL;
    WaitingReaders_t * wr = NULL;
    struct _msg_info info;

    GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_destroy");

    for (i = 0; i < MAX_CONNS; i++) {
        if (rpmsg_rpc_state.objects[i]) {
            rpmsg_rpc_conn_object * obj = rpmsg_rpc_state.objects[i];
            _deinit_rpmsg_rpc_device(obj->dev);
            ProcMgr_close(&obj->procH);
            Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
            rpmsg_rpc_state.objects[i] = NULL;
        }
    }

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        rpc = NULL;
        if (rpmsg_rpc_state.eventState [i].rpc != NULL) {
            /* This is recovery. Need to mark rpc structures as invalid */
            rpc = rpmsg_rpc_state.eventState[i].rpc;
            MessageQCopy_delete(&rpc->mq);
            rpc->mq = NULL;
        }
        bufList = rpmsg_rpc_state.eventState [i].bufList;

        rpmsg_rpc_state.eventState [i].bufList = NULL;
        rpmsg_rpc_state.eventState [i].rpc = NULL;
        rpmsg_rpc_state.eventState [i].refCount = 0;
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                /* Check if rcvid is still valid */
                if (MsgInfo(wr->rcvid, &info) != -1) {
                    put_wr(wr);
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                    MsgError(wr->rcvid, EINTR);
                    pthread_mutex_lock(&rpmsg_rpc_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (rpc) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, 1, 0)) {
                    iofunc_notify_trigger(rpc->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }
            pthread_mutex_unlock(&rpmsg_rpc_state.lock);

            /* Free event packets for any received but unprocessed events. */
            while (List_empty (bufList) != TRUE){
                packet = (rpmsg_rpc_EventPacket *)
                              List_get (bufList);
                if (packet != NULL){
                    Memory_free (NULL, packet, sizeof(*packet));
                }
            }
            List_delete (&(bufList));
        }
    }

    /* Free the cached list */
    flush_uBuf();

    if (rpmsg_rpc_state.sem) {
        OsalSemaphore_delete(&rpmsg_rpc_state.sem);
    }

    for (i = 0; i < NUM_RPMSG_RPC_QUEUES; i++) {
        if (rpmsg_rpc_state.mqHandle[i]) {
            //MessageQCopy_unregisterNotify();
            MessageQCopy_delete(&rpmsg_rpc_state.mqHandle[i]);
        }
    }

    if (rpmsg_rpc_state.gateHandle != NULL) {
        GateSpinlock_delete ((GateSpinlock_Handle *)
                             &(rpmsg_rpc_state.gateHandle));
    }

    rpmsg_rpc_state.isSetup = FALSE;
    rpmsg_rpc_state.run = FALSE;
    /* wake the notifier thread so it can observe run == FALSE and exit,
     * then drain any outstanding notify-list entries and waiting readers */
    pthread_mutex_lock(&rpmsg_rpc_state.lock);
    pthread_cond_signal(&rpmsg_rpc_state.cond);
    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
    pthread_join(rpmsg_rpc_state.nt, NULL);

    pthread_mutex_lock(&rpmsg_rpc_state.lock);
    while (rpmsg_rpc_state.head != NULL) {
        int index;
        WaitingReaders_t *item;
        index = dequeue_notify_list_item(rpmsg_rpc_state.head);
        if (index < 0)
            break;
        item = dequeue_waiting_reader(index);
        while (item) {
            put_wr(item);
            item = dequeue_waiting_reader(index);
        }
    }
    rpmsg_rpc_state.head = NULL;
    rpmsg_rpc_state.tail = NULL;
    pthread_mutex_unlock(&rpmsg_rpc_state.lock);

    GT_0trace (curTrace, GT_LEAVE, "rpmsg_rpc_destroy");
}
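
/*
 * Note for clients (illustrative, grounded in the MsgError(..., EINTR) reply
 * above): a read() blocked on an rpmsg-rpc node while this teardown runs is
 * unblocked with errno == EINTR, e.g.
 *
 *     ssize_t n = read(fd, &reply, sizeof(reply));
 *     if (n == -1 && errno == EINTR) {
 *         // the driver is shutting down or recovering; close and reopen
 *     }
 */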