/*
 * @file rpmsg-rpc.c
 *
 * @brief devctl handler for RPC component.
 *
 * ============================================================================
 *
 * Copyright (c) 2013, Texas Instruments Incorporated
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of Texas Instruments Incorporated nor the names of
 * its contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * Contact information for paper mail:
 * Texas Instruments
 * Post Office Box 655303
 * Dallas, Texas 75265
 * Contact information:
 * http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
 * DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
 * ============================================================================
 *
 */

/* Standard headers */
#include <ti/syslink/Std.h>

/* OSAL & Utils headers */
#include <ti/syslink/utils/List.h>
#include <ti/syslink/utils/String.h>
#include <ti/syslink/utils/Trace.h>
#include <ti/syslink/utils/Memory.h>
#include <ti/syslink/utils/IGateProvider.h>
#include <ti/syslink/utils/GateSpinlock.h>
#include <_MultiProc.h>

/* QNX specific header includes */
#include <errno.h>
#include <unistd.h>
#include <sys/iofunc.h>
#include <sys/dispatch.h>
#include <sys/netmgr.h>
#include <sys/mman.h>
#include <devctl.h>

/* Module headers */
#include <ti/ipc/rpmsg_rpc.h>
#include <ti/ipc/MessageQCopy.h>
#include <_MessageQCopy.h>
#include <_MessageQCopyDefs.h>
#include "OsalSemaphore.h"
#include "std_qnx.h"
#include <pthread.h>

#include "rpmsg-rpc.h"
#include <rpmsg.h>

#define PRIORITY_REALTIME_LOW 29

extern int mem_offset64_peer(pid_t pid, const uintptr_t addr, size_t len,
                             off64_t *offset, size_t *contig_len);

static MsgList_t *nl_cache;
static int num_nl = 0;
static WaitingReaders_t *wr_cache;
static int num_wr = 0;

/*
 * Instead of constantly allocating and freeing the notifier structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in rpmsg-rpc.h.
 */

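/* The get_*() / put_*() helpers below pop from and push to these caches,
 * falling back to Memory_alloc()/Memory_free() when a cache is empty or
 * full. Callers are expected to hold the appropriate lock (see the
 * "we have the right locks" notes below). */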
static MsgList_t *get_nl()
{
    MsgList_t *item;
    item = nl_cache;
    if (item != NULL) {
        nl_cache = nl_cache->next;
        num_nl--;
    } else {
        item = Memory_alloc(NULL, sizeof(MsgList_t), 0, NULL);
    }
    return(item);
}

static void put_nl(MsgList_t *item)
{
    if (num_nl >= CACHE_NUM) {
        Memory_free(NULL, item, sizeof(*item));
    } else {
        item->next = nl_cache;
        nl_cache = item;
        num_nl++;
    }
    return;
}

static WaitingReaders_t *get_wr()
{
    WaitingReaders_t *item;
    item = wr_cache;
    if (item != NULL) {
        wr_cache = wr_cache->next;
        num_wr--;
    } else {
        item = Memory_alloc(NULL, sizeof(WaitingReaders_t), 0, NULL);
    }
    return(item);
}

static void put_wr(WaitingReaders_t *item)
{
    if (num_wr >= CACHE_NUM) {
        Memory_free(NULL, item, sizeof(*item));
    } else {
        item->next = wr_cache;
        wr_cache = item;
        num_wr++;
    }
    return;
}

/* structure to hold rpmsg-rpc device information */
typedef struct named_device {
    iofunc_mount_t          mattr;
    iofunc_attr_t           cattr;
    int                     resmgr_id;
    pthread_mutex_t         mutex;
    iofunc_funcs_t          mfuncs;
    resmgr_connect_funcs_t  cfuncs;
    resmgr_io_funcs_t       iofuncs;
    char                    device_name[_POSIX_PATH_MAX];
} named_device_t;

/* rpmsg-rpc device structure */
typedef struct rpmsg_rpc_dev {
    dispatch_t       * dpp;
    thread_pool_t    * tpool;
    named_device_t     rpmsg_rpc;
} rpmsg_rpc_dev_t;

/*!
 *  @brief  Remote connection object
 */
typedef struct rpmsg_rpc_conn_object {
    rpmsg_rpc_dev_t *   dev;
    MessageQCopy_Handle mq;
    UInt32              addr;
    UInt16              procId;
    ProcMgr_Handle      procH;
    UInt32              numFuncs;
    Bool                destroy;
} rpmsg_rpc_conn_object;

/*!
 *  @brief  rpc instance object
 */
typedef struct rpmsg_rpc_object_tag {
    MessageQCopy_Handle     mq;
    rpmsg_rpc_conn_object * conn;
    UInt32                  addr;
    UInt32                  remoteAddr;
    UInt16                  procId;
    pid_t                   pid;
    Bool                    created;
    iofunc_notify_t         notify[3];
} rpmsg_rpc_object;

/*!
 *  @brief  Structure of Event callback argument passed to register function.
 */
typedef struct rpmsg_rpc_EventCbck_tag {
    List_Elem          element;
    /*!< List element header */
    rpmsg_rpc_object * rpc;
    /*!< User rpc info pointer. Passed back to user callback function */
    UInt32             pid;
    /*!< Process Identifier for user process. */
} rpmsg_rpc_EventCbck;

/*!
 *  @brief  Structure of Fxn Info for reverse translations.
 */
typedef struct rpmsg_rpc_FxnInfo_tag {
    List_Elem            element;
    /*!< List element header */
    UInt16               msgId;
    /*!< Unique msgId of the rpc fxn call */
    struct rppc_function func;
    /*!< rpc function information. */
} rpmsg_rpc_FxnInfo;

/*!
 *  @brief  Keeps the information related to Event.
 */
typedef struct rpmsg_rpc_EventState_tag {
    List_Handle         bufList;
    /*!< Head of received event list. */
    List_Handle         fxnList;
    /*!< Head of received msg list. */
    UInt32              pid;
    /*!< User process ID. */
    rpmsg_rpc_object *  rpc;
    /*!< User rpc component. */
    UInt32              refCount;
    /*!< Reference count, used when multiple Notify_registerEvent are called
         from same process space (multi threads/processes). */
    WaitingReaders_t *  head;
    /*!< Waiting readers head. */
    WaitingReaders_t *  tail;
    /*!< Waiting readers tail. */
} rpmsg_rpc_EventState;

/*!
 *  @brief  Per-connection information
 */
typedef struct rpmsg_rpc_ocb {
    iofunc_ocb_t       hdr;
    pid_t              pid;
    rpmsg_rpc_object * rpc;
} rpmsg_rpc_ocb_t;

typedef struct rpmsg_rpc_name {
    char name[RPMSG_NAME_SIZE];
} rpmsg_rpc_name_t;

static struct rpmsg_rpc_name rpmsg_rpc_names[] = {
    {.name = "rpmsg-rpc"},
};

#define NUM_RPMSG_RPC_QUEUES sizeof(rpmsg_rpc_names)/sizeof(*rpmsg_rpc_names)

/*!
 *  @brief  rpmsg-rpc Module state object
 */
typedef struct rpmsg_rpc_ModuleObject_tag {
    Bool                 isSetup;
    /*!< Indicates whether the module has been already setup */
    Bool                 openRefCount;
    /*!< Open reference count. */
    IGateProvider_Handle gateHandle;
    /*!< Handle of gate to be used for local thread safety */
    rpmsg_rpc_EventState eventState [MAX_PROCESSES];
    /*!< List for all user processes registered. */
    rpmsg_rpc_conn_object * objects [MAX_CONNS];
    /*!< List of all remote connections. */
    MessageQCopy_Handle  mqHandle[NUM_RPMSG_RPC_QUEUES];
    /*!< Local mq handle associated with this module */
    UInt32               endpoint[NUM_RPMSG_RPC_QUEUES];
    /*!< Local endpoint associated with the mq handle */
    OsalSemaphore_Handle sem;
    /*!< Handle to semaphore used for rpc instance connection notifications */
    pthread_t            nt;
    /*!< notifier thread */
    pthread_mutex_t      lock;
    /*!< protection between notifier and event */
    pthread_cond_t       cond;
    /*!< protection between notifier and event */
    MsgList_t *          head;
    /*!< list head */
    MsgList_t *          tail;
    /*!< list tail */
    int                  run;
    /*!< notifier thread must keep running */
} rpmsg_rpc_ModuleObject;

/*!
 *  @brief  Structure of Event Packet read from notify kernel-side.
 */
typedef struct rpmsg_rpc_EventPacket_tag {
    List_Elem          element;
    /*!< List element header */
    UInt32             pid;
    /*!< Process identifier */
    rpmsg_rpc_object * obj;
    /*!< Pointer to the channel associated with this callback */
    UInt8              data[MessageQCopy_BUFSIZE];
    /*!< Data associated with event. */
    UInt32             len;
    /*!< Length of the data associated with event. */
    UInt32             src;
    /*!< Src endpoint associated with event. */
    struct rpmsg_rpc_EventPacket * next;
    struct rpmsg_rpc_EventPacket * prev;
} rpmsg_rpc_EventPacket;

/*
 * Instead of constantly allocating and freeing the uBuf structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in rpmsg-rpc.h.
 */
static rpmsg_rpc_EventPacket *uBuf_cache;
static int num_uBuf = 0;

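/* Drain the event-packet cache, freeing every cached entry. */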
static void flush_uBuf()
{
    rpmsg_rpc_EventPacket *uBuf = NULL;

    while(uBuf_cache) {
        num_uBuf--;
        uBuf = uBuf_cache;
        uBuf_cache = (rpmsg_rpc_EventPacket *)uBuf_cache->next;
        Memory_free(NULL, uBuf, sizeof(*uBuf));
    }
}

static rpmsg_rpc_EventPacket *get_uBuf()
{
    rpmsg_rpc_EventPacket *uBuf;
    uBuf = uBuf_cache;
    if (uBuf != NULL) {
        uBuf_cache = (rpmsg_rpc_EventPacket *)uBuf_cache->next;
        num_uBuf--;
    } else {
        uBuf = Memory_alloc(NULL, sizeof(rpmsg_rpc_EventPacket), 0, NULL);
    }
    return(uBuf);
}

static void put_uBuf(rpmsg_rpc_EventPacket * uBuf)
{
    if (num_uBuf >= CACHE_NUM) {
        Memory_free(NULL, uBuf, sizeof(*uBuf));
    } else {
        uBuf->next = (struct rpmsg_rpc_EventPacket *)uBuf_cache;
        uBuf_cache = uBuf;
        num_uBuf++;
    }
    return;
}

/** ============================================================================
 *  Function Prototypes
 *  ============================================================================
 */
int
_rpmsg_rpc_translate (ProcMgr_Handle handle, char *data, pid_t pid,
                      bool reverse);

/** ============================================================================
 *  Globals
 *  ============================================================================
 */
/*!
 *  @var    rpmsg_rpc_state
 *
 *  @brief  rpmsg-rpc state object variable
 */
static rpmsg_rpc_ModuleObject rpmsg_rpc_state =
{
    .gateHandle = NULL,
    .isSetup = FALSE,
    .openRefCount = 0,
    .nt = 0,
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .cond = PTHREAD_COND_INITIALIZER,
    .head = NULL,
    .tail = NULL,
    .run = 0
};

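/* Rolling message ID assigned to outgoing RPC calls; wraps from 0xFFFF
 * back to 1 (see rpmsg_rpc_write()). */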
static uint16_t msg_id = 0xFFFF;

extern dispatch_t * syslink_dpp;

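/*!
 *  @brief   Find the notify list item for the given client index.
 *
 *  @param   index    Index of the client process to search for.
 *
 *  @return  The matching item, or NULL if no events are queued for it.
 */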
static MsgList_t *find_nl(int index)
{
    MsgList_t *item=NULL;
    item = rpmsg_rpc_state.head;
    while (item) {
        if (item->index == index)
            return(item);
        item = item->next;
    }
    return(item);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to enqueue a notify list item.
 *
 *  @param   index    Index of the client process associated with the item.
 *
 *  @sa      find_nl
 *           get_nl
 */
static int enqueue_notify_list(int index)
{
    MsgList_t *item;
    item = find_nl(index);
    if (item == NULL) {
        item = get_nl();
        if (item == NULL) {
            return(-1);
        }
        item->next = NULL;
        item->index = index;
        item->num_events=1;
        if (rpmsg_rpc_state.head == NULL) {
            rpmsg_rpc_state.head = item;
            rpmsg_rpc_state.tail = item;
            item->prev = NULL;
        }
        else {
            item->prev = rpmsg_rpc_state.tail;
            rpmsg_rpc_state.tail->next = item;
            rpmsg_rpc_state.tail = item;
        }
    }
    else {
        item->num_events++;
    }
    return(0);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to dequeue a notify list item.
 *
 *  @param   item     The item to remove.
 *
 *  @sa      put_nl
 */
static inline int dequeue_notify_list_item(MsgList_t *item)
{
    int index;
    if (item == NULL) {
        return(-1);
    }
    index = item->index;
    item->num_events--;
    if (item->num_events > 0) {
        return(index);
    }
    if (rpmsg_rpc_state.head == item) {
        // removing head
        rpmsg_rpc_state.head = item->next;
        if (rpmsg_rpc_state.head != NULL) {
            rpmsg_rpc_state.head->prev = NULL;
        }
        else {
            // removing head and tail
            rpmsg_rpc_state.tail = NULL;
        }
    }
    else {
        item->prev->next = item->next;
        if (item->next != NULL) {
            item->next->prev = item->prev;
        }
        else {
            // removing tail
            rpmsg_rpc_state.tail = item->prev;
        }
    }
    put_nl(item);
    return(index);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to add a waiting reader to the list.
 *
 *  @param   index    Index of the client process waiting reader to add.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      None
 */
static int enqueue_waiting_reader(int index, int rcvid)
{
    WaitingReaders_t *item;
    item = get_wr();
    if (item == NULL) {
        return(-1);
    }
    item->rcvid = rcvid;
    item->next = NULL;
    if (rpmsg_rpc_state.eventState [index].head == NULL) {
        rpmsg_rpc_state.eventState [index].head = item;
        rpmsg_rpc_state.eventState [index].tail = item;
    }
    else {
        rpmsg_rpc_state.eventState [index].tail->next = item;
        rpmsg_rpc_state.eventState [index].tail = item;
    }
    return(EOK);
}

/* we have the right locks when calling this function */
/* caller frees item */
/*!
 *  @brief   Function to remove a waiting reader from the list.
 *
 *  @param   index    Index of the client process waiting reader to dequeue.
 *
 *  @sa      None
 */
static WaitingReaders_t *dequeue_waiting_reader(int index)
{
    WaitingReaders_t *item = NULL;
    if (rpmsg_rpc_state.eventState [index].head) {
        item = rpmsg_rpc_state.eventState [index].head;
        rpmsg_rpc_state.eventState [index].head = rpmsg_rpc_state.eventState [index].head->next;
        if (rpmsg_rpc_state.eventState [index].head == NULL) {
            rpmsg_rpc_state.eventState [index].tail = NULL;
        }
    }
    return(item);
}

/*!
 *  @brief   Function to find a specified waiting reader.
 *
 *  @param   index    Index of the client process waiting for the message.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      None
 */
static WaitingReaders_t *find_waiting_reader(int index, int rcvid)
{
    WaitingReaders_t *item = NULL;
    WaitingReaders_t *prev = NULL;
    if (rpmsg_rpc_state.eventState [index].head) {
        item = rpmsg_rpc_state.eventState [index].head;
        while (item) {
            if (item->rcvid == rcvid) {
                /* remove item from list, fixing up head and tail pointers */
                if (prev)
                    prev->next = item->next;
                if (item == rpmsg_rpc_state.eventState [index].head)
                    rpmsg_rpc_state.eventState [index].head = item->next;
                if (item == rpmsg_rpc_state.eventState [index].tail)
                    rpmsg_rpc_state.eventState [index].tail = prev;
                break;
            }
            else {
                prev = item;
                item = item->next;
            }
        }
    }
    return item;
}

/*!
 *  @brief   Function used to check if there is a waiting reader with an
 *           event (message) ready to be delivered.
 *
 *  @param   index    Index of the client process waiting for the message.
 *  @param   item     Pointer to the waiting reader.
 *
 *  @sa      dequeue_notify_list_item
 *           dequeue_waiting_reader
 */
static int find_available_reader_and_event(int *index, WaitingReaders_t **item)
{
    MsgList_t *temp;
    if (rpmsg_rpc_state.head == NULL) {
        return(0);
    }
    temp = rpmsg_rpc_state.head;
    while (temp) {
        if (rpmsg_rpc_state.eventState [temp->index].head) {
            // event and reader found
            if (dequeue_notify_list_item(temp) >= 0) {
                *index = temp->index;
                *item = dequeue_waiting_reader(temp->index);
            }
            else {
                /* error occurred, return 0 as item has not been set */
                return(0);
            }
            return(1);
        }
        temp = temp->next;
    }
    return(0);
}

/*!
 *  @brief   Function used to deliver the notification to the client that
 *           it has received a message.
 *
 *  @param   index    Index of the client process receiving the message.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      put_uBuf
 */
static void deliver_notification(int index, int rcvid)
{
    int err = EOK;
    rpmsg_rpc_EventPacket * uBuf = NULL;

    uBuf = (rpmsg_rpc_EventPacket *) List_get (rpmsg_rpc_state.eventState [index].bufList);

    /*  Let the check remain at run-time. */
    if (uBuf != NULL) {
        err = MsgReply(rcvid, uBuf->len, uBuf->data, uBuf->len);
        if (err == -1)
            perror("deliver_notification: MsgReply");
        /* Free the processed event callback packet. */
        put_uBuf(uBuf);
    }
    else {
        MsgReply(rcvid, EOK, NULL, 0);
    }
    return;
}

/*!
 *  @brief   Thread used for notifying waiting readers of messages.
 *
 *  @param   arg    Thread-specific private arg.
 *
 *  @sa      find_available_reader_and_event
 *           deliver_notification
 *           put_wr
 */
static void *notifier_thread(void *arg)
{
    int status;
    int index;
    WaitingReaders_t *item = NULL;
    pthread_mutex_lock(&rpmsg_rpc_state.lock);
    while (rpmsg_rpc_state.run) {
        status = find_available_reader_and_event(&index, &item);
        if ( (status == 0) || (item == NULL) ) {
            status = pthread_cond_wait(&rpmsg_rpc_state.cond, &rpmsg_rpc_state.lock);
            if ((status != EOK) && (status != EINTR)) {
                // false wakeup
                break;
            }
            status = find_available_reader_and_event(&index, &item);
            if ( (status == 0) || (item == NULL) ) {
                continue;
            }
        }
        pthread_mutex_unlock(&rpmsg_rpc_state.lock);
        // we have unlocked, and now we have an event to deliver
        // we deliver one event at a time, relock, check and continue
        deliver_notification(index, item->rcvid);
        pthread_mutex_lock(&rpmsg_rpc_state.lock);
        put_wr(item);
    }
    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
    return(NULL);
}

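/*!
 *  @brief   Handler for the RPPC_IOC_CREATE devctl. Sends a create-instance
 *           request to the remote server and pends (up to 5 seconds) until
 *           the callback confirms that the remote instance was created.
 */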
static
Int
_rpmsg_rpc_create(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_rpc_ocb_t *ocb)
{
    Int status = EOK;
    struct rppc_create_instance * cargs =
        (struct rppc_create_instance *)(_DEVCTL_DATA (msg->i));
    struct rppc_msg_header * msg_hdr = NULL;
    rpmsg_rpc_object * rpc = ocb->rpc;
    Char * msg_data = NULL;
    UInt8 buf[sizeof(struct rppc_create_instance) + sizeof(struct rppc_msg_header)];

    if (rpc->created == TRUE) {
        GT_0trace(curTrace, GT_4CLASS, "Already created.");
        status = (EINVAL);
    }
    else if ((ctp->info.msglen - sizeof(msg->i)) <
             sizeof (struct rppc_create_instance)) {
        status = (EINVAL);
    }
    else if (String_nlen(cargs->name, 47) == -1) {
        status = (EINVAL);
    }
    else {
        msg_hdr = (struct rppc_msg_header *)buf;
        msg_hdr->msg_type = RPPC_MSG_CREATE_INSTANCE;
        msg_hdr->msg_len = sizeof(struct rppc_create_instance);
        msg_data = (Char *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
        Memory_copy(msg_data, cargs, sizeof(struct rppc_create_instance));

        status = MessageQCopy_send (rpc->conn->procId, // remote procid
                                    MultiProc_self(),  // local procid
                                    rpc->conn->addr,   // remote server
                                    rpc->addr,         // local address
                                    buf,               // connect msg
                                    sizeof(buf),       // msg size
                                    TRUE);             // wait for available bufs
        if (status != MessageQCopy_S_SUCCESS) {
            GT_0trace(curTrace, GT_4CLASS, "Failed to send create message.");
            status = (EIO);
        }
        else {
            status = OsalSemaphore_pend(rpmsg_rpc_state.sem, 5000);
            if (rpc->created == TRUE) {
                msg->o.ret_val = EOK;
                status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o)));
            }
            else if (status < 0) {
                GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
                status = (EIO);
            }
            else {
                status = (ETIMEDOUT);
            }
        }
    }

    return status;
}

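/*!
 *  @brief   Counterpart to _rpmsg_rpc_create. Sends a destroy-instance
 *           request to the remote server, unless the remote processor is
 *           already being shut down, in which case the instance is simply
 *           marked destroyed locally.
 */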
static
Int
_rpmsg_rpc_destroy(resmgr_context_t *ctp, io_devctl_t *msg,
                   rpmsg_rpc_ocb_t *ocb)
{
    Int status = EOK;
    struct rppc_msg_header * hdr = NULL;
    rpmsg_rpc_object * rpc = ocb->rpc;
    UInt8 buf[sizeof(struct rppc_instance_handle) + sizeof(struct rppc_msg_header)];
    struct rppc_instance_handle * instance = NULL;

    if (rpc->created != TRUE) {
        GT_0trace(curTrace, GT_4CLASS, "Already destroyed.");
        status = (EINVAL);
    }
    else if (!rpc->conn->destroy) {
        hdr = (struct rppc_msg_header *)buf;
        hdr->msg_type = RPPC_MSG_DESTROY_INSTANCE;
        hdr->msg_len = sizeof(struct rppc_instance_handle);
        instance = (struct rppc_instance_handle *)((UInt32)hdr + sizeof(struct rppc_msg_header));
        instance->endpoint_address = rpc->remoteAddr;
        instance->status = 0;

        status = MessageQCopy_send (rpc->conn->procId, // remote procid
                                    MultiProc_self(),  // local procid
                                    rpc->conn->addr,   // remote server
                                    rpc->addr,         // local address
                                    buf,               // connect msg
                                    sizeof(buf),       // msg size
                                    TRUE);             // wait for available bufs
        if (status != MessageQCopy_S_SUCCESS) {
            GT_0trace(curTrace, GT_4CLASS, "Failed to send disconnect message.");
            status = (EIO);
        }
        else {
            status = OsalSemaphore_pend(rpmsg_rpc_state.sem, 5000);
            if (rpc->created != FALSE || status < 0) {
                GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
                status = (EIO);
            }
            else {
                /* Remote instance confirmed destroyed. */
                status = (EOK);
            }
        }
    }
    else {
        /* This is the shutdown, remote proc has already been stopped,
         * so just set created to false. */
        rpc->created = FALSE;
    }

    return status;
}

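/**
 * Handler for devctl() requests.
 *
 * Dispatches the RPPC_IOC_* commands that this resource manager exports,
 * after first allowing iofunc_devctl_default() to claim the request.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual devctl() message.
 * \param i_ocb   OCB associated with client's session.
 *
 * \return POSIX errno value or resource-manager reply code.
 */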
Int
rpmsg_rpc_devctl(resmgr_context_t *ctp, io_devctl_t *msg, IOFUNC_OCB_T *i_ocb)
{
    Int status = 0;
    rpmsg_rpc_ocb_t *ocb = (rpmsg_rpc_ocb_t *)i_ocb;

    if ((status = iofunc_devctl_default(ctp, msg, &ocb->hdr)) != _RESMGR_DEFAULT)
        return(_RESMGR_ERRNO(status));
    status = 0;

    switch (msg->i.dcmd)
    {
        case RPPC_IOC_CREATE:
            status = _rpmsg_rpc_create (ctp, msg, ocb);
            break;
#if 0
        case RPPC_IOC_DESTROY:
            status = _rpmsg_rpc_destroy (ctp, msg, ocb);
            break;
#endif
        default:
            status = (ENOSYS);
            break;
    }

    return status;
}

/*!
 *  @brief  Attach a process to rpmsg-rpc user support framework.
 *
 *  @param  rpc    rpc instance object for the client process
 *
 *  @sa     _rpmsg_rpc_detach
 */
static
Int
_rpmsg_rpc_attach (rpmsg_rpc_object * rpc)
{
    Int32         status  = EOK;
    Bool          flag    = FALSE;
    Bool          isInit  = FALSE;
    List_Object * bufList = NULL;
    List_Object * fxnList = NULL;
    IArg          key     = 0;
    List_Params   listparams;
    UInt32        i;

    GT_1trace (curTrace, GT_ENTER, "_rpmsg_rpc_attach", rpc);

    key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
    for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            rpmsg_rpc_state.eventState [i].refCount++;
            isInit = TRUE;
            status = EOK;
            break;
        }
    }

    if (isInit == FALSE) {
        List_Params_init (&listparams);
        bufList = List_create (&listparams);
        fxnList = List_create (&listparams);
        /* Search for an available slot for user process. */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            if (rpmsg_rpc_state.eventState [i].rpc == NULL) {
                rpmsg_rpc_state.eventState [i].rpc = rpc;
                rpmsg_rpc_state.eventState [i].refCount = 1;
                rpmsg_rpc_state.eventState [i].bufList = bufList;
                rpmsg_rpc_state.eventState [i].fxnList = fxnList;
                flag = TRUE;
                break;
            }
        }

        /* No free slots found. Let this check remain at run-time,
         * since it is dependent on user environment.
         */
        if (flag != TRUE) {
            /*! @retval Notify_E_RESOURCE Maximum number of
                supported user clients have already been registered. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "rpmsgDrv_attach",
                                 status,
                                 "Maximum number of supported user"
                                 " clients have already been "
                                 "registered.");
            if (bufList != NULL) {
                List_delete (&bufList);
            }
            if (fxnList != NULL) {
                List_delete (&fxnList);
            }
        }
    }
    IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

    GT_1trace (curTrace, GT_LEAVE, "_rpmsg_rpc_attach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed. */
    return status;
}

/*!
 *  @brief  This function queues received data for a registered process.
 *
 *  @param  rpc     RPC object associated with the client
 *  @param  src     Source address (endpoint) sending the data
 *  @param  pid     Process ID associated with the client
 *  @param  msgId   Unique ID of the rpc function call message
 *  @param  data    Data to be added
 *  @param  len     Length of data to be added
 *
 *  @sa
 */
Int
_rpmsg_rpc_addBufByPid (rpmsg_rpc_object *rpc,
                        UInt32 src,
                        UInt32 pid,
                        UInt16 msgId,
                        void * data,
                        UInt32 len)
{
    Int32                   status = EOK;
    Bool                    flag   = FALSE;
    rpmsg_rpc_EventPacket * uBuf   = NULL;
    IArg                    key;
    UInt32                  i;
    WaitingReaders_t      * item;
    MsgList_t             * msgItem;
    List_Elem             * elem = NULL;
    List_Elem             * temp = NULL;

    GT_5trace (curTrace,
               GT_ENTER,
               "_rpmsg_rpc_addBufByPid",
               rpc,
               src,
               pid,
               data,
               len);

    GT_assert (curTrace, (rpmsg_rpc_state.isSetup == TRUE));

    key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
    /* Find the registration for this callback */
    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            flag = TRUE;
            break;
        }
    }
    IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (flag != TRUE) {
        /*! @retval ENOMEM Could not find a registered handler
                    for this process. */
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "_rpmsgDrv_addBufByPid",
                             status,
                             "Could not find a registered handler "
                             "for this process.!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        /* Allocate memory for the buf */
        pthread_mutex_lock(&rpmsg_rpc_state.lock);
        uBuf = get_uBuf();
        pthread_mutex_unlock(&rpmsg_rpc_state.lock);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (uBuf == NULL) {
            /*! @retval Notify_E_MEMORY Failed to allocate memory for event
                        packet for received callback. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_rpmsgDrv_addBufByPid",
                                 status,
                                 "Failed to allocate memory for event"
                                 " packet for received callback.!");
        }
        else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
            List_traverse_safe(elem, temp, rpmsg_rpc_state.eventState [i].fxnList) {
                if (((rpmsg_rpc_FxnInfo *)elem)->msgId == msgId) {
                    List_remove(rpmsg_rpc_state.eventState [i].fxnList, elem);
                    break;
                }
            }
            IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

            if (elem != (List_Elem *)rpmsg_rpc_state.eventState [i].fxnList) {
                struct rppc_function * function;
                function = &(((rpmsg_rpc_FxnInfo *)elem)->func);
                _rpmsg_rpc_translate(NULL, (char *)function, pid, true);
                Memory_free(NULL, elem, sizeof(rpmsg_rpc_FxnInfo) +\
                            RPPC_TRANS_SIZE(function->num_translations));
            }

            List_elemClear (&(uBuf->element));
            GT_assert (curTrace,
                       (rpmsg_rpc_state.eventState [i].bufList != NULL));

            if (data) {
                Memory_copy(uBuf->data, data, len);
            }
            uBuf->len = len;

            List_put (rpmsg_rpc_state.eventState [i].bufList,
                      &(uBuf->element));
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            item = dequeue_waiting_reader(i);
            if (item) {
                // there is a waiting reader
                deliver_notification(i, item->rcvid);
                put_wr(item);
                pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                status = EOK;
            }
            else {
                if (enqueue_notify_list(i) < 0) {
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "_rpmsgDrv_addBufByPid",
                                         status,
                                         "Failed to allocate memory for notifier");
                }
                else {
                    msgItem = find_nl(i);
                    /* TODO: rpc could be NULL in some cases */
                    if (rpc && msgItem) {
                        if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, msgItem->num_events, 0)) {
                            iofunc_notify_trigger(rpc->notify, msgItem->num_events, IOFUNC_NOTIFY_INPUT);
                        }
                    }
                    status = EOK;
                    pthread_cond_signal(&rpmsg_rpc_state.cond);
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                }
            }
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        }
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_1trace (curTrace, GT_LEAVE, "_rpmsgDrv_addBufByPid", status);

    return status;
}

/*!
 *  @brief  This function implements the callback registered with
 *          MessageQCopy_create for each client. This function
 *          adds the message from the remote proc to a list
 *          where it is routed to the appropriate waiting reader.
 *
 *  @param  handle    MessageQCopy handle on which the message was received
 *  @param  data      Message payload
 *  @param  len       Length of the message payload
 *  @param  priv      Private argument registered at create time (the
 *                    rpmsg_rpc_object for this client)
 *  @param  src       Source endpoint address of the message
 *  @param  srcProc   Source processor ID of the message
 *
 *  @sa
 */
Void
_rpmsg_rpc_cb (MessageQCopy_Handle handle, void * data, int len, void * priv,
               UInt32 src, UInt16 srcProc)
{
    Int32 status = 0;
    rpmsg_rpc_object * rpc = NULL;
    struct rppc_msg_header * msg_hdr = NULL;
    struct rppc_instance_handle * instance;
    struct rppc_packet * packet = NULL;

    GT_6trace (curTrace,
               GT_ENTER,
               "_rpmsg_rpc_cb",
               handle,
               data,
               len,
               priv,
               src,
               srcProc);

    if (len < sizeof(struct rppc_msg_header)) {
        status = EINVAL;
        GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb", status,
                            "len is smaller than sizeof rppc_msg_header");
        return;
    }

    rpc = (rpmsg_rpc_object *) priv;
    msg_hdr = (struct rppc_msg_header *)data;

    switch (msg_hdr->msg_type) {
        case RPPC_MSG_INSTANCE_CREATED:
            if (msg_hdr->msg_len != sizeof(struct rppc_instance_handle)) {
                status = EINVAL;
                rpc->created = FALSE;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "msg_len is invalid");
            }
            else {
                instance = (struct rppc_instance_handle *)
                           ((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
                rpc->remoteAddr = instance->endpoint_address;
                if (instance->status != 0) {
                    rpc->created = FALSE;
                }
                else {
                    rpc->created = TRUE;
                }
            }
            /* post the semaphore to have the ioctl reply */
            OsalSemaphore_post(rpmsg_rpc_state.sem);
            break;
        case RPPC_MSG_INSTANCE_DESTROYED:
            if (msg_hdr->msg_len != sizeof(struct rppc_instance_handle)) {
                status = EINVAL;
                rpc->created = FALSE;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "msg_len is invalid");
            }
            else {
                instance = (struct rppc_instance_handle *)
                           ((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
                rpc->remoteAddr = instance->endpoint_address;
                if (instance->status != 0) {
                    rpc->created = TRUE;
                }
                else {
                    rpc->created = FALSE;
                }
            }
            /* post the semaphore to have the ioctl reply */
            OsalSemaphore_post(rpmsg_rpc_state.sem);
            break;

        case RPPC_MSG_CALL_FUNCTION:
            if ((len != sizeof(struct rppc_msg_header) + msg_hdr->msg_len) ||
                (msg_hdr->msg_len < sizeof(struct rppc_packet))) {
                status = EINVAL;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "msg_len is invalid");
                break;
            }
            packet = (struct rppc_packet *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
            if (len != sizeof(struct rppc_msg_header) + sizeof(struct rppc_packet) + packet->data_size) {
                status = EINVAL;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "data_size is invalid");
                break;
            }
            status = _rpmsg_rpc_addBufByPid (rpc,
                                             src,
                                             rpc->pid,
                                             packet->msg_id,
                                             &packet->fxn_id,
                                             sizeof(packet->fxn_id) + sizeof(packet->result));
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            if (status < 0) {
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "_rpmsg_rpc_cb",
                                     status,
                                     "Failed to add callback packet for pid");
            }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            break;
        default:
            break;
    }

    GT_0trace (curTrace, GT_LEAVE, "_rpmsg_rpc_cb");
}

/**
 * Handler for ocb_calloc() requests.
 *
 * Special handler for ocb_calloc() requests that we export for control. An
 * open request from the client will result in a call to our special
 * ocb_calloc handler. This function attaches the client's pid using
 * _rpmsg_rpc_attach and allocates client-specific information. It also
 * creates an endpoint for the client to communicate with the RPC server
 * on the remote core.
 *
 * \param ctp       Thread's associated context information.
 * \param device    Device attributes structure.
 *
 * \return Pointer to an iofunc_ocb_t OCB structure.
 */
IOFUNC_OCB_T *
rpmsg_rpc_ocb_calloc (resmgr_context_t * ctp, IOFUNC_ATTR_T * device)
{
    rpmsg_rpc_ocb_t *ocb = NULL;
    rpmsg_rpc_object *obj = NULL;
    struct _msg_info cl_info;
    rpmsg_rpc_dev_t * dev = NULL;
    int i = 0;
    Bool found = FALSE;
    char path1[20];
    char path2[20];

    GT_2trace (curTrace, GT_ENTER, "rpmsg_rpc_ocb_calloc",
               ctp, device);

    /* Allocate the OCB */
    ocb = (rpmsg_rpc_ocb_t *) calloc (1, sizeof (rpmsg_rpc_ocb_t));
    if (ocb == NULL){
        errno = ENOMEM;
        return (NULL);
    }

    ocb->pid = ctp->info.pid;

    /* Allocate memory for the rpmsg object. */
    obj = Memory_calloc (NULL, sizeof (rpmsg_rpc_object), 0u, NULL);
    if (obj == NULL) {
        errno = ENOMEM;
        free(ocb);
        return (NULL);
    }
    else {
        ocb->rpc = obj;
        IOFUNC_NOTIFY_INIT(obj->notify);
        obj->created = FALSE;
        /* determine conn and procId for communication based on which device
         * was opened */
        MsgInfo(ctp->rcvid, &cl_info);
        resmgr_pathname(ctp->id, 0, path1, sizeof(path1));
        for (i = 0; i < MAX_CONNS; i++) {
            if (rpmsg_rpc_state.objects[i] != NULL) {
                dev = rpmsg_rpc_state.objects[i]->dev;
                resmgr_pathname(dev->rpmsg_rpc.resmgr_id, 0, path2,
                                sizeof(path2));
                if (!strcmp(path1, path2)) {
                    found = TRUE;
                    break;
                }
            }
        }
        if (found) {
            obj->conn = rpmsg_rpc_state.objects[i];
            obj->procId = obj->conn->procId;
            obj->pid = ctp->info.pid;
            obj->mq = MessageQCopy_create (MessageQCopy_ADDRANY, NULL,
                                           _rpmsg_rpc_cb, obj, &obj->addr);
            if (obj->mq == NULL) {
                errno = ENOMEM;
                free(obj);
                free(ocb);
                return (NULL);
            }
            else {
                if (_rpmsg_rpc_attach (ocb->rpc) < 0) {
                    errno = ENOMEM;
                    MessageQCopy_delete (&obj->mq);
                    free(obj);
                    free(ocb);
                    return (NULL);
                }
            }
        }
    }

    GT_1trace (curTrace, GT_LEAVE, "rpmsg_rpc_ocb_calloc", ocb);

    return (IOFUNC_OCB_T *)(ocb);
}

/*!
 *  @brief  Detach a process from rpmsg-rpc user support framework.
 *
 *  @param  rpc    rpc instance object for the client process
 *
 *  @sa     _rpmsg_rpc_attach
 */
static
Int
_rpmsg_rpc_detach (rpmsg_rpc_object * rpc)
{
    Int32              status    = EOK;
    Int32              tmpStatus = EOK;
    Bool               flag      = FALSE;
    List_Object *      bufList   = NULL;
    List_Object *      fxnList   = NULL;
    UInt32             i;
    IArg               key;
    MsgList_t        * item;
    WaitingReaders_t * wr        = NULL;
    struct _msg_info   info;

    GT_1trace (curTrace, GT_ENTER, "rpmsg_rpc_detach", rpc);

    key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            if (rpmsg_rpc_state.eventState [i].refCount == 1) {
                rpmsg_rpc_state.eventState [i].refCount = 0;

                flag = TRUE;
                break;
            }
            else {
                rpmsg_rpc_state.eventState [i].refCount--;
                status = EOK;
                break;
            }
        }
    }
    IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

    if (flag == TRUE) {
        key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
        /* Last client being unregistered for this process. */
        rpmsg_rpc_state.eventState [i].rpc = NULL;

        /* Store in local variable to delete outside lock. */
        bufList = rpmsg_rpc_state.eventState [i].bufList;

        rpmsg_rpc_state.eventState [i].bufList = NULL;

        /* Store in local variable to delete outside lock. */
        fxnList = rpmsg_rpc_state.eventState [i].fxnList;

        rpmsg_rpc_state.eventState [i].fxnList = NULL;

        IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
    }

    if (flag != TRUE) {
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (i == MAX_PROCESSES) {
            /*! @retval Notify_E_NOTFOUND The specified user process was
                     not found registered with Notify Driver module. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "rpmsg_rpc_detach",
                                 status,
                                 "The specified user process was not found"
                                 " registered with rpmsg Driver module.");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }
    else {
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                /* Check if rcvid is still valid */
                if (MsgInfo(wr->rcvid, &info) != -1) {
                    put_wr(wr);
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                    MsgError(wr->rcvid, EINTR);
                    pthread_mutex_lock(&rpmsg_rpc_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (rpc) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, 1, 0)) {
                    iofunc_notify_trigger(rpc->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }

            /* Free event packets for any received but unprocessed events. */
            while ((item = find_nl(i)) != NULL) {
                if (dequeue_notify_list_item(item) >= 0) {
                    rpmsg_rpc_EventPacket * uBuf = NULL;

                    uBuf = (rpmsg_rpc_EventPacket *) List_get (bufList);

                    /*  Let the check remain at run-time. */
                    if (uBuf != NULL) {
                        put_uBuf(uBuf);
                    }
                }
            }
            pthread_mutex_unlock(&rpmsg_rpc_state.lock);

            /* Last client being unregistered with Notify module. */
            List_delete (&bufList);
        }
        if (fxnList != NULL) {
            rpmsg_rpc_FxnInfo * fxnInfo = NULL;
            key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
            while ((fxnInfo = (rpmsg_rpc_FxnInfo *)List_dequeue (fxnList))) {
                Memory_free (NULL, fxnInfo, sizeof(rpmsg_rpc_FxnInfo) +\
                             RPPC_TRANS_SIZE(fxnInfo->func.num_translations));
            }
            IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
            List_delete (&fxnList);
        }

#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if ((tmpStatus < 0) && (status >= 0)) {
            status = tmpStatus;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "rpmsg_rpc_detach",
                                 status,
                                 "Failed to delete termination semaphore!");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }

    GT_1trace (curTrace, GT_LEAVE, "rpmsg_rpc_detach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed */
    return status;
}

/**
 * Handler for ocb_free() requests.
 *
 * Special handler for ocb_free() requests that we export for control. A
 * close request from the client will result in a call to our special
 * ocb_free handler. This function detaches the client's pid using
 * _rpmsg_rpc_detach and frees any client-specific information that was
 * allocated.
 *
 * \param i_ocb     OCB associated with client's session.
 *
 * \return None.
 */
void
rpmsg_rpc_ocb_free (IOFUNC_OCB_T * i_ocb)
{
    rpmsg_rpc_ocb_t * ocb = (rpmsg_rpc_ocb_t *)i_ocb;
    rpmsg_rpc_object *obj;

    if (ocb && ocb->rpc) {
        obj = ocb->rpc;
        if (obj->created == TRUE) {
            /* Need to disconnect this device */
            _rpmsg_rpc_destroy(NULL, NULL, ocb);
        }
        _rpmsg_rpc_detach(ocb->rpc);
        if (obj->mq) {
            MessageQCopy_delete (&obj->mq);
            obj->mq = NULL;
        }
        free (obj);
        free (ocb);
    }
}

/**
 * Handler for close_ocb() requests.
 *
 * This function removes the notification entries associated with the
 * current client.
 *
 * \param ctp       Thread's associated context information.
 * \param reserved  This argument must be NULL.
 * \param ocb       OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 */
Int
rpmsg_rpc_close_ocb (resmgr_context_t *ctp, void *reserved, RESMGR_OCB_T *ocb)
{
    rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
    iofunc_notify_remove(ctp, rpc_ocb->rpc->notify);
    return (iofunc_close_ocb_default(ctp, reserved, ocb));
}

/**
 * Handler for read() requests.
 *
 * Handles special read() requests that we export for control. A read
 * request will get a message from the remote processor that is associated
 * with the client that is calling read().
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual read() message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 * \retval EAGAIN   Call is non-blocking and no messages available.
 * \retval ENOMEM   Not enough memory to perform the read.
 */
int rpmsg_rpc_read(resmgr_context_t *ctp, io_read_t *msg, RESMGR_OCB_T *ocb)
{
    Int status;
    rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
    rpmsg_rpc_object * rpc = rpc_ocb->rpc;
    Bool flag = FALSE;
    Int retVal = EOK;
    UInt32 i;
    MsgList_t * item;
    Int nonblock;

    if ((status = iofunc_read_verify(ctp, msg, ocb, &nonblock)) != EOK)
        return (status);

    if (rpc->created != TRUE) {
        return (ENOTCONN);
    }

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            item = find_nl(i);
            if (dequeue_notify_list_item(item) < 0) {
                if (nonblock) {
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                    return EAGAIN;
                }
                else {
                    retVal = enqueue_waiting_reader(i, ctp->rcvid);
                    if (retVal == EOK) {
                        pthread_cond_signal(&rpmsg_rpc_state.cond);
                        pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                        return(_RESMGR_NOREPLY);
                    }
                    retVal = ENOMEM;
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                }
            }
            else {
                deliver_notification(i, ctp->rcvid);
                pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                return(_RESMGR_NOREPLY);
            }
        }
    }

    /*! @retval Number-of-bytes-read Number of bytes read. */
    return retVal;
}

/**
 * Unblock read calls
 *
 * This function checks if the client is blocked on a read call and if so,
 * unblocks the client.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The pulse message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EINTR    The client has been unblocked.
 * \retval other    The client has not been unblocked or the client was not
 *                  blocked.
 */
int rpmsg_rpc_read_unblock(resmgr_context_t *ctp, io_pulse_t *msg, iofunc_ocb_t *ocb)
{
    UInt32 i;
    Bool flag = FALSE;
    WaitingReaders_t * wr;
    rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
    rpmsg_rpc_object * rpc = rpc_ocb->rpc;

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            wr = find_waiting_reader(i, ctp->rcvid);
            if (wr) {
                put_wr(wr);
                pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                return (EINTR);
            }
            pthread_mutex_unlock(&rpmsg_rpc_state.lock);
        }
    }

    return _RESMGR_NOREPLY;
}

/**
 * Handler for unblock() requests.
 *
 * Handles unblock request for the client which is requesting to no longer
 * be blocked on the rpmsg-rpc driver.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The pulse message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EINTR    The rcvid has been unblocked.
 */
int rpmsg_rpc_unblock(resmgr_context_t *ctp, io_pulse_t *msg, RESMGR_OCB_T *ocb)
{
    int status = _RESMGR_NOREPLY;
    struct _msg_info info;

    /*
     * Try to run the default unblock for this message.
     */
    if ((status = iofunc_unblock_default(ctp,msg,ocb)) != _RESMGR_DEFAULT) {
        return status;
    }

    /*
     * Check if rcvid is still valid and still has an unblock
     * request pending.
     */
    if (MsgInfo(ctp->rcvid, &info) == -1 ||
        !(info.flags & _NTO_MI_UNBLOCK_REQ)) {
        return _RESMGR_NOREPLY;
    }

    if (rpmsg_rpc_read_unblock(ctp, msg, ocb) != _RESMGR_NOREPLY) {
        return _RESMGR_ERRNO(EINTR);
    }

    return _RESMGR_ERRNO(EINTR);
}

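/**
 * Translates a master physical address to the remote processor's (slave)
 * virtual address space using ProcMgr.
 *
 * \param handle  ProcMgr handle for the remote processor.
 * \param pa      Master physical address to translate.
 *
 * \return The slave virtual address, or 0 if the translation failed.
 */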
uint32_t
_rpmsg_rpc_pa2da(ProcMgr_Handle handle, uint32_t pa)
{
    Int status = 0;
    uint32_t da;

    status = ProcMgr_translateAddr(handle, (Ptr *)&da,
                                   ProcMgr_AddrType_SlaveVirt,
                                   (Ptr)pa, ProcMgr_AddrType_MasterPhys);
    if (status >= 0) {
        return da;
    }
    else {
        return 0;
    }
}

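/**
 * Translates the pointer parameters of a marshalled RPC function message
 * between the calling process's address space and the remote processor's
 * address space. Each translation entry names a parameter index and an
 * offset within that parameter where a pointer must be patched. When
 * reverse is true, the original user-space values recorded at send time
 * are restored instead.
 *
 * \param handle   ProcMgr handle used for address translation (may be NULL
 *                 for reverse translations, which do not translate).
 * \param data     The rppc_function message to patch in place.
 * \param pid      Process ID that owns the referenced buffers.
 * \param reverse  True to undo a previous translation.
 *
 * \return EOK on success, or a negative errno value on failure.
 */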
int
_rpmsg_rpc_translate(ProcMgr_Handle handle, char *data, pid_t pid, bool reverse)
{
    int status = EOK;
    struct rppc_function * function = NULL;
    struct rppc_param_translation * translation = NULL;
    int i = 0;
    off64_t phys_addr;
    off64_t paddr[RPPC_MAX_PARAMETERS];
    uint32_t ipu_addr;
    size_t phys_len = 0;
    uintptr_t ptr;
    void * vptr[RPPC_MAX_PARAMETERS];
    uint32_t idx = 0;
    uint32_t param_offset = 0;

    function = (struct rppc_function *)data;
    memset(vptr, 0, sizeof(void *) * RPPC_MAX_PARAMETERS);
    memset(paddr, 0, sizeof(off64_t) * RPPC_MAX_PARAMETERS);

    translation = (struct rppc_param_translation *)function->translations;
    for (i = 0; i < function->num_translations; i++) {
        idx = translation[i].index;
        if (idx >= function->num_params) {
            status = -EINVAL;
            break;
        }
        param_offset = function->params[idx].data - function->params[idx].base;
        if (translation[i].offset - param_offset + sizeof(uint32_t) > function->params[idx].size) {
            status = -EINVAL;
            break;
        }
        if (!vptr[idx]) {
            /* get the physical address of ptr */
            status = mem_offset64_peer(pid,
                                       function->params[idx].data,
                                       function->params[idx].size,
                                       &paddr[idx], &phys_len);
            if (status >= 0 && phys_len == function->params[idx].size) {
                /* map into my process space */
                vptr[idx] = mmap64(NULL, function->params[idx].size,
                                   PROT_NOCACHE | PROT_READ | PROT_WRITE,
                                   MAP_PHYS, NOFD, paddr[idx]);
                if (vptr[idx] == MAP_FAILED) {
                    vptr[idx] = 0;
                    status = -ENOMEM;
                    break;
                }
            }
            else {
                status = -EINVAL;
                break;
            }
        }
        /* Address of the pointer to be patched, within our mapping */
        ptr = (uint32_t)vptr[idx] + translation[i].offset - param_offset;
        if (reverse) {
            *(uint32_t *)ptr = translation[i].base;
        }
        else {
            translation[i].base = *(uint32_t *)ptr;
            status = mem_offset64_peer(pid, *(uint32_t *)ptr, sizeof(uint32_t),
                                       &phys_addr, &phys_len);
            if (status >= 0 && phys_len == sizeof(uint32_t)) {
                /* translate pa2da */
                if ((ipu_addr =
                         _rpmsg_rpc_pa2da(handle, (uint32_t)phys_addr)) != 0)
                    /* update vptr contents */
                    *(uint32_t *)ptr = ipu_addr;
                else {
                    status = -EINVAL;
                    break;
                }
            }
            else {
                status = -EINVAL;
                break;
            }
        }
    }

    /* No need to do this for reverse translations */
    for (i = 0; i < function->num_params && status >= 0 && !reverse; i++) {
        if (function->params[i].type == RPPC_PARAM_TYPE_PTR) {
            if (paddr[i]) {
                phys_addr = paddr[i];
            }
            else {
                /* translate the param pointer */
                status = mem_offset64_peer(pid,
                                           (uintptr_t)(function->params[i].data),
                                           function->params[i].size, &phys_addr,
                                           &phys_len);
            }
            if (status >= 0) {
                if ((ipu_addr =
                         _rpmsg_rpc_pa2da(handle, (uint32_t)phys_addr)) != 0)
                    function->params[i].data = ipu_addr;
                else {
                    status = -EINVAL;
                    break;
                }
            }
            else {
                status = -EINVAL;
                break;
            }
        }
    }

    for (i = 0; i < function->num_params; i++) {
        if (vptr[i])
            munmap(vptr[i], function->params[i].size);
    }

    return status;
}

/**
 * Handler for write() requests.
 *
 * Handles special write() requests that we export for control. A write()
 * request will send a message to the remote processor which is associated
 * with the client.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual write() message.
 * \param io_ocb  OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 * \retval ENOMEM   Not enough memory to perform the write.
 * \retval EIO      MessageQCopy_send failed.
 * \retval EINVAL   msg->i.bytes is negative.
 */
int
rpmsg_rpc_write(resmgr_context_t *ctp, io_write_t *msg, RESMGR_OCB_T *io_ocb)
{
    int status;
    char buf[MessageQCopy_BUFSIZE];
    int bytes;
    rpmsg_rpc_ocb_t * ocb = (rpmsg_rpc_ocb_t *)io_ocb;
    rpmsg_rpc_object * rpc = ocb->rpc;
    struct rppc_msg_header * msg_hdr = NULL;
    struct rppc_packet *packet = NULL;
    struct rppc_function *function = NULL;
    char usr_msg[MessageQCopy_BUFSIZE];
    int i = 0;
    rpmsg_rpc_EventState * event_state = NULL;
    rpmsg_rpc_FxnInfo * fxn_info = NULL;
    IArg key = 0;

    if ((status = iofunc_write_verify(ctp, msg, io_ocb, NULL)) != EOK) {
        return (status);
    }

    bytes = ((int64_t) msg->i.nbytes) + sizeof(struct rppc_msg_header) > MessageQCopy_BUFSIZE ?
            MessageQCopy_BUFSIZE - sizeof(struct rppc_msg_header) : msg->i.nbytes;
    if (bytes < 0) {
        return EINVAL;
    }
    _IO_SET_WRITE_NBYTES (ctp, bytes);

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            break;
        }
    }
    if (i == MAX_PROCESSES) {
        return EINVAL;
    }
    event_state = &rpmsg_rpc_state.eventState[i];

    msg_hdr = (struct rppc_msg_header *)buf;
    packet = (struct rppc_packet *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));

    status = resmgr_msgread(ctp, usr_msg, bytes, sizeof(msg->i));
    if (status != bytes) {
        return (errno);
    }
    else if (bytes < sizeof(struct rppc_function)) {
        return (EINVAL);
    }
    function = (struct rppc_function *)usr_msg;

    if (bytes < RPPC_PARAM_SIZE(function->num_translations)) {
        return (EINVAL);
    }

    /* check that we're in the correct state */
    if (rpc->created != TRUE) {
        return (EINVAL);
    }

    /* store the fxn info for use with reverse translation */
    fxn_info = Memory_alloc (NULL, sizeof(rpmsg_rpc_FxnInfo) +\
                             RPPC_TRANS_SIZE(function->num_translations),
                             0, NULL);
    if (fxn_info == NULL) {
        return (ENOMEM);
    }
    List_elemClear(&(fxn_info->element));
    Memory_copy (&(fxn_info->func), function,
                 RPPC_PARAM_SIZE(function->num_translations));

    status = _rpmsg_rpc_translate(rpc->conn->procH, (char *)function,
                                  ctp->info.pid, false);
    if (status < 0) {
        Memory_free(NULL, fxn_info, sizeof(rpmsg_rpc_FxnInfo) +\
                    RPPC_TRANS_SIZE(function->num_translations));
        return -status;
    }

    msg_hdr->msg_type = RPPC_MSG_CALL_FUNCTION;
    msg_hdr->msg_len = sizeof(struct rppc_packet);

    /* initialize the packet structure */
    packet->desc = RPPC_DESC_EXEC_SYNC;
    packet->msg_id = msg_id == 0xFFFF ? msg_id = 1 : ++(msg_id);
    packet->flags = (0x8000);//OMAPRPC_POOLID_DEFAULT;
    packet->fxn_id = RPPC_SET_FXN_IDX(function->fxn_id);
    packet->result = 0;
    packet->data_size = 0;
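
    /* Marshal each parameter into the packet payload as a (size, data)
     * pair of 32-bit words. */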
    for (i = 0; i < function->num_params; i++) {
        ((UInt32 *)(packet->data))[i*2] = function->params[i].size;
        ((UInt32 *)(packet->data))[(i*2)+1] = function->params[i].data;
        packet->data_size += (sizeof(UInt32) * 2);
    }
    msg_hdr->msg_len += packet->data_size;

    fxn_info->msgId = packet->msg_id;
    key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
    List_enqueue(event_state->fxnList, &(fxn_info->element));
    IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);

    status = MessageQCopy_send(rpc->conn->procId, MultiProc_self(),
                               rpc->remoteAddr, rpc->addr, buf,
                               msg_hdr->msg_len + sizeof(struct rppc_msg_header),
                               TRUE);
    if (status < 0) {
        key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
        List_remove(event_state->fxnList, &(fxn_info->element));
        IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
        Memory_free(NULL, fxn_info, sizeof(rpmsg_rpc_FxnInfo) +\
                    RPPC_TRANS_SIZE(function->num_translations));
        return (EIO);
    }

    return(EOK);
}

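/*
 * Client-side usage sketch (illustrative only, not part of this driver;
 * the variable names and omitted error handling are hypothetical):
 *
 *   int fd = open("/dev/rpmsg-rpc", O_RDWR);
 *   struct rppc_create_instance connect;
 *   strncpy(connect.name, "rpmsg-rpc", sizeof(connect.name));
 *   devctl(fd, RPPC_IOC_CREATE, &connect, sizeof(connect), NULL);
 *   // Build a struct rppc_function describing fxn_id, params and
 *   // translations, then issue the call and block for the result:
 *   write(fd, fxn, RPPC_PARAM_SIZE(fxn->num_translations));
 *   read(fd, &reply, sizeof(reply));
 */
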
/**
 * Handler for notify() requests.
 *
 * Handles special notify() requests that we export for control. A notify
 * request results from the client calling select().
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual notify() message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 */
Int rpmsg_rpc_notify( resmgr_context_t *ctp, io_notify_t *msg, RESMGR_OCB_T *ocb)
{
    rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
    rpmsg_rpc_object * rpc = rpc_ocb->rpc;
    int trig;
    int i = 0;
    Bool flag = FALSE;
    MsgList_t * item = NULL;
    int status = EOK;

    trig = _NOTIFY_COND_OUTPUT; /* clients can give us data */

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
            flag = TRUE;
            break;
        }
    }

    pthread_mutex_lock(&rpmsg_rpc_state.lock);
    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
            item = find_nl(i);
            if (item && item->num_events > 0) {
                trig |= _NOTIFY_COND_INPUT;
            }
        }
    }
    status = iofunc_notify(ctp, msg, rpc_ocb->rpc->notify, trig, NULL, NULL);
    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
    return status;
}

/**
 * Detaches an rpmsg-rpc resource manager device name.
 *
 * \param dev     The device to detach.
 *
 * \return None.
 */
static
Void
_deinit_rpmsg_rpc_device (rpmsg_rpc_dev_t * dev)
{
    resmgr_detach(syslink_dpp, dev->rpmsg_rpc.resmgr_id, _RESMGR_DETACH_CLOSE);

    pthread_mutex_destroy(&dev->rpmsg_rpc.mutex);

    free (dev);

    return;
}

/**
 * Initializes and attaches rpmsg-rpc resource manager functions to an
 * rpmsg-rpc device name.
 *
 * \param name    The device name; the node is created as /dev/<name>.
 *
 * \return Pointer to the created rpmsg_rpc_dev_t device, or NULL on
 *         failure.
 */
static
rpmsg_rpc_dev_t *
_init_rpmsg_rpc_device (char * name)
{
    iofunc_attr_t *   attr;
    resmgr_attr_t     resmgr_attr;
    rpmsg_rpc_dev_t * dev = NULL;

    dev = malloc(sizeof(*dev));
    if (dev == NULL) {
        return NULL;
    }

    memset(&resmgr_attr, 0, sizeof resmgr_attr);
    resmgr_attr.nparts_max = 10;
    resmgr_attr.msg_max_size = 2048;
    memset(&dev->rpmsg_rpc.mattr, 0, sizeof(iofunc_mount_t));
    dev->rpmsg_rpc.mattr.flags = ST_NOSUID | ST_NOEXEC;
    dev->rpmsg_rpc.mattr.conf = IOFUNC_PC_CHOWN_RESTRICTED |
                                IOFUNC_PC_NO_TRUNC |
                                IOFUNC_PC_SYNC_IO;
    dev->rpmsg_rpc.mattr.funcs = &dev->rpmsg_rpc.mfuncs;
    memset(&dev->rpmsg_rpc.mfuncs, 0, sizeof(iofunc_funcs_t));
    dev->rpmsg_rpc.mfuncs.nfuncs = _IOFUNC_NFUNCS;
    dev->rpmsg_rpc.mfuncs.ocb_calloc = rpmsg_rpc_ocb_calloc;
    dev->rpmsg_rpc.mfuncs.ocb_free = rpmsg_rpc_ocb_free;
    iofunc_func_init(_RESMGR_CONNECT_NFUNCS, &dev->rpmsg_rpc.cfuncs,
                     _RESMGR_IO_NFUNCS, &dev->rpmsg_rpc.iofuncs);
    iofunc_attr_init(attr = &dev->rpmsg_rpc.cattr, S_IFCHR | 0777, NULL, NULL);
    dev->rpmsg_rpc.iofuncs.devctl = rpmsg_rpc_devctl;
    dev->rpmsg_rpc.iofuncs.notify = rpmsg_rpc_notify;
    dev->rpmsg_rpc.iofuncs.close_ocb = rpmsg_rpc_close_ocb;
    dev->rpmsg_rpc.iofuncs.read = rpmsg_rpc_read;
    dev->rpmsg_rpc.iofuncs.write = rpmsg_rpc_write;
    dev->rpmsg_rpc.iofuncs.unblock = rpmsg_rpc_read_unblock;
    attr->mount = &dev->rpmsg_rpc.mattr;
    iofunc_time_update(attr);
    pthread_mutex_init(&dev->rpmsg_rpc.mutex, NULL);

    snprintf (dev->rpmsg_rpc.device_name, _POSIX_PATH_MAX, "/dev/%s", name);
    if (-1 == (dev->rpmsg_rpc.resmgr_id =
                   resmgr_attach(syslink_dpp, &resmgr_attr,
                                 dev->rpmsg_rpc.device_name, _FTYPE_ANY, 0,
                                 &dev->rpmsg_rpc.cfuncs,
                                 &dev->rpmsg_rpc.iofuncs, attr))) {
        pthread_mutex_destroy(&dev->rpmsg_rpc.mutex);
        free(dev);
        return(NULL);
    }

    return(dev);
}
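/*
 * Note: a successful attach publishes the channel as /dev/<name>, where
 * <name> is the service string announced by the remote core (see
 * _rpmsg_rpc_notify_cb below). Clients then use standard open(), devctl(),
 * read(), write(), and select() calls against that path; no setup beyond
 * the resource-manager registration above is needed.
 */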
/**
 * Callback passed to MessageQCopy_registerNotify.
 *
 * This callback is called when a remote processor creates a MessageQCopy
 * handle with the same name as the local MessageQCopy handle and then
 * calls NameMap_register to notify the HOST of the handle.
 *
 * \param handle    The local MessageQCopy handle on which the announcement
 *                  arrived.
 * \param procId    The remote proc ID of the remote handle.
 * \param endpoint  The endpoint address of the remote handle.
 * \param desc      The name announced by the remote side; used as the
 *                  device name to create.
 * \param create    TRUE on creation announcements (unused here).
 *
 * \return None.
 */
static
Void
_rpmsg_rpc_notify_cb (MessageQCopy_Handle handle, UInt16 procId,
                      UInt32 endpoint, Char * desc, Bool create)
{
    Int status = 0, i = 0, j = 0;
    Bool found = FALSE;
    rpmsg_rpc_conn_object * obj = NULL;
    char msg[512];
    struct rppc_msg_header * msg_hdr = (struct rppc_msg_header *)msg;

    for (i = 0; i < MAX_CONNS; i++) {
        if (rpmsg_rpc_state.objects[i] == NULL) {
            found = TRUE;
            break;
        }
    }

    for (j = 0; j < NUM_RPMSG_RPC_QUEUES; j++) {
        if (rpmsg_rpc_state.mqHandle[j] == handle) {
            break;
        }
    }

    if (found && j < NUM_RPMSG_RPC_QUEUES) {
        /* found a space to save this mq handle, allocate memory */
        obj = Memory_calloc (NULL, sizeof (rpmsg_rpc_conn_object), 0x0, NULL);
        if (obj) {
            /* store the object in the module info */
            rpmsg_rpc_state.objects[i] = obj;

            /* store the mq info in the object */
            obj->mq = handle;
            obj->procId = procId;
            status = ProcMgr_open(&obj->procH, obj->procId);
            if (status < 0) {
                Osal_printf("Failed to open handle to proc %d", procId);
                rpmsg_rpc_state.objects[i] = NULL;
                Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
            }
            else {
                obj->addr = endpoint;

                /* create a /dev/rpmsg-rpc instance for users to open */
                obj->dev = _init_rpmsg_rpc_device(desc);
                if (obj->dev == NULL) {
                    Osal_printf("Failed to create %s", desc);
                    ProcMgr_close(&obj->procH);
                    rpmsg_rpc_state.objects[i] = NULL;
                    Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
                }
            }
        }

        /* Send a message to query the chan info. Handle creation of the
         * conn in the callback */
        msg_hdr->msg_type = RPPC_MSG_QUERY_CHAN_INFO;
        msg_hdr->msg_len = 0;
        status = MessageQCopy_send(procId, MultiProc_self(), endpoint,
                                   rpmsg_rpc_state.endpoint[j],
                                   msg, sizeof(struct rppc_msg_header),
                                   TRUE);
    }
}
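/*
 * Handshake summary: on an announcement the HOST claims a free connection
 * slot, opens the remote processor, publishes the /dev node, and then
 * sends RPPC_MSG_QUERY_CHAN_INFO back to the announcing endpoint. The
 * remote side is expected to reply with RPPC_MSG_CHAN_INFO, which is
 * consumed in _rpmsg_rpc_module_cb below.
 */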
/**
 * Callback passed to MessageQCopy_create for the module.
 *
 * This callback is called when a message is received on one of the
 * module's own endpoints, such as the RPPC_MSG_CHAN_INFO reply to the
 * query sent from _rpmsg_rpc_notify_cb. Per-client call traffic does not
 * arrive here, since each client connection gets its own endpoint for
 * message passing.
 *
 * \param handle     The local MessageQCopy handle.
 * \param data       Data message.
 * \param len        Length of data message.
 * \param priv       Private information for the endpoint.
 * \param src        Remote endpoint sending this message.
 * \param srcProc    Remote proc ID sending this message.
 *
 * \return None.
 */
static
Void
_rpmsg_rpc_module_cb (MessageQCopy_Handle handle, void * data, int len,
                      void * priv, UInt32 src, UInt16 srcProc)
{
    Int status = 0, i = 0, j = 0;
    rpmsg_rpc_conn_object * obj = NULL;
    struct rppc_msg_header * msg_hdr = (struct rppc_msg_header *)data;

    Osal_printf ("_rpmsg_rpc_module_cb callback");

    for (i = 0; i < MAX_CONNS; i++) {
        if (rpmsg_rpc_state.objects[i] != NULL &&
            rpmsg_rpc_state.objects[i]->addr == src) {
            obj = rpmsg_rpc_state.objects[i];
            break;
        }
    }

    for (j = 0; j < NUM_RPMSG_RPC_QUEUES; j++) {
        if (rpmsg_rpc_state.mqHandle[j] == handle) {
            break;
        }
    }

    if (obj && j < NUM_RPMSG_RPC_QUEUES) {
        switch (msg_hdr->msg_type) {
            case RPPC_MSG_CHAN_INFO:
            {
                struct rppc_channel_info * chan_info =
                                     (struct rppc_channel_info *)(msg_hdr + 1);
                obj->numFuncs = chan_info->num_funcs;
                /* TODO: Query the info about each function */
                break;
            }
            default:
                status = EINVAL;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_module_cb",
                                    status, "invalid msg_type received");
                break;
        }
    }
}
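/*
 * Reply layout handled above (the only message type accepted here):
 *
 *   | rppc_msg_header | rppc_channel_info |
 *
 * The channel info immediately follows the header, which is why the cast
 * uses (msg_hdr + 1) rather than an explicit byte offset.
 */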
/*!
 *  @brief  Module setup function.
 *
 *  @sa     rpmsg_rpc_destroy
 */
Int
rpmsg_rpc_setup (Void)
{
    UInt16 i;
    List_Params listparams;
    Int status = 0;
    Error_Block eb;
    pthread_attr_t thread_attr;
    struct sched_param sched_param;

    GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_setup");

    Error_init(&eb);

    List_Params_init (&listparams);
    rpmsg_rpc_state.gateHandle = (IGateProvider_Handle)
                     GateSpinlock_create ((GateSpinlock_Handle) NULL, &eb);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (rpmsg_rpc_state.gateHandle == NULL) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "rpmsg_rpc_setup",
                             status,
                             "Failed to create spinlock gate!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            rpmsg_rpc_state.eventState [i].bufList = NULL;
            rpmsg_rpc_state.eventState [i].rpc = NULL;
            rpmsg_rpc_state.eventState [i].refCount = 0;
            rpmsg_rpc_state.eventState [i].head = NULL;
            rpmsg_rpc_state.eventState [i].tail = NULL;
        }
        pthread_attr_init(&thread_attr);
        sched_param.sched_priority = PRIORITY_REALTIME_LOW;
        pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(&thread_attr, SCHED_RR);
        pthread_attr_setschedparam(&thread_attr, &sched_param);

        rpmsg_rpc_state.run = TRUE;
        if (pthread_create(&rpmsg_rpc_state.nt, &thread_attr,
                           notifier_thread, NULL) == EOK) {
            pthread_setname_np(rpmsg_rpc_state.nt, "rpmsg-rpc-notifier");
            /* Initialize the driver mapping array. */
            Memory_set (&rpmsg_rpc_state.objects,
                        0,
                        (sizeof (rpmsg_rpc_conn_object *) * MAX_CONNS));
            for (i = 0; i < NUM_RPMSG_RPC_QUEUES; i++) {
                /* create a local handle and register for notifications
                 * with MessageQCopy */
                rpmsg_rpc_state.mqHandle[i] = MessageQCopy_create (
                                                 MessageQCopy_ADDRANY,
                                                 rpmsg_rpc_names[i].name,
                                                 _rpmsg_rpc_module_cb,
                                                 NULL,
                                                 &rpmsg_rpc_state.endpoint[i]);
                if (rpmsg_rpc_state.mqHandle[i] == NULL) {
                    /*! @retval RPC_FAIL Failed to create MessageQCopy handle! */
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "rpmsg_rpc_setup",
                                         status,
                                         "Failed to create MessageQCopy handle!");
                    break;
                }
                else {
                    /* TBD: This could be replaced with a messageqcopy_open
                     * type call, one for each core */
                    status = MessageQCopy_registerNotify (
                                               rpmsg_rpc_state.mqHandle[i],
                                               _rpmsg_rpc_notify_cb);
                    if (status < 0) {
                        MessageQCopy_delete (&rpmsg_rpc_state.mqHandle[i]);
                        /*! @retval RPC_FAIL Failed to register MQCopy handle! */
                        status = -ENOMEM;
                        GT_setFailureReason (curTrace,
                                             GT_4CLASS,
                                             "rpmsg_rpc_setup",
                                             status,
                                             "Failed to register MQCopy handle!");
                        break;
                    }
                }
            }
            if (status >= 0) {
                rpmsg_rpc_state.sem = OsalSemaphore_create(OsalSemaphore_Type_Binary);
                if (rpmsg_rpc_state.sem == NULL) {
                    //MessageQCopy_unregisterNotify();
                    /*! @retval RPC_FAIL Failed to create semaphore! */
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "rpmsg_rpc_setup",
                                         status,
                                         "Failed to create semaphore!");
                }
            }
            if (status >= 0) {
                rpmsg_rpc_state.isSetup = TRUE;
            }
            else {
                /* Tear down any queues created before the failure; the
                 * handle at the failing index is either NULL or already
                 * deleted, so guard each delete. */
                for (; i > 0; --i) {
                    if (rpmsg_rpc_state.mqHandle[i - 1]) {
                        MessageQCopy_delete (&rpmsg_rpc_state.mqHandle[i - 1]);
                    }
                }
                rpmsg_rpc_state.run = FALSE;
            }
        }
        else {
            rpmsg_rpc_state.run = FALSE;
        }
        pthread_attr_destroy(&thread_attr);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_0trace (curTrace, GT_LEAVE, "rpmsg_rpc_setup");
    return status;
}
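/*
 * Bring-up order (sketch only; the enclosing resource manager is assumed
 * to call these from its init/deinit paths):
 *
 *   if (rpmsg_rpc_setup() >= 0) {
 *       // service requests until shutdown ...
 *       rpmsg_rpc_destroy();
 *   }
 *
 * setup() must succeed before any /dev/<name> node can appear, since the
 * notifier thread, MessageQCopy endpoints, and announcement callback are
 * all created here.
 */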
/*!
 *  @brief  Module destroy function.
 *
 *  @sa     rpmsg_rpc_setup
 */
Void
rpmsg_rpc_destroy (Void)
{
    rpmsg_rpc_EventPacket * packet;
    UInt32 i;
    List_Handle bufList;
    rpmsg_rpc_object * rpc = NULL;
    WaitingReaders_t * wr = NULL;
    struct _msg_info info;

    GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_destroy");

    for (i = 0; i < MAX_CONNS; i++) {
        if (rpmsg_rpc_state.objects[i]) {
            rpmsg_rpc_conn_object * obj = rpmsg_rpc_state.objects[i];
            obj->destroy = TRUE;
            _deinit_rpmsg_rpc_device(obj->dev);
            ProcMgr_close(&obj->procH);
            Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
            rpmsg_rpc_state.objects[i] = NULL;
        }
    }

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        rpc = NULL;
        if (rpmsg_rpc_state.eventState [i].rpc != NULL) {
            /* This is recovery. Need to mark rpc structures as invalid */
            rpc = rpmsg_rpc_state.eventState[i].rpc;
            MessageQCopy_delete(&rpc->mq);
            rpc->mq = NULL;
        }
        bufList = rpmsg_rpc_state.eventState [i].bufList;

        rpmsg_rpc_state.eventState [i].bufList = NULL;
        rpmsg_rpc_state.eventState [i].rpc = NULL;
        rpmsg_rpc_state.eventState [i].refCount = 0;
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&rpmsg_rpc_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                /* Check if rcvid is still valid */
                if (MsgInfo(wr->rcvid, &info) != -1) {
                    put_wr(wr);
                    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
                    MsgError(wr->rcvid, EINTR);
                    pthread_mutex_lock(&rpmsg_rpc_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (rpc) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, 1, 0)) {
                    iofunc_notify_trigger(rpc->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }
            pthread_mutex_unlock(&rpmsg_rpc_state.lock);

            /* Free event packets for any received but unprocessed events. */
            while (List_empty (bufList) != TRUE) {
                packet = (rpmsg_rpc_EventPacket *)List_get (bufList);
                if (packet != NULL) {
                    Memory_free (NULL, packet, sizeof(*packet));
                }
            }
            List_delete (&(bufList));
        }
    }
    /* Free the cached list */
    flush_uBuf();

    if (rpmsg_rpc_state.sem) {
        OsalSemaphore_delete(&rpmsg_rpc_state.sem);
    }

    for (i = 0; i < NUM_RPMSG_RPC_QUEUES; i++) {
        if (rpmsg_rpc_state.mqHandle[i]) {
            //MessageQCopy_unregisterNotify();
            MessageQCopy_delete(&rpmsg_rpc_state.mqHandle[i]);
        }
    }

    if (rpmsg_rpc_state.gateHandle != NULL) {
        GateSpinlock_delete ((GateSpinlock_Handle *)
                             &(rpmsg_rpc_state.gateHandle));
    }

    rpmsg_rpc_state.isSetup = FALSE;
    rpmsg_rpc_state.run = FALSE;
    /* Run through and destroy the notifier thread and all outstanding
     * rpc structures. */
    pthread_mutex_lock(&rpmsg_rpc_state.lock);
    pthread_cond_signal(&rpmsg_rpc_state.cond);
    pthread_mutex_unlock(&rpmsg_rpc_state.lock);
    pthread_join(rpmsg_rpc_state.nt, NULL);
    pthread_mutex_lock(&rpmsg_rpc_state.lock);
    while (rpmsg_rpc_state.head != NULL) {
        int index;
        WaitingReaders_t *item;
        index = dequeue_notify_list_item(rpmsg_rpc_state.head);
        if (index < 0)
            break;
        item = dequeue_waiting_reader(index);
        while (item) {
            put_wr(item);
            item = dequeue_waiting_reader(index);
        }
    }
    rpmsg_rpc_state.head = NULL;
    rpmsg_rpc_state.tail = NULL;
    pthread_mutex_unlock(&rpmsg_rpc_state.lock);

    GT_0trace (curTrace, GT_LEAVE, "rpmsg_rpc_destroy");
}