1 /*
2 * Copyright (c) 2013, Texas Instruments Incorporated
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 *
11 * * Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * * Neither the name of Texas Instruments Incorporated nor the names of
16 * its contributors may be used to endorse or promote products derived
17 * from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
33 /* Standard headers */
34 #include <ti/syslink/Std.h>
36 /* OSAL & Utils headers */
37 #include <ti/syslink/utils/List.h>
38 #include <ti/syslink/utils/String.h>
39 #include <ti/syslink/utils/Trace.h>
40 #include <ti/syslink/utils/Memory.h>
41 #include <ti/syslink/utils/IGateProvider.h>
42 #include <ti/syslink/utils/GateSpinlock.h>
43 #include <_MultiProc.h>
45 /* QNX-specific header includes */
46 #include <errno.h>
47 #include <unistd.h>
48 #include <sys/iofunc.h>
49 #include <sys/dispatch.h>
50 #include <sys/netmgr.h>
51 #include <sys/mman.h>
52 #include <devctl.h>
54 /* Module headers */
55 #include <ti/syslink/ProcMgr.h>
56 #include <ti/ipc/rpmsg_rpc.h>
57 #include <ti/ipc/MessageQCopy.h>
58 #include <_MessageQCopy.h>
59 #include <_MessageQCopyDefs.h>
60 #include "OsalSemaphore.h"
61 #include "std_qnx.h"
62 #include <pthread.h>
64 #include "rpmsg-rpc.h"
65 #include <rpmsg.h>
67 #define PRIORITY_REALTIME_LOW 29
69 extern int mem_offset64_peer(pid_t pid, const uintptr_t addr, size_t len,
70 off64_t *offset, size_t *contig_len);
72 static MsgList_t *nl_cache;
73 static int num_nl = 0;
74 static WaitingReaders_t *wr_cache;
75 static int num_wr = 0;
77 /*
78 * Instead of constantly allocating and freeing the notifier structures
79 * we just cache a few of them, and recycle them instead.
80 * The cache count is set with CACHE_NUM in rpmsg-rpc.h.
81 */
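/*
 * Note: these cache helpers perform no locking of their own; callers are
 * expected to serialize access, as noted by the "we have the right locks"
 * comments on the notify-list and waiting-reader functions below.
 */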
83 static MsgList_t *get_nl()
84 {
85 MsgList_t *item;
86 item = nl_cache;
87 if (item != NULL) {
88 nl_cache = nl_cache->next;
89 num_nl--;
90 } else {
91 item = Memory_alloc(NULL, sizeof(MsgList_t), 0, NULL);
92 }
93 return(item);
94 }
96 static void put_nl(MsgList_t *item)
97 {
98 if (num_nl >= CACHE_NUM) {
99 Memory_free(NULL, item, sizeof(*item));
100 } else {
101 item->next = nl_cache;
102 nl_cache = item;
103 num_nl++;
104 }
105 return;
106 }
108 static WaitingReaders_t *get_wr()
109 {
110 WaitingReaders_t *item;
111 item = wr_cache;
112 if (item != NULL) {
113 wr_cache = wr_cache->next;
114 num_wr--;
115 } else {
116 item = Memory_alloc(NULL, sizeof(WaitingReaders_t), 0, NULL);
117 }
118 return(item);
119 }
121 static void put_wr(WaitingReaders_t *item)
122 {
123 if (num_wr >= CACHE_NUM) {
124 Memory_free(NULL, item, sizeof(*item));
125 } else {
126 item->next = wr_cache;
127 wr_cache = item;
128 num_wr++;
129 }
130 return;
131 }
133 /* structure to hold rpmsg-rpc device information */
134 typedef struct named_device {
135 iofunc_mount_t mattr;
136 iofunc_attr_t cattr;
137 int resmgr_id;
138 pthread_mutex_t mutex;
139 iofunc_funcs_t mfuncs;
140 resmgr_connect_funcs_t cfuncs;
141 resmgr_io_funcs_t iofuncs;
142 char device_name[_POSIX_PATH_MAX];
143 } named_device_t;
145 /* rpmsg-rpc device structure */
146 typedef struct rpmsg_rpc_dev {
147 dispatch_t * dpp;
148 thread_pool_t * tpool;
149 named_device_t rpmsg_rpc;
150 } rpmsg_rpc_dev_t;
152 /*!
153 * @brief Remote connection object
154 */
155 typedef struct rpmsg_rpc_conn_object {
156 rpmsg_rpc_dev_t * dev;
157 MessageQCopy_Handle mq;
158 UInt32 addr;
159 UInt16 procId;
160 ProcMgr_Handle procH;
161 UInt32 numFuncs;
162 Bool destroy;
163 } rpmsg_rpc_conn_object;
165 /*!
166 * @brief rpc instance object
167 */
168 typedef struct rpmsg_rpc_object_tag {
169 MessageQCopy_Handle mq;
170 rpmsg_rpc_conn_object * conn;
171 UInt32 addr;
172 UInt32 remoteAddr;
173 UInt16 procId;
174 pid_t pid;
175 Bool created;
176 iofunc_notify_t notify[3];
177 } rpmsg_rpc_object;
179 /*!
180 * @brief Structure of Event callback argument passed to the register function.
181 */
182 typedef struct rpmsg_rpc_EventCbck_tag {
183 List_Elem element;
184 /*!< List element header */
185 rpmsg_rpc_object * rpc;
186 /*!< User rpc info pointer. Passed back to user callback function */
187 UInt32 pid;
188 /*!< Process Identifier for user process. */
189 } rpmsg_rpc_EventCbck ;
191 /*!
192 * @brief Structure of Fxn Info for reverse translations.
193 */
194 typedef struct rpmsg_rpc_FxnInfo_tag {
195 List_Elem element;
196 /*!< List element header */
197 UInt16 msgId;
198 /*!< Unique msgId of the rpc fxn call */
199 struct rppc_function func;
200 /*!< rpc function information. */
201 } rpmsg_rpc_FxnInfo ;
203 /*!
204 * @brief Keeps the information related to Event.
205 */
206 typedef struct rpmsg_rpc_EventState_tag {
207 List_Handle bufList;
208 /*!< Head of received event list. */
209 List_Handle fxnList;
210 /*!< Head of received msg list. */
211 UInt32 pid;
212 /*!< User process ID. */
213 rpmsg_rpc_object * rpc;
214 /*!< User rpc object. */
215 UInt32 refCount;
216 /*!< Reference count, used when multiple Notify_registerEvent are called
217 from same process space (multi threads/processes). */
218 WaitingReaders_t * head;
219 /*!< Waiting readers head. */
220 WaitingReaders_t * tail;
221 /*!< Waiting readers tail. */
222 } rpmsg_rpc_EventState;
224 /*!
225 * @brief Per-connection information
226 */
227 typedef struct rpmsg_rpc_ocb {
228 iofunc_ocb_t hdr;
229 pid_t pid;
230 rpmsg_rpc_object * rpc;
231 } rpmsg_rpc_ocb_t;
233 typedef struct rpmsg_rpc_name {
234 char name[RPMSG_NAME_SIZE];
235 } rpmsg_rpc_name_t;
237 static struct rpmsg_rpc_name rpmsg_rpc_names[] = {
238 {.name = "rpmsg-rpc"},
239 };
241 #define NUM_RPMSG_RPC_QUEUES (sizeof(rpmsg_rpc_names)/sizeof(*rpmsg_rpc_names))
243 /*!
244 * @brief rpmsg-rpc Module state object
245 */
246 typedef struct rpmsg_rpc_ModuleObject_tag {
247 Bool isSetup;
248 /*!< Indicates whether the module has already been set up */
249 Bool openRefCount;
250 /*!< Open reference count. */
251 IGateProvider_Handle gateHandle;
252 /*!< Handle of gate to be used for local thread safety */
253 rpmsg_rpc_EventState eventState [MAX_PROCESSES];
254 /*!< List for all user processes registered. */
255 rpmsg_rpc_conn_object * objects [MAX_CONNS];
256 /*!< List of all remote connections. */
257 MessageQCopy_Handle mqHandle[NUM_RPMSG_RPC_QUEUES];
258 /*!< Local mq handle associated with this module */
259 UInt32 endpoint[NUM_RPMSG_RPC_QUEUES];
260 /*!< Local endpoint associated with the mq handle */
261 OsalSemaphore_Handle sem;
262 /*!< Handle to semaphore used for rpc instance connection notifications */
263 pthread_t nt;
264 /*!< notifier thread */
265 pthread_mutex_t lock;
266 /*!< protection between notifier and event */
267 pthread_cond_t cond;
268 /*!< protection between notifier and event */
269 MsgList_t *head;
270 /*!< list head */
271 MsgList_t *tail;
272 /*!< list tail */
273 int run;
274 /*!< notifier thread must keep running */
275 } rpmsg_rpc_ModuleObject;
277 /*!
278 * @brief Structure of Event Packet read from notify kernel-side.
279 */
280 typedef struct rpmsg_rpc_EventPacket_tag {
281 List_Elem element;
282 /*!< List element header */
283 UInt32 pid;
284 /*!< Process identifier */
285 rpmsg_rpc_object * obj;
286 /*!< Pointer to the channel associated with this callback */
287 UInt8 data[MessageQCopy_BUFSIZE];
288 /*!< Data associated with event. */
289 UInt32 len;
290 /*!< Length of the data associated with event. */
291 UInt32 src;
292 /*!< Src endpoint associated with event. */
293 struct rpmsg_rpc_EventPacket * next;
294 struct rpmsg_rpc_EventPacket * prev;
295 } rpmsg_rpc_EventPacket ;
298 /*
299 * Instead of constantly allocating and freeing the uBuf structures
300 * we just cache a few of them, and recycle them instead.
301 * The cache count is set with CACHE_NUM in rpmsg-rpc.h.
302 */
303 static rpmsg_rpc_EventPacket *uBuf_cache;
304 static int num_uBuf = 0;
306 static void flush_uBuf()
307 {
308 rpmsg_rpc_EventPacket *uBuf = NULL;
310 while(uBuf_cache) {
311 num_uBuf--;
312 uBuf = uBuf_cache;
313 uBuf_cache = (rpmsg_rpc_EventPacket *)uBuf_cache->next;
314 Memory_free(NULL, uBuf, sizeof(*uBuf));
315 }
316 }
318 static rpmsg_rpc_EventPacket *get_uBuf()
319 {
320 rpmsg_rpc_EventPacket *uBuf;
321 uBuf = uBuf_cache;
322 if (uBuf != NULL) {
323 uBuf_cache = (rpmsg_rpc_EventPacket *)uBuf_cache->next;
324 num_uBuf--;
325 } else {
326 uBuf = Memory_alloc(NULL, sizeof(rpmsg_rpc_EventPacket), 0, NULL);
327 }
328 return(uBuf);
329 }
331 static void put_uBuf(rpmsg_rpc_EventPacket * uBuf)
332 {
333 if (num_uBuf >= CACHE_NUM) {
334 Memory_free(NULL, uBuf, sizeof(*uBuf));
335 } else {
336 uBuf->next = (struct rpmsg_rpc_EventPacket *)uBuf_cache;
337 uBuf_cache = uBuf;
338 num_uBuf++;
339 }
340 return;
341 }
343 /** ============================================================================
344 * Function Prototypes
345 * ============================================================================
346 */
347 int
348 _rpmsg_rpc_translate (ProcMgr_Handle handle, char *data, pid_t pid,
349 bool reverse);
351 /** ============================================================================
352 * Globals
353 * ============================================================================
354 */
355 /*!
356 * @var rpmsg_rpc_state
357 *
358 * @brief rpmsg-rpc state object variable
359 */
360 static rpmsg_rpc_ModuleObject rpmsg_rpc_state =
361 {
362 .gateHandle = NULL,
363 .isSetup = FALSE,
364 .openRefCount = 0,
365 .nt = 0,
366 .lock = PTHREAD_MUTEX_INITIALIZER,
367 .cond = PTHREAD_COND_INITIALIZER,
368 .head = NULL,
369 .tail = NULL,
370 .run = 0
371 };
373 static uint16_t msg_id = 0xFFFF;
375 extern dispatch_t * syslink_dpp;
378 static MsgList_t *find_nl(int index)
379 {
380 MsgList_t *item=NULL;
381 item = rpmsg_rpc_state.head;
382 while (item) {
383 if (item->index == index)
384 return(item);
385 item = item->next;
386 }
387 return(item);
388 }
390 /* we have the right locks when calling this function */
391 /*!
392 * @brief Function to enqueue a notify list item.
393 *
394 * @param index Index of the client process associated with the item.
395 *
396 * @sa find_nl
397 * get_nl
398 */
399 static int enqueue_notify_list(int index)
400 {
401 MsgList_t *item;
402 item = find_nl(index);
403 if (item == NULL) {
404 item = get_nl();
405 if (item == NULL) {
406 return(-1);
407 }
408 item->next = NULL;
409 item->index = index;
410 item->num_events=1;
411 if (rpmsg_rpc_state.head == NULL) {
412 rpmsg_rpc_state.head = item;
413 rpmsg_rpc_state.tail = item;
414 item->prev = NULL;
415 }
416 else {
417 item->prev = rpmsg_rpc_state.tail;
418 rpmsg_rpc_state.tail->next = item;
419 rpmsg_rpc_state.tail = item;
420 }
421 }
422 else {
423 item->num_events++;
424 }
425 return(0);
426 }
428 /* we have the right locks when calling this function */
429 /*!
430 * @brief Function to dequeue a notify list item.
431 *
432 * @param item The item to remove.
433 *
434 * @sa put_nl
435 */
436 static inline int dequeue_notify_list_item(MsgList_t *item)
437 {
438 int index;
439 if (item == NULL) {
440 return(-1);
441 }
442 index = item->index;
443 item->num_events--;
444 if (item->num_events > 0) {
445 return(index);
446 }
447 if (rpmsg_rpc_state.head == item) {
448 // removing head
449 rpmsg_rpc_state.head = item->next;
450 if (rpmsg_rpc_state.head != NULL) {
451 rpmsg_rpc_state.head->prev = NULL;
452 }
453 else {
454 // removing head and tail
455 rpmsg_rpc_state.tail = NULL;
456 }
457 }
458 else {
459 item->prev->next = item->next;
460 if (item->next != NULL) {
461 item->next->prev = item->prev;
462 }
463 else {
464 // removing tail
465 rpmsg_rpc_state.tail = item->prev;
466 }
467 }
468 put_nl(item);
469 return(index);
470 }
472 /* we have the right locks when calling this function */
473 /*!
474 * @brief Function to add a waiting reader to the list.
475 *
476 * @param index Index of the client process waiting reader to add.
477 * @param rcvid Receive ID of the client process that was passed
478 * when the client called read().
479 *
480 * @sa None
481 */
482 static int enqueue_waiting_reader(int index, int rcvid)
483 {
484 WaitingReaders_t *item;
485 item = get_wr();
486 if (item == NULL) {
487 return(-1);
488 }
489 item->rcvid = rcvid;
490 item->next = NULL;
491 if (rpmsg_rpc_state.eventState [index].head == NULL) {
492 rpmsg_rpc_state.eventState [index].head = item;
493 rpmsg_rpc_state.eventState [index].tail = item;
494 }
495 else {
496 rpmsg_rpc_state.eventState [index].tail->next = item;
497 rpmsg_rpc_state.eventState [index].tail = item;
498 }
499 return(EOK);
500 }
502 /* we have the right locks when calling this function */
503 /* caller frees item */
504 /*!
505 * @brief Function to remove a waiting reader from the list.
506 *
507 * @param index Index of the client process waiting reader to dequeue.
508 *
509 * @sa None
510 */
511 static WaitingReaders_t *dequeue_waiting_reader(int index)
512 {
513 WaitingReaders_t *item = NULL;
514 if (rpmsg_rpc_state.eventState [index].head) {
515 item = rpmsg_rpc_state.eventState [index].head;
516 rpmsg_rpc_state.eventState [index].head = rpmsg_rpc_state.eventState [index].head->next;
517 if (rpmsg_rpc_state.eventState [index].head == NULL) {
518 rpmsg_rpc_state.eventState [index].tail = NULL;
519 }
520 }
521 return(item);
522 }
524 /*!
525 * @brief Function to find a specified waiting reader and unlink it from the list.
526 *
527 * @param index Index of the client process waiting for the message.
528 * @param rcvid Receive ID of the client process that was passed
529 * when the client called read().
530 *
531 * @sa None
532 */
534 static WaitingReaders_t *find_waiting_reader(int index, int rcvid)
535 {
536 WaitingReaders_t *item = NULL;
537 WaitingReaders_t *prev = NULL;
538 if (rpmsg_rpc_state.eventState [index].head) {
539 item = rpmsg_rpc_state.eventState [index].head;
540 while (item) {
541 if (item->rcvid == rcvid) {
542 /* remove item from list */
543 if (prev)
544 prev->next = item->next;
545 if (item == rpmsg_rpc_state.eventState [index].head)
546 rpmsg_rpc_state.eventState [index].head = item->next;
547 break;
548 }
549 else {
550 prev = item;
551 item = item->next;
552 }
553 }
554 }
555 return item;
556 }
558 /*!
559 * @brief Function used to check if there is a waiting reader with an
560 * event (message) ready to be delivered.
561 *
562 * @param index Index of the client process waiting for the message.
563 * @param item Pointer to the waiting reader.
564 *
565 * @sa dequeue_notify_list_item
566 * dequeue_waiting_reader
567 */
569 static int find_available_reader_and_event(int *index, WaitingReaders_t **item)
570 {
571 MsgList_t *temp;
572 if (rpmsg_rpc_state.head == NULL) {
573 return(0);
574 }
575 temp = rpmsg_rpc_state.head;
576 while (temp) {
577 if (rpmsg_rpc_state.eventState [temp->index].head) {
578 // event and reader found
579 if (dequeue_notify_list_item(temp) >= 0) {
580 *index = temp->index;
581 *item = dequeue_waiting_reader(temp->index);
582 }
583 else {
584 /* error occurred, return 0 as item has not been set */
585 return(0);
586 }
587 return(1);
588 }
589 temp = temp->next;
590 }
591 return(0);
592 }
594 /*!
595 * @brief Function used to deliver the notification to the client that
596 * it has received a message.
597 *
598 * @param index Index of the client process receiving the message.
599 * @param rcvid Receive ID of the client process that was passed
600 * when the client called read().
601 *
602 * @sa put_uBuf
603 */
605 static void deliver_notification(int index, int rcvid)
606 {
607 int err = EOK;
608 rpmsg_rpc_EventPacket * uBuf = NULL;
610 uBuf = (rpmsg_rpc_EventPacket *) List_get (rpmsg_rpc_state.eventState [index].bufList);
612 /* Let the check remain at run-time. */
613 if (uBuf != NULL) {
614 err = MsgReply(rcvid, uBuf->len, uBuf->data, uBuf->len);
615 if (err == -1)
616 perror("deliver_notification: MsgReply");
617 /* Free the processed event callback packet. */
618 put_uBuf(uBuf);
619 }
620 else {
621 MsgReply(rcvid, EOK, NULL, 0);
622 }
623 return;
624 }
626 /*!
627 * @brief Thread used for notifying waiting readers of messages.
628 *
629 * @param arg Thread-specific private arg.
630 *
631 * @sa find_available_reader_and_event
632 * deliver_notification
633 * put_wr
634 */
635 static void *notifier_thread(void *arg)
636 {
637 int status;
638 int index;
639 WaitingReaders_t *item = NULL;
640 pthread_mutex_lock(&rpmsg_rpc_state.lock);
641 while (rpmsg_rpc_state.run) {
642 status = find_available_reader_and_event(&index, &item);
643 if ( (status == 0) || (item == NULL) ) {
644 status = pthread_cond_wait(&rpmsg_rpc_state.cond, &rpmsg_rpc_state.lock);
645 if ((status != EOK) && (status != EINTR)) {
646 // unexpected error from pthread_cond_wait; exit the notifier thread
647 break;
648 }
649 status = find_available_reader_and_event(&index, &item);
650 if ( (status == 0) || (item == NULL) ) {
651 continue;
652 }
653 }
654 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
655 // we have unlocked, and now we have an event to deliver
656 // we deliver one event at a time, relock, check and continue
657 deliver_notification(index, item->rcvid);
658 pthread_mutex_lock(&rpmsg_rpc_state.lock);
659 put_wr(item);
660 }
661 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
662 return(NULL);
663 }
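/*
 * How work reaches this thread: _rpmsg_rpc_cb() -> _rpmsg_rpc_addBufByPid()
 * queues each incoming packet on the per-process bufList.  If a reader is
 * already blocked in read(), the packet is delivered to it right away;
 * otherwise the event is recorded on the notify list and
 * rpmsg_rpc_state.cond is signalled so that notifier_thread() can pair
 * pending events with readers as they arrive.
 */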
666 static
667 Int
668 _rpmsg_rpc_create(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_rpc_ocb_t *ocb)
669 {
670 Int status = EOK;
671 struct rppc_create_instance * cargs =
672 (struct rppc_create_instance *)(_DEVCTL_DATA (msg->i));
673 struct rppc_msg_header * msg_hdr = NULL;
674 rpmsg_rpc_object * rpc = ocb->rpc;
675 Char * msg_data = NULL;
676 UInt8 buf[sizeof(struct rppc_create_instance) + sizeof(struct rppc_msg_header)];
678 if (rpc->created == TRUE) {
679 GT_0trace(curTrace, GT_4CLASS, "Already created.");
680 status = (EINVAL);
681 }
682 else if ((ctp->info.msglen - sizeof(msg->i)) <
683 sizeof (struct rppc_create_instance)) {
684 status = (EINVAL);
685 }
686 else if (String_nlen(cargs->name, 47) == -1) {
687 status = (EINVAL);
688 }
689 else {
690 msg_hdr = (struct rppc_msg_header *)buf;
691 msg_hdr->msg_type = RPPC_MSG_CREATE_INSTANCE;
692 msg_hdr->msg_len = sizeof(struct rppc_create_instance);
693 msg_data = (Char *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
694 Memory_copy(msg_data, cargs, sizeof(struct rppc_create_instance));
696 status = MessageQCopy_send (rpc->conn->procId, // remote procid
697 MultiProc_self(), // local procid
698 rpc->conn->addr, // remote server
699 rpc->addr, // local address
700 buf, // connect msg
701 sizeof(buf), // msg size
702 TRUE); // wait for available bufs
703 if (status != MessageQCopy_S_SUCCESS) {
704 GT_0trace(curTrace, GT_4CLASS, "Failed to send create message.");
705 status = (EIO);
706 }
707 else {
708 status = OsalSemaphore_pend(rpmsg_rpc_state.sem, 5000);
709 if (rpc->created == TRUE) {
710 msg->o.ret_val = EOK;
711 status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o)));
712 }
713 else if (status < 0) {
714 GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
715 status = (EIO);
716 }
717 else {
718 status = (ETIMEDOUT);
719 }
720 }
721 }
723 return status;
724 }
727 static
728 Int
729 _rpmsg_rpc_destroy(resmgr_context_t *ctp, io_devctl_t *msg,
730 rpmsg_rpc_ocb_t *ocb)
731 {
732 Int status = EOK;
733 struct rppc_msg_header * hdr = NULL;
734 rpmsg_rpc_object * rpc = ocb->rpc;
735 UInt8 buf[sizeof(struct rppc_instance_handle) + sizeof(struct rppc_msg_header)];
736 struct rppc_instance_handle * instance = NULL;
738 if (rpc->created != TRUE) {
739 GT_0trace(curTrace, GT_4CLASS, "Already destroyed.");
740 status = (EINVAL);
741 }
742 else if (!rpc->conn->destroy) {
743 hdr = (struct rppc_msg_header *)buf;
744 hdr->msg_type = RPPC_MSG_DESTROY_INSTANCE;
745 hdr->msg_len = sizeof(struct rppc_instance_handle);
746 instance = (struct rppc_instance_handle *)((UInt32)hdr + sizeof(struct rppc_msg_header));
747 instance->endpoint_address = rpc->remoteAddr;
748 instance->status = 0;
750 status = MessageQCopy_send (rpc->conn->procId, // remote procid
751 MultiProc_self(), // local procid
752 rpc->conn->addr, // remote server
753 rpc->addr, // local address
754 buf, // connect msg
755 sizeof(buf), // msg size
756 TRUE); // wait for available bufs
757 if (status != MessageQCopy_S_SUCCESS) {
758 GT_0trace(curTrace, GT_4CLASS, "Failed to send disconnect message.");
759 status = (EIO);
760 }
761 else {
762 status = OsalSemaphore_pend(rpmsg_rpc_state.sem, 5000);
763 if (rpc->created != FALSE || status < 0) {
764 GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
765 status = (EIO);
766 }
767 else {
768 status = (ETIMEDOUT);
769 }
770 }
771 }
772 else {
773 /* This is the shutdown, remote proc has already been stopped,
774 * so just set created to false. */
775 rpc->created = FALSE;
776 }
778 return status;
779 }
782 Int
783 rpmsg_rpc_devctl(resmgr_context_t *ctp, io_devctl_t *msg, IOFUNC_OCB_T *i_ocb)
784 {
785 Int status = 0;
786 rpmsg_rpc_ocb_t *ocb = (rpmsg_rpc_ocb_t *)i_ocb;
788 if ((status = iofunc_devctl_default(ctp, msg, &ocb->hdr)) != _RESMGR_DEFAULT)
789 return(_RESMGR_ERRNO(status));
790 status = 0;
792 switch (msg->i.dcmd)
793 {
794 case RPPC_IOC_CREATE:
795 status = _rpmsg_rpc_create (ctp, msg, ocb);
796 break;
797 #if 0
798 case RPPC_IOC_DESTROY:
799 status = _rpmsg_rpc_destroy (ctp, msg, ocb);
800 break;
801 #endif
802 default:
803 status = (ENOSYS);
804 break;
805 }
807 return status;
808 }
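/*
 * Client-side usage sketch (illustrative only, not part of this driver; the
 * device path and the instance name passed to the remote core are placeholders
 * and depend on the name announced by the remote processor):
 *
 *     #include <fcntl.h>
 *     #include <devctl.h>
 *     #include <ti/ipc/rpmsg_rpc.h>
 *
 *     int fd = open("/dev/rpmsg-rpc", O_RDWR);
 *     struct rppc_create_instance connect = { .name = "rpmsg-rpc" };
 *     devctl(fd, RPPC_IOC_CREATE, &connect, sizeof(connect), NULL);
 *
 * Remote calls are then issued by write()ing a struct rppc_function (plus its
 * translations) and results are collected with read(); see rpmsg_rpc_write()
 * and rpmsg_rpc_read() below.  Error handling is omitted for brevity.
 */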
811 /*!
812 * @brief Attach a process to the rpmsg-rpc user support framework.
813 *
814 * @param rpc rpc object associated with the client process
815 *
816 * @sa _rpmsg_rpc_detach
817 */
818 static
819 Int
820 _rpmsg_rpc_attach (rpmsg_rpc_object * rpc)
821 {
822 Int32 status = EOK;
823 Bool flag = FALSE;
824 Bool isInit = FALSE;
825 List_Object * bufList = NULL;
826 List_Object * fxnList = NULL;
827 IArg key = 0;
828 List_Params listparams;
829 UInt32 i;
831 GT_1trace (curTrace, GT_ENTER, "_rpmsg_rpc_attach", rpc);
833 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
834 for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
835 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
836 rpmsg_rpc_state.eventState [i].refCount++;
837 isInit = TRUE;
838 status = EOK;
839 break;
840 }
841 }
843 if (isInit == FALSE) {
844 List_Params_init (&listparams);
845 bufList = List_create (&listparams) ;
846 fxnList = List_create (&listparams) ;
847 /* Search for an available slot for user process. */
848 for (i = 0 ; i < MAX_PROCESSES ; i++) {
849 if (rpmsg_rpc_state.eventState [i].rpc == NULL) {
850 rpmsg_rpc_state.eventState [i].rpc = rpc;
851 rpmsg_rpc_state.eventState [i].refCount = 1;
852 rpmsg_rpc_state.eventState [i].bufList = bufList;
853 rpmsg_rpc_state.eventState [i].fxnList = fxnList;
854 flag = TRUE;
855 break;
856 }
857 }
859 /* No free slots found. Let this check remain at run-time,
860 * since it is dependent on user environment.
861 */
862 if (flag != TRUE) {
863 /*! @retval Notify_E_RESOURCE Maximum number of
864 supported user clients have already been registered. */
865 status = -ENOMEM;
866 GT_setFailureReason (curTrace,
867 GT_4CLASS,
868 "rpmsgDrv_attach",
869 status,
870 "Maximum number of supported user"
871 " clients have already been "
872 "registered.");
873 if (bufList != NULL) {
874 List_delete (&bufList);
875 }
876 if (fxnList != NULL) {
877 List_delete (&fxnList);
878 }
879 }
880 }
881 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
883 GT_1trace (curTrace, GT_LEAVE, "_rpmsg_rpc_attach", status);
885 /*! @retval Notify_S_SUCCESS Operation successfully completed. */
886 return status ;
887 }
890 /*!
891 * @brief This function adds data to a registered process.
892 *
893 * @param rpc RPC object associated with the client
894 * @param src Source address (endpoint) sending the data
895 * @param pid Process ID associated with the client
896 * @param data Data to be added
897 * @param len Length of data to be added
898 *
899 * @sa
900 */
901 Int
902 _rpmsg_rpc_addBufByPid (rpmsg_rpc_object *rpc,
903 UInt32 src,
904 UInt32 pid,
905 UInt16 msgId,
906 void * data,
907 UInt32 len)
908 {
909 Int32 status = EOK;
910 Bool flag = FALSE;
911 rpmsg_rpc_EventPacket * uBuf = NULL;
912 IArg key;
913 UInt32 i;
914 WaitingReaders_t *item;
915 MsgList_t *msgItem;
916 List_Elem * elem = NULL;
917 List_Elem * temp = NULL;
919 GT_5trace (curTrace,
920 GT_ENTER,
921 "_rpmsg_rpc_addBufByPid",
922 rpc,
923 src,
924 pid,
925 data,
926 len);
928 GT_assert (curTrace, (rpmsg_rpc_state.isSetup == TRUE));
930 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
931 /* Find the registration for this callback */
932 for (i = 0 ; i < MAX_PROCESSES ; i++) {
933 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
934 flag = TRUE;
935 break;
936 }
937 }
938 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
940 #if !defined(SYSLINK_BUILD_OPTIMIZE)
941 if (flag != TRUE) {
942 /*! @retval ENOMEM Could not find a registered handler
943 for this process. */
944 status = -ENOMEM;
945 GT_setFailureReason (curTrace,
946 GT_4CLASS,
947 "_rpmsgDrv_addBufByPid",
948 status,
949 "Could not find a registered handler "
950 "for this process.!");
951 }
952 else {
953 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
954 /* Allocate memory for the buf */
955 pthread_mutex_lock(&rpmsg_rpc_state.lock);
956 uBuf = get_uBuf();
957 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
959 #if !defined(SYSLINK_BUILD_OPTIMIZE)
960 if (uBuf == NULL) {
961 /*! @retval Notify_E_MEMORY Failed to allocate memory for event
962 packet for received callback. */
963 status = -ENOMEM;
964 GT_setFailureReason (curTrace,
965 GT_4CLASS,
966 "_rpmsgDrv_addBufByPid",
967 status,
968 "Failed to allocate memory for event"
969 " packet for received callback.!");
970 }
971 else {
972 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
973 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
974 List_traverse_safe(elem, temp, rpmsg_rpc_state.eventState [i].fxnList) {
975 if (((rpmsg_rpc_FxnInfo *)elem)->msgId == msgId) {
976 List_remove(rpmsg_rpc_state.eventState [i].fxnList, elem);
977 break;
978 }
979 }
980 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
982 if (elem != (List_Elem *)rpmsg_rpc_state.eventState [i].fxnList) {
983 struct rppc_function * function;
984 function = &(((rpmsg_rpc_FxnInfo *)elem)->func);
985 _rpmsg_rpc_translate(NULL, (char *)function, pid, true);
986 Memory_free(NULL, elem, sizeof(rpmsg_rpc_FxnInfo) +\
987 RPPC_TRANS_SIZE(function->num_translations));
988 }
990 List_elemClear (&(uBuf->element));
991 GT_assert (curTrace,
992 (rpmsg_rpc_state.eventState [i].bufList != NULL));
994 if (data) {
995 Memory_copy(uBuf->data, data, len);
996 }
997 uBuf->len = len;
999 List_put (rpmsg_rpc_state.eventState [i].bufList,
1000 &(uBuf->element));
1001 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1002 item = dequeue_waiting_reader(i);
1003 if (item) {
1004 // there is a waiting reader
1005 deliver_notification(i, item->rcvid);
1006 put_wr(item);
1007 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1008 status = EOK;
1009 }
1010 else {
1011 if (enqueue_notify_list(i) < 0) {
1012 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1013 status = -ENOMEM;
1014 GT_setFailureReason (curTrace,
1015 GT_4CLASS,
1016 "_rpmsgDrv_addBufByPid",
1017 status,
1018 "Failed to allocate memory for notifier");
1019 }
1020 else {
1021 msgItem = find_nl(i);
1022 /* TODO: rpc could be NULL in some cases */
1023 if (rpc && msgItem) {
1024 if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, msgItem->num_events, 0)) {
1025 iofunc_notify_trigger(rpc->notify, msgItem->num_events, IOFUNC_NOTIFY_INPUT);
1026 }
1027 }
1028 status = EOK;
1029 pthread_cond_signal(&rpmsg_rpc_state.cond);
1030 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1031 }
1032 }
1033 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1034 }
1035 }
1036 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1038 GT_1trace (curTrace, GT_LEAVE, "_rpmsgDrv_addBufByPid", status);
1040 return status;
1041 }
1044 /*!
1045 * @brief This function implements the callback registered with
1046 * MessageQCopy_create for each client. This function
1047 * adds the message from the remote proc to a list
1048 * where it is routed to the appropriate waiting reader.
1049 *
1050 * @param handle MessageQCopy handle with which the message was received
1051 * @param data payload of the received message
1052 * @param len length of the received payload in bytes
1053 * @param priv private argument (the rpmsg_rpc_object registered for the client)
1054 * @param src source endpoint address; srcProc is the source processor ID
1055 *
1056 * @sa
1057 */
1058 Void
1059 _rpmsg_rpc_cb (MessageQCopy_Handle handle, void * data, int len, void * priv,
1060 UInt32 src, UInt16 srcProc)
1061 {
1062 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1063 Int32 status = 0;
1064 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1065 rpmsg_rpc_object * rpc = NULL;
1066 struct rppc_msg_header * msg_hdr = NULL;
1067 struct rppc_instance_handle * instance;
1068 struct rppc_packet * packet = NULL;
1070 GT_6trace (curTrace,
1071 GT_ENTER,
1072 "_rpmsg_rpc_cb",
1073 handle,
1074 data,
1075 len,
1076 priv,
1077 src,
1078 srcProc);
1080 if (len < sizeof(struct rppc_msg_header)) {
1081 status = EINVAL;
1082 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb", status,
1083 "len is smaller than sizeof rppc_msg_header");
1084 return;
1085 }
1087 rpc = (rpmsg_rpc_object *) priv;
1088 msg_hdr = (struct rppc_msg_header *)data;
1090 switch (msg_hdr->msg_type) {
1091 case RPPC_MSG_INSTANCE_CREATED:
1092 if (msg_hdr->msg_len != sizeof(struct rppc_instance_handle)) {
1093 status = EINVAL;
1094 rpc->created = FALSE;
1095 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
1096 status, "msg_len is invalid");
1097 }
1098 else {
1099 instance = (struct rppc_instance_handle *)
1100 ((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
1101 rpc->remoteAddr = instance->endpoint_address;
1102 if (instance->status != 0) {
1103 rpc->created = FALSE;
1104 }
1105 else {
1106 rpc->created = TRUE;
1107 }
1108 }
1109 /* post the semaphore to have the ioctl reply */
1110 OsalSemaphore_post(rpmsg_rpc_state.sem);
1111 break;
1112 case RPPC_MSG_INSTANCE_DESTROYED:
1113 if (msg_hdr->msg_len != sizeof(struct rppc_instance_handle)) {
1114 status = EINVAL;
1115 rpc->created = FALSE;
1116 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
1117 status, "msg_len is invalid");
1118 }
1119 else {
1120 instance = (struct rppc_instance_handle *)
1121 ((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
1122 rpc->remoteAddr = instance->endpoint_address;
1123 if (instance->status != 0) {
1124 rpc->created = TRUE;
1125 }
1126 else {
1127 rpc->created = FALSE;
1128 }
1129 }
1130 /* post the semaphore to have the ioctl reply */
1131 OsalSemaphore_post(rpmsg_rpc_state.sem);
1132 break;
1134 case RPPC_MSG_CALL_FUNCTION:
1135 if ((len != sizeof(struct rppc_msg_header) + msg_hdr->msg_len) ||
1136 (msg_hdr->msg_len < sizeof(struct rppc_packet))) {
1137 status = EINVAL;
1138 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb", status, "msg_len is invalid");
1139 return;
1140 }
1141 packet = (struct rppc_packet *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
1142 if (len != sizeof(struct rppc_msg_header) + sizeof(struct rppc_packet) + packet->data_size) {
1143 status = EINVAL;
1144 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb", status, "data_size is inconsistent with len");
1145 return;
1146 }
1147 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1148 status =
1149 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1150 _rpmsg_rpc_addBufByPid (rpc,
1151 src,
1152 rpc->pid,
1153 packet->msg_id,
1154 &packet->fxn_id,
1155 sizeof(packet->fxn_id) + sizeof(packet->result));
1156 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1157 if (status < 0) {
1158 GT_setFailureReason (curTrace,
1159 GT_4CLASS,
1160 "_rpmsg_rpc_cb",
1161 status,
1162 "Failed to add callback packet for pid");
1163 }
1164 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1165 break;
1166 default:
1167 break;
1168 }
1170 GT_0trace (curTrace, GT_LEAVE, "_rpmsg_rpc_cb");
1171 }
1173 /**
1174 * Handler for ocb_calloc() requests.
1175 *
1176 * Special handler for ocb_calloc() requests that we export for control. An
1177 * open request from the client will result in a call to our special ocb_calloc
1178 * handler. This function attaches the client's pid using _rpmsg_rpc_attach
1179 * and allocates client-specific information. It also creates an
1180 * endpoint for the client to communicate with the rpc server on the
1181 * remote core.
1182 *
1183 * \param ctp Thread's associated context information.
1184 * \param device Device attributes structure.
1185 *
1186 * \return Pointer to an iofunc_ocb_t OCB structure.
1187 */
1189 IOFUNC_OCB_T *
1190 rpmsg_rpc_ocb_calloc (resmgr_context_t * ctp, IOFUNC_ATTR_T * device)
1191 {
1192 rpmsg_rpc_ocb_t *ocb = NULL;
1193 rpmsg_rpc_object *obj = NULL;
1194 struct _msg_info cl_info;
1195 rpmsg_rpc_dev_t * dev = NULL;
1196 int i = 0;
1197 Bool found = FALSE;
1198 char path1[20];
1199 char path2[20];
1201 GT_2trace (curTrace, GT_ENTER, "rpmsg_rpc_ocb_calloc",
1202 ctp, device);
1204 /* Allocate the OCB */
1205 ocb = (rpmsg_rpc_ocb_t *) calloc (1, sizeof (rpmsg_rpc_ocb_t));
1206 if (ocb == NULL){
1207 errno = ENOMEM;
1208 return (NULL);
1209 }
1211 ocb->pid = ctp->info.pid;
1213 /* Allocate memory for the rpmsg object. */
1214 obj = Memory_calloc (NULL, sizeof (rpmsg_rpc_object), 0u, NULL);
1215 if (obj == NULL) {
1216 errno = ENOMEM;
1217 free(ocb);
1218 return (NULL);
1219 }
1220 else {
1221 ocb->rpc = obj;
1222 IOFUNC_NOTIFY_INIT(obj->notify);
1223 obj->created = FALSE;
1224 /* determine conn and procId for communication based on which device
1225 * was opened */
1226 MsgInfo(ctp->rcvid, &cl_info);
1227 resmgr_pathname(ctp->id, 0, path1, sizeof(path1));
1228 for (i = 0; i < MAX_CONNS; i++) {
1229 if (rpmsg_rpc_state.objects[i] != NULL) {
1230 dev = rpmsg_rpc_state.objects[i]->dev;
1231 resmgr_pathname(dev->rpmsg_rpc.resmgr_id, 0, path2,
1232 sizeof(path2));
1233 if (!strcmp(path1, path2)) {
1234 found = TRUE;
1235 break;
1236 }
1237 }
1238 }
1239 if (found) {
1240 obj->conn = rpmsg_rpc_state.objects[i];
1241 obj->procId = obj->conn->procId;
1242 obj->pid = ctp->info.pid;
1243 obj->mq = MessageQCopy_create (MessageQCopy_ADDRANY, NULL,
1244 _rpmsg_rpc_cb, obj, &obj->addr);
1245 if (obj->mq == NULL) {
1246 errno = ENOMEM;
1247 free(obj);
1248 free(ocb);
1249 return (NULL);
1250 }
1251 else {
1252 if (_rpmsg_rpc_attach (ocb->rpc) < 0) {
1253 errno = ENOMEM;
1254 MessageQCopy_delete (&obj->mq);
1255 free(obj);
1256 free(ocb);
1257 return (NULL);
1258 }
1259 }
1260 }
1261 }
1263 GT_1trace (curTrace, GT_LEAVE, "rpmsg_rpc_ocb_calloc", ocb);
1265 return (IOFUNC_OCB_T *)(ocb);
1266 }
1269 /*!
1270 * @brief Detach a process from the rpmsg-rpc user support framework.
1271 *
1272 * @param rpc rpc object associated with the client process
1273 *
1274 * @sa _rpmsg_rpc_attach
1275 */
1276 static
1277 Int
1278 _rpmsg_rpc_detach (rpmsg_rpc_object * rpc)
1279 {
1280 Int32 status = EOK;
1281 Int32 tmpStatus = EOK;
1282 Bool flag = FALSE;
1283 List_Object * bufList = NULL;
1284 List_Object * fxnList = NULL;
1285 UInt32 i;
1286 IArg key;
1287 MsgList_t * item;
1288 WaitingReaders_t * wr = NULL;
1289 struct _msg_info info;
1291 GT_1trace (curTrace, GT_ENTER, "rpmsg_rpc_detach", rpc);
1293 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
1295 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1296 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1297 if (rpmsg_rpc_state.eventState [i].refCount == 1) {
1298 rpmsg_rpc_state.eventState [i].refCount = 0;
1300 flag = TRUE;
1301 break;
1302 }
1303 else {
1304 rpmsg_rpc_state.eventState [i].refCount--;
1305 status = EOK;
1306 break;
1307 }
1308 }
1309 }
1310 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
1312 if (flag == TRUE) {
1313 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
1314 /* Last client being unregistered for this process. */
1315 rpmsg_rpc_state.eventState [i].rpc = NULL;
1317 /* Store in local variable to delete outside lock. */
1318 bufList = rpmsg_rpc_state.eventState [i].bufList;
1320 rpmsg_rpc_state.eventState [i].bufList = NULL;
1322 /* Store in local variable to delete outside lock. */
1323 fxnList = rpmsg_rpc_state.eventState [i].fxnList;
1325 rpmsg_rpc_state.eventState [i].fxnList = NULL;
1327 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
1328 }
1330 if (flag != TRUE) {
1331 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1332 if (i == MAX_PROCESSES) {
1333 /*! @retval Notify_E_NOTFOUND The specified user process was
1334 not found registered with Notify Driver module. */
1335 status = -ENOMEM;
1336 GT_setFailureReason (curTrace,
1337 GT_4CLASS,
1338 "rpmsg_rpc_detach",
1339 status,
1340 "The specified user process was not found"
1341 " registered with rpmsg Driver module.");
1342 }
1343 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1344 }
1345 else {
1346 if (bufList != NULL) {
1347 /* Dequeue waiting readers and reply to them */
1348 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1349 while ((wr = dequeue_waiting_reader(i)) != NULL) {
1350 /* Check if rcvid is still valid */
1351 if (MsgInfo(wr->rcvid, &info) != -1) {
1352 put_wr(wr);
1353 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1354 MsgError(wr->rcvid, EINTR);
1355 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1356 }
1357 }
1358 /* Check for pending ionotify/select calls */
1359 if (rpc) {
1360 if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, 1, 0)) {
1361 iofunc_notify_trigger(rpc->notify, 1, IOFUNC_NOTIFY_INPUT);
1362 }
1363 }
1365 /* Free event packets for any received but unprocessed events. */
1366 while ((item = find_nl(i)) != NULL) {
1367 if (dequeue_notify_list_item(item) >= 0) {
1368 rpmsg_rpc_EventPacket * uBuf = NULL;
1370 uBuf = (rpmsg_rpc_EventPacket *) List_get (bufList);
1372 /* Let the check remain at run-time. */
1373 if (uBuf != NULL) {
1374 put_uBuf(uBuf);
1375 }
1376 }
1377 }
1378 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1380 /* Last client being unregistered with Notify module. */
1381 List_delete (&bufList);
1382 }
1383 if (fxnList != NULL) {
1384 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
1385 rpmsg_rpc_FxnInfo * fxnInfo = NULL;
1386 while ((fxnInfo = (rpmsg_rpc_FxnInfo *)List_dequeue (fxnList))) {
1387 Memory_free (NULL, fxnInfo, sizeof(rpmsg_rpc_FxnInfo) +\
1388 RPPC_TRANS_SIZE(fxnInfo->func.num_translations));
1389 }
1390 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
1391 List_delete (&fxnList);
1392 }
1394 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1395 if ((tmpStatus < 0) && (status >= 0)) {
1396 status = tmpStatus;
1397 GT_setFailureReason (curTrace,
1398 GT_4CLASS,
1399 "rpmsg_rpc_detach",
1400 status,
1401 "Failed to delete termination semaphore!");
1402 }
1403 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1404 }
1406 GT_1trace (curTrace, GT_LEAVE, "rpmsg_rpc_detach", status);
1408 /*! @retval Notify_S_SUCCESS Operation successfully completed */
1409 return status;
1410 }
1412 /**
1413 * Handler for ocb_free() requests.
1414 *
1415 * Special handler for ocb_free() requests that we export for control. A
1416 * close request from the client will result in a call to our special ocb_free
1417 * handler. This function detaches the client's pid using _rpmsg_rpc_detach
1418 * and frees any client-specific information that was allocated.
1419 *
1420 * \param i_ocb OCB associated with client's session.
1421 *
1422 * \return None.
1425 */
1427 void
1428 rpmsg_rpc_ocb_free (IOFUNC_OCB_T * i_ocb)
1429 {
1430 rpmsg_rpc_ocb_t * ocb = (rpmsg_rpc_ocb_t *)i_ocb;
1431 rpmsg_rpc_object *obj;
1433 if (ocb && ocb->rpc) {
1434 obj = ocb->rpc;
1435 if (obj->created == TRUE) {
1436 /* Need to disconnect this device */
1437 _rpmsg_rpc_destroy(NULL, NULL, ocb);
1438 }
1439 _rpmsg_rpc_detach(ocb->rpc);
1440 if (obj->mq) {
1441 MessageQCopy_delete (&obj->mq);
1442 obj->mq = NULL;
1443 }
1444 free (obj);
1445 free (ocb);
1446 }
1447 }
1449 /**
1450 * Handler for close_ocb() requests.
1451 *
1452 * This function removes the notification entries associated with the current
1453 * client.
1454 *
1455 * \param ctp Thread's associated context information.
1456 * \param reserved This argument must be NULL.
1457 * \param ocb OCB associated with client's session.
1458 *
1459 * \return POSIX errno value.
1460 *
1461 * \retval EOK Success.
1462 */
1464 Int
1465 rpmsg_rpc_close_ocb (resmgr_context_t *ctp, void *reserved, RESMGR_OCB_T *ocb)
1466 {
1467 rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
1468 iofunc_notify_remove(ctp, rpc_ocb->rpc->notify);
1469 return (iofunc_close_ocb_default(ctp, reserved, ocb));
1470 }
1472 /**
1473 * Handler for read() requests.
1474 *
1475 * Handles special read() requests that we export for control. A read
1476 * request will get a message from the remote processor that is associated
1477 * with the client that is calling read().
1478 *
1479 * \param ctp Thread's associated context information.
1480 * \param msg The actual read() message.
1481 * \param ocb OCB associated with client's session.
1482 *
1483 * \return POSIX errno value.
1484 *
1485 * \retval EOK Success.
1486 * \retval EAGAIN Call is non-blocking and no messages available.
1487 * \retval ENOMEM Not enough memory to perform the read.
1488 */
1490 int rpmsg_rpc_read(resmgr_context_t *ctp, io_read_t *msg, RESMGR_OCB_T *ocb)
1491 {
1492 Int status;
1493 rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
1494 rpmsg_rpc_object * rpc = rpc_ocb->rpc;
1495 Bool flag = FALSE;
1496 Int retVal = EOK;
1497 UInt32 i;
1498 MsgList_t * item;
1499 Int nonblock;
1501 if ((status = iofunc_read_verify(ctp, msg, ocb, &nonblock)) != EOK)
1502 return (status);
1504 if (rpc->created != TRUE) {
1505 return (ENOTCONN);
1506 }
1508 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1509 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1510 flag = TRUE;
1511 break;
1512 }
1513 }
1515 /* Let the check remain at run-time. */
1516 if (flag == TRUE) {
1517 /* Let the check remain at run-time for handling any run-time
1518 * race conditions.
1519 */
1520 if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
1521 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1522 item = find_nl(i);
1523 if (dequeue_notify_list_item(item) < 0) {
1524 if (nonblock) {
1525 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1526 return EAGAIN;
1527 }
1528 else {
1529 retVal = enqueue_waiting_reader(i, ctp->rcvid);
1530 if (retVal == EOK) {
1531 pthread_cond_signal(&rpmsg_rpc_state.cond);
1532 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1533 return(_RESMGR_NOREPLY);
1534 }
1535 retVal = ENOMEM;
1536 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1537 }
1538 }
1539 else {
1540 deliver_notification(i, ctp->rcvid);
1541 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1542 return(_RESMGR_NOREPLY);
1543 }
1544 }
1545 }
1547 /*! @retval Number-of-bytes-read Number of bytes read. */
1548 return retVal;
1549 }
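/*
 * Blocking semantics: when no event is pending and the client did not request
 * non-blocking I/O, the rcvid is parked on the waiting-readers list and
 * _RESMGR_NOREPLY is returned, leaving the client reply-blocked until
 * deliver_notification() replies with the next packet or the client is
 * unblocked via rpmsg_rpc_read_unblock().
 */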
1551 /**
1552 * Unblock read calls
1553 *
1554 * This function checks if the client is blocked on a read call and if so,
1555 * unblocks the client.
1556 *
1557 * \param ctp Thread's associated context information.
1558 * \param msg The pulse message.
1559 * \param ocb OCB associated with client's session.
1560 *
1561 * \return POSIX errno value.
1562 *
1563 * \retval EINTR The client has been unblocked.
1564 * \retval other The client has not been unblocked or the client was not
1565 * blocked.
1566 */
1568 int rpmsg_rpc_read_unblock(resmgr_context_t *ctp, io_pulse_t *msg, iofunc_ocb_t *ocb)
1569 {
1570 UInt32 i;
1571 Bool flag = FALSE;
1572 WaitingReaders_t * wr;
1573 rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
1574 rpmsg_rpc_object * rpc = rpc_ocb->rpc;
1576 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1577 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1578 flag = TRUE;
1579 break;
1580 }
1581 }
1583 /* Let the check remain at run-time. */
1584 if (flag == TRUE) {
1585 /* Let the check remain at run-time for handling any run-time
1586 * race conditions.
1587 */
1588 if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
1589 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1590 wr = find_waiting_reader(i, ctp->rcvid);
1591 if (wr) {
1592 put_wr(wr);
1593 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1594 return (EINTR);
1595 }
1596 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1597 }
1598 }
1600 return _RESMGR_NOREPLY;
1601 }
1603 /**
1604 * Handler for unblock() requests.
1605 *
1606 * Handles unblock request for the client which is requesting to no longer be
1607 * blocked on the rpmsg-rpc driver.
1608 *
1609 * \param ctp Thread's associated context information.
1610 * \param msg The pulse message.
1611 * \param ocb OCB associated with client's session.
1612 *
1613 * \return POSIX errno value.
1614 *
1615 * \retval EINTR The rcvid has been unblocked.
1616 */
1618 int rpmsg_rpc_unblock(resmgr_context_t *ctp, io_pulse_t *msg, RESMGR_OCB_T *ocb)
1619 {
1620 int status = _RESMGR_NOREPLY;
1621 struct _msg_info info;
1623 /*
1624 * Try to run the default unblock for this message.
1625 */
1626 if ((status = iofunc_unblock_default(ctp,msg,ocb)) != _RESMGR_DEFAULT) {
1627 return status;
1628 }
1630 /*
1631 * Check if rcvid is still valid and still has an unblock
1632 * request pending.
1633 */
1634 if (MsgInfo(ctp->rcvid, &info) == -1 ||
1635 !(info.flags & _NTO_MI_UNBLOCK_REQ)) {
1636 return _RESMGR_NOREPLY;
1637 }
1639 if (rpmsg_rpc_read_unblock(ctp, msg, ocb) != _RESMGR_NOREPLY) {
1640 return _RESMGR_ERRNO(EINTR);
1641 }
1643 return _RESMGR_ERRNO(EINTR);
1644 }
1647 uint32_t
1648 _rpmsg_rpc_pa2da(ProcMgr_Handle handle, uint32_t pa)
1649 {
1650 Int status = 0;
1651 uint32_t da;
1653 status = ProcMgr_translateAddr(handle, (Ptr *)&da,
1654 ProcMgr_AddrType_SlaveVirt,
1655 (Ptr)pa, ProcMgr_AddrType_MasterPhys);
1656 if (status >= 0) {
1657 return da;
1658 }
1659 else {
1660 return 0;
1661 }
1662 }
1664 int
1665 _rpmsg_rpc_translate(ProcMgr_Handle handle, char *data, pid_t pid, bool reverse)
1666 {
1667 int status = EOK;
1668 struct rppc_function * function = NULL;
1669 struct rppc_param_translation * translation = NULL;
1670 int i = 0;
1671 off64_t phys_addr;
1672 off64_t paddr[RPPC_MAX_PARAMETERS];
1673 uint32_t ipu_addr;
1674 size_t phys_len = 0;
1675 uintptr_t ptr;
1676 void * vptr[RPPC_MAX_PARAMETERS];
1677 uint32_t idx = 0;
1678 uint32_t param_offset = 0;
1680 function = (struct rppc_function *)data;
1681 memset(vptr, 0, sizeof(void *) * RPPC_MAX_PARAMETERS);
1682 memset(paddr, 0, sizeof(off64_t) * RPPC_MAX_PARAMETERS);
1684 translation = (struct rppc_param_translation *)function->translations;
1685 for (i = 0; i < function->num_translations; i++) {
1686 idx = translation[i].index;
1687 if (idx >= function->num_params) {
1688 status = -EINVAL;
1689 break;
1690 }
1691 param_offset = function->params[idx].data - function->params[idx].base;
1692 if (translation[i].offset - param_offset + sizeof(uint32_t) > function->params[idx].size) {
1693 status = -EINVAL;
1694 break;
1695 }
1696 if (!vptr[idx]) {
1697 /* get the physical address of ptr */
1698 status = mem_offset64_peer(pid,
1699 function->params[idx].data,
1700 function->params[idx].size,
1701 &paddr[idx], &phys_len);
1702 if (status >= 0 && phys_len == function->params[idx].size) {
1703 /* map into my process space */
1704 vptr[idx] = mmap64(NULL, function->params[idx].size,
1705 PROT_NOCACHE | PROT_READ | PROT_WRITE,
1706 MAP_PHYS, NOFD, paddr[idx]);
1707 if (vptr[idx] == MAP_FAILED) {
1708 vptr[idx] = 0;
1709 status = -ENOMEM;
1710 break;
1711 }
1712 }
1713 else {
1714 status = -EINVAL;
1715 break;
1716 }
1717 }
1718 /* Get physical address of the contents */
1719 ptr = (uint32_t)vptr[idx] + translation[i].offset - param_offset;
1720 if (reverse) {
1721 *(uint32_t *)ptr = translation[i].base;
1722 }
1723 else {
1724 translation[i].base = *(uint32_t *)ptr;
1725 status = mem_offset64_peer(pid, *(uint32_t *)ptr, sizeof(uint32_t),
1726 &phys_addr, &phys_len);
1727 if (status >= 0 && phys_len == sizeof(uint32_t)) {
1728 /* translate pa2da */
1729 if ((ipu_addr =
1730 _rpmsg_rpc_pa2da(handle, (uint32_t)phys_addr)) != 0)
1731 /* update vptr contents */
1732 *(uint32_t *)ptr = ipu_addr;
1733 else {
1734 status = -EINVAL;
1735 break;
1736 }
1737 }
1738 else {
1739 status = -EINVAL;
1740 break;
1741 }
1742 }
1743 }
1745 /* No need to do this for reverse translations */
1746 for (i = 0; i < function->num_params && status >= 0 && !reverse; i++) {
1747 if (function->params[i].type == RPPC_PARAM_TYPE_PTR) {
1748 if (paddr[i]) {
1749 phys_addr = paddr[i];
1750 }
1751 else {
1752 /* translate the param pointer */
1753 status = mem_offset64_peer(pid,
1754 (uintptr_t)(function->params[i].data),
1755 function->params[i].size, &phys_addr,
1756 &phys_len);
1757 }
1758 if (status >= 0) {
1759 if ((ipu_addr =
1760 _rpmsg_rpc_pa2da(handle, (uint32_t)phys_addr)) != 0)
1761 function->params[i].data = ipu_addr;
1762 else {
1763 status = -EINVAL;
1764 break;
1765 }
1766 }
1767 else {
1768 status = -EINVAL;
1769 break;
1770 }
1771 }
1772 }
1774 for (i = 0; i < function->num_params; i++) {
1775 if (vptr[i])
1776 munmap(vptr[i], function->params[i].size);
1777 }
1779 return status;
1780 }
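/*
 * Translation summary: for each rppc_param_translation the client's parameter
 * buffer is located with mem_offset64_peer() and mapped into this process with
 * mmap64() so that the embedded pointer can be patched in place.  In the
 * forward direction the user-space pointer is saved in translation[i].base and
 * replaced with the remote core's device address (via _rpmsg_rpc_pa2da()); in
 * the reverse direction the saved value is restored before the result is
 * returned to the client.
 */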
1782 /**
1783 * Handler for write() requests.
1784 *
1785 * Handles special write() requests that we export for control. A write()
1786 * request will send a message to the remote processor which is associated with
1787 * the client.
1788 *
1789 * \param ctp Thread's associated context information.
1790 * \param msg The actual write() message.
1791 * \param io_ocb OCB associated with client's session.
1792 *
1793 * \return POSIX errno value.
1794 *
1795 * \retval EOK Success.
1796 * \retval ENOMEM Not enough memory to perform the write.
1797 * \retval EIO MessageQCopy_send failed.
1798 * \retval EINVAL msg->i.nbytes is invalid or the request is malformed.
1799 */
1801 int
1802 rpmsg_rpc_write(resmgr_context_t *ctp, io_write_t *msg, RESMGR_OCB_T *io_ocb)
1803 {
1804 int status;
1805 char buf[MessageQCopy_BUFSIZE];
1806 int bytes;
1807 rpmsg_rpc_ocb_t * ocb = (rpmsg_rpc_ocb_t *)io_ocb;
1808 rpmsg_rpc_object * rpc = ocb->rpc;
1809 struct rppc_msg_header * msg_hdr = NULL;
1810 struct rppc_packet *packet = NULL;
1811 struct rppc_function *function = NULL;
1812 char usr_msg[MessageQCopy_BUFSIZE];
1813 int i = 0;
1814 rpmsg_rpc_EventState * event_state = NULL;
1815 rpmsg_rpc_FxnInfo * fxn_info = NULL;
1816 IArg key = 0;
1818 if ((status = iofunc_write_verify(ctp, msg, io_ocb, NULL)) != EOK) {
1819 return (status);
1820 }
1822 bytes = ((int64_t) msg->i.nbytes) + sizeof(struct rppc_msg_header) > MessageQCopy_BUFSIZE ?
1823 MessageQCopy_BUFSIZE - sizeof(struct rppc_msg_header) : msg->i.nbytes;
1824 if (bytes < 0) {
1825 return EINVAL;
1826 }
1827 _IO_SET_WRITE_NBYTES (ctp, bytes);
1829 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1830 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1831 break;
1832 }
1833 }
1834 if (i == MAX_PROCESSES) {
1835 return EINVAL;
1836 }
1837 event_state = &rpmsg_rpc_state.eventState[i];
1839 msg_hdr = (struct rppc_msg_header *)buf;
1840 packet = (struct rppc_packet *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
1842 status = resmgr_msgread(ctp, usr_msg, bytes, sizeof(msg->i));
1843 if (status != bytes) {
1844 return (errno);
1845 }
1846 else if (bytes < sizeof(struct rppc_function)) {
1847 return (EINVAL);
1848 }
1849 function = (struct rppc_function *)usr_msg;
1851 if (bytes < RPPC_PARAM_SIZE(function->num_translations)) {
1852 return (EINVAL);
1853 }
1855 /* check that we're in the correct state */
1856 if (rpc->created != TRUE) {
1857 return (EINVAL);
1858 }
1860 /* store the fxn info for use with reverse translation */
1861 fxn_info = Memory_alloc (NULL, sizeof(rpmsg_rpc_FxnInfo) +\
1862 RPPC_TRANS_SIZE(function->num_translations),
1863 0, NULL);
1864 List_elemClear(&(fxn_info->element));
1865 Memory_copy (&(fxn_info->func), function,
1866 RPPC_PARAM_SIZE(function->num_translations));
1868 status = _rpmsg_rpc_translate(rpc->conn->procH, (char *)function,
1869 ctp->info.pid, false);
1870 if (status < 0) {
1871 Memory_free(NULL, fxn_info, sizeof(rpmsg_rpc_FxnInfo) +\
1872 RPPC_TRANS_SIZE(function->num_translations));
1873 return -status;
1874 }
1876 msg_hdr->msg_type = RPPC_MSG_CALL_FUNCTION;
1877 msg_hdr->msg_len = sizeof(struct rppc_packet);
1879 /* initialize the packet structure */
1880 packet->desc = RPPC_DESC_EXEC_SYNC;
1881 packet->msg_id = msg_id == 0xFFFF ? msg_id = 1 : ++(msg_id);
1882 packet->flags = (0x8000);//OMAPRPC_POOLID_DEFAULT;
1883 packet->fxn_id = RPPC_SET_FXN_IDX(function->fxn_id);
1884 packet->result = 0;
1885 packet->data_size = 0;
1887 for (i = 0; i < function->num_params; i++) {
1888 ((UInt32 *)(packet->data))[i*2] = function->params[i].size;
1889 ((UInt32 *)(packet->data))[(i*2)+1] = function->params[i].data;
1890 packet->data_size += (sizeof(UInt32) * 2);
1891 }
1892 msg_hdr->msg_len += packet->data_size;
1894 fxn_info->msgId = packet->msg_id;
1895 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
1896 List_enqueue(event_state->fxnList, &(fxn_info->element));
1897 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
1899 status = MessageQCopy_send(rpc->conn->procId, MultiProc_self(),
1900 rpc->remoteAddr, rpc->addr, buf,
1901 msg_hdr->msg_len + sizeof(struct rppc_msg_header),
1902 TRUE);
1903 if (status < 0) {
1904 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
1905 List_remove(event_state->fxnList, &(fxn_info->element));
1906 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
1907 Memory_free(NULL, fxn_info, sizeof(rpmsg_rpc_FxnInfo) +\
1908 RPPC_TRANS_SIZE(function->num_translations));
1909 return (EIO);
1910 }
1912 return(EOK);
1913 }
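/*
 * Wire format produced above: a struct rppc_msg_header with msg_type
 * RPPC_MSG_CALL_FUNCTION, immediately followed by a struct rppc_packet whose
 * data area carries one (size, data) pair of 32-bit words per parameter.  The
 * original rppc_function (including its translations) is kept on the
 * per-process fxnList keyed by msg_id so that _rpmsg_rpc_addBufByPid() can
 * reverse the pointer translations when the matching reply arrives.
 */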
1917 /**
1918 * Handler for notify() requests.
1919 *
1920 * Handles special notify() requests that we export for control. A notify
1921 * request results from the client calling select().
1922 *
1923 * \param ctp Thread's associated context information.
1924 * \param msg The actual notify() message.
1925 * \param ocb OCB associated with client's session.
1926 *
1927 * \return POSIX errno value.
1928 */
1930 Int rpmsg_rpc_notify( resmgr_context_t *ctp, io_notify_t *msg, RESMGR_OCB_T *ocb)
1931 {
1932 rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
1933 rpmsg_rpc_object * rpc = rpc_ocb->rpc;
1934 int trig;
1935 int i = 0;
1936 Bool flag = FALSE;
1937 MsgList_t * item = NULL;
1938 int status = EOK;
1940 trig = _NOTIFY_COND_OUTPUT; /* clients can give us data */
1942 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1943 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1944 flag = TRUE;
1945 break;
1946 }
1947 }
1949 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1950 /* Let the check remain at run-time. */
1951 if (flag == TRUE) {
1952 /* Let the check remain at run-time for handling any run-time
1953 * race conditions.
1954 */
1955 if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
1956 item = find_nl(i);
1957 if (item && item->num_events > 0) {
1958 trig |= _NOTIFY_COND_INPUT;
1959 }
1960 }
1961 }
1962 status = iofunc_notify(ctp, msg, rpc_ocb->rpc->notify, trig, NULL, NULL);
1963 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1964 return status;
1965 }
1967 /**
1968 * Detaches an rpmsg-rpc resource manager device name.
1969 *
1970 * \param dev The device to detach.
1971 *
1972 * \return POSIX errno value.
1973 */
1975 static
1976 Void
1977 _deinit_rpmsg_rpc_device (rpmsg_rpc_dev_t * dev)
1978 {
1979 resmgr_detach(syslink_dpp, dev->rpmsg_rpc.resmgr_id, _RESMGR_DETACH_CLOSE);
1981 pthread_mutex_destroy(&dev->rpmsg_rpc.mutex);
1983 free (dev);
1985 return;
1986 }
1988 /**
1989 * Initializes and attaches rpmsg-rpc resource manager functions to an
1990 * rpmsg-rpc device name.
1991 *
1992 * \param name The name to append to "/dev/" to form the device path.
1993 *
1994 * \return Pointer to the created rpmsg_rpc_dev_t device.
1995 */
1997 static
1998 rpmsg_rpc_dev_t *
1999 _init_rpmsg_rpc_device (char * name)
2000 {
2001 iofunc_attr_t * attr;
2002 resmgr_attr_t resmgr_attr;
2003 rpmsg_rpc_dev_t * dev = NULL;
2005 dev = malloc(sizeof(*dev));
2006 if (dev == NULL) {
2007 return NULL;
2008 }
2010 memset(&resmgr_attr, 0, sizeof resmgr_attr);
2011 resmgr_attr.nparts_max = 10;
2012 resmgr_attr.msg_max_size = 2048;
2013 memset(&dev->rpmsg_rpc.mattr, 0, sizeof(iofunc_mount_t));
2014 dev->rpmsg_rpc.mattr.flags = ST_NOSUID | ST_NOEXEC;
2015 dev->rpmsg_rpc.mattr.conf = IOFUNC_PC_CHOWN_RESTRICTED |
2016 IOFUNC_PC_NO_TRUNC |
2017 IOFUNC_PC_SYNC_IO;
2018 dev->rpmsg_rpc.mattr.funcs = &dev->rpmsg_rpc.mfuncs;
2019 memset(&dev->rpmsg_rpc.mfuncs, 0, sizeof(iofunc_funcs_t));
2020 dev->rpmsg_rpc.mfuncs.nfuncs = _IOFUNC_NFUNCS;
2021 dev->rpmsg_rpc.mfuncs.ocb_calloc = rpmsg_rpc_ocb_calloc;
2022 dev->rpmsg_rpc.mfuncs.ocb_free = rpmsg_rpc_ocb_free;
2023 iofunc_func_init(_RESMGR_CONNECT_NFUNCS, &dev->rpmsg_rpc.cfuncs,
2024 _RESMGR_IO_NFUNCS, &dev->rpmsg_rpc.iofuncs);
2025 iofunc_attr_init(attr = &dev->rpmsg_rpc.cattr, S_IFCHR | 0777, NULL, NULL);
2026 dev->rpmsg_rpc.iofuncs.devctl = rpmsg_rpc_devctl;
2027 dev->rpmsg_rpc.iofuncs.notify = rpmsg_rpc_notify;
2028 dev->rpmsg_rpc.iofuncs.close_ocb = rpmsg_rpc_close_ocb;
2029 dev->rpmsg_rpc.iofuncs.read = rpmsg_rpc_read;
2030 dev->rpmsg_rpc.iofuncs.write = rpmsg_rpc_write;
2031 dev->rpmsg_rpc.iofuncs.unblock = rpmsg_rpc_read_unblock;
2032 attr->mount = &dev->rpmsg_rpc.mattr;
2033 iofunc_time_update(attr);
2034 pthread_mutex_init(&dev->rpmsg_rpc.mutex, NULL);
2036 snprintf (dev->rpmsg_rpc.device_name, _POSIX_PATH_MAX, "/dev/%s", name);
2037 if (-1 == (dev->rpmsg_rpc.resmgr_id =
2038 resmgr_attach(syslink_dpp, &resmgr_attr,
2039 dev->rpmsg_rpc.device_name, _FTYPE_ANY, 0,
2040 &dev->rpmsg_rpc.cfuncs,
2041 &dev->rpmsg_rpc.iofuncs, attr))) {
2042 pthread_mutex_destroy(&dev->rpmsg_rpc.mutex);
2043 free(dev);
2044 return(NULL);
2045 }
2047 return(dev);
2048 }
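/*
 * Usage sketch (illustrative only). The notify callback below pairs
 * _init_rpmsg_rpc_device() with _deinit_rpmsg_rpc_device() as connections
 * come and go; the descriptor string normally arrives from the remote
 * announcement. The name "rpmsg-rpc-example" is an assumption made for the
 * sketch.
 *
 *     rpmsg_rpc_dev_t * dev = _init_rpmsg_rpc_device("rpmsg-rpc-example");
 *     if (dev != NULL) {
 *         // the device is now visible as /dev/rpmsg-rpc-example
 *         _deinit_rpmsg_rpc_device(dev);
 *     }
 */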
2050 /**
2051 * Callback passed to MessageQCopy_registerNotify.
2052 *
2053 * This callback is called when a remote processor creates a MessageQCopy
2054 * handle with the same name as the local MessageQCopy handle and then
2055 * calls NameMap_register to notify the HOST of the handle.
2056 *
2057 * \param handle The remote handle.
2058 * \param procId The remote proc ID of the remote handle.
2059 * \param endpoint The endpoint address of the remote handle.
2060 *
2061 * \return None.
2062 */
2064 static
2065 Void
2066 _rpmsg_rpc_notify_cb (MessageQCopy_Handle handle, UInt16 procId,
2067 UInt32 endpoint, Char * desc, Bool create)
2068 {
2069 Int status = 0, i = 0, j = 0;
2070 Bool found = FALSE;
2071 rpmsg_rpc_conn_object * obj = NULL;
2072 char msg[512];
2073 struct rppc_msg_header * msg_hdr = (struct rppc_msg_header *)msg;
2075 for (i = 0; i < MAX_CONNS; i++) {
2076 if (rpmsg_rpc_state.objects[i] == NULL) {
2077 found = TRUE;
2078 break;
2079 }
2080 }
2082 for (j = 0; j < NUM_RPMSG_RPC_QUEUES; j++) {
2083 if (rpmsg_rpc_state.mqHandle[j] == handle) {
2084 break;
2085 }
2086 }
2088 if (found && j < NUM_RPMSG_RPC_QUEUES) {
2089 /* found a space to save this mq handle, allocate memory */
2090 obj = Memory_calloc (NULL, sizeof (rpmsg_rpc_conn_object), 0x0, NULL);
2091 if (obj) {
2092 /* store the object in the module info */
2093 rpmsg_rpc_state.objects[i] = obj;
2095 /* store the mq info in the object */
2096 obj->mq = handle;
2097 obj->procId = procId;
2098 status = ProcMgr_open(&obj->procH, obj->procId);
2099 if (status < 0) {
2100 Osal_printf("Failed to open handle to proc %d", procId);
2101 Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
2102 }
2103 else {
2104 obj->addr = endpoint;
2106 /* create a /dev/rpmsg-rpc instance for users to open */
2107 obj->dev = _init_rpmsg_rpc_device(desc);
2108 if (obj->dev == NULL) {
2109 Osal_printf("Failed to create %s", desc);
2110 ProcMgr_close(&obj->procH);
2111 Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
2112 }
2113 }
2114 }
2116 /* Send a message to query the channel info. The connection setup is
2117 * completed in the module callback when the reply arrives. */
2118 msg_hdr->msg_type = RPPC_MSG_QUERY_CHAN_INFO;
2119 msg_hdr->msg_len = 0;
2120 status = MessageQCopy_send(procId, MultiProc_self(), endpoint,
2121 rpmsg_rpc_state.endpoint[j],
2122 msg, sizeof(struct rppc_msg_header),
2123 TRUE);
2124 }
2125 }
2127 /**
2128 * Callback passed to MessageQCopy_create for the module.
2129 *
2130 * This callback is called when a message arrives on one of the rpmsg-rpc
2131 * module endpoints (e.g. an RPPC_MSG_CHAN_INFO reply). Per-client traffic
2132 * does not arrive here, since each client connection gets its own endpoint.
2133 *
2134 * \param handle The local MessageQCopy handle.
2135 * \param data Data message
2136 * \param len Length of data message
2137 * \param priv Private information for the endpoint
2138 * \param src Remote endpoint sending this message
2139 * \param srcProc Remote proc ID sending this message
2140 *
2141 * \return None.
2142 */
2144 static
2145 Void
2146 _rpmsg_rpc_module_cb (MessageQCopy_Handle handle, void * data, int len,
2147 void * priv, UInt32 src, UInt16 srcProc)
2148 {
2149 Int status = 0, i = 0, j = 0;
2150 rpmsg_rpc_conn_object * obj = NULL;
2151 struct rppc_msg_header * msg_hdr = (struct rppc_msg_header *)data;
2153 Osal_printf ("_rpmsg_rpc_module_cb callback");
2155 for (i = 0; i < MAX_CONNS; i++) {
2156 if (rpmsg_rpc_state.objects[i] != NULL &&
2157 rpmsg_rpc_state.objects[i]->addr == src) {
2158 obj = rpmsg_rpc_state.objects[i];
2159 break;
2160 }
2161 }
2163 for (j = 0; j < NUM_RPMSG_RPC_QUEUES; j++) {
2164 if (rpmsg_rpc_state.mqHandle[j] == handle) {
2165 break;
2166 }
2167 }
2169 if (obj && j < NUM_RPMSG_RPC_QUEUES) {
2170 switch (msg_hdr->msg_type) {
2171 case RPPC_MSG_CHAN_INFO:
2172 {
2173 struct rppc_channel_info * chan_info =
2174 (struct rppc_channel_info *)(msg_hdr + 1);
2175 obj->numFuncs = chan_info->num_funcs;
2176 /* TODO: Query the info about each function */
2177 break;
2178 }
2179 default:
2180 status = EINVAL;
2181 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_module_cb",
2182 status, "invalid msg_type received");
2183 break;
2184 }
2185 }
2186 }
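/*
 * Message layout sketch (illustrative only). The RPPC_MSG_CHAN_INFO reply
 * handled above is expected to carry a struct rppc_channel_info immediately
 * after the rppc_msg_header; only num_funcs is consumed here. The msg_len
 * value and the payload construction below are assumptions made for the
 * sketch.
 *
 *     char buf[sizeof(struct rppc_msg_header) +
 *              sizeof(struct rppc_channel_info)];
 *     struct rppc_msg_header *   hdr  = (struct rppc_msg_header *)buf;
 *     struct rppc_channel_info * info = (struct rppc_channel_info *)(hdr + 1);
 *
 *     hdr->msg_type   = RPPC_MSG_CHAN_INFO;
 *     hdr->msg_len    = sizeof(struct rppc_channel_info);
 *     info->num_funcs = 1;  // number of functions published by the remote
 */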
2189 /*!
2190 * @brief Module setup function.
2191 *
2192 * @sa rpmsg_rpc_destroy
2193 */
2194 Int
2195 rpmsg_rpc_setup (Void)
2196 {
2197 UInt16 i;
2198 List_Params listparams;
2199 Int status = 0;
2200 Error_Block eb;
2201 pthread_attr_t thread_attr;
2202 struct sched_param sched_param;
2204 GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_setup");
2206 Error_init(&eb);
2208 List_Params_init (&listparams);
2209 rpmsg_rpc_state.gateHandle = (IGateProvider_Handle)
2210 GateSpinlock_create ((GateSpinlock_Handle) NULL, &eb);
2211 #if !defined(SYSLINK_BUILD_OPTIMIZE)
2212 if (rpmsg_rpc_state.gateHandle == NULL) {
2213 status = -ENOMEM;
2214 GT_setFailureReason (curTrace,
2215 GT_4CLASS,
2216 "_rpmsg_rpc_setup",
2217 status,
2218 "Failed to create spinlock gate!");
2219 }
2220 else {
2221 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
2222 for (i = 0 ; i < MAX_PROCESSES ; i++) {
2223 rpmsg_rpc_state.eventState [i].bufList = NULL;
2224 rpmsg_rpc_state.eventState [i].rpc = NULL;
2225 rpmsg_rpc_state.eventState [i].refCount = 0;
2226 rpmsg_rpc_state.eventState [i].head = NULL;
2227 rpmsg_rpc_state.eventState [i].tail = NULL;
2228 }
2230 pthread_attr_init(&thread_attr);
2231 sched_param.sched_priority = PRIORITY_REALTIME_LOW;
2232 pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED);
2233 pthread_attr_setschedpolicy(&thread_attr, SCHED_RR);
2234 pthread_attr_setschedparam(&thread_attr, &sched_param);
2236 rpmsg_rpc_state.run = TRUE;
2237 if (pthread_create(&rpmsg_rpc_state.nt, &thread_attr, notifier_thread, NULL) == EOK) {
2238 pthread_setname_np(rpmsg_rpc_state.nt, "rpmsg-rpc-notifier");
2239 /* Initialize the driver mapping array. */
2240 Memory_set (&rpmsg_rpc_state.objects,
2241 0,
2242 (sizeof (rpmsg_rpc_conn_object *)
2243 * MAX_CONNS));
2244 for (i = 0; i < NUM_RPMSG_RPC_QUEUES; i++) {
2245 /* create a local handle and register for notifications with MessageQCopy */
2246 rpmsg_rpc_state.mqHandle[i] = MessageQCopy_create (
2247 MessageQCopy_ADDRANY,
2248 rpmsg_rpc_names[i].name,
2249 _rpmsg_rpc_module_cb,
2250 NULL,
2251 &rpmsg_rpc_state.endpoint[i]);
2252 if (rpmsg_rpc_state.mqHandle[i] == NULL) {
2253 /*! @retval RPC_FAIL Failed to create MessageQCopy handle! */
2254 status = -ENOMEM;
2255 GT_setFailureReason (curTrace,
2256 GT_4CLASS,
2257 "rpmsg_rpc_setup",
2258 status,
2259 "Failed to create MessageQCopy handle!");
2260 break;
2261 }
2262 else {
2263 /* TBD: This could be replaced with a messageqcopy_open type call, one for
2264 * each core */
2265 status = MessageQCopy_registerNotify (rpmsg_rpc_state.mqHandle[i],
2266 _rpmsg_rpc_notify_cb);
2267 if (status < 0) {
2268 MessageQCopy_delete (&rpmsg_rpc_state.mqHandle[i]);
2269 /*! @retval RPC_FAIL Failed to register MQCopy handle! */
2270 status = -ENOMEM;
2271 GT_setFailureReason (curTrace,
2272 GT_4CLASS,
2273 "rpmsg_rpc_setup",
2274 status,
2275 "Failed to register MQCopy handle!");
2276 break;
2277 }
2278 }
2279 }
2280 if (status >= 0){
2281 rpmsg_rpc_state.sem = OsalSemaphore_create(OsalSemaphore_Type_Binary);
2282 if (rpmsg_rpc_state.sem == NULL) {
2283 //MessageQCopy_unregisterNotify();
2284 /*! @retval RPC_FAIL Failed to create semaphore! */
2285 status = -ENOMEM;
2286 GT_setFailureReason (curTrace,
2287 GT_4CLASS,
2288 "rpmsg_rpc_setup",
2289 status,
2290 "Failed to register MQCopy handle!");
2291 }
2292 }
2293 if (status >= 0) {
2294 rpmsg_rpc_state.isSetup = TRUE;
2295 }
2296 else {
2297 for (; i > 0; --i) {
2298 MessageQCopy_delete (&rpmsg_rpc_state.mqHandle[i - 1]);
2299 }
2300 rpmsg_rpc_state.run = FALSE;
2301 }
2302 }
2303 else {
2304 rpmsg_rpc_state.run = FALSE;
2305 }
2306 pthread_attr_destroy(&thread_attr);
2307 #if !defined(SYSLINK_BUILD_OPTIMIZE)
2308 }
2309 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
2311 GT_0trace (curTrace, GT_LEAVE, "rpmsg_rpc_setup");
2312 return status;
2313 }
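/*
 * Module lifetime sketch (illustrative only). A hosting daemon would bracket
 * its dispatch loop with rpmsg_rpc_setup() and rpmsg_rpc_destroy(); anything
 * beyond the status check below is an assumption made for the sketch.
 *
 *     Int status = rpmsg_rpc_setup();
 *     if (status >= 0) {
 *         // ... run the resource manager dispatch loop ...
 *         rpmsg_rpc_destroy();
 *     }
 */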
2316 /*!
2317 * @brief Module destroy function.
2318 *
2319 * @sa rpmsg_rpc_setup
2320 */
2321 Void
2322 rpmsg_rpc_destroy (Void)
2323 {
2324 rpmsg_rpc_EventPacket * packet;
2325 UInt32 i;
2326 List_Handle bufList;
2327 rpmsg_rpc_object * rpc = NULL;
2328 WaitingReaders_t * wr = NULL;
2329 struct _msg_info info;
2331 GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_destroy");
2333 for (i = 0; i < MAX_CONNS; i++) {
2334 if (rpmsg_rpc_state.objects[i]) {
2335 rpmsg_rpc_conn_object * obj = rpmsg_rpc_state.objects[i];
2336 obj->destroy = TRUE;
2337 _deinit_rpmsg_rpc_device(obj->dev);
2338 ProcMgr_close(&obj->procH);
2339 Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
2340 rpmsg_rpc_state.objects[i] = NULL;
2341 }
2342 }
2344 for (i = 0 ; i < MAX_PROCESSES ; i++) {
2345 rpc = NULL;
2346 if (rpmsg_rpc_state.eventState [i].rpc != NULL) {
2347 /* This is recovery. Need to mark rpc structures as invalid */
2348 rpc = rpmsg_rpc_state.eventState[i].rpc;
2349 MessageQCopy_delete(&rpc->mq);
2350 rpc->mq = NULL;
2351 }
2352 bufList = rpmsg_rpc_state.eventState [i].bufList;
2354 rpmsg_rpc_state.eventState [i].bufList = NULL;
2355 rpmsg_rpc_state.eventState [i].rpc = NULL;
2356 rpmsg_rpc_state.eventState [i].refCount = 0;
2357 if (bufList != NULL) {
2358 /* Dequeue waiting readers and reply to them */
2359 pthread_mutex_lock(&rpmsg_rpc_state.lock);
2360 while ((wr = dequeue_waiting_reader(i)) != NULL) {
2361 /* Check if rcvid is still valid */
2362 if (MsgInfo(wr->rcvid, &info) != -1) {
2363 put_wr(wr);
2364 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
2365 MsgError(wr->rcvid, EINTR);
2366 pthread_mutex_lock(&rpmsg_rpc_state.lock);
2367 }
2368 }
2369 /* Check for pending ionotify/select calls */
2370 if (rpc) {
2371 if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, 1, 0)) {
2372 iofunc_notify_trigger(rpc->notify, 1, IOFUNC_NOTIFY_INPUT);
2373 }
2374 }
2375 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
2377 /* Free event packets for any received but unprocessed events. */
2378 while (List_empty (bufList) != TRUE){
2379 packet = (rpmsg_rpc_EventPacket *)
2380 List_get (bufList);
2381 if (packet != NULL){
2382 Memory_free (NULL, packet, sizeof(*packet));
2383 }
2384 }
2385 List_delete (&(bufList));
2386 }
2387 }
2389 /* Free the cached list */
2390 flush_uBuf();
2392 if (rpmsg_rpc_state.sem) {
2393 OsalSemaphore_delete(&rpmsg_rpc_state.sem);
2394 }
2396 for (i = 0; i < NUM_RPMSG_RPC_QUEUES; i++) {
2397 if (rpmsg_rpc_state.mqHandle[i]) {
2398 //MessageQCopy_unregisterNotify();
2399 MessageQCopy_delete(&rpmsg_rpc_state.mqHandle[i]);
2400 }
2401 }
2403 if (rpmsg_rpc_state.gateHandle != NULL) {
2404 GateSpinlock_delete ((GateSpinlock_Handle *)
2405 &(rpmsg_rpc_state.gateHandle));
2406 }
2408 rpmsg_rpc_state.isSetup = FALSE ;
2409 rpmsg_rpc_state.run = FALSE;
2410 // Shut down the notifier thread and drain any outstanding
2411 // notify-list entries and waiting readers.
2412 pthread_mutex_lock(&rpmsg_rpc_state.lock);
2413 pthread_cond_signal(&rpmsg_rpc_state.cond);
2414 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
2415 pthread_join(rpmsg_rpc_state.nt, NULL);
2416 pthread_mutex_lock(&rpmsg_rpc_state.lock);
2417 while (rpmsg_rpc_state.head != NULL) {
2418 int index;
2419 WaitingReaders_t *item;
2420 index = dequeue_notify_list_item(rpmsg_rpc_state.head);
2421 if (index < 0)
2422 break;
2423 item = dequeue_waiting_reader(index);
2424 while (item) {
2425 put_wr(item);
2426 item = dequeue_waiting_reader(index);
2427 }
2428 }
2429 rpmsg_rpc_state.head = NULL ;
2430 rpmsg_rpc_state.tail = NULL ;
2431 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
2433 GT_0trace (curTrace, GT_LEAVE, "rpmsg_rpc_destroy");
2434 }