1 /*
2 * @file rpmsg-rpc.c
3 *
4 * @brief devctl handler for RPC component.
5 *
6 * ============================================================================
7 *
8 * Copyright (c) 2013, Texas Instruments Incorporated
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * * Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * * Neither the name of Texas Instruments Incorporated nor the names of
22 * its contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
32 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
33 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
34 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
35 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Contact information for paper mail:
37 * Texas Instruments
38 * Post Office Box 655303
39 * Dallas, Texas 75265
40 * Contact information:
41 * http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
42 * DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
43 * ============================================================================
44 *
45 */
48 /* Standard headers */
49 #include <ti/syslink/Std.h>
51 /* OSAL & Utils headers */
52 #include <ti/syslink/utils/List.h>
53 #include <ti/syslink/utils/String.h>
54 #include <ti/syslink/utils/Trace.h>
55 #include <ti/syslink/utils/Memory.h>
56 #include <ti/syslink/utils/IGateProvider.h>
57 #include <ti/syslink/utils/GateSpinlock.h>
58 #include <_MultiProc.h>
/* QNX-specific header includes */
61 #include <errno.h>
62 #include <unistd.h>
63 #include <sys/iofunc.h>
64 #include <sys/dispatch.h>
65 #include <sys/netmgr.h>
66 #include <devctl.h>
68 /* Module headers */
69 //#include <ti/ipc/omap_rpc.h>
70 #include <ti/ipc/rpmsg_rpc.h>
71 #include <ti/ipc/MessageQCopy.h>
72 #include <_MessageQCopy.h>
73 #include <_MessageQCopyDefs.h>
74 #include "OsalSemaphore.h"
75 #include "std_qnx.h"
76 #include <pthread.h>
78 #include <memmgr/tilermem.h>
79 #include <memmgr/tiler.h>
81 #include "rpmsg-rpc.h"
82 #include <rpmsg.h>
84 #define PRIORITY_REALTIME_LOW 29
86 extern int mem_offset64_peer(pid_t pid, const uintptr_t addr, size_t len,
87 off64_t *offset, size_t *contig_len);
89 static MsgList_t *nl_cache;
90 static int num_nl = 0;
91 static WaitingReaders_t *wr_cache;
92 static int num_wr = 0;
94 /*
95 * Instead of constantly allocating and freeing the notifier structures
96 * we just cache a few of them, and recycle them instead.
97 * The cache count is set with CACHE_NUM in rpmsg-rpc.h.
98 */
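/*
 * Note: the caches themselves are not protected; callers are expected to
 * hold rpmsg_rpc_state.lock (see the "we have the right locks" comments on
 * the list helpers below) or to be otherwise serialized during module
 * setup/teardown.
 */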
100 static MsgList_t *get_nl()
101 {
102 MsgList_t *item;
103 item = nl_cache;
104 if (item != NULL) {
105 nl_cache = nl_cache->next;
106 num_nl--;
107 } else {
108 item = Memory_alloc(NULL, sizeof(MsgList_t), 0, NULL);
109 }
110 return(item);
111 }
113 static void put_nl(MsgList_t *item)
114 {
115 if (num_nl >= CACHE_NUM) {
116 Memory_free(NULL, item, sizeof(*item));
117 } else {
118 item->next = nl_cache;
119 nl_cache = item;
120 num_nl++;
121 }
122 return;
123 }
125 static WaitingReaders_t *get_wr()
126 {
127 WaitingReaders_t *item;
128 item = wr_cache;
129 if (item != NULL) {
130 wr_cache = wr_cache->next;
131 num_wr--;
132 } else {
133 item = Memory_alloc(NULL, sizeof(WaitingReaders_t), 0, NULL);
134 }
135 return(item);
136 }
138 static void put_wr(WaitingReaders_t *item)
139 {
140 if (num_wr >= CACHE_NUM) {
141 Memory_free(NULL, item, sizeof(*item));
142 } else {
143 item->next = wr_cache;
144 wr_cache = item;
145 num_wr++;
146 }
147 return;
148 }
150 /* structure to hold rpmsg-rpc device information */
151 typedef struct named_device {
152 iofunc_mount_t mattr;
153 iofunc_attr_t cattr;
154 int resmgr_id;
155 pthread_mutex_t mutex;
156 iofunc_funcs_t mfuncs;
157 resmgr_connect_funcs_t cfuncs;
158 resmgr_io_funcs_t iofuncs;
159 char device_name[_POSIX_PATH_MAX];
160 } named_device_t;
162 /* rpmsg-rpc device structure */
163 typedef struct rpmsg_rpc_dev {
164 dispatch_t * dpp;
165 thread_pool_t * tpool;
166 named_device_t rpmsg_rpc;
167 } rpmsg_rpc_dev_t;
169 /*!
170 * @brief Remote connection object
171 */
172 typedef struct rpmsg_rpc_conn_object {
173 rpmsg_rpc_dev_t * dev;
174 MessageQCopy_Handle mq;
175 UInt32 addr;
176 UInt16 procId;
177 ProcMgr_Handle procH;
178 UInt32 numFuncs;
179 } rpmsg_rpc_conn_object;
181 /*!
182 * @brief rpc instance object
183 */
184 typedef struct rpmsg_rpc_object_tag {
185 MessageQCopy_Handle mq;
186 rpmsg_rpc_conn_object * conn;
187 UInt32 addr;
188 UInt32 remoteAddr;
189 UInt16 procId;
190 pid_t pid;
191 Bool created;
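    /* ionotify()/select() notification entries: input, output, out-of-band */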
192 iofunc_notify_t notify[3];
193 } rpmsg_rpc_object;
195 /*!
 * @brief Structure of Event callback argument passed to the register function.
197 */
198 typedef struct rpmsg_rpc_EventCbck_tag {
199 List_Elem element;
200 /*!< List element header */
201 rpmsg_rpc_object * rpc;
202 /*!< User rpc info pointer. Passed back to user callback function */
203 UInt32 pid;
204 /*!< Process Identifier for user process. */
205 } rpmsg_rpc_EventCbck ;
207 /*!
208 * @brief Keeps the information related to Event.
209 */
210 typedef struct rpmsg_rpc_EventState_tag {
211 List_Handle bufList;
212 /*!< Head of received event list. */
213 UInt32 pid;
214 /*!< User process ID. */
215 rpmsg_rpc_object * rpc;
216 /*!< User rpc comp. */
217 UInt32 refCount;
218 /*!< Reference count, used when multiple Notify_registerEvent are called
219 from same process space (multi threads/processes). */
220 WaitingReaders_t * head;
221 /*!< Waiting readers head. */
222 WaitingReaders_t * tail;
223 /*!< Waiting readers tail. */
224 } rpmsg_rpc_EventState;
226 /*!
227 * @brief Per-connection information
228 */
229 typedef struct rpmsg_rpc_ocb {
230 iofunc_ocb_t hdr;
231 pid_t pid;
232 rpmsg_rpc_object * rpc;
233 } rpmsg_rpc_ocb_t;
235 typedef struct rpmsg_rpc_name {
236 char name[RPMSG_NAME_SIZE];
237 }rpmsg_rpc_name_t;
239 static struct rpmsg_rpc_name rpmsg_rpc_names[] = {
240 {.name = "rpmsg-rpc"},
241 };
#define NUM_RPMSG_RPC_QUEUES (sizeof(rpmsg_rpc_names)/sizeof(*rpmsg_rpc_names))
245 /*!
246 * @brief rpmsg-rpc Module state object
247 */
248 typedef struct rpmsg_rpc_ModuleObject_tag {
249 Bool isSetup;
250 /*!< Indicates whether the module has been already setup */
251 Bool openRefCount;
252 /*!< Open reference count. */
253 IGateProvider_Handle gateHandle;
254 /*!< Handle of gate to be used for local thread safety */
255 rpmsg_rpc_EventState eventState [MAX_PROCESSES];
256 /*!< List for all user processes registered. */
257 rpmsg_rpc_conn_object * objects [MAX_CONNS];
258 /*!< List of all remote connections. */
259 MessageQCopy_Handle mqHandle[NUM_RPMSG_RPC_QUEUES];
260 /*!< Local mq handle associated with this module */
261 UInt32 endpoint[NUM_RPMSG_RPC_QUEUES];
262 /*!< Local endpoint associated with the mq handle */
263 OsalSemaphore_Handle sem;
264 /*!< Handle to semaphore used for rpc instance connection notifications */
265 pthread_t nt;
266 /*!< notifier thread */
267 pthread_mutex_t lock;
268 /*!< protection between notifier and event */
269 pthread_cond_t cond;
270 /*!< protection between notifier and event */
271 MsgList_t *head;
272 /*!< list head */
273 MsgList_t *tail;
274 /*!< list tail */
275 int run;
276 /*!< notifier thread must keep running */
277 } rpmsg_rpc_ModuleObject;
279 /*!
280 * @brief Structure of Event Packet read from notify kernel-side.
281 */
282 typedef struct rpmsg_rpc_EventPacket_tag {
283 List_Elem element;
284 /*!< List element header */
285 UInt32 pid;
286 /* Processor identifier */
287 rpmsg_rpc_object * obj;
288 /*!< Pointer to the channel associated with this callback */
289 UInt8 data[MessageQCopy_BUFSIZE];
290 /*!< Data associated with event. */
291 UInt32 len;
292 /*!< Length of the data associated with event. */
293 UInt32 src;
294 /*!< Src endpoint associated with event. */
295 struct rpmsg_rpc_EventPacket * next;
296 struct rpmsg_rpc_EventPacket * prev;
297 } rpmsg_rpc_EventPacket ;
300 /*
301 * Instead of constantly allocating and freeing the uBuf structures
302 * we just cache a few of them, and recycle them instead.
303 * The cache count is set with CACHE_NUM in rpmsg-rpc.h.
304 */
305 static rpmsg_rpc_EventPacket *uBuf_cache;
306 static int num_uBuf = 0;
308 static void flush_uBuf()
309 {
310 rpmsg_rpc_EventPacket *uBuf = NULL;
312 while(uBuf_cache) {
313 num_uBuf--;
314 uBuf = uBuf_cache;
315 uBuf_cache = (rpmsg_rpc_EventPacket *)uBuf_cache->next;
316 Memory_free(NULL, uBuf, sizeof(*uBuf));
317 }
318 }
320 static rpmsg_rpc_EventPacket *get_uBuf()
321 {
322 rpmsg_rpc_EventPacket *uBuf;
323 uBuf = uBuf_cache;
324 if (uBuf != NULL) {
325 uBuf_cache = (rpmsg_rpc_EventPacket *)uBuf_cache->next;
326 num_uBuf--;
327 } else {
328 uBuf = Memory_alloc(NULL, sizeof(rpmsg_rpc_EventPacket), 0, NULL);
329 }
330 return(uBuf);
331 }
333 static void put_uBuf(rpmsg_rpc_EventPacket * uBuf)
334 {
335 if (num_uBuf >= CACHE_NUM) {
336 Memory_free(NULL, uBuf, sizeof(*uBuf));
337 } else {
338 uBuf->next = (struct rpmsg_rpc_EventPacket *)uBuf_cache;
339 uBuf_cache = uBuf;
340 num_uBuf++;
341 }
342 return;
343 }
346 /** ============================================================================
347 * Globals
348 * ============================================================================
349 */
350 /*!
351 * @var rpmsg_rpc_state
352 *
353 * @brief rpmsg-rpc state object variable
354 */
355 static rpmsg_rpc_ModuleObject rpmsg_rpc_state =
356 {
357 .gateHandle = NULL,
358 .isSetup = FALSE,
359 .openRefCount = 0,
360 .nt = 0,
361 .lock = PTHREAD_MUTEX_INITIALIZER,
362 .cond = PTHREAD_COND_INITIALIZER,
363 .head = NULL,
364 .tail = NULL,
365 .run = 0
366 };
368 extern dispatch_t * syslink_dpp;
371 static MsgList_t *find_nl(int index)
372 {
373 MsgList_t *item=NULL;
374 item = rpmsg_rpc_state.head;
375 while (item) {
376 if (item->index == index)
377 return(item);
378 item = item->next;
379 }
380 return(item);
381 }
383 /* we have the right locks when calling this function */
384 /*!
385 * @brief Function to enqueue a notify list item.
386 *
387 * @param index Index of the client process associated with the item.
388 *
389 * @sa find_nl
390 * get_nl
391 */
392 static int enqueue_notify_list(int index)
393 {
394 MsgList_t *item;
395 item = find_nl(index);
396 if (item == NULL) {
397 item = get_nl();
398 if (item == NULL) {
399 return(-1);
400 }
401 item->next = NULL;
402 item->index = index;
403 item->num_events=1;
404 if (rpmsg_rpc_state.head == NULL) {
405 rpmsg_rpc_state.head = item;
406 rpmsg_rpc_state.tail = item;
407 item->prev = NULL;
408 }
409 else {
410 item->prev = rpmsg_rpc_state.tail;
411 rpmsg_rpc_state.tail->next = item;
412 rpmsg_rpc_state.tail = item;
413 }
414 }
415 else {
416 item->num_events++;
417 }
418 return(0);
419 }
421 /* we have the right locks when calling this function */
422 /*!
423 * @brief Function to dequeue a notify list item.
424 *
425 * @param item The item to remove.
426 *
427 * @sa put_nl
428 */
429 static inline int dequeue_notify_list_item(MsgList_t *item)
430 {
431 int index;
432 if (item == NULL) {
433 return(-1);
434 }
435 index = item->index;
436 item->num_events--;
437 if (item->num_events > 0) {
438 return(index);
439 }
440 if (rpmsg_rpc_state.head == item) {
441 // removing head
442 rpmsg_rpc_state.head = item->next;
443 if (rpmsg_rpc_state.head != NULL) {
444 rpmsg_rpc_state.head->prev = NULL;
445 }
446 else {
447 // removing head and tail
448 rpmsg_rpc_state.tail = NULL;
449 }
450 }
451 else {
452 item->prev->next = item->next;
453 if (item->next != NULL) {
454 item->next->prev = item->prev;
455 }
456 else {
457 // removing tail
458 rpmsg_rpc_state.tail = item->prev;
459 }
460 }
461 put_nl(item);
462 return(index);
463 }
465 /* we have the right locks when calling this function */
466 /*!
467 * @brief Function to add a waiting reader to the list.
468 *
469 * @param index Index of the client process waiting reader to add.
470 * @param rcvid Receive ID of the client process that was passed
471 * when the client called read().
472 *
473 * @sa None
474 */
475 static int enqueue_waiting_reader(int index, int rcvid)
476 {
477 WaitingReaders_t *item;
478 item = get_wr();
479 if (item == NULL) {
480 return(-1);
481 }
482 item->rcvid = rcvid;
483 item->next = NULL;
484 if (rpmsg_rpc_state.eventState [index].head == NULL) {
485 rpmsg_rpc_state.eventState [index].head = item;
486 rpmsg_rpc_state.eventState [index].tail = item;
487 }
488 else {
489 rpmsg_rpc_state.eventState [index].tail->next = item;
490 rpmsg_rpc_state.eventState [index].tail = item;
491 }
492 return(EOK);
493 }
495 /* we have the right locks when calling this function */
496 /* caller frees item */
497 /*!
498 * @brief Function to remove a waiting reader from the list.
499 *
500 * @param index Index of the client process waiting reader to dequeue.
501 *
502 * @sa None
503 */
504 static WaitingReaders_t *dequeue_waiting_reader(int index)
505 {
506 WaitingReaders_t *item = NULL;
507 if (rpmsg_rpc_state.eventState [index].head) {
508 item = rpmsg_rpc_state.eventState [index].head;
509 rpmsg_rpc_state.eventState [index].head = rpmsg_rpc_state.eventState [index].head->next;
510 if (rpmsg_rpc_state.eventState [index].head == NULL) {
511 rpmsg_rpc_state.eventState [index].tail = NULL;
512 }
513 }
514 return(item);
515 }
517 /*!
 * @brief Function to find a specified waiting reader and remove it from the list.
519 *
520 * @param index Index of the client process waiting for the message.
521 * @param rcvid Receive ID of the client process that was passed
522 * when the client called read().
523 *
524 * @sa None
525 */
527 static WaitingReaders_t *find_waiting_reader(int index, int rcvid)
528 {
529 WaitingReaders_t *item = NULL;
530 WaitingReaders_t *prev = NULL;
531 if (rpmsg_rpc_state.eventState [index].head) {
532 item = rpmsg_rpc_state.eventState [index].head;
533 while (item) {
534 if (item->rcvid == rcvid) {
535 /* remove item from list */
536 if (prev)
537 prev->next = item->next;
538 if (item == rpmsg_rpc_state.eventState [index].head)
539 rpmsg_rpc_state.eventState [index].head = item->next;
540 break;
541 }
542 else {
543 prev = item;
544 item = item->next;
545 }
546 }
547 }
548 return item;
549 }
551 /*!
552 * @brief Function used to check if there is a waiting reader with an
553 * event (message) ready to be delivered.
554 *
555 * @param index Index of the client process waiting for the message.
556 * @param item Pointer to the waiting reader.
557 *
558 * @sa dequeue_notify_list_item
559 * dequeue_waiting_reader
560 */
562 static int find_available_reader_and_event(int *index, WaitingReaders_t **item)
563 {
564 MsgList_t *temp;
565 if (rpmsg_rpc_state.head == NULL) {
566 return(0);
567 }
568 temp = rpmsg_rpc_state.head;
569 while (temp) {
570 if (rpmsg_rpc_state.eventState [temp->index].head) {
571 // event and reader found
572 if (dequeue_notify_list_item(temp) >= 0) {
573 *index = temp->index;
574 *item = dequeue_waiting_reader(temp->index);
575 }
576 else {
577 /* error occurred, return 0 as item has not been set */
578 return(0);
579 }
580 return(1);
581 }
582 temp = temp->next;
583 }
584 return(0);
585 }
587 /*!
588 * @brief Function used to deliver the notification to the client that
589 * it has received a message.
590 *
 * @param index Index of the client process receiving the message.
592 * @param rcvid Receive ID of the client process that was passed
593 * when the client called read().
594 *
595 * @sa put_uBuf
596 */
598 static void deliver_notification(int index, int rcvid)
599 {
600 int err = EOK;
601 rpmsg_rpc_EventPacket * uBuf = NULL;
603 uBuf = (rpmsg_rpc_EventPacket *) List_get (rpmsg_rpc_state.eventState [index].bufList);
605 /* Let the check remain at run-time. */
606 if (uBuf != NULL) {
607 err = MsgReply(rcvid, uBuf->len, uBuf->data, uBuf->len);
608 if (err == -1)
609 perror("deliver_notification: MsgReply");
610 /* Free the processed event callback packet. */
611 put_uBuf(uBuf);
612 }
613 else {
614 MsgReply(rcvid, EOK, NULL, 0);
615 }
616 return;
617 }
619 /*!
620 * @brief Thread used for notifying waiting readers of messages.
621 *
622 * @param arg Thread-specific private arg.
623 *
624 * @sa find_available_reader_and_event
625 * deliver_notification
626 * put_wr
627 */
628 static void *notifier_thread(void *arg)
629 {
630 int status;
631 int index;
632 WaitingReaders_t *item = NULL;
633 pthread_mutex_lock(&rpmsg_rpc_state.lock);
634 while (rpmsg_rpc_state.run) {
635 status = find_available_reader_and_event(&index, &item);
636 if ( (status == 0) || (item == NULL) ) {
637 status = pthread_cond_wait(&rpmsg_rpc_state.cond, &rpmsg_rpc_state.lock);
638 if ((status != EOK) && (status != EINTR)) {
                // unexpected error from pthread_cond_wait; stop the notifier
640 break;
641 }
642 status = find_available_reader_and_event(&index, &item);
643 if ( (status == 0) || (item == NULL) ) {
644 continue;
645 }
646 }
647 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
648 // we have unlocked, and now we have an event to deliver
649 // we deliver one event at a time, relock, check and continue
650 deliver_notification(index, item->rcvid);
651 pthread_mutex_lock(&rpmsg_rpc_state.lock);
652 put_wr(item);
653 }
654 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
655 return(NULL);
656 }
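/*!
 * @brief Handler for the RPPC_IOC_CREATE devctl command.
 *
 *        Sends an RPPC_MSG_CREATE_INSTANCE message carrying the requested
 *        service name to the remote server, then waits on the module
 *        semaphore until _rpmsg_rpc_cb processes the matching
 *        RPPC_MSG_INSTANCE_CREATED reply or the wait times out.
 *
 * @param ctp  Thread's associated context information.
 * @param msg  The actual devctl() message.
 * @param ocb  OCB associated with the client's session.
 *
 * @sa _rpmsg_rpc_cb
 */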
659 static
660 Int
661 _rpmsg_rpc_create(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_rpc_ocb_t *ocb)
662 {
663 Int status = EOK;
664 struct rppc_create_instance * cargs =
665 (struct rppc_create_instance *)(_DEVCTL_DATA (msg->i));
666 struct rppc_msg_header * msg_hdr = NULL;
667 rpmsg_rpc_object * rpc = ocb->rpc;
668 Char * msg_data = NULL;
669 UInt8 buf[sizeof(struct rppc_create_instance) + sizeof(struct rppc_msg_header)];
671 if (rpc->created == TRUE) {
672 GT_0trace(curTrace, GT_4CLASS, "Already created.");
673 status = (EINVAL);
674 }
675 else if ((ctp->info.msglen - sizeof(msg->i)) <
676 sizeof (struct rppc_create_instance)) {
677 status = (EINVAL);
678 }
679 else if (String_nlen(cargs->name, 47) == -1) {
680 status = (EINVAL);
681 }
682 else {
683 msg_hdr = (struct rppc_msg_header *)buf;
684 msg_hdr->msg_type = RPPC_MSG_CREATE_INSTANCE;
685 msg_hdr->msg_len = sizeof(struct rppc_create_instance);
686 msg_data = (Char *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
687 Memory_copy(msg_data, cargs, sizeof(struct rppc_create_instance));
689 status = MessageQCopy_send (rpc->conn->procId, // remote procid
690 MultiProc_self(), // local procid
691 rpc->conn->addr, // remote server
692 rpc->addr, // local address
693 buf, // connect msg
694 sizeof(buf), // msg size
695 TRUE); // wait for available bufs
696 if (status != MessageQCopy_S_SUCCESS) {
697 GT_0trace(curTrace, GT_4CLASS, "Failed to send create message.");
698 status = (EIO);
699 }
700 else {
701 status = OsalSemaphore_pend(rpmsg_rpc_state.sem, 5000);
702 if (rpc->created == TRUE) {
703 msg->o.ret_val = EOK;
704 status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o)));
705 }
706 else if (status < 0) {
707 GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
708 status = (EIO);
709 }
710 else {
711 status = (ETIMEDOUT);
712 }
713 }
714 }
716 return status;
717 }
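/*!
 * @brief Disconnect a client's rpc instance from the remote server.
 *
 *        Sends an RPPC_MSG_DESTROY_INSTANCE message for the client's remote
 *        endpoint and waits on the module semaphore until _rpmsg_rpc_cb
 *        processes the RPPC_MSG_INSTANCE_DESTROYED reply or the wait times
 *        out. Called from rpmsg_rpc_ocb_free when a still-connected client
 *        closes, in which case ctp and msg are NULL.
 *
 * @param ctp  Thread's associated context information (may be NULL).
 * @param msg  The actual devctl() message (may be NULL).
 * @param ocb  OCB associated with the client's session.
 *
 * @sa _rpmsg_rpc_cb
 */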
720 static
721 Int
722 _rpmsg_rpc_destroy(resmgr_context_t *ctp, io_devctl_t *msg,
723 rpmsg_rpc_ocb_t *ocb)
724 {
725 Int status = EOK;
726 struct rppc_msg_header * hdr = NULL;
727 rpmsg_rpc_object * rpc = ocb->rpc;
728 UInt8 buf[sizeof(struct rppc_instance_handle) + sizeof(struct rppc_msg_header)];
729 struct rppc_instance_handle * instance = NULL;
731 if (rpc->created != TRUE) {
732 GT_0trace(curTrace, GT_4CLASS, "Already destroyed.");
733 status = (EINVAL);
734 }
735 else {
736 hdr = (struct rppc_msg_header *)buf;
737 hdr->msg_type = RPPC_MSG_DESTROY_INSTANCE;
738 hdr->msg_len = sizeof(struct rppc_instance_handle);
739 instance = (struct rppc_instance_handle *)((UInt32)hdr + sizeof(struct rppc_msg_header));
740 instance->endpoint_address = rpc->remoteAddr;
741 instance->status = 0;
743 status = MessageQCopy_send (rpc->conn->procId, // remote procid
744 MultiProc_self(), // local procid
745 rpc->conn->addr, // remote server
746 rpc->addr, // local address
747 buf, // connect msg
748 sizeof(buf), // msg size
749 TRUE); // wait for available bufs
750 if (status != MessageQCopy_S_SUCCESS) {
751 GT_0trace(curTrace, GT_4CLASS, "Failed to send disconnect message.");
752 status = (EIO);
753 }
754 else {
755 status = OsalSemaphore_pend(rpmsg_rpc_state.sem, 5000);
            if (rpc->created == FALSE) {
                status = EOK;
            }
            else if (status < 0) {
                GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
                status = (EIO);
            }
            else {
                status = (ETIMEDOUT);
            }
763 }
764 }
766 return status;
767 }
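/**
 * Handler for devctl() requests.
 *
 * Runs the default devctl handling first and then dispatches the
 * rpmsg-rpc-specific commands (currently only RPPC_IOC_CREATE).
 *
 * \param ctp   Thread's associated context information.
 * \param msg   The actual devctl() message.
 * \param i_ocb OCB associated with client's session.
 *
 * \return POSIX errno value or resource manager status.
 */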
770 Int
771 rpmsg_rpc_devctl(resmgr_context_t *ctp, io_devctl_t *msg, IOFUNC_OCB_T *i_ocb)
772 {
773 Int status = 0;
774 rpmsg_rpc_ocb_t *ocb = (rpmsg_rpc_ocb_t *)i_ocb;
776 if ((status = iofunc_devctl_default(ctp, msg, &ocb->hdr)) != _RESMGR_DEFAULT)
777 return(_RESMGR_ERRNO(status));
778 status = 0;
780 switch (msg->i.dcmd)
781 {
782 case RPPC_IOC_CREATE:
783 status = _rpmsg_rpc_create (ctp, msg, ocb);
784 break;
785 #if 0
786 case RPPC_IOC_DESTROY:
787 status = _rpmsg_rpc_destroy (ctp, msg, ocb);
788 break;
789 #endif
790 default:
791 status = (ENOSYS);
792 break;
793 }
795 return status;
796 }
799 /*!
800 * @brief Attach a process to rpmsg-rpc user support framework.
801 *
 * @param rpc rpmsg-rpc instance object for the process being attached
803 *
804 * @sa _rpmsg_rpc_detach
805 */
806 static
807 Int
808 _rpmsg_rpc_attach (rpmsg_rpc_object * rpc)
809 {
810 Int32 status = EOK;
811 Bool flag = FALSE;
812 Bool isInit = FALSE;
813 List_Object * bufList = NULL;
814 IArg key = 0;
815 List_Params listparams;
816 UInt32 i;
818 GT_1trace (curTrace, GT_ENTER, "_rpmsg_rpc_attach", rpc);
820 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
821 for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
822 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
823 rpmsg_rpc_state.eventState [i].refCount++;
824 isInit = TRUE;
825 status = EOK;
826 break;
827 }
828 }
830 if (isInit == FALSE) {
831 List_Params_init (&listparams);
832 bufList = List_create (&listparams) ;
833 /* Search for an available slot for user process. */
834 for (i = 0 ; i < MAX_PROCESSES ; i++) {
835 if (rpmsg_rpc_state.eventState [i].rpc == NULL) {
836 rpmsg_rpc_state.eventState [i].rpc = rpc;
837 rpmsg_rpc_state.eventState [i].refCount = 1;
838 rpmsg_rpc_state.eventState [i].bufList = bufList;
839 flag = TRUE;
840 break;
841 }
842 }
844 /* No free slots found. Let this check remain at run-time,
845 * since it is dependent on user environment.
846 */
847 if (flag != TRUE) {
848 /*! @retval Notify_E_RESOURCE Maximum number of
849 supported user clients have already been registered. */
850 status = -ENOMEM;
851 GT_setFailureReason (curTrace,
852 GT_4CLASS,
853 "rpmsgDrv_attach",
854 status,
855 "Maximum number of supported user"
856 " clients have already been "
857 "registered.");
858 if (bufList != NULL) {
859 List_delete (&bufList);
860 }
861 }
862 }
863 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
865 GT_1trace (curTrace, GT_LEAVE, "rpmsgDrv_attach", status);
    /*! @retval EOK Operation successfully completed. */
868 return status ;
869 }
872 /*!
 * @brief This function adds a data buffer to a registered process.
 *
 * @param rpc RPC object associated with the client
876 * @param src Source address (endpoint) sending the data
877 * @param pid Process ID associated with the client
878 * @param data Data to be added
879 * @param len Length of data to be added
880 *
881 * @sa
882 */
883 Int
884 _rpmsg_rpc_addBufByPid (rpmsg_rpc_object *rpc,
885 UInt32 src,
886 UInt32 pid,
887 void * data,
888 UInt32 len)
889 {
890 Int32 status = EOK;
891 Bool flag = FALSE;
892 rpmsg_rpc_EventPacket * uBuf = NULL;
893 IArg key;
894 UInt32 i;
895 WaitingReaders_t *item;
896 MsgList_t *msgItem;
898 GT_5trace (curTrace,
899 GT_ENTER,
900 "_rpmsg_rpc_addBufByPid",
901 rpc,
902 src,
903 pid,
904 data,
905 len);
907 GT_assert (curTrace, (rpmsg_rpc_state.isSetup == TRUE));
909 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
910 /* Find the registration for this callback */
911 for (i = 0 ; i < MAX_PROCESSES ; i++) {
912 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
913 flag = TRUE;
914 break;
915 }
916 }
917 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
919 #if !defined(SYSLINK_BUILD_OPTIMIZE)
920 if (flag != TRUE) {
921 /*! @retval ENOMEM Could not find a registered handler
922 for this process. */
923 status = -ENOMEM;
924 GT_setFailureReason (curTrace,
925 GT_4CLASS,
926 "_rpmsgDrv_addBufByPid",
927 status,
928 "Could not find a registered handler "
929 "for this process.!");
930 }
931 else {
932 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
933 /* Allocate memory for the buf */
934 pthread_mutex_lock(&rpmsg_rpc_state.lock);
935 uBuf = get_uBuf();
936 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
938 #if !defined(SYSLINK_BUILD_OPTIMIZE)
939 if (uBuf == NULL) {
940 /*! @retval Notify_E_MEMORY Failed to allocate memory for event
941 packet for received callback. */
942 status = -ENOMEM;
943 GT_setFailureReason (curTrace,
944 GT_4CLASS,
945 "_rpmsgDrv_addBufByPid",
946 status,
947 "Failed to allocate memory for event"
948 " packet for received callback.!");
949 }
950 else {
951 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
952 List_elemClear (&(uBuf->element));
953 GT_assert (curTrace,
954 (rpmsg_rpc_state.eventState [i].bufList != NULL));
956 if (data) {
957 Memory_copy(uBuf->data, data, len);
958 }
959 uBuf->len = len;
961 List_put (rpmsg_rpc_state.eventState [i].bufList,
962 &(uBuf->element));
963 pthread_mutex_lock(&rpmsg_rpc_state.lock);
964 item = dequeue_waiting_reader(i);
965 if (item) {
966 // there is a waiting reader
967 deliver_notification(i, item->rcvid);
968 put_wr(item);
969 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
970 status = EOK;
971 }
972 else {
973 if (enqueue_notify_list(i) < 0) {
974 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
975 status = -ENOMEM;
976 GT_setFailureReason (curTrace,
977 GT_4CLASS,
978 "_rpmsgDrv_addBufByPid",
979 status,
980 "Failed to allocate memory for notifier");
981 }
982 else {
983 msgItem = find_nl(i);
984 /* TODO: rpc could be NULL in some cases */
985 if (rpc && msgItem) {
986 if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, msgItem->num_events, 0)) {
987 iofunc_notify_trigger(rpc->notify, msgItem->num_events, IOFUNC_NOTIFY_INPUT);
988 }
989 }
990 status = EOK;
991 pthread_cond_signal(&rpmsg_rpc_state.cond);
992 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
993 }
994 }
995 #if !defined(SYSLINK_BUILD_OPTIMIZE)
996 }
997 }
998 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1000 GT_1trace (curTrace, GT_LEAVE, "_rpmsgDrv_addBufByPid", status);
1002 return status;
1003 }
1006 /*!
1007 * @brief This function implements the callback registered with
1008 * MessageQCopy_create for each client. This function
1009 * adds the message from the remote proc to a list
1010 * where it is routed to the appropriate waiting reader.
1011 *
 * @param handle   Local MessageQCopy handle on which the message was received
 * @param data     Message payload received from the remote processor
 * @param len      Length of the received payload
 * @param priv     Private pointer (the rpmsg_rpc_object for this client)
 * @param src      Remote endpoint address that sent the message
 * @param srcProc  Remote processor ID that sent the message
1017 *
1018 * @sa
1019 */
1020 Void
1021 _rpmsg_rpc_cb (MessageQCopy_Handle handle, void * data, int len, void * priv,
1022 UInt32 src, UInt16 srcProc)
1023 {
1024 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1025 Int32 status = 0;
1026 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1027 rpmsg_rpc_object * rpc = NULL;
1028 struct rppc_msg_header * msg_hdr = NULL;
1029 struct rppc_instance_handle * instance;
1030 struct rppc_packet * packet = NULL;
1032 GT_6trace (curTrace,
1033 GT_ENTER,
1034 "_rpmsg_rpc_cb",
1035 handle,
1036 data,
1037 len,
1038 priv,
1039 src,
1040 srcProc);
1042 if (len < sizeof(struct rppc_msg_header)) {
1043 status = EINVAL;
1044 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb", status,
1045 "len is smaller than sizeof rppc_msg_header");
1046 return;
1047 }
1049 rpc = (rpmsg_rpc_object *) priv;
1050 msg_hdr = (struct rppc_msg_header *)data;
1052 switch (msg_hdr->msg_type) {
1053 case RPPC_MSG_INSTANCE_CREATED:
1054 if (msg_hdr->msg_len != sizeof(struct rppc_instance_handle)) {
1055 status = EINVAL;
1056 rpc->created = FALSE;
1057 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
1058 status, "msg_len is invalid");
1059 }
1060 else {
1061 instance = (struct rppc_instance_handle *)
1062 ((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
1063 rpc->remoteAddr = instance->endpoint_address;
1064 if (instance->status != 0) {
1065 rpc->created = FALSE;
1066 }
1067 else {
1068 rpc->created = TRUE;
1069 }
1070 }
1071 /* post the semaphore to have the ioctl reply */
1072 OsalSemaphore_post(rpmsg_rpc_state.sem);
1073 break;
1074 case RPPC_MSG_INSTANCE_DESTROYED:
1075 if (msg_hdr->msg_len != sizeof(struct rppc_instance_handle)) {
1076 status = EINVAL;
1077 rpc->created = FALSE;
1078 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
1079 status, "msg_len is invalid");
1080 }
1081 else {
1082 instance = (struct rppc_instance_handle *)
1083 ((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
1084 rpc->remoteAddr = instance->endpoint_address;
1085 if (instance->status != 0) {
1086 rpc->created = TRUE;
1087 }
1088 else {
1089 rpc->created = FALSE;
1090 }
1091 }
1092 /* post the semaphore to have the ioctl reply */
1093 OsalSemaphore_post(rpmsg_rpc_state.sem);
1094 break;
        case RPPC_MSG_CALL_FUNCTION:
            if ((len != sizeof(struct rppc_msg_header) + msg_hdr->msg_len) ||
                (msg_hdr->msg_len < sizeof(struct rppc_packet))) {
                status = EINVAL;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "msg_len is invalid");
                /* drop the malformed message instead of parsing it */
                break;
            }
            packet = (struct rppc_packet *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
            if (len != sizeof(struct rppc_msg_header) + sizeof(struct rppc_packet) + packet->data_size) {
                status = EINVAL;
                GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_cb",
                                    status, "data_size is inconsistent with len");
                break;
            }
1109 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1110 status =
1111 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1112 _rpmsg_rpc_addBufByPid (rpc,
1113 src,
1114 rpc->pid,
1115 &packet->fxn_id,
1116 sizeof(packet->fxn_id) + sizeof(packet->result));
1117 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1118 if (status < 0) {
1119 GT_setFailureReason (curTrace,
1120 GT_4CLASS,
1121 "_rpmsg_rpc_cb",
1122 status,
1123 "Failed to add callback packet for pid");
1124 }
1125 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1126 break;
1127 default:
1128 break;
1129 }
1131 GT_0trace (curTrace, GT_LEAVE, "_rpmsg_rpc_cb");
1132 }
1134 /**
1135 * Handler for ocb_calloc() requests.
1136 *
1137 * Special handler for ocb_calloc() requests that we export for control. An
1138 * open request from the client will result in a call to our special ocb_calloc
 * handler. This function attaches the client's pid using _rpmsg_rpc_attach
 * and allocates client-specific information. It also creates an
 * endpoint for the client to communicate with the RPC server on the
 * remote core.
1143 *
1144 * \param ctp Thread's associated context information.
1145 * \param device Device attributes structure.
1146 *
1147 * \return Pointer to an iofunc_ocb_t OCB structure.
1148 */
1150 IOFUNC_OCB_T *
1151 rpmsg_rpc_ocb_calloc (resmgr_context_t * ctp, IOFUNC_ATTR_T * device)
1152 {
1153 rpmsg_rpc_ocb_t *ocb = NULL;
1154 rpmsg_rpc_object *obj = NULL;
1155 struct _msg_info cl_info;
1156 rpmsg_rpc_dev_t * dev = NULL;
1157 int i = 0;
1158 Bool found = FALSE;
1159 char path1[20];
1160 char path2[20];
1162 GT_2trace (curTrace, GT_ENTER, "rpmsg_rpc_ocb_calloc",
1163 ctp, device);
1165 /* Allocate the OCB */
1166 ocb = (rpmsg_rpc_ocb_t *) calloc (1, sizeof (rpmsg_rpc_ocb_t));
1167 if (ocb == NULL){
1168 errno = ENOMEM;
1169 return (NULL);
1170 }
1172 ocb->pid = ctp->info.pid;
1174 /* Allocate memory for the rpmsg object. */
1175 obj = Memory_calloc (NULL, sizeof (rpmsg_rpc_object), 0u, NULL);
1176 if (obj == NULL) {
1177 errno = ENOMEM;
1178 free(ocb);
1179 return (NULL);
1180 }
1181 else {
1182 ocb->rpc = obj;
1183 IOFUNC_NOTIFY_INIT(obj->notify);
1184 obj->created = FALSE;
1185 /* determine conn and procId for communication based on which device
1186 * was opened */
1187 MsgInfo(ctp->rcvid, &cl_info);
1188 resmgr_pathname(ctp->id, 0, path1, sizeof(path1));
1189 for (i = 0; i < MAX_CONNS; i++) {
1190 if (rpmsg_rpc_state.objects[i] != NULL) {
1191 dev = rpmsg_rpc_state.objects[i]->dev;
1192 resmgr_pathname(dev->rpmsg_rpc.resmgr_id, 0, path2,
1193 sizeof(path2));
1194 if (!strcmp(path1, path2)) {
1195 found = TRUE;
1196 break;
1197 }
1198 }
1199 }
1200 if (found) {
1201 obj->conn = rpmsg_rpc_state.objects[i];
1202 obj->procId = obj->conn->procId;
1203 obj->pid = ctp->info.pid;
1204 obj->mq = MessageQCopy_create (MessageQCopy_ADDRANY, NULL,
1205 _rpmsg_rpc_cb, obj, &obj->addr);
1206 if (obj->mq == NULL) {
1207 errno = ENOMEM;
1208 free(obj);
1209 free(ocb);
1210 return (NULL);
1211 }
1212 else {
1213 if (_rpmsg_rpc_attach (ocb->rpc) < 0) {
1214 errno = ENOMEM;
1215 MessageQCopy_delete (&obj->mq);
1216 free(obj);
1217 free(ocb);
1218 return (NULL);
1219 }
1220 }
        }
        else {
            /* no matching rpmsg-rpc device entry was found for this open() */
            errno = ENOENT;
            free(obj);
            free(ocb);
            return (NULL);
        }
1222 }
1224 GT_1trace (curTrace, GT_LEAVE, "rpmsg_rpc_ocb_calloc", ocb);
1226 return (IOFUNC_OCB_T *)(ocb);
1227 }
1230 /*!
1231 * @brief Detach a process from rpmsg-rpc user support framework.
1232 *
1233 * @param pid Process identifier
1234 *
1235 * @sa _rpmsg_rpc_attach
1236 */
1237 static
1238 Int
1239 _rpmsg_rpc_detach (rpmsg_rpc_object * rpc)
1240 {
1241 Int32 status = EOK;
1242 Int32 tmpStatus = EOK;
1243 Bool flag = FALSE;
1244 List_Object * bufList = NULL;
1245 UInt32 i;
1246 IArg key;
1247 MsgList_t * item;
1248 WaitingReaders_t * wr = NULL;
1249 struct _msg_info info;
1251 GT_1trace (curTrace, GT_ENTER, "rpmsg_rpc_detach", rpc);
1253 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
1255 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1256 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1257 if (rpmsg_rpc_state.eventState [i].refCount == 1) {
1258 rpmsg_rpc_state.eventState [i].refCount = 0;
1260 flag = TRUE;
1261 break;
1262 }
1263 else {
1264 rpmsg_rpc_state.eventState [i].refCount--;
1265 status = EOK;
1266 break;
1267 }
1268 }
1269 }
1270 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
1272 if (flag == TRUE) {
1273 key = IGateProvider_enter (rpmsg_rpc_state.gateHandle);
1274 /* Last client being unregistered for this process. */
1275 rpmsg_rpc_state.eventState [i].rpc = NULL;
1277 /* Store in local variable to delete outside lock. */
1278 bufList = rpmsg_rpc_state.eventState [i].bufList;
1280 rpmsg_rpc_state.eventState [i].bufList = NULL;
1282 IGateProvider_leave (rpmsg_rpc_state.gateHandle, key);
1283 }
1285 if (flag != TRUE) {
1286 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1287 if (i == MAX_PROCESSES) {
1288 /*! @retval Notify_E_NOTFOUND The specified user process was
1289 not found registered with Notify Driver module. */
1290 status = -ENOMEM;
1291 GT_setFailureReason (curTrace,
1292 GT_4CLASS,
1293 "rpmsg_rpc_detach",
1294 status,
1295 "The specified user process was not found"
1296 " registered with rpmsg Driver module.");
1297 }
1298 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1299 }
1300 else {
1301 if (bufList != NULL) {
1302 /* Dequeue waiting readers and reply to them */
1303 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1304 while ((wr = dequeue_waiting_reader(i)) != NULL) {
1305 /* Check if rcvid is still valid */
1306 if (MsgInfo(wr->rcvid, &info) != -1) {
1307 put_wr(wr);
1308 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1309 MsgError(wr->rcvid, EINTR);
1310 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1311 }
1312 }
1313 /* Check for pending ionotify/select calls */
1314 if (rpc) {
1315 if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, 1, 0)) {
1316 iofunc_notify_trigger(rpc->notify, 1, IOFUNC_NOTIFY_INPUT);
1317 }
1318 }
1320 /* Free event packets for any received but unprocessed events. */
1321 while ((item = find_nl(i)) != NULL) {
1322 if (dequeue_notify_list_item(item) >= 0) {
1323 rpmsg_rpc_EventPacket * uBuf = NULL;
1325 uBuf = (rpmsg_rpc_EventPacket *) List_get (bufList);
1327 /* Let the check remain at run-time. */
1328 if (uBuf != NULL) {
1329 put_uBuf(uBuf);
1330 }
1331 }
1332 }
1333 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1335 /* Last client being unregistered with Notify module. */
1336 List_delete (&bufList);
1337 }
1339 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1340 if ((tmpStatus < 0) && (status >= 0)) {
1341 status = tmpStatus;
1342 GT_setFailureReason (curTrace,
1343 GT_4CLASS,
1344 "rpmsg_rpc_detach",
1345 status,
1346 "Failed to delete termination semaphore!");
1347 }
1348 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1349 }
1351 GT_1trace (curTrace, GT_LEAVE, "rpmsg_rpc_detach", status);
    /*! @retval EOK Operation successfully completed */
1354 return status;
1355 }
1357 /**
1358 * Handler for ocb_free() requests.
1359 *
1360 * Special handler for ocb_free() requests that we export for control. A
1361 * close request from the client will result in a call to our special ocb_free
 * handler. This function detaches the client's pid using _rpmsg_rpc_detach
1363 * and frees any client-specific information that was allocated.
1364 *
1365 * \param i_ocb OCB associated with client's session.
1366 *
 * \return None.
1370 */
1372 void
1373 rpmsg_rpc_ocb_free (IOFUNC_OCB_T * i_ocb)
1374 {
1375 rpmsg_rpc_ocb_t * ocb = (rpmsg_rpc_ocb_t *)i_ocb;
1376 rpmsg_rpc_object *obj;
1378 if (ocb && ocb->rpc) {
1379 obj = ocb->rpc;
1380 if (obj->created == TRUE) {
1381 /* Need to disconnect this device */
1382 _rpmsg_rpc_destroy(NULL, NULL, ocb);
1383 }
1384 _rpmsg_rpc_detach(ocb->rpc);
1385 if (obj->mq) {
1386 MessageQCopy_delete (&obj->mq);
1387 obj->mq = NULL;
1388 }
1389 free (obj);
1390 free (ocb);
1391 }
1392 }
1394 /**
1395 * Handler for close_ocb() requests.
1396 *
1397 * This function removes the notification entries associated with the current
1398 * client.
1399 *
1400 * \param ctp Thread's associated context information.
1401 * \param reserved This argument must be NULL.
1402 * \param ocb OCB associated with client's session.
1403 *
1404 * \return POSIX errno value.
1405 *
1406 * \retval EOK Success.
1407 */
1409 Int
1410 rpmsg_rpc_close_ocb (resmgr_context_t *ctp, void *reserved, RESMGR_OCB_T *ocb)
1411 {
1412 rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
1413 iofunc_notify_remove(ctp, rpc_ocb->rpc->notify);
1414 return (iofunc_close_ocb_default(ctp, reserved, ocb));
1415 }
1417 /**
1418 * Handler for read() requests.
1419 *
1420 * Handles special read() requests that we export for control. A read
1421 * request will get a message from the remote processor that is associated
1422 * with the client that is calling read().
1423 *
1424 * \param ctp Thread's associated context information.
1425 * \param msg The actual read() message.
1426 * \param ocb OCB associated with client's session.
1427 *
1428 * \return POSIX errno value.
1429 *
1430 * \retval EOK Success.
1431 * \retval EAGAIN Call is non-blocking and no messages available.
 * \retval ENOMEM Not enough memory to perform the read.
1433 */
1435 int rpmsg_rpc_read(resmgr_context_t *ctp, io_read_t *msg, RESMGR_OCB_T *ocb)
1436 {
1437 Int status;
1438 rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
1439 rpmsg_rpc_object * rpc = rpc_ocb->rpc;
1440 Bool flag = FALSE;
1441 Int retVal = EOK;
1442 UInt32 i;
1443 MsgList_t * item;
1444 Int nonblock;
1446 if ((status = iofunc_read_verify(ctp, msg, ocb, &nonblock)) != EOK)
1447 return (status);
1449 if (rpc->created != TRUE) {
1450 return (ENOTCONN);
1451 }
1453 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1454 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1455 flag = TRUE;
1456 break;
1457 }
1458 }
1460 /* Let the check remain at run-time. */
1461 if (flag == TRUE) {
1462 /* Let the check remain at run-time for handling any run-time
1463 * race conditions.
1464 */
1465 if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
1466 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1467 item = find_nl(i);
1468 if (dequeue_notify_list_item(item) < 0) {
1469 if (nonblock) {
1470 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1471 return EAGAIN;
1472 }
1473 else {
1474 retVal = enqueue_waiting_reader(i, ctp->rcvid);
1475 if (retVal == EOK) {
1476 pthread_cond_signal(&rpmsg_rpc_state.cond);
1477 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1478 return(_RESMGR_NOREPLY);
1479 }
1480 retVal = ENOMEM;
1481 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1482 }
1483 }
1484 else {
1485 deliver_notification(i, ctp->rcvid);
1486 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1487 return(_RESMGR_NOREPLY);
1488 }
1489 }
1490 }
    /* Return the errno status; the data itself is delivered via MsgReply(). */
1493 return retVal;
1494 }
1496 /**
1497 * Unblock read calls
1498 *
1499 * This function checks if the client is blocked on a read call and if so,
1500 * unblocks the client.
1501 *
1502 * \param ctp Thread's associated context information.
1503 * \param msg The pulse message.
1504 * \param ocb OCB associated with client's session.
1505 *
1506 * \return POSIX errno value.
1507 *
1508 * \retval EINTR The client has been unblocked.
1509 * \retval other The client has not been unblocked or the client was not
1510 * blocked.
1511 */
1513 int rpmsg_rpc_read_unblock(resmgr_context_t *ctp, io_pulse_t *msg, iofunc_ocb_t *ocb)
1514 {
1515 UInt32 i;
1516 Bool flag = FALSE;
1517 WaitingReaders_t * wr;
1518 rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
1519 rpmsg_rpc_object * rpc = rpc_ocb->rpc;
1521 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1522 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1523 flag = TRUE;
1524 break;
1525 }
1526 }
1528 /* Let the check remain at run-time. */
1529 if (flag == TRUE) {
1530 /* Let the check remain at run-time for handling any run-time
1531 * race conditions.
1532 */
1533 if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
1534 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1535 wr = find_waiting_reader(i, ctp->rcvid);
1536 if (wr) {
1537 put_wr(wr);
1538 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1539 return (EINTR);
1540 }
1541 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1542 }
1543 }
1545 return _RESMGR_NOREPLY;
1546 }
1548 /**
1549 * Handler for unblock() requests.
1550 *
1551 * Handles unblock request for the client which is requesting to no longer be
1552 * blocked on the rpmsg-rpc driver.
1553 *
1554 * \param ctp Thread's associated context information.
1555 * \param msg The pulse message.
1556 * \param ocb OCB associated with client's session.
1557 *
1558 * \return POSIX errno value.
1559 *
1560 * \retval EINTR The rcvid has been unblocked.
1561 */
1563 int rpmsg_rpc_unblock(resmgr_context_t *ctp, io_pulse_t *msg, RESMGR_OCB_T *ocb)
1564 {
1565 int status = _RESMGR_NOREPLY;
1566 struct _msg_info info;
1568 /*
1569 * Try to run the default unblock for this message.
1570 */
1571 if ((status = iofunc_unblock_default(ctp,msg,ocb)) != _RESMGR_DEFAULT) {
1572 return status;
1573 }
1575 /*
1576 * Check if rcvid is still valid and still has an unblock
1577 * request pending.
1578 */
1579 if (MsgInfo(ctp->rcvid, &info) == -1 ||
1580 !(info.flags & _NTO_MI_UNBLOCK_REQ)) {
1581 return _RESMGR_NOREPLY;
1582 }
1584 if (rpmsg_rpc_read_unblock(ctp, msg, ocb) != _RESMGR_NOREPLY) {
1585 return _RESMGR_ERRNO(EINTR);
1586 }
1588 return _RESMGR_ERRNO(EINTR);
1589 }
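/**
 * Translate a master-side physical address to a device address for the
 * remote processor.
 *
 * Addresses in the TILER range are passed through unchanged; all others are
 * converted with ProcMgr_translateAddr().
 *
 * \param handle ProcMgr handle for the remote processor.
 * \param pa     Master physical address to translate.
 *
 * \return The translated device address, or 0 on failure.
 */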
1592 uint32_t
1593 _rpmsg_rpc_pa2da(ProcMgr_Handle handle, uint32_t pa)
1594 {
1595 Int status = 0;
1596 uint32_t da;
1598 if (pa >= TILER_MEM_8BIT && pa < TILER_MEM_END) {
1599 return pa;
1600 }
1601 else {
1602 status = ProcMgr_translateAddr(handle, (Ptr *)&da,
1603 ProcMgr_AddrType_SlaveVirt,
1604 (Ptr)pa, ProcMgr_AddrType_MasterPhys);
1605 if (status >= 0)
1606 return da;
1607 else
1608 return 0;
1609 }
1610 }
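/**
 * Translate the pointers carried in a client's rppc_function so the remote
 * processor can dereference them.
 *
 * Two passes are made. The first applies the caller-supplied
 * rppc_param_translation entries: each referenced parameter buffer is mapped
 * into this process and the pointer stored at the given offset is rewritten
 * as a device address. The second pass converts the data address of every
 * RPPC_PARAM_TYPE_PTR parameter itself. Client virtual addresses are
 * resolved with mem_offset64_peer() and converted with _rpmsg_rpc_pa2da().
 *
 * \param handle ProcMgr handle for the remote processor.
 * \param data   Buffer holding the client's rppc_function.
 * \param bytes  Size of the buffer in bytes.
 * \param pid    Process ID of the client that issued the call.
 *
 * \return A non-negative value on success, or a negative errno value on
 *         failure.
 */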
1612 int
1613 _rpmsg_rpc_translate(ProcMgr_Handle handle, char *data, uint32_t bytes, pid_t pid)
1614 {
1615 int status = EOK;
1616 struct rppc_function * function = NULL;
1617 struct rppc_param_translation * translation = NULL;
1618 int i = 0;
1619 off64_t phys_addr;
1620 off64_t paddr[RPPC_MAX_PARAMETERS];
1621 uint32_t ipu_addr;
1622 size_t phys_len = 0;
1623 uintptr_t ptr;
1624 void * vptr[RPPC_MAX_PARAMETERS];
1625 uint32_t idx = 0;
1627 function = (struct rppc_function *)data;
1628 memset(vptr, 0, sizeof(void *) * RPPC_MAX_PARAMETERS);
1629 memset(paddr, 0, sizeof(off64_t) * RPPC_MAX_PARAMETERS);
1631 translation = (struct rppc_param_translation *)function->translations;
1632 for (i = 0; i < function->num_translations; i++) {
1633 idx = translation[i].index;
1634 if (idx >= function->num_params) {
1635 status = -EINVAL;
1636 break;
1637 }
1638 if (translation[i].offset + sizeof(uint32_t) > function->params[idx].size) {
1639 status = -EINVAL;
1640 break;
1641 }
1642 if (!vptr[idx]) {
1643 /* get the physical address of ptr */
1644 status = mem_offset64_peer(pid,
1645 function->params[idx].data,
1646 function->params[idx].size,
1647 &paddr[idx], &phys_len);
1648 if (status >= 0 && phys_len == function->params[idx].size) {
1649 /* map into my process space */
1650 vptr[idx] = mmap64(NULL, function->params[idx].size,
1651 PROT_NOCACHE | PROT_READ | PROT_WRITE,
1652 MAP_PHYS, NOFD, paddr[idx]);
                if (vptr[idx] == MAP_FAILED) {
1654 status = -ENOMEM;
1655 break;
1656 }
1657 }
1658 else {
1659 status = -EINVAL;
1660 break;
1661 }
1662 }
1663 /* Get physical address of the contents */
1664 ptr = (uint32_t)vptr[idx] + translation[i].offset;
1665 status = mem_offset64_peer(pid, *(uint32_t *)ptr, sizeof(uint32_t),
1666 &phys_addr, &phys_len);
1667 if (status >= 0 && phys_len == sizeof(uint32_t)) {
1668 /* translate pa2da */
1669 if ((ipu_addr =
1670 _rpmsg_rpc_pa2da(handle, (uint32_t)phys_addr)) != 0)
1671 /* update vptr contents */
1672 *(uint32_t *)ptr = ipu_addr;
1673 else {
1674 status = -EINVAL;
1675 break;
1676 }
1677 }
1678 else {
1679 status = -EINVAL;
1680 break;
1681 }
1682 }
1684 for (i = 0; i < function->num_params && status >= 0; i++) {
1685 if (function->params[i].type == RPPC_PARAM_TYPE_PTR) {
1686 if (paddr[i]) {
1687 phys_addr = paddr[i];
1688 }
1689 else {
1690 /* translate the param pointer */
1691 status = mem_offset64_peer(pid,
1692 (uintptr_t)(function->params[i].data),
1693 function->params[i].size, &phys_addr, &phys_len);
1694 }
1695 if (status >= 0) {
1696 if ((ipu_addr =
1697 _rpmsg_rpc_pa2da(handle, (uint32_t)phys_addr)) != 0)
1698 function->params[i].data = ipu_addr;
1699 else {
1700 status = -EINVAL;
1701 break;
1702 }
1703 }
1704 else {
1705 status = -EINVAL;
1706 break;
1707 }
1708 }
1709 }
1711 for (i = 0; i < function->num_params; i++) {
1712 if (vptr[i])
1713 munmap(vptr[i], function->params[i].size);
1714 }
1716 return status;
1717 }
1719 /**
1720 * Handler for write() requests.
1721 *
1722 * Handles special write() requests that we export for control. A write()
1723 * request will send a message to the remote processor which is associated with
1724 * the client.
1725 *
1726 * \param ctp Thread's associated context information.
1727 * \param msg The actual write() message.
1728 * \param io_ocb OCB associated with client's session.
1729 *
1730 * \return POSIX errno value.
1731 *
1732 * \retval EOK Success.
 * \retval ENOMEM Not enough memory to perform the write.
1734 * \retval EIO MessageQCopy_send failed.
1735 * \retval EINVAL msg->i.bytes is negative.
1736 */
1738 int
1739 rpmsg_rpc_write(resmgr_context_t *ctp, io_write_t *msg, RESMGR_OCB_T *io_ocb)
1740 {
1741 int status;
1742 char buf[MessageQCopy_BUFSIZE];
1743 int bytes;
1744 rpmsg_rpc_ocb_t * ocb = (rpmsg_rpc_ocb_t *)io_ocb;
1745 rpmsg_rpc_object * rpc = ocb->rpc;
1746 struct rppc_msg_header * msg_hdr = NULL;
1747 struct rppc_packet *packet = NULL;
1748 struct rppc_function *function = NULL;
1749 char usr_msg[MessageQCopy_BUFSIZE];
1750 int i = 0;
1752 if ((status = iofunc_write_verify(ctp, msg, io_ocb, NULL)) != EOK) {
1753 return (status);
1754 }
1756 bytes = ((int64_t) msg->i.nbytes) + sizeof(struct rppc_msg_header) > MessageQCopy_BUFSIZE ?
1757 MessageQCopy_BUFSIZE - sizeof(struct rppc_msg_header) : msg->i.nbytes;
1758 if (bytes < 0) {
1759 return EINVAL;
1760 }
1761 _IO_SET_WRITE_NBYTES (ctp, bytes);
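    /* Outgoing message layout: an rppc_msg_header immediately followed by an
     * rppc_packet that is marshalled from the client's rppc_function below. */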
1763 msg_hdr = (struct rppc_msg_header *)buf;
1764 packet = (struct rppc_packet *)((UInt32)msg_hdr + sizeof(struct rppc_msg_header));
1766 status = resmgr_msgread(ctp, usr_msg, bytes, sizeof(msg->i));
1767 if (status != bytes) {
1768 return (errno);
1769 }
1770 else if (bytes < sizeof(struct rppc_function)) {
1771 return (EINVAL);
1772 }
1773 function = (struct rppc_function *)usr_msg;
1775 if (bytes < sizeof(struct rppc_function) +
1776 (function->num_translations * \
1777 sizeof(struct rppc_param_translation))) {
1778 return (EINVAL);
1779 }
1781 /* check that we're in the correct state */
1782 if (rpc->created != TRUE) {
1783 return (EINVAL);
1784 }
1786 status = _rpmsg_rpc_translate(rpc->conn->procH, (char *)function, bytes,
1787 ctp->info.pid);
1788 if (status < 0) {
1789 return -status;
1790 }
1792 msg_hdr->msg_type = RPPC_MSG_CALL_FUNCTION;
1793 msg_hdr->msg_len = sizeof(struct rppc_packet);
1795 /* initialize the packet structure */
1796 packet->desc = RPPC_DESC_EXEC_SYNC;
1797 packet->msg_id = 0;
1798 packet->flags = (0x8000);//OMAPRPC_POOLID_DEFAULT;
1799 packet->fxn_id = RPPC_SET_FXN_IDX(function->fxn_id);
1800 packet->result = 0;
1801 packet->data_size = 0;
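    /* Marshal each parameter as a pair of 32-bit words: size followed by the
     * (already translated) data/device address. */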
1803 for (i = 0; i < function->num_params; i++) {
1804 ((UInt32 *)(packet->data))[i*2] = function->params[i].size;
1805 ((UInt32 *)(packet->data))[(i*2)+1] = function->params[i].data;
1806 packet->data_size += (sizeof(UInt32) * 2);
1807 }
1808 msg_hdr->msg_len += packet->data_size;
1810 status = MessageQCopy_send(rpc->conn->procId, MultiProc_self(),
1811 rpc->remoteAddr, rpc->addr, buf,
1812 msg_hdr->msg_len + sizeof(struct rppc_msg_header), TRUE);
1813 if (status < 0) {
1814 return (EIO);
1815 }
1817 return(EOK);
1818 }
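/*
 * Illustrative client-side flow (a sketch only; the exact structure layouts
 * and RPPC_IOC_* definitions come from ti/ipc/rpmsg_rpc.h):
 *
 *   int fd = open("/dev/rpmsg-rpc", O_RDWR);
 *
 *   struct rppc_create_instance create;      // connect to the remote service
 *   strncpy(create.name, "rpmsg-rpc", sizeof(create.name));
 *   devctl(fd, RPPC_IOC_CREATE, &create, sizeof(create), NULL);
 *
 *   struct rppc_function fxn;                // describe the call: fxn_id,
 *   ...                                      // params[] and any translations
 *   write(fd, &fxn, sizeof(fxn));            // issue the remote call
 *
 *   uint32_t ret[2];                         // read() blocks until the
 *   read(fd, ret, sizeof(ret));              // fxn_id/result pair arrives
 */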
1822 /**
1823 * Handler for notify() requests.
1824 *
1825 * Handles special notify() requests that we export for control. A notify
1826 * request results from the client calling select().
1827 *
1828 * \param ctp Thread's associated context information.
1829 * \param msg The actual notify() message.
1830 * \param ocb OCB associated with client's session.
1831 *
1832 * \return POSIX errno value.
1833 */
1835 Int rpmsg_rpc_notify( resmgr_context_t *ctp, io_notify_t *msg, RESMGR_OCB_T *ocb)
1836 {
1837 rpmsg_rpc_ocb_t * rpc_ocb = (rpmsg_rpc_ocb_t *)ocb;
1838 rpmsg_rpc_object * rpc = rpc_ocb->rpc;
1839 int trig;
1840 int i = 0;
1841 Bool flag = FALSE;
1842 MsgList_t * item = NULL;
1843 int status = EOK;
1845 trig = _NOTIFY_COND_OUTPUT; /* clients can give us data */
1847 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1848 if (rpmsg_rpc_state.eventState [i].rpc == rpc) {
1849 flag = TRUE;
1850 break;
1851 }
1852 }
1854 pthread_mutex_lock(&rpmsg_rpc_state.lock);
1855 /* Let the check remain at run-time. */
1856 if (flag == TRUE) {
1857 /* Let the check remain at run-time for handling any run-time
1858 * race conditions.
1859 */
1860 if (rpmsg_rpc_state.eventState [i].bufList != NULL) {
1861 item = find_nl(i);
1862 if (item && item->num_events > 0) {
1863 trig |= _NOTIFY_COND_INPUT;
1864 }
1865 }
1866 }
1867 status = iofunc_notify(ctp, msg, rpc_ocb->rpc->notify, trig, NULL, NULL);
1868 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
1869 return status;
1870 }
1872 /**
 * Detaches an rpmsg-rpc resource manager device name.
1874 *
1875 * \param dev The device to detach.
1876 *
1877 * \return POSIX errno value.
1878 */
1880 static
1881 Void
1882 _deinit_rpmsg_rpc_device (rpmsg_rpc_dev_t * dev)
1883 {
1884 resmgr_detach(syslink_dpp, dev->rpmsg_rpc.resmgr_id, _RESMGR_DETACH_CLOSE);
1886 pthread_mutex_destroy(&dev->rpmsg_rpc.mutex);
1888 free (dev);
1890 return;
1891 }
1893 /**
 * Initializes and attaches rpmsg-rpc resource manager functions to an
 * rpmsg-rpc device name.
 *
 * \param name The name to use for the device (appended to "/dev/").
 *
 * \return Pointer to the created rpmsg_rpc_dev_t device.
1900 */
1902 static
1903 rpmsg_rpc_dev_t *
1904 _init_rpmsg_rpc_device (char * name)
1905 {
1906 iofunc_attr_t * attr;
1907 resmgr_attr_t resmgr_attr;
1908 rpmsg_rpc_dev_t * dev = NULL;
1910 dev = malloc(sizeof(*dev));
1911 if (dev == NULL) {
1912 return NULL;
1913 }
1915 memset(&resmgr_attr, 0, sizeof resmgr_attr);
1916 resmgr_attr.nparts_max = 10;
1917 resmgr_attr.msg_max_size = 2048;
1918 memset(&dev->rpmsg_rpc.mattr, 0, sizeof(iofunc_mount_t));
1919 dev->rpmsg_rpc.mattr.flags = ST_NOSUID | ST_NOEXEC;
1920 dev->rpmsg_rpc.mattr.conf = IOFUNC_PC_CHOWN_RESTRICTED |
1921 IOFUNC_PC_NO_TRUNC |
1922 IOFUNC_PC_SYNC_IO;
1923 dev->rpmsg_rpc.mattr.funcs = &dev->rpmsg_rpc.mfuncs;
1924 memset(&dev->rpmsg_rpc.mfuncs, 0, sizeof(iofunc_funcs_t));
1925 dev->rpmsg_rpc.mfuncs.nfuncs = _IOFUNC_NFUNCS;
1926 dev->rpmsg_rpc.mfuncs.ocb_calloc = rpmsg_rpc_ocb_calloc;
1927 dev->rpmsg_rpc.mfuncs.ocb_free = rpmsg_rpc_ocb_free;
1928 iofunc_func_init(_RESMGR_CONNECT_NFUNCS, &dev->rpmsg_rpc.cfuncs,
1929 _RESMGR_IO_NFUNCS, &dev->rpmsg_rpc.iofuncs);
1930 iofunc_attr_init(attr = &dev->rpmsg_rpc.cattr, S_IFCHR | 0777, NULL, NULL);
1931 dev->rpmsg_rpc.iofuncs.devctl = rpmsg_rpc_devctl;
1932 dev->rpmsg_rpc.iofuncs.notify = rpmsg_rpc_notify;
1933 dev->rpmsg_rpc.iofuncs.close_ocb = rpmsg_rpc_close_ocb;
1934 dev->rpmsg_rpc.iofuncs.read = rpmsg_rpc_read;
1935 dev->rpmsg_rpc.iofuncs.write = rpmsg_rpc_write;
1936 dev->rpmsg_rpc.iofuncs.unblock = rpmsg_rpc_read_unblock;
1937 attr->mount = &dev->rpmsg_rpc.mattr;
1938 iofunc_time_update(attr);
1939 pthread_mutex_init(&dev->rpmsg_rpc.mutex, NULL);
1941 snprintf (dev->rpmsg_rpc.device_name, _POSIX_PATH_MAX, "/dev/%s", name);
1942 if (-1 == (dev->rpmsg_rpc.resmgr_id =
1943 resmgr_attach(syslink_dpp, &resmgr_attr,
1944 dev->rpmsg_rpc.device_name, _FTYPE_ANY, 0,
1945 &dev->rpmsg_rpc.cfuncs,
1946 &dev->rpmsg_rpc.iofuncs, attr))) {
1947 pthread_mutex_destroy(&dev->rpmsg_rpc.mutex);
1948 free(dev);
1949 return(NULL);
1950 }
1952 return(dev);
1953 }
1955 /**
1956 * Callback passed to MessageQCopy_registerNotify.
1957 *
1958 * This callback is called when a remote processor creates a MessageQCopy
1959 * handle with the same name as the local MessageQCopy handle and then
1960 * calls NameMap_register to notify the HOST of the handle.
1961 *
1962 * \param handle The remote handle.
1963 * \param procId The remote proc ID of the remote handle.
 * \param endpoint The endpoint address of the remote handle.
 * \param desc     Name announced for the remote channel (used as the device name).
 * \param create   TRUE when the remote channel is being created (unused here).
1965 *
1966 * \return None.
1967 */
1969 static
1970 Void
1971 _rpmsg_rpc_notify_cb (MessageQCopy_Handle handle, UInt16 procId,
1972 UInt32 endpoint, Char * desc, Bool create)
1973 {
1974 Int status = 0, i = 0, j = 0;
1975 Bool found = FALSE;
1976 rpmsg_rpc_conn_object * obj = NULL;
1977 char msg[512];
1978 struct rppc_msg_header * msg_hdr = (struct rppc_msg_header *)msg;
1980 for (i = 0; i < MAX_CONNS; i++) {
1981 if (rpmsg_rpc_state.objects[i] == NULL) {
1982 found = TRUE;
1983 break;
1984 }
1985 }
1987 for (j = 0; j < NUM_RPMSG_RPC_QUEUES; j++) {
1988 if (rpmsg_rpc_state.mqHandle[j] == handle) {
1989 break;
1990 }
1991 }
1993 if (found && j < NUM_RPMSG_RPC_QUEUES) {
1994 /* found a space to save this mq handle, allocate memory */
1995 obj = Memory_calloc (NULL, sizeof (rpmsg_rpc_conn_object), 0x0, NULL);
1996 if (obj) {
1997 /* store the object in the module info */
1998 rpmsg_rpc_state.objects[i] = obj;
2000 /* store the mq info in the object */
2001 obj->mq = handle;
2002 obj->procId = procId;
2003 status = ProcMgr_open(&obj->procH, obj->procId);
2004 if (status < 0) {
2005 Osal_printf("Failed to open handle to proc %d", procId);
                rpmsg_rpc_state.objects[i] = NULL;
                Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
2007 }
2008 else {
2009 obj->addr = endpoint;
2011 /* create a /dev/rpmsg-rpc instance for users to open */
2012 obj->dev = _init_rpmsg_rpc_device(desc);
2013 if (obj->dev == NULL) {
2014 Osal_printf("Failed to create %s", desc);
2015 ProcMgr_close(&obj->procH);
                    rpmsg_rpc_state.objects[i] = NULL;
                    Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
2017 }
2018 }
2019 }
2021 /* Send a message to query the chan info. Handle creating of the conn
2022 * in the callback */
2023 msg_hdr->msg_type = RPPC_MSG_QUERY_CHAN_INFO;
2024 msg_hdr->msg_len = 0;
2025 status = MessageQCopy_send(procId, MultiProc_self(), endpoint,
2026 rpmsg_rpc_state.endpoint[j],
2027 msg, sizeof(struct rppc_msg_header),
2028 TRUE);
2029 }
2030 }
2032 /**
2033 * Callback passed to MessageQCopy_create for the module.
2034 *
2035 * This callback is called when a message is received for the rpmsg-rpc
2036 * module itself, such as the RPPC_MSG_CHAN_INFO reply to the channel-info
2037 * query; per-client traffic instead arrives on each connection's own endpoint.
2038 *
2039 * \param handle The local MessageQCopy handle.
2040 * \param data Data message
2041 * \param len Length of data message
2042 * \param priv Private information for the endpoint
2043 * \param src Remote endpoint sending this message
2044 * \param srcProc Remote proc ID sending this message
2045 *
2046 * \return None.
2047 */
2049 static
2050 Void
2051 _rpmsg_rpc_module_cb (MessageQCopy_Handle handle, void * data, int len,
2052 void * priv, UInt32 src, UInt16 srcProc)
2053 {
2054 Int status = 0, i = 0, j = 0;
2055 rpmsg_rpc_conn_object * obj = NULL;
2056 struct rppc_msg_header * msg_hdr = (struct rppc_msg_header *)data;
2058 Osal_printf ("_rpmsg_rpc_module_cb callback");
2060 for (i = 0; i < MAX_CONNS; i++) {
2061 if (rpmsg_rpc_state.objects[i] != NULL &&
2062 rpmsg_rpc_state.objects[i]->addr == src) {
2063 obj = rpmsg_rpc_state.objects[i];
2064 break;
2065 }
2066 }
2068 for (j = 0; j < NUM_RPMSG_RPC_QUEUES; j++) {
2069 if (rpmsg_rpc_state.mqHandle[j] == handle) {
2070 break;
2071 }
2072 }
2074 if (obj && j < NUM_RPMSG_RPC_QUEUES) {
2075 switch (msg_hdr->msg_type) {
2076 case RPPC_MSG_CHAN_INFO:
2077 {
2078 struct rppc_channel_info * chan_info =
2079 (struct rppc_channel_info *)(msg_hdr + 1);
2080 obj->numFuncs = chan_info->num_funcs;
2081 /* TODO: Query the info about each function */
2082 break;
2083 }
2084 default:
2085 status = -EINVAL;
2086 GT_setFailureReason(curTrace, GT_4CLASS, "_rpmsg_rpc_module_cb",
2087 status, "invalid msg_type received");
2088 break;
2089 }
2090 }
2091 }
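/*
 * Wire format handled above, for reference: the query sent from
 * _rpmsg_rpc_notify_cb is a bare struct rppc_msg_header with
 * msg_type = RPPC_MSG_QUERY_CHAN_INFO and msg_len = 0.  The remote side
 * answers with an rppc_msg_header of type RPPC_MSG_CHAN_INFO followed
 * immediately by a struct rppc_channel_info, which is why the payload is
 * recovered with the (msg_hdr + 1) cast.  A sketch of how such a reply
 * would be laid out (illustrative; only num_funcs is consumed here):
 *
 *     char reply[sizeof(struct rppc_msg_header) +
 *                sizeof(struct rppc_channel_info)];
 *     struct rppc_msg_header   *hdr  = (struct rppc_msg_header *)reply;
 *     struct rppc_channel_info *info = (struct rppc_channel_info *)(hdr + 1);
 *
 *     hdr->msg_type   = RPPC_MSG_CHAN_INFO;
 *     hdr->msg_len    = sizeof(*info);
 *     info->num_funcs = 1;    // number of remotely callable functions
 */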
2094 /*!
2095 * @brief Module setup function.
2096 *
2097 * @sa rpmsg_rpc_destroy
2098 */
2099 Int
2100 rpmsg_rpc_setup (Void)
2101 {
2102 UInt16 i;
2103 List_Params listparams;
2104 Int status = 0;
2105 Error_Block eb;
2106 pthread_attr_t thread_attr;
2107 struct sched_param sched_param;
2109 GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_setup");
2111 Error_init(&eb);
2113 List_Params_init (&listparams);
2114 rpmsg_rpc_state.gateHandle = (IGateProvider_Handle)
2115 GateSpinlock_create ((GateSpinlock_Handle) NULL, &eb);
2116 #if !defined(SYSLINK_BUILD_OPTIMIZE)
2117 if (rpmsg_rpc_state.gateHandle == NULL) {
2118 status = -ENOMEM;
2119 GT_setFailureReason (curTrace,
2120 GT_4CLASS,
2121 "_rpmsg_rpc_setup",
2122 status,
2123 "Failed to create spinlock gate!");
2124 }
2125 else {
2126 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
2127 for (i = 0 ; i < MAX_PROCESSES ; i++) {
2128 rpmsg_rpc_state.eventState [i].bufList = NULL;
2129 rpmsg_rpc_state.eventState [i].rpc = NULL;
2130 rpmsg_rpc_state.eventState [i].refCount = 0;
2131 rpmsg_rpc_state.eventState [i].head = NULL;
2132 rpmsg_rpc_state.eventState [i].tail = NULL;
2133 }
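    /* Run the notifier thread with explicitly chosen scheduling attributes:
     * round-robin policy at PRIORITY_REALTIME_LOW rather than the creator's
     * inherited priority. */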
2135 pthread_attr_init(&thread_attr);
2136 sched_param.sched_priority = PRIORITY_REALTIME_LOW;
2137 pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED);
2138 pthread_attr_setschedpolicy(&thread_attr, SCHED_RR);
2139 pthread_attr_setschedparam(&thread_attr, &sched_param);
2141 rpmsg_rpc_state.run = TRUE;
2142 if (pthread_create(&rpmsg_rpc_state.nt, &thread_attr, notifier_thread, NULL) == EOK) {
2143 pthread_setname_np(rpmsg_rpc_state.nt, "rpmsg-rpc-notifier");
2144 /* Initialize the driver mapping array. */
2145 Memory_set (&rpmsg_rpc_state.objects,
2146 0,
2147 (sizeof (rpmsg_rpc_conn_object *)
2148 * MAX_CONNS));
2149 for (i = 0; i < NUM_RPMSG_RPC_QUEUES; i++) {
2150 /* create a local handle and register for notifications with MessageQCopy */
2151 rpmsg_rpc_state.mqHandle[i] = MessageQCopy_create (
2152 MessageQCopy_ADDRANY,
2153 rpmsg_rpc_names[i].name,
2154 _rpmsg_rpc_module_cb,
2155 NULL,
2156 &rpmsg_rpc_state.endpoint[i]);
2157 if (rpmsg_rpc_state.mqHandle[i] == NULL) {
2158 /*! @retval RPC_FAIL Failed to create MessageQCopy handle! */
2159 status = -ENOMEM;
2160 GT_setFailureReason (curTrace,
2161 GT_4CLASS,
2162 "rpmsg_rpc_setup",
2163 status,
2164 "Failed to create MessageQCopy handle!");
2165 break;
2166 }
2167 else {
2168 /* TBD: This could be replaced with a messageqcopy_open type call, one for
2169 * each core */
2170 status = MessageQCopy_registerNotify (rpmsg_rpc_state.mqHandle[i],
2171 _rpmsg_rpc_notify_cb);
2172 if (status < 0) {
2173 MessageQCopy_delete (&rpmsg_rpc_state.mqHandle[i]);
2174 /*! @retval RPC_FAIL Failed to register MQCopy handle! */
2175 status = -ENOMEM;
2176 GT_setFailureReason (curTrace,
2177 GT_4CLASS,
2178 "rpmsg_rpc_setup",
2179 status,
2180 "Failed to register MQCopy handle!");
2181 break;
2182 }
2183 }
2184 }
2185 if (status >= 0){
2186 rpmsg_rpc_state.sem = OsalSemaphore_create(OsalSemaphore_Type_Binary);
2187 if (rpmsg_rpc_state.sem == NULL) {
2188 //MessageQCopy_unregisterNotify();
2189 /*! @retval RPC_FAIL Failed to create semaphore! */
2190 status = -ENOMEM;
2191 GT_setFailureReason (curTrace,
2192 GT_4CLASS,
2193 "rpmsg_rpc_setup",
2194 status,
2195 "Failed to create semaphore!");
2196 }
2197 }
2198 if (status >= 0) {
2199 rpmsg_rpc_state.isSetup = TRUE;
2200 }
2201 else {
     /* tear down only the handles created so far (indices 0 .. i-1) */
2202 for (; i > 0; --i) {
2203 MessageQCopy_delete (&rpmsg_rpc_state.mqHandle[i - 1]);
2204 }
2205 rpmsg_rpc_state.run = FALSE;
2206 }
2207 }
2208 else {
2209 rpmsg_rpc_state.run = FALSE;
     status = -ENOMEM;    /* notifier thread creation failed; report it */
2210 }
2211 pthread_attr_destroy(&thread_attr);
2212 #if !defined(SYSLINK_BUILD_OPTIMIZE)
2213 }
2214 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
2216 GT_0trace (curTrace, GT_LEAVE, "rpmsg_rpc_setup");
2217 return status;
2218 }
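/*
 * Pairing note (sketch, not the actual resource-manager main): callers are
 * expected to bring up MessageQCopy before invoking this setup and to pair
 * it with rpmsg_rpc_destroy() on the teardown path, e.g.
 *
 *     if (rpmsg_rpc_setup() < 0) {
 *         // bail out; setup unwinds its own partial state on failure
 *     }
 *     ...
 *     rpmsg_rpc_destroy();
 */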
2221 /*!
2222 * @brief Module destroy function.
2223 *
2224 * @sa rpmsg_rpc_setup
2225 */
2226 Void
2227 rpmsg_rpc_destroy (Void)
2228 {
2229 rpmsg_rpc_EventPacket * packet;
2230 UInt32 i;
2231 List_Handle bufList;
2232 rpmsg_rpc_object * rpc = NULL;
2233 WaitingReaders_t * wr = NULL;
2234 struct _msg_info info;
2236 GT_0trace (curTrace, GT_ENTER, "rpmsg_rpc_destroy");
2238 for (i = 0; i < MAX_CONNS; i++) {
2239 if (rpmsg_rpc_state.objects[i]) {
2240 rpmsg_rpc_conn_object * obj = rpmsg_rpc_state.objects[i];
2241 _deinit_rpmsg_rpc_device(obj->dev);
2242 ProcMgr_close(&obj->procH);
2243 Memory_free(NULL, obj, sizeof(rpmsg_rpc_conn_object));
2244 rpmsg_rpc_state.objects[i] = NULL;
2245 }
2246 }
2248 for (i = 0 ; i < MAX_PROCESSES ; i++) {
2249 rpc = NULL;
2250 if (rpmsg_rpc_state.eventState [i].rpc != NULL) {
2251 /* This is recovery. Need to mark rpc structures as invalid */
2252 rpc = rpmsg_rpc_state.eventState[i].rpc;
2253 MessageQCopy_delete(&rpc->mq);
2254 rpc->mq = NULL;
2255 }
2256 bufList = rpmsg_rpc_state.eventState [i].bufList;
2258 rpmsg_rpc_state.eventState [i].bufList = NULL;
2259 rpmsg_rpc_state.eventState [i].rpc = NULL;
2260 rpmsg_rpc_state.eventState [i].refCount = 0;
2261 if (bufList != NULL) {
2262 /* Dequeue waiting readers and reply to them */
2263 pthread_mutex_lock(&rpmsg_rpc_state.lock);
2264 while ((wr = dequeue_waiting_reader(i)) != NULL) {
2265 /* Check if rcvid is still valid */
2266 if (MsgInfo(wr->rcvid, &info) != -1) {
2267 put_wr(wr);
2268 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
2269 MsgError(wr->rcvid, EINTR);
2270 pthread_mutex_lock(&rpmsg_rpc_state.lock);
2271 }
     else {
         /* rcvid no longer valid; still recycle the dequeued reader */
         put_wr(wr);
     }
2272 }
2273 /* Check for pending ionotify/select calls */
2274 if (rpc) {
2275 if (IOFUNC_NOTIFY_INPUT_CHECK(rpc->notify, 1, 0)) {
2276 iofunc_notify_trigger(rpc->notify, 1, IOFUNC_NOTIFY_INPUT);
2277 }
2278 }
2279 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
2281 /* Free event packets for any received but unprocessed events. */
2282 while (List_empty (bufList) != TRUE){
2283 packet = (rpmsg_rpc_EventPacket *)
2284 List_get (bufList);
2285 if (packet != NULL){
2286 Memory_free (NULL, packet, sizeof(*packet));
2287 }
2288 }
2289 List_delete (&(bufList));
2290 }
2291 }
2293 /* Free the cached list */
2294 flush_uBuf();
2296 if (rpmsg_rpc_state.sem) {
2297 OsalSemaphore_delete(&rpmsg_rpc_state.sem);
2298 }
2300 for (i = 0; i < NUM_RPMSG_RPC_QUEUES; i++) {
2301 if (rpmsg_rpc_state.mqHandle[i]) {
2302 //MessageQCopy_unregisterNotify();
2303 MessageQCopy_delete(&rpmsg_rpc_state.mqHandle[i]);
2304 }
2305 }
2307 if (rpmsg_rpc_state.gateHandle != NULL) {
2308 GateSpinlock_delete ((GateSpinlock_Handle *)
2309 &(rpmsg_rpc_state.gateHandle));
2310 }
2312 rpmsg_rpc_state.isSetup = FALSE ;
2313 rpmsg_rpc_state.run = FALSE;
2314 // run through and destroy the thread, and all outstanding
2315 // rpc structures
2316 pthread_mutex_lock(&rpmsg_rpc_state.lock);
2317 pthread_cond_signal(&rpmsg_rpc_state.cond);
2318 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
2319 pthread_join(rpmsg_rpc_state.nt, NULL);
2320 pthread_mutex_lock(&rpmsg_rpc_state.lock);
2321 while (rpmsg_rpc_state.head != NULL) {
2322 int index;
2323 WaitingReaders_t *item;
2324 index = dequeue_notify_list_item(rpmsg_rpc_state.head);
2325 if (index < 0)
2326 break;
2327 item = dequeue_waiting_reader(index);
2328 while (item) {
2329 put_wr(item);
2330 item = dequeue_waiting_reader(index);
2331 }
2332 }
2333 rpmsg_rpc_state.head = NULL ;
2334 rpmsg_rpc_state.tail = NULL ;
2335 pthread_mutex_unlock(&rpmsg_rpc_state.lock);
2337 GT_0trace (curTrace, GT_LEAVE, "rpmsg_rpc_destroy");
2338 }