/*
 * @file ti-ipc.c
 *
 * @brief fileops handler for ti-ipc component.
 *
 *
 * @ver
 *
 * ============================================================================
 *
 * Copyright (c) 2013, Texas Instruments Incorporated
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of Texas Instruments Incorporated nor the names of
 * its contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * Contact information for paper mail:
 * Texas Instruments
 * Post Office Box 655303
 * Dallas, Texas 75265
 * Contact information:
 * http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
 * DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
 * ============================================================================
 *
 */

/* Standard headers */
#include <ti/syslink/Std.h>

/* OSAL & Utils headers */
#include <ti/syslink/utils/List.h>
#include <ti/syslink/utils/String.h>
#include <ti/syslink/utils/Trace.h>
#include <ti/syslink/utils/Memory.h>
#include <ti/syslink/utils/IGateProvider.h>
#include <ti/syslink/utils/GateSpinlock.h>
#include <_MultiProc.h>

/* QNX-specific header includes */
#include <errno.h>
#include <unistd.h>
#include <sys/iofunc.h>
#include <sys/dispatch.h>
#include <sys/netmgr.h>
#include <devctl.h>

/* Module headers */
#include <ti/ipc/ti_ipc.h>
#include <ti/ipc/MessageQCopy.h>
#include <_MessageQCopy.h>
#include <_MessageQCopyDefs.h>
#include "OsalSemaphore.h"
#include "std_qnx.h"
#include <pthread.h>

#include "ti-ipc.h"

#define PRIORITY_REALTIME_LOW 29

#define TIIPC_DEVICE_NAME "/dev/tiipc"

/* structure to hold ti-ipc device information */
typedef struct named_device {
    iofunc_mount_t mattr;
    iofunc_attr_t cattr;
    int resmgr_id;
    iofunc_funcs_t mfuncs;
    resmgr_connect_funcs_t cfuncs;
    resmgr_io_funcs_t iofuncs;
} named_device_t;

/* ti-ipc device structure */
typedef struct ti_ipc_dev {
    dispatch_t * dpp;
    thread_pool_t * tpool;
    named_device_t ti_ipc;
} ti_ipc_dev_t;

/*!
 * @brief ti ipc instance object
 */
typedef struct ti_ipc_object_tag {
    MessageQCopy_Handle mq;
    UInt32 addr;
    UInt32 remoteAddr;
    UInt16 procId;
    pid_t pid;
    bool isValid;
    iofunc_notify_t notify[3];
} ti_ipc_object;

/*!
 * @brief Keeps the information related to Event.
 */
typedef struct ipc_EventState_tag {
    List_Handle bufList;
    /*!< Head of received event list. */
    ti_ipc_object *ipc;
    /*!< ipc instance. */
    UInt32 refCount;
    /*!< Reference count, used when multiple Notify_registerEvent are called
         from same process space (multi threads/processes). */
    WaitingReaders_t * head;
    /*!< Waiting readers head. */
    WaitingReaders_t * tail;
    /*!< Waiting readers tail. */
} ipc_EventState;

/*!
 * @brief Per-connection information
 */
typedef struct ti_ipc_ocb {
    iofunc_ocb_t hdr;
    pid_t pid;
    ti_ipc_object * ipc;
} ti_ipc_ocb_t;

/*!
 * @brief ti_ipc Module state object
 */
typedef struct ti_ipc_ModuleObject_tag {
    Bool isSetup;
    /*!< Indicates whether the module has already been set up */
    IGateProvider_Handle gateHandle;
    /*!< Handle of gate to be used for local thread safety */
    ipc_EventState eventState [MAX_PROCESSES];
    /*!< List for all user processes registered. */
    pthread_t nt;
    /*!< notifier thread */
    pthread_mutex_t lock;
    /*!< protection between notifier and event */
    pthread_cond_t cond;
    /*!< protection between notifier and event */
    MsgList_t *head;
    /*!< list head */
    MsgList_t *tail;
    /*!< list tail */
    int run;
    /*!< notifier thread must keep running */
    ti_ipc_dev_t *dev;
    /*!< device for this module */
} ti_ipc_ModuleObject;

/*!
 * @brief Structure of Event Packet read from notify kernel-side.
 */
typedef struct ipc_EventPacket_tag {
    List_Elem element;
    /*!< List element header */
    UInt32 pid;
    /*!< Process identifier */
    ti_ipc_object * obj;
    /*!< Pointer to the channel associated with this callback */
    UInt32 len;
    /*!< Length of the data associated with event. */
    UInt8 data[MessageQCopy_BUFSIZE];
    /*!< Data associated with event. */
    UInt32 src;
    /*!< Src endpoint associated with event. */
    struct ipc_EventPacket * next;
    struct ipc_EventPacket * prev;
} ipc_EventPacket;

/** ============================================================================
 *  Globals
 *  ============================================================================
 */
/*!
 * @var ti_ipc_state
 *
 * @brief ti-ipc state object variable
 */
static ti_ipc_ModuleObject ti_ipc_state =
{
    .gateHandle = NULL,
    .isSetup = FALSE,
    .nt = 0,
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .cond = PTHREAD_COND_INITIALIZER,
    .head = NULL,
    .tail = NULL,
    .run = 0,
    .dev = NULL
};

static MsgList_t *nl_cache;
static int num_nl = 0;
static WaitingReaders_t *wr_cache;
static int num_wr = 0;

extern dispatch_t * syslink_dpp;

/** ============================================================================
 *  Internal functions
 *  ============================================================================
 */

/*
 * Instead of constantly allocating and freeing the uBuf structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in ti-ipc.h.
 */
static ipc_EventPacket *uBuf_cache;
static int num_uBuf = 0;

static void flush_uBuf()
{
    ipc_EventPacket *uBuf = NULL;

    while (uBuf_cache) {
        num_uBuf--;
        uBuf = uBuf_cache;
        uBuf_cache = (ipc_EventPacket *)uBuf_cache->next;
        Memory_free(NULL, uBuf, sizeof(*uBuf));
    }
}

static ipc_EventPacket *get_uBuf()
{
    ipc_EventPacket *uBuf;
    uBuf = uBuf_cache;
    if (uBuf != NULL) {
        uBuf_cache = (ipc_EventPacket *)uBuf_cache->next;
        num_uBuf--;
    } else {
        uBuf = Memory_alloc(NULL, sizeof(ipc_EventPacket), 0, NULL);
    }
    return(uBuf);
}

static void put_uBuf(ipc_EventPacket * uBuf)
{
    if (num_uBuf >= CACHE_NUM) {
        Memory_free(NULL, uBuf, sizeof(*uBuf));
    } else {
        uBuf->next = (struct ipc_EventPacket *)uBuf_cache;
        uBuf_cache = uBuf;
        num_uBuf++;
    }
    return;
}

/*
 * Instead of constantly allocating and freeing the notifier structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in ti-ipc.h.
 */

static MsgList_t *get_nl()
{
    MsgList_t *item;
    item = nl_cache;
    if (item != NULL) {
        nl_cache = nl_cache->next;
        num_nl--;
    } else {
        item = Memory_alloc(NULL, sizeof(MsgList_t), 0, NULL);
    }
    return(item);
}

static void put_nl(MsgList_t *item)
{
    if (num_nl >= CACHE_NUM) {
        Memory_free(NULL, item, sizeof(*item));
    } else {
        item->next = nl_cache;
        nl_cache = item;
        num_nl++;
    }
    return;
}

static WaitingReaders_t *get_wr()
{
    WaitingReaders_t *item;
    item = wr_cache;
    if (item != NULL) {
        wr_cache = wr_cache->next;
        num_wr--;
    } else {
        item = Memory_alloc(NULL, sizeof(WaitingReaders_t), 0, NULL);
    }
    return(item);
}

static void put_wr(WaitingReaders_t *item)
{
    if (num_wr >= CACHE_NUM) {
        Memory_free(NULL, item, sizeof(*item));
    } else {
        item->next = wr_cache;
        wr_cache = item;
        num_wr++;
    }
    return;
}

/* The following functions are used for list/waiting reader management */
static MsgList_t *find_nl(int index)
{
    MsgList_t *item = NULL;
    item = ti_ipc_state.head;
    while (item) {
        if (item->index == index)
            return(item);
        item = item->next;
    }
    return(item);
}

/* we have the right locks when calling this function */
/*!
 * @brief Function to enqueue a notify list item.
 *
 * @param index Index of the client process associated with the item.
 *
 * @sa find_nl
 *     get_nl
 */
static int enqueue_notify_list(int index)
{
    MsgList_t *item;
    item = find_nl(index);
    if (item == NULL) {
        item = get_nl();
        if (item == NULL) {
            return(-1);
        }
        item->next = NULL;
        item->index = index;
        item->num_events = 1;
        if (ti_ipc_state.head == NULL) {
            ti_ipc_state.head = item;
            ti_ipc_state.tail = item;
            item->prev = NULL;
        }
        else {
            item->prev = ti_ipc_state.tail;
            ti_ipc_state.tail->next = item;
            ti_ipc_state.tail = item;
        }
    }
    else {
        item->num_events++;
    }
    return(0);
}

/* we have the right locks when calling this function */
/*!
 * @brief Function to dequeue a notify list item.
 *
 * @param item The item to remove.
 *
 * @sa put_nl
 */
static inline int dequeue_notify_list_item(MsgList_t *item)
{
    int index;
    if (item == NULL) {
        return(-1);
    }
    index = item->index;
    item->num_events--;
    if (item->num_events > 0) {
        return(index);
    }
    if (ti_ipc_state.head == item) {
        // removing head
        ti_ipc_state.head = item->next;
        if (ti_ipc_state.head != NULL) {
            ti_ipc_state.head->prev = NULL;
        }
        else {
            // removing head and tail
            ti_ipc_state.tail = NULL;
        }
    }
    else {
        item->prev->next = item->next;
        if (item->next != NULL) {
            item->next->prev = item->prev;
        }
        else {
            // removing tail
            ti_ipc_state.tail = item->prev;
        }
    }
    put_nl(item);
    return(index);
}

/* we have the right locks when calling this function */
/*!
 * @brief Function to add a waiting reader to the list.
 *
 * @param index Index of the client process waiting reader to add.
 * @param rcvid Receive ID of the client process that was passed
 *              when the client called read().
 *
 * @sa None
 */
static int enqueue_waiting_reader(int index, int rcvid)
{
    WaitingReaders_t *item;
    item = get_wr();
    if (item == NULL) {
        return(-1);
    }
    item->rcvid = rcvid;
    item->next = NULL;
    if (ti_ipc_state.eventState [index].head == NULL) {
        ti_ipc_state.eventState [index].head = item;
        ti_ipc_state.eventState [index].tail = item;
    }
    else {
        ti_ipc_state.eventState [index].tail->next = item;
        ti_ipc_state.eventState [index].tail = item;
    }
    return(EOK);
}

/* we have the right locks when calling this function */
/* caller frees item */
/*!
 * @brief Function to remove a waiting reader from the list.
 *
 * @param index Index of the client process waiting reader to dequeue.
 *
 * @sa None
 */
static WaitingReaders_t *dequeue_waiting_reader(int index)
{
    WaitingReaders_t *item = NULL;
    if (ti_ipc_state.eventState [index].head) {
        item = ti_ipc_state.eventState [index].head;
        ti_ipc_state.eventState [index].head =
                                    ti_ipc_state.eventState [index].head->next;
        if (ti_ipc_state.eventState [index].head == NULL) {
            ti_ipc_state.eventState [index].tail = NULL;
        }
    }
    return(item);
}

/*!
 * @brief Function to find a specified waiting reader.
 *
 * @param index Index of the client process waiting for the message.
 * @param rcvid Receive ID of the client process that was passed
 *              when the client called read().
 *
 * @sa None
 */

static WaitingReaders_t *find_waiting_reader(int index, int rcvid)
{
    WaitingReaders_t *item = NULL;
    WaitingReaders_t *prev = NULL;
    if (ti_ipc_state.eventState [index].head) {
        item = ti_ipc_state.eventState [index].head;
        while (item) {
            if (item->rcvid == rcvid) {
                /* remove item from list */
                if (prev)
                    prev->next = item->next;
                if (item == ti_ipc_state.eventState [index].head)
                    ti_ipc_state.eventState [index].head = item->next;
                break;
            }
            else {
                prev = item;
                item = item->next;
            }
        }
    }
    return item;
}

/*!
 * @brief Function used to check if there is a waiting reader with an
 *        event (message) ready to be delivered.
 *
 * @param index Index of the client process waiting for the message.
 * @param item Pointer to the waiting reader.
 *
 * @sa dequeue_notify_list_item
 *     dequeue_waiting_reader
 */

static int find_available_reader_and_event(int *index, WaitingReaders_t **item)
{
    MsgList_t *temp;
    if (ti_ipc_state.head == NULL) {
        return(0);
    }
    temp = ti_ipc_state.head;
    while (temp) {
        if (ti_ipc_state.eventState [temp->index].head) {
            // event and reader found
            if (dequeue_notify_list_item(temp) >= 0) {
                *index = temp->index;
                *item = dequeue_waiting_reader(temp->index);
            }
            else {
                /* error occurred, return 0 as item has not been set */
                return(0);
            }
            return(1);
        }
        temp = temp->next;
    }
    return(0);
}

/*!
 * @brief Function used to deliver the notification to the client that
 *        it has received a message.
 *
 * @param index Index of the client process receiving the message.
 * @param rcvid Receive ID of the client process that was passed
 *              when the client called read().
 *
 * @sa put_uBuf
 */

static void deliver_notification(int index, int rcvid)
{
    int err = EOK;
    ipc_EventPacket * uBuf = NULL;

    uBuf = (ipc_EventPacket *) List_get (ti_ipc_state.eventState [index].bufList);

    /* Let the check remain at run-time. */
    if (uBuf != NULL) {
        err = MsgReply(rcvid, uBuf->len, uBuf->data, uBuf->len);
        if (err == -1)
            perror("deliver_notification: MsgReply");
        /* Free the processed event callback packet. */
        put_uBuf(uBuf);
    }
    else {
        MsgReply(rcvid, EOK, NULL, 0);
    }
    return;
}

/*!
 * @brief Thread used for notifying waiting readers of messages.
 *
 * @param arg Thread-specific private arg.
 *
 * @sa find_available_reader_and_event
 *     deliver_notification
 *     put_wr
 */
static void *notifier_thread(void *arg)
{
    int status;
    int index;
    WaitingReaders_t *item = NULL;
    pthread_mutex_lock(&ti_ipc_state.lock);
    while (ti_ipc_state.run) {
        status = find_available_reader_and_event(&index, &item);
        if ( (status == 0) || (item == NULL) ) {
            status = pthread_cond_wait(&ti_ipc_state.cond, &ti_ipc_state.lock);
            if ((status != EOK) && (status != EINTR)) {
                // unexpected error from pthread_cond_wait, exit the loop
                break;
            }
            status = find_available_reader_and_event(&index, &item);
            if ( (status == 0) || (item == NULL) ) {
                continue;
            }
        }
        pthread_mutex_unlock(&ti_ipc_state.lock);
        // we have unlocked, and now we have an event to deliver
        // we deliver one event at a time, relock, check and continue
        deliver_notification(index, item->rcvid);
        pthread_mutex_lock(&ti_ipc_state.lock);
        put_wr(item);
    }
    pthread_mutex_unlock(&ti_ipc_state.lock);
    return(NULL);
}

/*!
 * @brief Attach a process to the ti-ipc user support framework.
 *
 * @param obj TI IPC instance
 *
 * @sa _ti_ipc_detach
 */
static
Int
_ti_ipc_attach (ti_ipc_object * obj)
{
    Int32 status = EOK;
    Bool flag = FALSE;
    Bool isInit = FALSE;
    List_Object * bufList = NULL;
    IArg key = 0;
    List_Params listparams;
    UInt32 i;

    GT_1trace (curTrace, GT_ENTER, "_ti_ipc_attach", obj);

    key = IGateProvider_enter (ti_ipc_state.gateHandle);
    for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
        if (ti_ipc_state.eventState [i].ipc == obj) {
            ti_ipc_state.eventState [i].refCount++;
            isInit = TRUE;
            status = EOK;
            break;
        }
    }

    if (isInit == FALSE) {
        List_Params_init (&listparams);
        bufList = List_create (&listparams);
        /* Search for an available slot for user process. */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            if (ti_ipc_state.eventState [i].ipc == NULL) {
                ti_ipc_state.eventState [i].ipc = obj;
                ti_ipc_state.eventState [i].refCount = 1;
                ti_ipc_state.eventState [i].bufList = bufList;
                flag = TRUE;
                break;
            }
        }

        /* No free slots found. Let this check remain at run-time,
         * since it is dependent on user environment.
         */
        if (flag != TRUE) {
            /*! @retval Notify_E_RESOURCE Maximum number of
                supported user clients have already been registered. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_ti_ipc_attach",
                                 status,
                                 "Maximum number of supported user"
                                 " clients have already been "
                                 "registered.");
            if (bufList != NULL) {
                List_delete (&bufList);
            }
        }
    }
    IGateProvider_leave (ti_ipc_state.gateHandle, key);

    GT_1trace (curTrace, GT_LEAVE, "_ti_ipc_attach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed. */
    return status;
}

/*!
 * @brief This function adds data to a registered process.
 *
 * @param obj Instance object associated with the client
 * @param src Source address (endpoint) sending the data
 * @param pid Process ID associated with the client
 * @param data Data to be added
 * @param len Length of data to be added
 *
 * @sa
 */
Int
_ti_ipc_addBufByPid (ti_ipc_object * obj,
                     UInt32 src,
                     UInt32 pid,
                     void * data,
                     UInt32 len)
{
    Int32 status = EOK;
    Bool flag = FALSE;
    ipc_EventPacket * uBuf = NULL;
    IArg key;
    UInt32 i;
    WaitingReaders_t *item;
    MsgList_t *msgItem;

    GT_assert (curTrace, (ti_ipc_state.isSetup == TRUE));

    key = IGateProvider_enter (ti_ipc_state.gateHandle);
    /* Find the registration for this callback */
    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (ti_ipc_state.eventState [i].ipc == obj) {
            flag = TRUE;
            break;
        }
    }
    IGateProvider_leave (ti_ipc_state.gateHandle, key);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (flag != TRUE) {
        /*! @retval ENOMEM Could not find a registered handler
            for this process. */
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "_ti_ipc_addBufByPid",
                             status,
                             "Could not find a registered handler "
                             "for this process.!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        /* Allocate memory for the buf */
        pthread_mutex_lock(&ti_ipc_state.lock);
        uBuf = get_uBuf();
        pthread_mutex_unlock(&ti_ipc_state.lock);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (uBuf == NULL) {
            /*! @retval Notify_E_MEMORY Failed to allocate memory for event
                packet for received callback. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_ti_ipc_addBufByPid",
                                 status,
                                 "Failed to allocate memory for event"
                                 " packet for received callback.!");
        }
        else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            List_elemClear (&(uBuf->element));
            GT_assert (curTrace,
                       (ti_ipc_state.eventState [i].bufList != NULL));

            if (data) {
                Memory_copy(uBuf->data, data, len);
            }
            uBuf->len = len;

            List_put (ti_ipc_state.eventState [i].bufList,
                      &(uBuf->element));
            pthread_mutex_lock(&ti_ipc_state.lock);
            item = dequeue_waiting_reader(i);
            if (item) {
                // there is a waiting reader
                deliver_notification(i, item->rcvid);
                put_wr(item);
                pthread_mutex_unlock(&ti_ipc_state.lock);
                status = EOK;
            }
            else {
                if (enqueue_notify_list(i) < 0) {
                    pthread_mutex_unlock(&ti_ipc_state.lock);
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "_ti_ipc_addBufByPid",
                                         status,
                                         "Failed to allocate memory for notifier");
                }
                else {
                    msgItem = find_nl(i);
                    /* TODO: obj could be NULL in some cases */
                    if (obj && msgItem) {
                        if (IOFUNC_NOTIFY_INPUT_CHECK(obj->notify,
                                                      msgItem->num_events, 0)) {
                            iofunc_notify_trigger(obj->notify,
                                                  msgItem->num_events,
                                                  IOFUNC_NOTIFY_INPUT);
                        }
                    }
                    status = EOK;
                    pthread_cond_signal(&ti_ipc_state.cond);
                    pthread_mutex_unlock(&ti_ipc_state.lock);
                }
            }
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        }
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_1trace (curTrace, GT_LEAVE, "_ti_ipc_addBufByPid", status);

    return status;
}

/*!
 * @brief This function implements the callback registered with
 *        MessageQCopy_create for each client. This function
 *        adds the message from the remote proc to a list
 *        where it is routed to the appropriate waiting reader.
 *
 * @param handle Destination MessageQCopy_Handle instance for the msg
 * @param data Message buffer
 * @param len Length of the message data
 * @param priv Private information given when callback was registered
 * @param src Source address of the message
 * @param srcProc Source proc of the message
 *
 * @sa
 */
Void
_ti_ipc_cb (MessageQCopy_Handle handle, void * data, int len, void * priv,
            UInt32 src, UInt16 srcProc)
{
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    Int32 status = 0;
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    ti_ipc_object * obj = NULL;

    obj = (ti_ipc_object *) priv;

#if !defined(SYSLINK_BUILD_OPTIMIZE)
    status =
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    _ti_ipc_addBufByPid (obj,
                         src,
                         obj->pid,
                         data,
                         len);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (status < 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "_ti_ipc_cb",
                             status,
                             "Failed to add callback packet for pid");
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
}

/**
 * Handler for ocb_calloc() requests.
 *
 * Special handler for ocb_calloc() requests that we export for control. An
 * open request from the client will result in a call to our special ocb_calloc
 * handler. This function allocates client-specific information.
 *
 * \param ctp Thread's associated context information.
 * \param device Device attributes structure.
 *
 * \return Pointer to an iofunc_ocb_t OCB structure.
 */

IOFUNC_OCB_T *
ti_ipc_ocb_calloc (resmgr_context_t * ctp, IOFUNC_ATTR_T * device)
{
    ti_ipc_ocb_t * ocb = NULL;
    ti_ipc_object * obj = NULL;

    /* Allocate the OCB */
    ocb = (ti_ipc_ocb_t *) calloc (1, sizeof (ti_ipc_ocb_t));
    if (ocb == NULL){
        errno = ENOMEM;
        return (NULL);
    }

    ocb->pid = ctp->info.pid;

    /* Allocate memory for the ti-ipc object. */
    obj = Memory_calloc (NULL, sizeof (ti_ipc_object), 0u, NULL);
    if (obj == NULL) {
        errno = ENOMEM;
        free(ocb);
        return (NULL);
    }
    else if (_ti_ipc_attach(obj) < 0) {
        errno = ENOMEM;
        free(ocb);
        return (NULL);
    }
    else {
        ocb->ipc = obj;
        IOFUNC_NOTIFY_INIT(obj->notify);
        obj->addr = MessageQCopy_ADDRANY;
        obj->remoteAddr = MessageQCopy_ADDRANY;
        obj->procId = MultiProc_INVALIDID;
        obj->mq = NULL;
        obj->isValid = TRUE;
    }

    return (IOFUNC_OCB_T *)(ocb);
}

/*!
 * @brief Detach a process from the ti-ipc user support framework.
 *
 * @param obj TI IPC instance
 * @param force Tells if detach should be forced even if conditions
 *              are not met.
 *
 * @sa _ti_ipc_attach
 */
static
Int
_ti_ipc_detach (ti_ipc_object * obj, Bool force)
{
    Int32 status = EOK;
    Int32 tmpStatus = EOK;
    Bool flag = FALSE;
    List_Object * bufList = NULL;
    UInt32 i;
    IArg key;
    MsgList_t * item;
    WaitingReaders_t * wr = NULL;
    struct _msg_info info;

    GT_1trace (curTrace, GT_ENTER, "_ti_ipc_detach", obj);

    key = IGateProvider_enter (ti_ipc_state.gateHandle);

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (ti_ipc_state.eventState [i].ipc == obj) {
            if (ti_ipc_state.eventState [i].refCount == 1) {
                ti_ipc_state.eventState [i].refCount = 0;

                flag = TRUE;
                break;
            }
            else {
                ti_ipc_state.eventState [i].refCount--;
                status = EOK;
                break;
            }
        }
    }
    IGateProvider_leave (ti_ipc_state.gateHandle, key);

    if (flag == TRUE) {
        key = IGateProvider_enter (ti_ipc_state.gateHandle);
        /* Last client being unregistered for this process. */
        ti_ipc_state.eventState [i].ipc = NULL;

        /* Store in local variable to delete outside lock. */
        bufList = ti_ipc_state.eventState [i].bufList;

        ti_ipc_state.eventState [i].bufList = NULL;

        IGateProvider_leave (ti_ipc_state.gateHandle, key);
    }

    if (flag != TRUE) {
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (i == MAX_PROCESSES) {
            /*! @retval Notify_E_NOTFOUND The specified user process was
                not found registered with Notify Driver module. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_ti_ipc_detach",
                                 status,
                                 "The specified user process was not found"
                                 " registered with rpmsg Driver module.");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }
    else {
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&ti_ipc_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                /* Check if rcvid is still valid */
                if (MsgInfo(wr->rcvid, &info) != -1) {
                    put_wr(wr);
                    pthread_mutex_unlock(&ti_ipc_state.lock);
                    MsgError(wr->rcvid, EINTR);
                    pthread_mutex_lock(&ti_ipc_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (obj) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(obj->notify, 1, 0)) {
                    iofunc_notify_trigger(obj->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }

            /* Free event packets for any received but unprocessed events. */
            while ((item = find_nl(i)) != NULL) {
                if (dequeue_notify_list_item(item) >= 0) {
                    ipc_EventPacket * uBuf = NULL;

                    uBuf = (ipc_EventPacket *) List_get (bufList);

                    /* Let the check remain at run-time. */
                    if (uBuf != NULL) {
                        put_uBuf(uBuf);
                    }
                }
            }
            pthread_mutex_unlock(&ti_ipc_state.lock);

            /* Last client being unregistered with Notify module. */
            List_delete (&bufList);
        }

#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if ((tmpStatus < 0) && (status >= 0)) {
            status = tmpStatus;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_ti_ipc_detach",
                                 status,
                                 "Failed to delete termination semaphore!");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }

    GT_1trace (curTrace, GT_LEAVE, "_ti_ipc_detach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed */
    return status;
}

/**
 * Handler for ocb_free() requests.
 *
 * Special handler for ocb_free() requests that we export for control. A
 * close request from the client will result in a call to our special ocb_free
 * handler. This function frees any client-specific information that was
 * allocated.
 *
 * \param i_ocb OCB associated with client's session.
 *
 * \return None.
 */

void
ti_ipc_ocb_free (IOFUNC_OCB_T * i_ocb)
{
    ti_ipc_ocb_t * ocb = (ti_ipc_ocb_t *)i_ocb;
    ti_ipc_object * obj;

    if (ocb) {
        if (ocb->ipc) {
            obj = ocb->ipc;
            /* TBD: Notification to remote core of endpoint closure? */
            if (obj->mq) {
                MessageQCopy_delete (&obj->mq);
                obj->mq = NULL;
            }
            _ti_ipc_detach(ocb->ipc, FALSE);
            free (obj);
        }
        free (ocb);
    }
}

/**
 * Handler for close_ocb() requests.
 *
 * This function removes the notification entries associated with the current
 * client.
 *
 * \param ctp Thread's associated context information.
 * \param reserved This argument must be NULL.
 * \param ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK Success.
 */

Int
ti_ipc_close_ocb (resmgr_context_t *ctp, void *reserved, RESMGR_OCB_T *ocb)
{
    ti_ipc_ocb_t * ipc_ocb = (ti_ipc_ocb_t *)ocb;
    iofunc_notify_remove(ctp, ipc_ocb->ipc->notify);
    return (iofunc_close_ocb_default(ctp, reserved, ocb));
}

/**
 * Handler for read() requests.
 *
 * Handles special read() requests that we export for control. A read
 * request will get a message from the remote processor that is associated
 * with the client that is calling read().
 *
 * \param ctp Thread's associated context information.
 * \param msg The actual read() message.
 * \param ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK Success.
 * \retval EAGAIN Call is non-blocking and no messages available.
 * \retval ENOMEM Not enough memory to perform the read.
 */

int
ti_ipc_read(resmgr_context_t *ctp, io_read_t *msg, RESMGR_OCB_T *i_ocb)
{
    Int status;
    Bool flag = FALSE;
    Int retVal = EOK;
    UInt32 i;
    MsgList_t * item;
    Int nonblock;
    ti_ipc_ocb_t * ocb = (ti_ipc_ocb_t *)i_ocb;
    ti_ipc_object * obj = ocb->ipc;

    if ((status = iofunc_read_verify(ctp, msg, i_ocb, &nonblock)) != EOK)
        return (status);

    if (!obj->isValid) {
        return EIO;
    }

    if (obj->addr == MessageQCopy_ADDRANY) {
        return ENOTCONN;
    }

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (ti_ipc_state.eventState [i].ipc == obj) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (ti_ipc_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&ti_ipc_state.lock);
            item = find_nl(i);
            if (dequeue_notify_list_item(item) < 0) {
                if (nonblock) {
                    pthread_mutex_unlock(&ti_ipc_state.lock);
                    return EAGAIN;
                }
                else {
                    retVal = enqueue_waiting_reader(i, ctp->rcvid);
                    if (retVal == EOK) {
                        pthread_cond_signal(&ti_ipc_state.cond);
                        pthread_mutex_unlock(&ti_ipc_state.lock);
                        return(_RESMGR_NOREPLY);
                    }
                    retVal = ENOMEM;
                    pthread_mutex_unlock(&ti_ipc_state.lock);
                }
            }
            else {
                deliver_notification(i, ctp->rcvid);
                pthread_mutex_unlock(&ti_ipc_state.lock);
                return(_RESMGR_NOREPLY);
            }
        }
    }

    /*! @retval Number-of-bytes-read Number of bytes read. */
    return retVal;
}
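
/*
 * A minimal client-side sketch of the read() path above (not part of this
 * module). It assumes fd was opened on /dev/tiipc and that a local endpoint
 * was already created with the TIIPC_IOCSETLOCAL devctl() shown further
 * below; without one, read() fails with ENOTCONN.
 *
 *     char msg[MessageQCopy_BUFSIZE];   // max message size for this driver
 *     int  nbytes = read(fd, msg, sizeof(msg));
 *     if (nbytes >= 0) {
 *         // process nbytes bytes of message payload
 *     } else if (errno == EAGAIN) {
 *         // fd has O_NONBLOCK set and no message is queued yet
 *     }
 */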

/**
 * Handler for write() requests.
 *
 * Handles special write() requests that we export for control. A write()
 * request will send a message to the remote processor which is associated with
 * the client.
 *
 * \param ctp Thread's associated context information.
 * \param msg The actual write() message.
 * \param io_ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK Success.
 * \retval ENOTCONN Remote address has not been set.
 * \retval ENOMEM Not enough memory to perform the write.
 * \retval EIO MessageQCopy_send failed.
 * \retval EINVAL msg->i.bytes is negative.
 */

int
ti_ipc_write(resmgr_context_t *ctp, io_write_t *msg, RESMGR_OCB_T *io_ocb)
{
    int status;
    char buf[MessageQCopy_BUFSIZE];
    int bytes;
    ti_ipc_ocb_t * ocb = (ti_ipc_ocb_t *)io_ocb;
    ti_ipc_object * obj = ocb->ipc;

    if ((status = iofunc_write_verify(ctp, msg, io_ocb, NULL)) != EOK) {
        return (status);
    }

    if (!obj->isValid) {
        return EIO;
    }

    if (obj->remoteAddr == MessageQCopy_ADDRANY) {
        return ENOTCONN;
    }

    bytes = ((int64_t) msg->i.nbytes) > MessageQCopy_BUFSIZE ?
            MessageQCopy_BUFSIZE : msg->i.nbytes;
    if (bytes < 0) {
        return EINVAL;
    }
    _IO_SET_WRITE_NBYTES (ctp, bytes);

    status = resmgr_msgread(ctp, buf, bytes, sizeof(msg->i));
    if (status != bytes) {
        return (errno);
    }

    status = MessageQCopy_send(obj->procId, MultiProc_self(), obj->remoteAddr,
                               obj->addr, buf, bytes, TRUE);
    if (status < 0) {
        return (EIO);
    }

    return(EOK);
}
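
/*
 * A minimal client-side sketch of the write() path above (not part of this
 * module), assuming fd was opened on /dev/tiipc and TIIPC_IOCSETREMOTE has
 * already been issued. Note the handler sends at most MessageQCopy_BUFSIZE
 * bytes, so the client should size its messages accordingly.
 *
 *     char payload[] = "ping";
 *     if (write(fd, payload, sizeof(payload)) < 0) {
 *         // errno is ENOTCONN if no remote endpoint was set,
 *         // or EIO if MessageQCopy_send failed
 *     }
 */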

/**
 * Handler for TIIPC_IOCSETLOCAL requests.
 *
 * Handles TIIPC_IOCSETLOCAL requests to set the local endpoint address.
 *
 * \param ctp Thread's associated context information.
 * \param msg The actual devctl() message.
 * \param io_ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK Success.
 * \retval EISCONN Local address is already set.
 * \retval ENOMEM Not enough memory to create the endpoint.
 * \retval EINVAL ctp->info.msglen or ctp->info.dstmsglen is not big enough.
 */
static
Int
_ti_ipc_set_local(resmgr_context_t *ctp, io_devctl_t *msg, ti_ipc_ocb_t *ocb)
{
    Int status = EOK;
    tiipc_local_params * cargs = (tiipc_local_params *)(_DEVCTL_DATA (msg->i));
    tiipc_local_params * out = (tiipc_local_params *)(_DEVCTL_DATA (msg->o));
    ti_ipc_object * obj = ocb->ipc;

    if ((ctp->info.msglen - sizeof(msg->i) < sizeof(tiipc_local_params)) ||
        (ctp->info.dstmsglen - sizeof(msg->o) < sizeof (tiipc_local_params))) {
        status = (EINVAL);
    }
    else if (obj->mq) {
        /* already a local endpoint associated with this instance */
        status = (EISCONN);
    }
    else {
        if (cargs->local_addr == TIIPC_ADDRANY) {
            cargs->local_addr = MessageQCopy_ADDRANY;
        }
        /* Create the local endpoint based on the request */
        obj->mq = MessageQCopy_create (cargs->local_addr, NULL, _ti_ipc_cb,
                                       obj, &obj->addr);
        if (obj->mq == NULL) {
            status = (ENOMEM);
        }
        else {
            out->local_addr = obj->addr;
            msg->o.ret_val = EOK;
            status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o) +
                                  sizeof(tiipc_local_params)));
        }
    }

    return status;
}

/**
 * Handler for TIIPC_IOCGETLOCAL requests.
 *
 * Handles TIIPC_IOCGETLOCAL requests to get the local endpoint address info.
 *
 * \param ctp Thread's associated context information.
 * \param msg The actual devctl() message.
 * \param io_ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK Success.
 * \retval EINVAL ctp->info.dstmsglen is not big enough.
 */
static
Int
_ti_ipc_get_local(resmgr_context_t *ctp, io_devctl_t *msg, ti_ipc_ocb_t *ocb)
{
    Int status = EOK;
    tiipc_local_params * out = (tiipc_local_params *)(_DEVCTL_DATA (msg->o));
    ti_ipc_object * obj = ocb->ipc;

    if (ctp->info.dstmsglen - sizeof(msg->o) < sizeof (tiipc_local_params)) {
        status = (EINVAL);
    }
    else {
        if (obj->addr == MessageQCopy_ADDRANY)
            out->local_addr = TIIPC_ADDRANY;
        else
            out->local_addr = obj->addr;
        msg->o.ret_val = EOK;
        status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o) +
                              sizeof(tiipc_local_params)));
    }

    return status;
}

/**
 * Handler for TIIPC_IOCSETREMOTE requests.
 *
 * Handles TIIPC_IOCSETREMOTE requests to set the remote endpoint address and
 * proc ID used for write() commands.
 *
 * \param ctp Thread's associated context information.
 * \param msg The actual devctl() message.
 * \param io_ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK Success.
 * \retval EISCONN Remote address is already set.
 * \retval ENOMEM Not enough memory to create the endpoint.
 * \retval EINVAL ctp->info.msglen or ctp->info.dstmsglen is not big enough
 *                or the specified remote proc ID is invalid.
 */
static
Int
_ti_ipc_set_remote(resmgr_context_t *ctp, io_devctl_t *msg, ti_ipc_ocb_t *ocb)
{
    Int status = EOK;
    tiipc_remote_params * cargs =
                              (tiipc_remote_params *)(_DEVCTL_DATA (msg->i));
    ti_ipc_object *obj = ocb->ipc;

    if ((ctp->info.msglen - sizeof(msg->i) < sizeof (tiipc_remote_params)) ||
        (ctp->info.dstmsglen - sizeof(msg->o) < sizeof (tiipc_remote_params))) {
        status = (EINVAL);
    }
    else if (obj->remoteAddr != MessageQCopy_ADDRANY) {
        /* already a remote endpoint associated with this instance */
        status = (EISCONN);
    }
    else if (cargs->remote_proc == MultiProc_self() ||
             cargs->remote_proc >= MultiProc_getNumProcessors()) {
        /* Don't support sending to self and remote proc ID must be valid */
        status = (EINVAL);
    }
    else {
        obj->remoteAddr = cargs->remote_addr;
        obj->procId = cargs->remote_proc;
        msg->o.ret_val = EOK;
        status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o) +
                              sizeof(tiipc_remote_params)));
    }

    return status;
}

/**
 * Handler for TIIPC_IOCGETREMOTE requests.
 *
 * Handles TIIPC_IOCGETREMOTE requests to get the remote endpoint address info.
 *
 * \param ctp Thread's associated context information.
 * \param msg The actual devctl() message.
 * \param io_ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK Success.
 * \retval EINVAL ctp->info.dstmsglen is not big enough.
 */
static
Int
_ti_ipc_get_remote(resmgr_context_t *ctp, io_devctl_t *msg, ti_ipc_ocb_t *ocb)
{
    Int status = EOK;
    tiipc_remote_params * out = (tiipc_remote_params *)(_DEVCTL_DATA (msg->o));
    ti_ipc_object * obj = ocb->ipc;

    if (ctp->info.dstmsglen - sizeof(msg->o) < sizeof (tiipc_remote_params)) {
        status = (EINVAL);
    }
    else {
        if (obj->remoteAddr == MessageQCopy_ADDRANY)
            out->remote_addr = TIIPC_ADDRANY;
        else
            out->remote_addr = obj->remoteAddr;
        out->remote_proc = obj->procId;
        msg->o.ret_val = EOK;
        status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o) +
                              sizeof(tiipc_remote_params)));
    }

    return status;
}

/**
 * Handler for devctl() requests.
 *
 * Handles special devctl() requests that we export for control. A devctl()
 * request will perform different functions depending on the dcmd.
 *
 * \param ctp Thread's associated context information.
 * \param msg The actual devctl() message.
 * \param i_ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK Success.
 * \retval other Fail.
 */
static
Int
ti_ipc_devctl(resmgr_context_t *ctp, io_devctl_t *msg, IOFUNC_OCB_T *i_ocb)
{
    Int status = 0;
    ti_ipc_ocb_t *ocb = (ti_ipc_ocb_t *)i_ocb;

    if ((status = iofunc_devctl_default(ctp, msg, &ocb->hdr)) != _RESMGR_DEFAULT)
        return(_RESMGR_ERRNO(status));
    status = 0;

    if (!ocb->ipc->isValid) {
        return EIO;
    }

    switch (msg->i.dcmd)
    {
        case TIIPC_IOCSETLOCAL:
            /* Must be called before receiving messages */
            status = _ti_ipc_set_local (ctp, msg, ocb);
            break;
        case TIIPC_IOCGETLOCAL:
            status = _ti_ipc_get_local (ctp, msg, ocb);
            break;
        case TIIPC_IOCSETREMOTE:
            /* Must be called before sending messages */
            status = _ti_ipc_set_remote (ctp, msg, ocb);
            break;
        case TIIPC_IOCGETREMOTE:
            status = _ti_ipc_get_remote (ctp, msg, ocb);
            break;
        default:
            status = (ENOSYS);
            break;
    }

    return status;
}
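
/*
 * A minimal client-side sketch of the devctl() handlers above (not part of
 * this module): open the device, create the local endpoint, then bind the
 * remote endpoint used by write(). The remote address and proc ID values
 * here are illustrative only.
 *
 *     int fd = open("/dev/tiipc", O_RDWR);
 *     tiipc_local_params  lparams = { .local_addr  = TIIPC_ADDRANY };
 *     tiipc_remote_params rparams = { .remote_addr = 61, .remote_proc = 1 };
 *
 *     devctl(fd, TIIPC_IOCSETLOCAL, &lparams, sizeof(lparams), NULL);
 *     // lparams.local_addr now holds the address assigned by MessageQCopy
 *     devctl(fd, TIIPC_IOCSETREMOTE, &rparams, sizeof(rparams), NULL);
 */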

/**
 * Unblock read calls
 *
 * This function checks if the client is blocked on a read call and if so,
 * unblocks the client.
 *
 * \param ctp Thread's associated context information.
 * \param msg The pulse message.
 * \param ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EINTR The client has been unblocked.
 * \retval other The client has not been unblocked or the client was not
 *               blocked.
 */

int
ti_ipc_read_unblock(resmgr_context_t *ctp, io_pulse_t *msg, iofunc_ocb_t *i_ocb)
{
    UInt32 i;
    Bool flag = FALSE;
    WaitingReaders_t * wr;
    ti_ipc_ocb_t * ocb = (ti_ipc_ocb_t *)i_ocb;
    ti_ipc_object * obj = ocb->ipc;

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (ti_ipc_state.eventState [i].ipc == obj) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (ti_ipc_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&ti_ipc_state.lock);
            wr = find_waiting_reader(i, ctp->rcvid);
            if (wr) {
                put_wr(wr);
                pthread_mutex_unlock(&ti_ipc_state.lock);
                return (EINTR);
            }
            pthread_mutex_unlock(&ti_ipc_state.lock);
        }
    }

    return _RESMGR_NOREPLY;
}

/**
 * Handler for unblock() requests.
 *
 * Handles an unblock request for a client which is requesting to no longer
 * be blocked on the ti-ipc driver.
 *
 * \param ctp Thread's associated context information.
 * \param msg The pulse message.
 * \param ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EINTR The rcvid has been unblocked.
 */

int
ti_ipc_unblock(resmgr_context_t *ctp, io_pulse_t *msg, RESMGR_OCB_T *ocb)
{
    int status = _RESMGR_NOREPLY;
    struct _msg_info info;

    /*
     * Try to run the default unblock for this message.
     */
    if ((status = iofunc_unblock_default(ctp, msg, ocb)) != _RESMGR_DEFAULT) {
        return status;
    }

    /*
     * Check if rcvid is still valid and still has an unblock
     * request pending.
     */
    if (MsgInfo(ctp->rcvid, &info) == -1 ||
        !(info.flags & _NTO_MI_UNBLOCK_REQ)) {
        return _RESMGR_NOREPLY;
    }

    if (ti_ipc_read_unblock(ctp, msg, ocb) != _RESMGR_NOREPLY) {
        return _RESMGR_ERRNO(EINTR);
    }

    return _RESMGR_ERRNO(EINTR);
}

/**
 * Handler for notify() requests.
 *
 * Handles special notify() requests that we export for control. A notify
 * request results from the client calling select().
 *
 * \param ctp Thread's associated context information.
 * \param msg The actual notify() message.
 * \param ocb OCB associated with client's session.
 *
 * \return POSIX errno value.
 */

Int
ti_ipc_notify( resmgr_context_t *ctp, io_notify_t *msg, RESMGR_OCB_T *ocb)
{
    ti_ipc_ocb_t * ipc_ocb = (ti_ipc_ocb_t *)ocb;
    int trig;
    int i = 0;
    Bool flag = FALSE;
    MsgList_t * item = NULL;
    int status = EOK;
    ti_ipc_object * obj = ipc_ocb->ipc;

    trig = _NOTIFY_COND_OUTPUT; /* clients can give us data */

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (ti_ipc_state.eventState [i].ipc == obj) {
            flag = TRUE;
            break;
        }
    }

    pthread_mutex_lock(&ti_ipc_state.lock);
    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (ti_ipc_state.eventState [i].bufList != NULL) {
            item = find_nl(i);
            if (item && item->num_events > 0) {
                trig |= _NOTIFY_COND_INPUT;
            }
        }
    }
    status = iofunc_notify(ctp, msg, ipc_ocb->ipc->notify, trig, NULL, NULL);
    pthread_mutex_unlock(&ti_ipc_state.lock);
    return status;
}
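
/*
 * A minimal client-side sketch of the notify() path above (not part of this
 * module): select() blocks until a message is queued for this fd, after
 * which a read() will return immediately. Assumes fd was opened on
 * /dev/tiipc with a local endpoint already set.
 *
 *     fd_set rfds;
 *     FD_ZERO(&rfds);
 *     FD_SET(fd, &rfds);
 *     if (select(fd + 1, &rfds, NULL, NULL, NULL) > 0 &&
 *         FD_ISSET(fd, &rfds)) {
 *         // a message is available; read() will not block
 *     }
 */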

/**
 * Initializes and attaches ti-ipc resource manager functions to a
 * ti-ipc device name.
 *
 * \return Pointer to the created ti_ipc_dev_t device.
 */

static
ti_ipc_dev_t *
_init_device ()
{
    iofunc_attr_t * attr;
    resmgr_attr_t resmgr_attr;
    ti_ipc_dev_t * dev = NULL;

    dev = malloc(sizeof(*dev));
    if (dev == NULL) {
        return NULL;
    }

    memset(&resmgr_attr, 0, sizeof resmgr_attr);
    resmgr_attr.nparts_max = 10;
    resmgr_attr.msg_max_size = 2048;
    memset(&dev->ti_ipc.mattr, 0, sizeof(iofunc_mount_t));
    dev->ti_ipc.mattr.flags = ST_NOSUID | ST_NOEXEC;
    dev->ti_ipc.mattr.conf = IOFUNC_PC_CHOWN_RESTRICTED |
                             IOFUNC_PC_NO_TRUNC |
                             IOFUNC_PC_SYNC_IO;
    dev->ti_ipc.mattr.funcs = &dev->ti_ipc.mfuncs;
    memset(&dev->ti_ipc.mfuncs, 0, sizeof(iofunc_funcs_t));
    dev->ti_ipc.mfuncs.nfuncs = _IOFUNC_NFUNCS;
    dev->ti_ipc.mfuncs.ocb_calloc = ti_ipc_ocb_calloc;
    dev->ti_ipc.mfuncs.ocb_free = ti_ipc_ocb_free;
    iofunc_func_init(_RESMGR_CONNECT_NFUNCS, &dev->ti_ipc.cfuncs,
                     _RESMGR_IO_NFUNCS, &dev->ti_ipc.iofuncs);
    iofunc_attr_init(attr = &dev->ti_ipc.cattr, S_IFCHR | 0777, NULL, NULL);
    dev->ti_ipc.iofuncs.unblock = ti_ipc_unblock;
    dev->ti_ipc.iofuncs.devctl = ti_ipc_devctl;
    dev->ti_ipc.iofuncs.notify = ti_ipc_notify;
    dev->ti_ipc.iofuncs.close_ocb = ti_ipc_close_ocb;
    dev->ti_ipc.iofuncs.read = ti_ipc_read;
    dev->ti_ipc.iofuncs.write = ti_ipc_write;
    attr->mount = &dev->ti_ipc.mattr;
    iofunc_time_update(attr);

    if (-1 == (dev->ti_ipc.resmgr_id =
                   resmgr_attach(syslink_dpp, &resmgr_attr,
                                 TIIPC_DEVICE_NAME, _FTYPE_ANY, 0,
                                 &dev->ti_ipc.cfuncs,
                                 &dev->ti_ipc.iofuncs, attr))) {
        free(dev);
        return(NULL);
    }

    return(dev);
}

/**
 * Detaches a ti-ipc resource manager device name.
 *
 * \param dev The device to detach.
 *
 * \return None.
 */

static
Void
_deinit_device (ti_ipc_dev_t * dev)
{
    resmgr_detach(syslink_dpp, dev->ti_ipc.resmgr_id, 0);

    free (dev);

    return;
}

/*!
 * @brief Module setup function.
 *
 * @sa ti_ipc_destroy
 */
Int
ti_ipc_setup (Void)
{
    UInt16 i;
    List_Params listparams;
    Int status = 0;
    Error_Block eb;
    pthread_attr_t thread_attr;
    struct sched_param sched_param;

    GT_0trace (curTrace, GT_ENTER, "ti_ipc_setup");

    Error_init(&eb);

    List_Params_init (&listparams);
    ti_ipc_state.gateHandle = (IGateProvider_Handle)
                  GateSpinlock_create ((GateSpinlock_Handle) NULL, &eb);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (ti_ipc_state.gateHandle == NULL) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "_ti_ipc_setup",
                             status,
                             "Failed to create spinlock gate!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            ti_ipc_state.eventState [i].bufList = NULL;
            ti_ipc_state.eventState [i].ipc = NULL;
            ti_ipc_state.eventState [i].refCount = 0;
            ti_ipc_state.eventState [i].head = NULL;
            ti_ipc_state.eventState [i].tail = NULL;
        }

        pthread_attr_init(&thread_attr);
        sched_param.sched_priority = PRIORITY_REALTIME_LOW;
        pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(&thread_attr, SCHED_RR);
        pthread_attr_setschedparam(&thread_attr, &sched_param);

        ti_ipc_state.run = TRUE;
        if (pthread_create(&ti_ipc_state.nt,
                           &thread_attr, notifier_thread, NULL) == EOK) {
            pthread_setname_np(ti_ipc_state.nt, "tiipc-notifier");
            /* create a /dev/tiipc instance for users to open */
            if (!ti_ipc_state.dev)
                ti_ipc_state.dev = _init_device();
            if (ti_ipc_state.dev == NULL) {
                Osal_printf("Failed to create tiipc");
                ti_ipc_state.run = FALSE;
            }
            else {
                ti_ipc_state.isSetup = TRUE;
            }
        }
        else {
            ti_ipc_state.run = FALSE;
        }
        pthread_attr_destroy(&thread_attr);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_0trace (curTrace, GT_LEAVE, "ti_ipc_setup");
    return status;
}

/*!
 * @brief Module destroy function.
 *
 * @sa ti_ipc_setup
 */
Void
ti_ipc_destroy (bool recover)
{
    ipc_EventPacket * packet;
    UInt32 i;
    List_Handle bufList;
    ti_ipc_object * obj = NULL;
    WaitingReaders_t * wr = NULL;
    struct _msg_info info;

    GT_0trace (curTrace, GT_ENTER, "_ti_ipc_destroy");

    if (!recover)
        _deinit_device(ti_ipc_state.dev);

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        obj = NULL;
        if (ti_ipc_state.eventState [i].ipc != NULL) {
            /* This is recovery. Need to mark mq structures as invalid */
            obj = ti_ipc_state.eventState[i].ipc;
            MessageQCopy_delete(&obj->mq);
            obj->mq = NULL;
            obj->isValid = FALSE;
        }
        bufList = ti_ipc_state.eventState [i].bufList;

        ti_ipc_state.eventState [i].bufList = NULL;
        ti_ipc_state.eventState [i].ipc = NULL;
        ti_ipc_state.eventState [i].refCount = 0;
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&ti_ipc_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                /* Check if rcvid is still valid */
                if (MsgInfo(wr->rcvid, &info) != -1) {
                    put_wr(wr);
                    pthread_mutex_unlock(&ti_ipc_state.lock);
                    MsgError(wr->rcvid, EINTR);
                    pthread_mutex_lock(&ti_ipc_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (obj) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(obj->notify, 1, 0)) {
                    iofunc_notify_trigger(obj->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }
            pthread_mutex_unlock(&ti_ipc_state.lock);

            /* Free event packets for any received but unprocessed events. */
            while (List_empty (bufList) != TRUE){
                packet = (ipc_EventPacket *) List_get (bufList);
                if (packet != NULL){
                    Memory_free (NULL, packet, sizeof(*packet));
                }
            }
            List_delete (&(bufList));
        }
    }

    /* Free the cached list */
    pthread_mutex_lock(&ti_ipc_state.lock);
    flush_uBuf();
    pthread_mutex_unlock(&ti_ipc_state.lock);

    if (ti_ipc_state.gateHandle != NULL) {
        GateSpinlock_delete ((GateSpinlock_Handle *)
                             &(ti_ipc_state.gateHandle));
    }

    ti_ipc_state.isSetup = FALSE;
    ti_ipc_state.run = FALSE;
    // run through and destroy the thread, and all outstanding
    // notify structures
    pthread_mutex_lock(&ti_ipc_state.lock);
    pthread_cond_signal(&ti_ipc_state.cond);
    pthread_mutex_unlock(&ti_ipc_state.lock);
    pthread_join(ti_ipc_state.nt, NULL);
    pthread_mutex_lock(&ti_ipc_state.lock);
    while (ti_ipc_state.head != NULL) {
        int index;
        WaitingReaders_t *item;
        index = dequeue_notify_list_item(ti_ipc_state.head);
        if (index < 0)
            break;
        item = dequeue_waiting_reader(index);
        while (item) {
            put_wr(item);
            item = dequeue_waiting_reader(index);
        }
    }
    ti_ipc_state.head = NULL;
    ti_ipc_state.tail = NULL;
    pthread_mutex_unlock(&ti_ipc_state.lock);

    GT_0trace (curTrace, GT_LEAVE, "_ti_ipc_destroy");
}