[ipc/ipcdev.git] / qnx / src / ipc3x_dev / ti / syslink / rpmsg-dce / hlos / knl / Qnx / rpmsg-dcedrv.c
1 /*
2 * @file rpmsg-dcedrv.c
3 *
4 * @brief fileops handler for dCE component.
5 *
6 *
7 * @ver 02.00.00.46_alpha1
8 *
9 * ============================================================================
10 *
11 * Copyright (c) 2010-2011, Texas Instruments Incorporated
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * * Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 *
20 * * Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * * Neither the name of Texas Instruments Incorporated nor the names of
25 * its contributors may be used to endorse or promote products derived
26 * from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
30 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
32 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
33 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
34 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
35 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
36 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
37 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
38 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * Contact information for paper mail:
40 * Texas Instruments
41 * Post Office Box 655303
42 * Dallas, Texas 75265
43 * Contact information:
44 * http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
45 * DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
46 * ============================================================================
47 *
48 */
51 /* Standard headers */
52 #include <ti/syslink/Std.h>
54 /* OSAL & Utils headers */
55 #include <ti/syslink/utils/List.h>
56 #include <ti/syslink/utils/String.h>
57 #include <ti/syslink/utils/Trace.h>
58 #include <ti/syslink/utils/Memory.h>
59 #include <ti/syslink/utils/IGateProvider.h>
60 #include <ti/syslink/utils/GateSpinlock.h>
61 #include <_MultiProc.h>
63 /*QNX specific header include */
64 #include <errno.h>
65 #include <unistd.h>
66 #include <sys/iofunc.h>
67 #include <sys/dispatch.h>
68 #include <sys/netmgr.h>
69 #include <devctl.h>
71 /* Module headers */
72 #include <ti/ipc/rpmsg_dce.h>
73 #include <ti/ipc/MessageQCopy.h>
74 #include <_MessageQCopy.h>
75 #include <_MessageQCopyDefs.h>
76 #include "OsalSemaphore.h"
77 #include "std_qnx.h"
78 #include <pthread.h>
80 #include "rpmsg-dcedrv.h"
82 #define PRIORITY_REALTIME_LOW 29
/* structure to hold rpmsg-dce device information for one resmgr pathname */
typedef struct named_device {
    iofunc_mount_t          mattr;      /* mount attributes (OCB alloc/free hook lives in mfuncs) */
    iofunc_attr_t           cattr;      /* per-device attribute record */
    int                     resmgr_id;  /* id used with resmgr_pathname() to recover the path */
    pthread_mutex_t         mutex;      /* serializes access to this device's state */
    iofunc_funcs_t          mfuncs;     /* custom ocb_calloc/ocb_free function table */
    resmgr_connect_funcs_t  cfuncs;     /* connect (open/close) handlers */
    resmgr_io_funcs_t       iofuncs;    /* I/O (read/write/devctl/notify) handlers */
    char                    device_name[_POSIX_PATH_MAX]; /* device pathname under /dev */
} named_device_t;
/* rpmsg-dce device structure: dispatch/thread-pool context plus the
 * single named device this driver exports. */
typedef struct rpmsg_dce_dev {
    dispatch_t       * dpp;       /* dispatch handle the resmgr is attached to */
    thread_pool_t    * tpool;     /* thread pool servicing resmgr messages */
    named_device_t     rpmsg_dce; /* the rpmsg-dce named device */
} rpmsg_dce_dev_t;
/* Request codes sent to the remote DCE server's connection endpoint. */
typedef enum {
    DCE_CONN_REQ,    /* client endpoint is connecting (sent from ocb_calloc) */
    DCE_DISCON_REQ   /* client endpoint is disconnecting (sent from ocb_free) */
} dce_conn_req_type;

/* Wire format of a connect/disconnect request. */
typedef struct {
    UInt32 type; /* one of dce_conn_req_type */
    UInt32 addr; /* local MessageQCopy endpoint address of the client */
} dce_conn_req;
/*!
 *  @brief  Remote connection object: one per connected remote processor.
 */
typedef struct rpmsg_dce_conn_object {
    rpmsg_dce_dev_t *   dev;    /* device exported for this connection */
    MessageQCopy_Handle mq;     /* message queue handle for the connection */
    UInt32              addr;   /* remote endpoint address */
    UInt16              procId; /* MultiProc id of the remote processor */
} rpmsg_dce_conn_object;
/*!
 *  @brief  dce instance object: one per client open() of the device.
 */
typedef struct rpmsg_dce_object_tag {
    MessageQCopy_Handle     mq;         /* per-client endpoint (created in ocb_calloc) */
    rpmsg_dce_conn_object * conn;       /* remote-proc connection this client uses */
    UInt32                  addr;       /* local endpoint address of mq */
    UInt32                  remoteAddr; /* remote endpoint address */
    UInt16                  procId;     /* remote processor id (copied from conn) */
    pid_t                   pid;        /* client process id */
    iofunc_notify_t         notify[3];  /* ionotify/select notification entries */
} rpmsg_dce_object;
/*!
 *  @brief  Structure of Event callback argument passed to register function.
 */
typedef struct rpmsg_dce_EventCbck_tag {
    List_Elem          element;
    /*!< List element header */
    rpmsg_dce_object * dce;
    /*!< User ch info pointer. Passed back to user callback function */
} rpmsg_dce_EventCbck;
/*!
 *  @brief  Keeps the information related to Event.
 *
 *  One slot per registered client process (see
 *  rpmsg_dce_state.eventState); a slot is free when dce == NULL.
 */
typedef struct rpmsg_dce_EventState_tag {
    List_Handle            bufList;
    /*!< Head of received event list (queue of rpmsg_dce_EventPacket). */
    rpmsg_dce_object      *dce;
    /*!< dce instance that owns this slot; NULL marks the slot unused. */
    UInt32                 refCount;
    /*!< Reference count, used when multiple Notify_registerEvent are called
         from same process space (multi threads/processes). */
    WaitingReaders_t *     head;
    /*!< Waiting readers head (FIFO of blocked read() rcvids). */
    WaitingReaders_t *     tail;
    /*!< Waiting readers tail. */
} rpmsg_dce_EventState;
/*!
 *  @brief  Per-connection information (extends the standard iofunc OCB).
 */
typedef struct rpmsg_dce_ocb {
    iofunc_ocb_t       hdr;  /* must be first: standard OCB header */
    pid_t              pid;  /* client process id (from ctp->info.pid) */
    rpmsg_dce_object * dce;  /* per-open dce instance, owned by this OCB */
} rpmsg_dce_ocb_t;
/*!
 *  @brief  rpmsg-dce Module state object (single static instance:
 *          rpmsg_dce_state). The notifier thread, MessageQCopy callbacks
 *          and resmgr handlers all share this state; `lock`/`cond` guard
 *          the notify list and waiting-reader queues, while `gateHandle`
 *          guards the eventState registration table.
 */
typedef struct rpmsg_dce_ModuleObject_tag {
    Bool                 isSetup;
    /*!< Indicates whether the module has been already setup */
    Bool                 openRefCount;
    /*!< Open reference count. */
    IGateProvider_Handle gateHandle;
    /*!< Handle of gate to be used for local thread safety */
    rpmsg_dce_EventState eventState [MAX_PROCESSES];
    /*!< List for all user processes registered. */
    rpmsg_dce_conn_object * objects [MultiProc_MAXPROCESSORS];
    /*!< List of all remote connections. */
    MessageQCopy_Handle mqHandle;
    /*!< Local mq handle associated with this module */
    UInt32 endpoint;
    /*!< Local endpoint associated with the mq handle */
    MessageQCopy_Handle conn_handle;
    /*!< Local mq conn handle associated with this module */
    UInt32 conn_endpoint;
    /*!< Local endpoint associated with the mq conn handle */
    UInt32 conn_remote_endpoint;
    /*!< Remote endpoint associated with the mq conn handle */
    OsalSemaphore_Handle sem;
    /*!< Handle to semaphore used for dce instance connection notifications */
    pthread_t nt;
    /*!< notifier thread */
    pthread_mutex_t lock;
    /*!< protection between notifier and event */
    pthread_cond_t  cond;
    /*!< protection between notifier and event */
    MsgList_t *head;
    /*!< list head (pending-notification queue) */
    MsgList_t *tail;
    /*!< list tail */
    int run;
    /*!< notifier thread must keep running while non-zero */
} rpmsg_dce_ModuleObject;
/*!
 *  @brief  Structure of Event Packet read from notify kernel-side.
 *
 *  Queued on a client's bufList by _rpmsg_dce_addBufByPid and consumed
 *  by deliver_notification. `next`/`prev` are reused to chain packets
 *  in the uBuf cache (see get_uBuf/put_uBuf).
 */
typedef struct rpmsg_dce_EventPacket_tag {
    List_Elem          element;
    /*!< List element header */
    UInt32             pid;
    /* Processor identifier */
    rpmsg_dce_object * obj;
    /*!< Pointer to the channel associated with this callback */
    UInt32             len;
    /*!< Length of the data associated with event. */
    UInt8              data[MessageQCopy_BUFSIZE];
    /*!< Data associated with event. */
    UInt32             src;
    /*!< Src endpoint associated with event. */
    struct rpmsg_dce_EventPacket * next;
    struct rpmsg_dce_EventPacket * prev;
} rpmsg_dce_EventPacket;
/** ============================================================================
 *  Globals
 *  ============================================================================
 */
/*!
 *  @var    rpmsg_dce_state
 *
 *  @brief  rpmsg-dce state object variable.
 *
 *  Fields omitted from the initializer (eventState, objects, mqHandle,
 *  endpoints, sem) start zero/NULL per C static-storage initialization.
 */
static rpmsg_dce_ModuleObject rpmsg_dce_state =
{
    .gateHandle = NULL,
    .isSetup = FALSE,
    .openRefCount = 0,
    .nt = 0,
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .cond = PTHREAD_COND_INITIALIZER,
    .head = NULL,
    .tail = NULL,
    .run = 0
};
/* LIFO cache of recycled notify-list nodes and its current depth. */
static MsgList_t *nl_cache;
static int num_nl = 0;
/* LIFO cache of recycled waiting-reader nodes and its current depth. */
static WaitingReaders_t *wr_cache;
static int num_wr = 0;

/* Dispatch handle owned by the enclosing syslink resmgr (defined elsewhere). */
extern dispatch_t * syslink_dpp;
264 /** ============================================================================
265 * Internal functions
266 * ============================================================================
267 */
/*
 * Instead of constantly allocating and freeing the uBuf structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in rpmsg-dcedrv.h.
 */
/* LIFO free-list of recycled event packets, chained via ->next. */
static rpmsg_dce_EventPacket *uBuf_cache;
/* Number of packets currently held in uBuf_cache. */
static int num_uBuf = 0;
277 static void flush_uBuf()
278 {
279 rpmsg_dce_EventPacket *uBuf = NULL;
281 while(uBuf_cache) {
282 num_uBuf--;
283 uBuf = uBuf_cache;
284 uBuf_cache = (rpmsg_dce_EventPacket *)uBuf_cache->next;
285 Memory_free(NULL, uBuf, sizeof(*uBuf));
286 }
287 }
289 static rpmsg_dce_EventPacket *get_uBuf()
290 {
291 rpmsg_dce_EventPacket *uBuf;
292 uBuf = uBuf_cache;
293 if (uBuf != NULL) {
294 uBuf_cache = (rpmsg_dce_EventPacket *)uBuf_cache->next;
295 num_uBuf--;
296 } else {
297 uBuf = Memory_alloc(NULL, sizeof(rpmsg_dce_EventPacket), 0, NULL);
298 }
299 return(uBuf);
300 }
302 static void put_uBuf(rpmsg_dce_EventPacket * uBuf)
303 {
304 if (num_uBuf >= CACHE_NUM) {
305 Memory_free(NULL, uBuf, sizeof(*uBuf));
306 } else {
307 uBuf->next = (struct rpmsg_dce_EventPacket *)uBuf_cache;
308 uBuf_cache = uBuf;
309 num_uBuf++;
310 }
311 return;
312 }
314 /*
315 * Instead of constantly allocating and freeing the notifier structures
316 * we just cache a few of them, and recycle them instead.
317 * The cache count is set with CACHE_NUM in rpmsg-dcedrv.h.
318 */
320 static MsgList_t *get_nl()
321 {
322 MsgList_t *item;
323 item = nl_cache;
324 if (item != NULL) {
325 nl_cache = nl_cache->next;
326 num_nl--;
327 } else {
328 item = Memory_alloc(NULL, sizeof(MsgList_t), 0, NULL);
329 }
330 return(item);
331 }
333 static void put_nl(MsgList_t *item)
334 {
335 if (num_nl >= CACHE_NUM) {
336 Memory_free(NULL, item, sizeof(*item));
337 } else {
338 item->next = nl_cache;
339 nl_cache = item;
340 num_nl++;
341 }
342 return;
343 }
345 static WaitingReaders_t *get_wr()
346 {
347 WaitingReaders_t *item;
348 item = wr_cache;
349 if (item != NULL) {
350 wr_cache = wr_cache->next;
351 num_wr--;
352 } else {
353 item = Memory_alloc(NULL, sizeof(WaitingReaders_t), 0, NULL);
354 }
355 return(item);
356 }
358 static void put_wr(WaitingReaders_t *item)
359 {
360 if (num_wr >= CACHE_NUM) {
361 Memory_free(NULL, item, sizeof(*item));
362 } else {
363 item->next = wr_cache;
364 wr_cache = item;
365 num_wr++;
366 }
367 return;
368 }
369 /* The following functions are used for list/waiting reader management */
370 static MsgList_t *find_nl(int index)
371 {
372 MsgList_t *item=NULL;
373 item = rpmsg_dce_state.head;
374 while (item) {
375 if (item->index == index)
376 return(item);
377 item = item->next;
378 }
379 return(item);
380 }
382 /* we have the right locks when calling this function */
383 /*!
384 * @brief Function to enqueue a notify list item.
385 *
386 * @param index Index of the client process associated with the item.
387 *
388 * @sa find_nl
389 * get_nl
390 */
391 static int enqueue_notify_list(int index)
392 {
393 MsgList_t *item;
394 item = find_nl(index);
395 if (item == NULL) {
396 item = get_nl();
397 if (item == NULL) {
398 return(-1);
399 }
400 item->next = NULL;
401 item->index = index;
402 item->num_events=1;
403 if (rpmsg_dce_state.head == NULL) {
404 rpmsg_dce_state.head = item;
405 rpmsg_dce_state.tail = item;
406 item->prev = NULL;
407 }
408 else {
409 item->prev = rpmsg_dce_state.tail;
410 rpmsg_dce_state.tail->next = item;
411 rpmsg_dce_state.tail = item;
412 }
413 }
414 else {
415 item->num_events++;
416 }
417 return(0);
418 }
420 /* we have the right locks when calling this function */
421 /*!
422 * @brief Function to dequeue a notify list item.
423 *
424 * @param item The item to remove.
425 *
426 * @sa put_nl
427 */
428 static inline int dequeue_notify_list_item(MsgList_t *item)
429 {
430 int index;
431 if (item == NULL) {
432 return(-1);
433 }
434 index = item->index;
435 item->num_events--;
436 if (item->num_events > 0) {
437 return(index);
438 }
439 if (rpmsg_dce_state.head == item) {
440 // removing head
441 rpmsg_dce_state.head = item->next;
442 if (rpmsg_dce_state.head != NULL) {
443 rpmsg_dce_state.head->prev = NULL;
444 }
445 else {
446 // removing head and tail
447 rpmsg_dce_state.tail = NULL;
448 }
449 }
450 else {
451 item->prev->next = item->next;
452 if (item->next != NULL) {
453 item->next->prev = item->prev;
454 }
455 else {
456 // removing tail
457 rpmsg_dce_state.tail = item->prev;
458 }
459 }
460 put_nl(item);
461 return(index);
462 }
464 /* we have the right locks when calling this function */
465 /*!
466 * @brief Function to add a waiting reader to the list.
467 *
468 * @param index Index of the client process waiting reader to add.
469 * @param rcvid Receive ID of the client process that was passed
470 * when the client called read().
471 *
472 * @sa None
473 */
474 static int enqueue_waiting_reader(int index, int rcvid)
475 {
476 WaitingReaders_t *item;
477 item = get_wr();
478 if (item == NULL) {
479 return(-1);
480 }
481 item->rcvid = rcvid;
482 item->next = NULL;
483 if (rpmsg_dce_state.eventState [index].head == NULL) {
484 rpmsg_dce_state.eventState [index].head = item;
485 rpmsg_dce_state.eventState [index].tail = item;
486 }
487 else {
488 rpmsg_dce_state.eventState [index].tail->next = item;
489 rpmsg_dce_state.eventState [index].tail = item;
490 }
491 return(EOK);
492 }
494 /* we have the right locks when calling this function */
495 /* caller frees item */
496 /*!
497 * @brief Function to remove a waiting reader from the list.
498 *
499 * @param index Index of the client process waiting reader to dequeue.
500 *
501 * @sa None
502 */
503 static WaitingReaders_t *dequeue_waiting_reader(int index)
504 {
505 WaitingReaders_t *item = NULL;
506 if (rpmsg_dce_state.eventState [index].head) {
507 item = rpmsg_dce_state.eventState [index].head;
508 rpmsg_dce_state.eventState [index].head = rpmsg_dce_state.eventState [index].head->next;
509 if (rpmsg_dce_state.eventState [index].head == NULL) {
510 rpmsg_dce_state.eventState [index].tail = NULL;
511 }
512 }
513 return(item);
514 }
516 /*!
517 * @brief Function find a specified waiting reader.
518 *
519 * @param index Index of the client process waiting for the message.
520 * @param rcvid Receive ID of the client process that was passed
521 * when the client called read().
522 *
523 * @sa None
524 */
526 static WaitingReaders_t *find_waiting_reader(int index, int rcvid)
527 {
528 WaitingReaders_t *item = NULL;
529 WaitingReaders_t *prev = NULL;
530 if (rpmsg_dce_state.eventState [index].head) {
531 item = rpmsg_dce_state.eventState [index].head;
532 while (item) {
533 if (item->rcvid == rcvid) {
534 /* remove item from list */
535 if (prev)
536 prev->next = item->next;
537 if (item == rpmsg_dce_state.eventState [index].head)
538 rpmsg_dce_state.eventState [index].head = item->next;
539 break;
540 }
541 else {
542 prev = item;
543 item = item->next;
544 }
545 }
546 }
547 return item;
548 }
550 /*!
551 * @brief Function used to check if there is a waiting reader with an
552 * event (message) ready to be delivered.
553 *
554 * @param index Index of the client process waiting for the message.
555 * @param item Pointer to the waiting reader.
556 *
557 * @sa dequeue_notify_list_item
558 * dequeue_waiting_reader
559 */
561 static int find_available_reader_and_event(int *index, WaitingReaders_t **item)
562 {
563 MsgList_t *temp;
564 if (rpmsg_dce_state.head == NULL) {
565 return(0);
566 }
567 temp = rpmsg_dce_state.head;
568 while (temp) {
569 if (rpmsg_dce_state.eventState [temp->index].head) {
570 // event and reader found
571 if (dequeue_notify_list_item(temp) >= 0) {
572 *index = temp->index;
573 *item = dequeue_waiting_reader(temp->index);
574 }
575 else {
576 /* error occurred, return 0 as item has not been set */
577 return(0);
578 }
579 return(1);
580 }
581 temp = temp->next;
582 }
583 return(0);
584 }
586 /*!
587 * @brief Function used to deliver the notification to the client that
588 * it has received a message.
589 *
590 * @param index Index of the client process receiving hte message.
591 * @param rcvid Receive ID of the client process that was passed
592 * when the client called read().
593 *
594 * @sa put_uBuf
595 */
597 static void deliver_notification(int index, int rcvid)
598 {
599 int err = EOK;
600 rpmsg_dce_EventPacket * uBuf = NULL;
601 struct omx_msg_hdr * hdr = NULL;
603 uBuf = (rpmsg_dce_EventPacket *) List_get (rpmsg_dce_state.eventState [index].bufList);
604 hdr = (struct omx_msg_hdr *)uBuf->data;
606 /* Let the check remain at run-time. */
607 if (uBuf != NULL) {
608 err = MsgReply(rcvid, hdr->len, hdr->data, hdr->len);
609 if (err == -1)
610 perror("deliver_notification: MsgReply");
611 /* Free the processed event callback packet. */
612 put_uBuf(uBuf);
613 }
614 else {
615 MsgReply(rcvid, EOK, NULL, 0);
616 }
617 return;
618 }
/*!
 *  @brief  Thread used for notifying waiting readers of messages.
 *
 *  Runs while rpmsg_dce_state.run is non-zero. Holds rpmsg_dce_state.lock
 *  except around deliver_notification(), which performs a blocking
 *  MsgReply and must not be called under the lock. Woken via
 *  rpmsg_dce_state.cond from _rpmsg_dce_addBufByPid.
 *
 *  @param  arg  Thread-specific private arg (unused here).
 *
 *  @return NULL on exit.
 *
 *  @sa     find_available_reader_and_event
 *          deliver_notification
 *          put_wr
 */
static void *notifier_thread(void *arg)
{
    int status;
    int index;
    WaitingReaders_t *item = NULL;
    pthread_mutex_lock(&rpmsg_dce_state.lock);
    while (rpmsg_dce_state.run) {
        status = find_available_reader_and_event(&index, &item);
        if ( (status == 0) || (item == NULL) ) {
            /* Nothing deliverable: sleep until an event is enqueued. */
            status = pthread_cond_wait(&rpmsg_dce_state.cond, &rpmsg_dce_state.lock);
            if ((status != EOK) && (status != EINTR)) {
                // false wakeup
                break;
            }
            /* Re-check after wakeup; spurious wakeups just loop again. */
            status = find_available_reader_and_event(&index, &item);
            if ( (status == 0) || (item == NULL) ) {
                continue;
            }
        }
        pthread_mutex_unlock(&rpmsg_dce_state.lock);
        // we have unlocked, and now we have an event to deliver
        // we deliver one event at a time, relock, check and continue
        deliver_notification(index, item->rcvid);
        pthread_mutex_lock(&rpmsg_dce_state.lock);
        /* Recycle the reader node only after delivery completed. */
        put_wr(item);
    }
    pthread_mutex_unlock(&rpmsg_dce_state.lock);
    return(NULL);
}
/*!
 *  @brief  Attach a process to rpmsg-dce user support framework.
 *
 *  Registers the dce instance in the module-wide eventState table:
 *  bumps the refCount when the instance is already registered, otherwise
 *  claims a free slot and creates the per-client event buffer list.
 *
 *  @param  dce  dce instance object to register
 *
 *  @return EOK on success, -ENOMEM when all MAX_PROCESSES slots are used
 *
 *  @sa     _rpmsg_dce_detach
 */
static
Int
_rpmsg_dce_attach (rpmsg_dce_object *dce)
{
    Int32                status   = EOK;
    Bool                 flag     = FALSE;
    Bool                 isInit   = FALSE;
    List_Object *        bufList  = NULL;
    IArg                 key      = 0;
    List_Params          listparams;
    UInt32               i;

    GT_1trace (curTrace, GT_ENTER, "_rpmsg_dce_attach", dce);

    key = IGateProvider_enter (rpmsg_dce_state.gateHandle);
    /* Already registered? Just take another reference. */
    for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
        if (rpmsg_dce_state.eventState [i].dce == dce) {
            rpmsg_dce_state.eventState [i].refCount++;
            isInit = TRUE;
            status = EOK;
            break;
        }
    }

    if (isInit == FALSE) {
        List_Params_init (&listparams);
        bufList = List_create (&listparams) ;
        /* Search for an available slot for user process. */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            if (rpmsg_dce_state.eventState [i].dce == NULL) {
                rpmsg_dce_state.eventState [i].dce = dce;
                rpmsg_dce_state.eventState [i].refCount = 1;
                rpmsg_dce_state.eventState [i].bufList = bufList;
                flag = TRUE;
                break;
            }
        }

        /* No free slots found. Let this check remain at run-time,
         * since it is dependent on user environment.
         */
        if (flag != TRUE) {
            /*! @retval Notify_E_RESOURCE Maximum number of
             supported user clients have already been registered. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "rpmsgDrv_attach",
                                 status,
                                 "Maximum number of supported user"
                                 " clients have already been "
                                 "registered.");
            if (bufList != NULL) {
                /* Undo the list created for the failed registration. */
                List_delete (&bufList);
            }
        }
    }
    IGateProvider_leave (rpmsg_dce_state.gateHandle, key);

    GT_1trace (curTrace, GT_LEAVE, "rpmsgDrv_attach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed. */
    return status ;
}
/*!
 *  @brief  This function adds a data to a registered process.
 *
 *  Copies the payload into an event packet, queues it on the client's
 *  bufList, then either replies immediately to a waiting reader or
 *  records a pending notification (waking the notifier thread and any
 *  ionotify/select clients).
 *
 *  @param  dce   Dce object associated with the client
 *  @param  src   Source address (endpoint) sending the data
 *  @param  pid   Process ID associated with the client
 *  @param  data  Data to be added
 *  @param  len   Length of data to be added
 *
 *  @return EOK on success, -ENOMEM on allocation or lookup failure
 *
 *  @sa
 */
Int
_rpmsg_dce_addBufByPid (rpmsg_dce_object *dce,
                        UInt32            src,
                        UInt32            pid,
                        void *            data,
                        UInt32            len)
{
    Int32                   status = EOK;
    Bool                    flag   = FALSE;
    rpmsg_dce_EventPacket * uBuf   = NULL;
    IArg                    key;
    UInt32                  i;
    WaitingReaders_t *item;
    MsgList_t *msgItem;

    GT_5trace (curTrace,
               GT_ENTER,
               "_rpmsg_dce_addBufByPid",
               dce,
               src,
               pid,
               data,
               len);

    GT_assert (curTrace, (rpmsg_dce_state.isSetup == TRUE));

    key = IGateProvider_enter (rpmsg_dce_state.gateHandle);
    /* Find the registration for this callback */
    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_dce_state.eventState [i].dce == dce) {
            flag = TRUE;
            break;
        }
    }
    IGateProvider_leave (rpmsg_dce_state.gateHandle, key);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (flag != TRUE) {
        /*! @retval ENOMEM Could not find a registered handler
         for this process. */
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "_rpmsgDrv_addBufByPid",
                             status,
                             "Could not find a registered handler "
                             "for this process.!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        /* Allocate memory for the buf */
        pthread_mutex_lock(&rpmsg_dce_state.lock);
        uBuf = get_uBuf();
        pthread_mutex_unlock(&rpmsg_dce_state.lock);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (uBuf == NULL) {
            /*! @retval Notify_E_MEMORY Failed to allocate memory for event
             packet for received callback. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_rpmsgDrv_addBufByPid",
                                 status,
                                 "Failed to allocate memory for event"
                                 " packet for received callback.!");
        }
        else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            List_elemClear (&(uBuf->element));
            GT_assert (curTrace,
                       (rpmsg_dce_state.eventState [i].bufList != NULL));

            /* Copy the payload into the packet before queueing it. */
            if (data) {
                Memory_copy(uBuf->data, data, len);
            }
            uBuf->len = len;

            List_put (rpmsg_dce_state.eventState [i].bufList,
                      &(uBuf->element));
            pthread_mutex_lock(&rpmsg_dce_state.lock);
            item = dequeue_waiting_reader(i);
            if (item) {
                // there is a waiting reader
                /* Reply to the blocked read() right away; no need to
                 * involve the notifier thread. */
                deliver_notification(i, item->rcvid);
                put_wr(item);
                pthread_mutex_unlock(&rpmsg_dce_state.lock);
                status = EOK;
            }
            else {
                /* No reader yet: record a pending notification. */
                if (enqueue_notify_list(i) < 0) {
                    pthread_mutex_unlock(&rpmsg_dce_state.lock);
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "_rpmsgDrv_addBufByPid",
                                         status,
                                         "Failed to allocate memory for notifier");
                }
                else {
                    msgItem = find_nl(i);
                    /* TODO: dce could be NULL in some cases */
                    /* Trigger any armed ionotify/select on this client. */
                    if (dce && msgItem) {
                        if (IOFUNC_NOTIFY_INPUT_CHECK(dce->notify, msgItem->num_events, 0)) {
                            iofunc_notify_trigger(dce->notify, msgItem->num_events, IOFUNC_NOTIFY_INPUT);
                        }
                    }
                    status = EOK;
                    /* Wake the notifier thread to serve future readers. */
                    pthread_cond_signal(&rpmsg_dce_state.cond);
                    pthread_mutex_unlock(&rpmsg_dce_state.lock);
                }
            }
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        }
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_1trace (curTrace, GT_LEAVE, "_rpmsgDrv_addBufByPid", status);

    return status;
}
/*!
 *  @brief  This function implements the callback registered with
 *          MessageQCopy_create for each client. This function
 *          adds the message from the remote proc to a list
 *          where it is routed to the appropriate waiting reader.
 *
 *  @param  handle   MessageQCopy handle the message arrived on
 *  @param  data     received payload (starts with struct omx_msg_hdr)
 *  @param  len      payload length in bytes
 *  @param  priv     the rpmsg_dce_object registered at create time
 *  @param  src      source endpoint of the message
 *  @param  srcProc  source processor id
 *
 *  @sa
 */
Void
_rpmsg_dce_cb (MessageQCopy_Handle handle, void * data, int len, void * priv, UInt32 src, UInt16 srcProc)
{
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    Int32                   status = 0;
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    rpmsg_dce_object * dce = NULL;
    struct omx_msg_hdr * msg_hdr = NULL;

    GT_6trace (curTrace,
               GT_ENTER,
               "_rpmsg_dce_cb",
               handle,
               data,
               len,
               priv,
               src,
               srcProc);

    dce = (rpmsg_dce_object *) priv;
    msg_hdr = (struct omx_msg_hdr *)data;

    /* Only raw messages are queued; all other types are dropped. */
    switch (msg_hdr->type) {
        case OMX_RAW_MSG:
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            status =
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            _rpmsg_dce_addBufByPid (dce,
                                    src,
                                    dce->pid,
                                    data,
                                    len);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            if (status < 0) {
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "_rpmsg_dce_cb",
                                     status,
                                     "Failed to add callback packet for pid");
            }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            break;
        default:
            break;
    }

    GT_0trace (curTrace, GT_LEAVE, "_rpmsg_dce_cb");
}
/**
 * Handler for ocb_calloc() requests.
 *
 * Special handler for ocb_calloc() requests that we export for control. An
 * open request from the client will result in a call to our special ocb_calloc
 * handler. This function attaches the client's pid using _rpmsg_dce_attach
 * and allocates client-specific information. This function creates an
 * endpoint for the client to communicate with the dCE server on the
 * remote core also.
 *
 * \param ctp     Thread's associated context information.
 * \param device  Device attributes structure.
 *
 * \return Pointer to an iofunc_ocb_t OCB structure (NULL with errno set
 *         on failure).
 */
IOFUNC_OCB_T *
rpmsg_dce_ocb_calloc (resmgr_context_t * ctp, IOFUNC_ATTR_T * device)
{
    rpmsg_dce_ocb_t *ocb = NULL;
    rpmsg_dce_object *obj = NULL;
    struct _msg_info cl_info;
    rpmsg_dce_dev_t * dev = NULL;
    int i = 0;
    Bool found = FALSE;
    char path1[20];
    char path2[20];
    Char data[MessageQCopy_BUFSIZE];
    dce_conn_req *req = (dce_conn_req *)data;
    Int status = 0;

    GT_2trace (curTrace, GT_ENTER, "rpmsg_dce_ocb_calloc",
               ctp, device);

    /* Allocate the OCB */
    ocb = (rpmsg_dce_ocb_t *) calloc (1, sizeof (rpmsg_dce_ocb_t));
    if (ocb == NULL){
        errno = ENOMEM;
        return (NULL);
    }

    ocb->pid = ctp->info.pid;

    /* Allocate memory for the rpmsg object. */
    obj = Memory_calloc (NULL, sizeof (rpmsg_dce_object), 0u, NULL);
    if (obj == NULL) {
        errno = ENOMEM;
        free(ocb);
        return (NULL);
    }
    else {
        ocb->dce = obj;
        IOFUNC_NOTIFY_INIT(obj->notify);
        /* determine conn and procId for communication based on which device was opened */
        MsgInfo(ctp->rcvid, &cl_info);
        /* Match the opened pathname against each registered connection's
         * device to find the remote processor this open targets.
         * NOTE(review): path1/path2 are only 20 bytes -- assumes device
         * pathnames fit; confirm against the names used at attach time. */
        resmgr_pathname(ctp->id, 0, path1, sizeof(path1));
        for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
            if (rpmsg_dce_state.objects[i] != NULL) {
                dev = rpmsg_dce_state.objects[i]->dev;
                resmgr_pathname(dev->rpmsg_dce.resmgr_id, 0, path2, sizeof(path2));
                if (!strcmp(path1, path2)) {
                    found = TRUE;
                    break;
                }
            }
        }
        /* NOTE(review): when no connection matches (found == FALSE) the
         * OCB is returned with conn/mq left NULL -- verify downstream
         * handlers tolerate that. */
        if (found) {
            obj->conn = rpmsg_dce_state.objects[i];
            obj->procId = obj->conn->procId;
            obj->pid = ctp->info.pid;
            /* Per-client endpoint; incoming messages land in _rpmsg_dce_cb. */
            obj->mq = MessageQCopy_create (MessageQCopy_ADDRANY, NULL, _rpmsg_dce_cb, obj, &obj->addr);
            if (obj->mq == NULL) {
                errno = ENOMEM;
                free(obj);
                free(ocb);
                return (NULL);
            }
            else {
                if (_rpmsg_dce_attach (obj) < 0) {
                    errno = ENOMEM;
                    MessageQCopy_delete (&obj->mq);
                    free(obj);
                    free(ocb);
                    return (NULL);
                }
                else {
                    /* Handshake: announce this endpoint to the remote DCE
                     * server and wait (up to 5s) for its acknowledgment. */
                    req->type = DCE_CONN_REQ;
                    req->addr = obj->addr;
                    status = MessageQCopy_send(obj->procId, MultiProc_self(),
                                               rpmsg_dce_state.conn_remote_endpoint,
                                               rpmsg_dce_state.conn_endpoint, req,
                                               sizeof(dce_conn_req), TRUE);
                    if (status < 0) {
                        errno = ENOMEM;
                        MessageQCopy_delete (&obj->mq);
                        free(obj);
                        free(ocb);
                        return (NULL);
                    }
                    else {
                        status = OsalSemaphore_pend(rpmsg_dce_state.sem, 5000);
                        if (status < 0) {
                            errno = ENOMEM;
                            MessageQCopy_delete (&obj->mq);
                            free(obj);
                            free(ocb);
                            return (NULL);
                        }
                    }
                }
            }
        }
    }

    GT_1trace (curTrace, GT_LEAVE, "rpmsg_dce_ocb_calloc", ocb);

    return (IOFUNC_OCB_T *)(ocb);
}
1050 /*!
1051 * @brief Detach a process from rpmsg-dce user support framework.
1052 *
1053 * @param pid Process identifier
1054 *
1055 * @sa _rpmsg_dce_attach
1056 */
1057 static
1058 Int
1059 _rpmsg_dce_detach (rpmsg_dce_object *dce, Bool force)
1060 {
1061 Int32 status = EOK;
1062 Int32 tmpStatus = EOK;
1063 Bool flag = FALSE;
1064 List_Object * bufList = NULL;
1065 UInt32 i;
1066 IArg key;
1067 MsgList_t * item;
1068 WaitingReaders_t * wr = NULL;
1069 struct _msg_info info;
1071 GT_1trace (curTrace, GT_ENTER, "rpmsg_dce_detach", dce);
1073 key = IGateProvider_enter (rpmsg_dce_state.gateHandle);
1075 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1076 if (rpmsg_dce_state.eventState [i].dce == dce) {
1077 if (rpmsg_dce_state.eventState [i].refCount == 1) {
1078 rpmsg_dce_state.eventState [i].refCount = 0;
1080 flag = TRUE;
1081 break;
1082 }
1083 else {
1084 rpmsg_dce_state.eventState [i].refCount--;
1085 status = EOK;
1086 break;
1087 }
1088 }
1089 }
1090 IGateProvider_leave (rpmsg_dce_state.gateHandle, key);
1092 if (flag == TRUE) {
1093 key = IGateProvider_enter (rpmsg_dce_state.gateHandle);
1094 /* Last client being unregistered for this process. */
1095 rpmsg_dce_state.eventState [i].dce = NULL;
1097 /* Store in local variable to delete outside lock. */
1098 bufList = rpmsg_dce_state.eventState [i].bufList;
1100 rpmsg_dce_state.eventState [i].bufList = NULL;
1102 IGateProvider_leave (rpmsg_dce_state.gateHandle, key);
1103 }
1105 if (flag != TRUE) {
1106 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1107 if (i == MAX_PROCESSES) {
1108 /*! @retval Notify_E_NOTFOUND The specified user process was
1109 not found registered with Notify Driver module. */
1110 status = -ENOMEM;
1111 GT_setFailureReason (curTrace,
1112 GT_4CLASS,
1113 "rpmsg_dce_detach",
1114 status,
1115 "The specified user process was not found"
1116 " registered with rpmsg Driver module.");
1117 }
1118 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1119 }
1120 else {
1121 if (bufList != NULL) {
1122 /* Dequeue waiting readers and reply to them */
1123 pthread_mutex_lock(&rpmsg_dce_state.lock);
1124 while ((wr = dequeue_waiting_reader(i)) != NULL) {
1125 /* Check if rcvid is still valid */
1126 if (MsgInfo(wr->rcvid, &info) != -1) {
1127 put_wr(wr);
1128 pthread_mutex_unlock(&rpmsg_dce_state.lock);
1129 MsgError(wr->rcvid, EINTR);
1130 pthread_mutex_lock(&rpmsg_dce_state.lock);
1131 }
1132 }
1133 /* Check for pending ionotify/select calls */
1134 if (dce) {
1135 if (IOFUNC_NOTIFY_INPUT_CHECK(dce->notify, 1, 0)) {
1136 iofunc_notify_trigger(dce->notify, 1, IOFUNC_NOTIFY_INPUT);
1137 }
1138 }
1140 /* Free event packets for any received but unprocessed events. */
1141 while ((item = find_nl(i)) != NULL) {
1142 if (dequeue_notify_list_item(item) >= 0) {
1143 rpmsg_dce_EventPacket * uBuf = NULL;
1145 uBuf = (rpmsg_dce_EventPacket *) List_get (bufList);
1147 /* Let the check remain at run-time. */
1148 if (uBuf != NULL) {
1149 put_uBuf(uBuf);
1150 }
1151 }
1152 }
1153 pthread_mutex_unlock(&rpmsg_dce_state.lock);
1155 /* Last client being unregistered with Notify module. */
1156 List_delete (&bufList);
1157 }
1159 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1160 if ((tmpStatus < 0) && (status >= 0)) {
1161 status = tmpStatus;
1162 GT_setFailureReason (curTrace,
1163 GT_4CLASS,
1164 "rpmsg_dce_detach",
1165 status,
1166 "Failed to delete termination semaphore!");
1167 }
1168 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1169 }
1171 GT_1trace (curTrace, GT_LEAVE, "rpmsg_dce_detach", status);
1173 /*! @retval Notify_S_SUCCESS Operation successfully completed */
1174 return status;
1175 }
1177 /**
1178 * Handler for ocb_free() requests.
1179 *
1180 * Special handler for ocb_free() requests that we export for control. A
1181 * close request from the client will result in a call to our special ocb_free
1182 * handler. This function detaches the client's pid using _rpmsg_dce_detach
1183 * and frees any client-specific information that was allocated.
1184 *
1185 * \param i_ocb OCB associated with client's session.
1186 *
 * \return None.
1190 */
1192 void
1193 rpmsg_dce_ocb_free (IOFUNC_OCB_T * i_ocb)
1194 {
1195 rpmsg_dce_ocb_t * ocb = (rpmsg_dce_ocb_t *)i_ocb;
1196 rpmsg_dce_object *obj;
1197 Char data[MessageQCopy_BUFSIZE];
1198 dce_conn_req *req = (dce_conn_req *)data;
1199 Int status = 0;
1201 if (ocb && ocb->dce) {
1202 obj = ocb->dce;
1203 req->type = DCE_DISCON_REQ;
1204 req->addr = obj->addr;
1205 status = MessageQCopy_send(obj->procId, MultiProc_self(),
1206 rpmsg_dce_state.conn_remote_endpoint,
1207 rpmsg_dce_state.conn_endpoint, req,
1208 sizeof(dce_conn_req), TRUE);
1209 if (status < 0) {
1210 GT_setFailureReason (curTrace,
1211 GT_4CLASS,
1212 "rpmsg_dce_ocb_free",
1213 status,
1214 "Failed to send disconnect msg!");
1215 }
1216 _rpmsg_dce_detach(ocb->dce, FALSE);
1217 if (obj->mq) {
1218 MessageQCopy_delete (&obj->mq);
1219 obj->mq = NULL;
1220 }
1221 free (obj);
1222 free (ocb);
1223 }
1224 }
1226 /**
1227 * Handler for close_ocb() requests.
1228 *
1229 * This function removes the notification entries associated with the current
1230 * client.
1231 *
1232 * \param ctp Thread's associated context information.
1233 * \param reserved This argument must be NULL.
1234 * \param ocb OCB associated with client's session.
1235 *
1236 * \return POSIX errno value.
1237 *
1238 * \retval EOK Success.
1239 */
1241 Int
1242 rpmsg_dce_close_ocb (resmgr_context_t *ctp, void *reserved, RESMGR_OCB_T *ocb)
1243 {
1244 rpmsg_dce_ocb_t * dce_ocb = (rpmsg_dce_ocb_t *)ocb;
1245 iofunc_notify_remove(ctp, dce_ocb->dce->notify);
1246 return (iofunc_close_ocb_default(ctp, reserved, ocb));
1247 }
1249 /**
1250 * Handler for read() requests.
1251 *
1252 * Handles special read() requests that we export for control. A read
1253 * request will get a message from the remote processor that is associated
1254 * with the client that is calling read().
1255 *
1256 * \param ctp Thread's associated context information.
1257 * \param msg The actual read() message.
1258 * \param ocb OCB associated with client's session.
1259 *
1260 * \return POSIX errno value.
1261 *
1262 * \retval EOK Success.
1263 * \retval EAGAIN Call is non-blocking and no messages available.
 * \retval ENOMEM Not enough memory to perform the read.
1265 */
int rpmsg_dce_read(resmgr_context_t *ctp, io_read_t *msg, RESMGR_OCB_T *i_ocb)
{
    Int status;
    Bool flag = FALSE;               /* TRUE once this client's slot is found */
    Int retVal = EOK;
    UInt32 i;                        /* index of the client's eventState slot */
    MsgList_t * item;
    Int nonblock;                    /* set by iofunc_read_verify from O_NONBLOCK */
    rpmsg_dce_ocb_t * ocb = (rpmsg_dce_ocb_t *)i_ocb;
    rpmsg_dce_object * dce = ocb->dce;

    /* Standard QNX verification of the read request (permissions, flags). */
    if ((status = iofunc_read_verify(ctp, msg, i_ocb, &nonblock)) != EOK)
        return (status);

    /* Locate the event-state slot registered for this client's dce object. */
    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_dce_state.eventState [i].dce == dce) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_dce_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&rpmsg_dce_state.lock);
            item = find_nl(i);
            /* A negative result means no event is queued for this slot.
             * NOTE(review): item may be NULL here; dequeue_notify_list_item
             * presumably returns < 0 for NULL — confirm in its definition. */
            if (dequeue_notify_list_item(item) < 0) {
                if (nonblock) {
                    /* Non-blocking read with nothing queued. */
                    pthread_mutex_unlock(&rpmsg_dce_state.lock);
                    return EAGAIN;
                }
                else {
                    /* Blocking read: park this rcvid on the waiting-readers
                     * queue; the notifier thread will reply later, so no
                     * reply is sent now (_RESMGR_NOREPLY). */
                    retVal = enqueue_waiting_reader(i, ctp->rcvid);
                    if (retVal == EOK) {
                        /* Wake the notifier thread so it notices the new
                         * waiting reader. */
                        pthread_cond_signal(&rpmsg_dce_state.cond);
                        pthread_mutex_unlock(&rpmsg_dce_state.lock);
                        return(_RESMGR_NOREPLY);
                    }
                    /* enqueue failed — report out-of-memory to the caller. */
                    retVal = ENOMEM;
                    pthread_mutex_unlock(&rpmsg_dce_state.lock);
                }
            }
            else {
                /* An event was already queued: reply to the client
                 * immediately with the dequeued notification. */
                deliver_notification(i, ctp->rcvid);
                pthread_mutex_unlock(&rpmsg_dce_state.lock);
                return(_RESMGR_NOREPLY);
            }
        }
    }

    /*! @retval Number-of-bytes-read Number of bytes read. */
    return retVal;
}
1324 /**
1325 * Handler for write() requests.
1326 *
1327 * Handles special write() requests that we export for control. A write()
1328 * request will send a message to the remote processor which is associated with
1329 * the client.
1330 *
1331 * \param ctp Thread's associated context information.
1332 * \param msg The actual write() message.
1333 * \param io_ocb OCB associated with client's session.
1334 *
1335 * \return POSIX errno value.
1336 *
1337 * \retval EOK Success.
 * \retval ENOMEM Not enough memory to perform the write.
1339 * \retval EIO MessageQCopy_send failed.
1340 * \retval EINVAL msg->i.bytes is negative.
1341 */
1343 int
1344 rpmsg_dce_write(resmgr_context_t *ctp, io_write_t *msg, RESMGR_OCB_T *io_ocb)
1345 {
1346 int status;
1347 char *buf;
1348 int bytes;
1349 rpmsg_dce_ocb_t * ocb = (rpmsg_dce_ocb_t *)io_ocb;
1350 rpmsg_dce_object * dce = ocb->dce;
1351 struct omx_msg_hdr * msg_hdr = NULL;
1353 if ((status = iofunc_write_verify(ctp, msg, io_ocb, NULL)) != EOK) {
1354 return (status);
1355 }
1357 bytes = ((int64_t) msg->i.nbytes) + sizeof(struct omx_msg_hdr) > MessageQCopy_BUFSIZE ?
1358 MessageQCopy_BUFSIZE - sizeof(struct omx_msg_hdr) : msg->i.nbytes;
1359 if (bytes < 0) {
1360 return EINVAL;
1361 }
1362 _IO_SET_WRITE_NBYTES (ctp, bytes);
1364 buf = (char *) malloc(bytes + sizeof(struct omx_msg_hdr));
1365 if (buf == NULL) {
1366 return (ENOMEM);
1367 }
1368 msg_hdr = (struct omx_msg_hdr *)buf;
1370 status = resmgr_msgread(ctp, msg_hdr->data, bytes, sizeof(msg->i));
1371 if (status != bytes) {
1372 free(buf);
1373 return (errno);
1374 }
1376 msg_hdr->type = OMX_RAW_MSG;
1377 msg_hdr->len = bytes;
1379 status = MessageQCopy_send(dce->conn->procId, MultiProc_self(),
1380 dce->conn->addr, dce->addr, buf,
1381 bytes + sizeof(struct omx_msg_hdr), TRUE);
1382 if (status < 0) {
1383 free(buf);
1384 return (EIO);
1385 }
1386 free (buf);
1388 return(EOK);
1389 }
1391 static
1392 Int
1393 _rpmsg_dce_getaddr(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_dce_ocb_t *ocb)
1394 {
1395 Int status = EOK;
1396 UInt32 * cargs = (UInt32 *)(_DEVCTL_DATA (msg->o));
1397 rpmsg_dce_object * dce = ocb->dce;
1399 if (ctp->info.dstmsglen - sizeof(msg->o) < sizeof (UInt32)) {
1400 status = (EINVAL);
1401 }
1403 else {
1404 *cargs = dce->addr;
1405 msg->o.ret_val = EOK;
1406 status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o) + sizeof(UInt32)));
1407 }
1409 return status;
1410 }
1412 Int
1413 rpmsg_dce_devctl(resmgr_context_t *ctp, io_devctl_t *msg, IOFUNC_OCB_T *i_ocb)
1414 {
1415 Int status = 0;
1416 rpmsg_dce_ocb_t *ocb = (rpmsg_dce_ocb_t *)i_ocb;
1418 if ((status = iofunc_devctl_default(ctp, msg, &ocb->hdr)) != _RESMGR_DEFAULT)
1419 return(_RESMGR_ERRNO(status));
1420 status = 0;
1422 switch (msg->i.dcmd)
1423 {
1424 case DCE_IOCGETADDR:
1425 status = _rpmsg_dce_getaddr (ctp, msg, ocb);
1426 break;
1427 default:
1428 status = (ENOSYS);
1429 break;
1430 }
1432 return status;
1433 }
1434 /**
1435 * Unblock read calls
1436 *
1437 * This function checks if the client is blocked on a read call and if so,
1438 * unblocks the client.
1439 *
1440 * \param ctp Thread's associated context information.
1441 * \param msg The pulse message.
1442 * \param ocb OCB associated with client's session.
1443 *
1444 * \return POSIX errno value.
1445 *
1446 * \retval EINTR The client has been unblocked.
1447 * \retval other The client has not been unblocked or the client was not
1448 * blocked.
1449 */
1451 int rpmsg_dce_read_unblock(resmgr_context_t *ctp, io_pulse_t *msg, iofunc_ocb_t *i_ocb)
1452 {
1453 UInt32 i;
1454 Bool flag = FALSE;
1455 WaitingReaders_t * wr;
1456 rpmsg_dce_ocb_t * ocb = (rpmsg_dce_ocb_t *)i_ocb;
1457 rpmsg_dce_object * dce = ocb->dce;
1459 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1460 if (rpmsg_dce_state.eventState [i].dce == dce) {
1461 flag = TRUE;
1462 break;
1463 }
1464 }
1466 /* Let the check remain at run-time. */
1467 if (flag == TRUE) {
1468 /* Let the check remain at run-time for handling any run-time
1469 * race conditions.
1470 */
1471 if (rpmsg_dce_state.eventState [i].bufList != NULL) {
1472 pthread_mutex_lock(&rpmsg_dce_state.lock);
1473 wr = find_waiting_reader(i, ctp->rcvid);
1474 if (wr) {
1475 put_wr(wr);
1476 pthread_mutex_unlock(&rpmsg_dce_state.lock);
1477 return (EINTR);
1478 }
1479 pthread_mutex_unlock(&rpmsg_dce_state.lock);
1480 }
1481 }
1483 return _RESMGR_NOREPLY;
1484 }
1486 /**
1487 * Handler for unblock() requests.
1488 *
1489 * Handles unblock request for the client which is requesting to no longer be
1490 * blocked on the rpmsg-dce driver.
1491 *
1492 * \param ctp Thread's associated context information.
1493 * \param msg The pulse message.
1494 * \param ocb OCB associated with client's session.
1495 *
1496 * \return POSIX errno value.
1497 *
1498 * \retval EINTR The rcvid has been unblocked.
1499 */
1501 int rpmsg_dce_unblock(resmgr_context_t *ctp, io_pulse_t *msg, RESMGR_OCB_T *ocb)
1502 {
1503 int status = _RESMGR_NOREPLY;
1504 struct _msg_info info;
1506 /*
1507 * Try to run the default unblock for this message.
1508 */
1509 if ((status = iofunc_unblock_default(ctp,msg, ocb)) != _RESMGR_DEFAULT) {
1510 return status;
1511 }
1513 /*
1514 * Check if rcvid is still valid and still has an unblock
1515 * request pending.
1516 */
1517 if (MsgInfo(ctp->rcvid, &info) == -1 ||
1518 !(info.flags & _NTO_MI_UNBLOCK_REQ)) {
1519 return _RESMGR_NOREPLY;
1520 }
1522 if (rpmsg_dce_read_unblock(ctp, msg, ocb) != _RESMGR_NOREPLY) {
1523 return _RESMGR_ERRNO(EINTR);
1524 }
1526 return _RESMGR_ERRNO(EINTR);
1527 }
1529 /**
1530 * Handler for notify() requests.
1531 *
1532 * Handles special notify() requests that we export for control. A notify
1533 * request results from the client calling select().
1534 *
1535 * \param ctp Thread's associated context information.
1536 * \param msg The actual notify() message.
1537 * \param ocb OCB associated with client's session.
1538 *
1539 * \return POSIX errno value.
1540 */
1542 Int rpmsg_dce_notify( resmgr_context_t *ctp, io_notify_t *msg, RESMGR_OCB_T *ocb)
1543 {
1544 rpmsg_dce_ocb_t * dce_ocb = (rpmsg_dce_ocb_t *)ocb;
1545 int trig;
1546 int i = 0;
1547 Bool flag = FALSE;
1548 MsgList_t * item = NULL;
1549 int status = EOK;
1550 rpmsg_dce_object * dce = dce_ocb->dce;
1552 trig = _NOTIFY_COND_OUTPUT; /* clients can give us data */
1554 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1555 if (rpmsg_dce_state.eventState [i].dce == dce) {
1556 flag = TRUE;
1557 break;
1558 }
1559 }
1561 pthread_mutex_lock(&rpmsg_dce_state.lock);
1562 /* Let the check remain at run-time. */
1563 if (flag == TRUE) {
1564 /* Let the check remain at run-time for handling any run-time
1565 * race conditions.
1566 */
1567 if (rpmsg_dce_state.eventState [i].bufList != NULL) {
1568 item = find_nl(i);
1569 if (item && item->num_events > 0) {
1570 trig |= _NOTIFY_COND_INPUT;
1571 }
1572 }
1573 }
1574 status = iofunc_notify(ctp, msg, dce_ocb->dce->notify, trig, NULL, NULL);
1575 pthread_mutex_unlock(&rpmsg_dce_state.lock);
1576 return status;
1577 }
1579 /**
1580 * Detaches an rpmsg-dce resource manager device name.
1581 *
1582 * \param dev The device to detach.
1583 *
1584 * \return POSIX errno value.
1585 */
1587 static
1588 Void
1589 _deinit_rpmsg_dce_device (rpmsg_dce_dev_t * dev)
1590 {
1591 resmgr_detach(syslink_dpp, dev->rpmsg_dce.resmgr_id, 0);
1593 pthread_mutex_destroy(&dev->rpmsg_dce.mutex);
1595 free (dev);
1597 return;
1598 }
1600 /**
1601 * Initializes and attaches rpmsg-dce resource manager functions to an
1602 * rpmsg-dce device name.
1603 *
1604 * \param num The number to append to the end of the device name.
1605 *
1606 * \return Pointer to the created rpmsg_dce_dev_t device.
1607 */
static
rpmsg_dce_dev_t *
_init_rpmsg_dce_device (int num)
{
    iofunc_attr_t * attr;
    resmgr_attr_t resmgr_attr;
    rpmsg_dce_dev_t * dev = NULL;

    dev = malloc(sizeof(*dev));
    if (dev == NULL) {
        return NULL;
    }

    /* Resource-manager limits: up to 10 IOV parts per reply, 2 KiB max
     * message size. */
    memset(&resmgr_attr, 0, sizeof resmgr_attr);
    resmgr_attr.nparts_max = 10;
    resmgr_attr.msg_max_size = 2048;

    /* Mount attributes: no setuid/exec semantics on this device. */
    memset(&dev->rpmsg_dce.mattr, 0, sizeof(iofunc_mount_t));
    dev->rpmsg_dce.mattr.flags = ST_NOSUID | ST_NOEXEC;
    dev->rpmsg_dce.mattr.conf = IOFUNC_PC_CHOWN_RESTRICTED |
                                IOFUNC_PC_NO_TRUNC |
                                IOFUNC_PC_SYNC_IO;
    dev->rpmsg_dce.mattr.funcs = &dev->rpmsg_dce.mfuncs;

    /* Hook custom OCB allocation/free so each open() gets a
     * per-connection rpmsg_dce_ocb_t. */
    memset(&dev->rpmsg_dce.mfuncs, 0, sizeof(iofunc_funcs_t));
    dev->rpmsg_dce.mfuncs.nfuncs = _IOFUNC_NFUNCS;
    dev->rpmsg_dce.mfuncs.ocb_calloc = rpmsg_dce_ocb_calloc;
    dev->rpmsg_dce.mfuncs.ocb_free = rpmsg_dce_ocb_free;

    /* Start from default connect/IO tables, then override the handlers
     * this driver implements. */
    iofunc_func_init(_RESMGR_CONNECT_NFUNCS, &dev->rpmsg_dce.cfuncs,
                     _RESMGR_IO_NFUNCS, &dev->rpmsg_dce.iofuncs);
    /* NOTE(review): mode 0777 makes the device world-writable — confirm
     * this is intentional rather than a placeholder. */
    iofunc_attr_init(attr = &dev->rpmsg_dce.cattr, S_IFCHR | 0777, NULL, NULL);
    dev->rpmsg_dce.iofuncs.unblock = rpmsg_dce_unblock;
    dev->rpmsg_dce.iofuncs.devctl = rpmsg_dce_devctl;
    dev->rpmsg_dce.iofuncs.notify = rpmsg_dce_notify;
    dev->rpmsg_dce.iofuncs.close_ocb = rpmsg_dce_close_ocb;
    dev->rpmsg_dce.iofuncs.read = rpmsg_dce_read;
    dev->rpmsg_dce.iofuncs.write = rpmsg_dce_write;
    attr->mount = &dev->rpmsg_dce.mattr;
    iofunc_time_update(attr);
    pthread_mutex_init(&dev->rpmsg_dce.mutex, NULL);

    /* Publish /dev/rpmsg-dce<num>; on failure undo the mutex and the
     * allocation so no partial device is left behind. */
    snprintf (dev->rpmsg_dce.device_name, _POSIX_PATH_MAX, "/dev/rpmsg-dce%d", num);
    if (-1 == (dev->rpmsg_dce.resmgr_id =
                   resmgr_attach(syslink_dpp, &resmgr_attr,
                                 dev->rpmsg_dce.device_name, _FTYPE_ANY, 0,
                                 &dev->rpmsg_dce.cfuncs,
                                 &dev->rpmsg_dce.iofuncs, attr))) {
        pthread_mutex_destroy(&dev->rpmsg_dce.mutex);
        free(dev);
        return(NULL);
    }

    return(dev);
}
1662 /**
1663 * Callback passed to MessageQCopy_registerNotify.
1664 *
1665 * This callback is called when a remote processor creates a MessageQCopy
1666 * handle with the same name as the local MessageQCopy handle and then
1667 * calls NameMap_register to notify the HOST of the handle.
1668 *
1669 * \param handle The remote handle.
1670 * \param procId The remote proc ID of the remote handle.
1671 * \param endpoint The endpoint address of the remote handle.
1672 *
1673 * \return None.
1674 */
1676 static
1677 Void
1678 _rpmsg_dce_notify_cb (MessageQCopy_Handle handle, UInt16 procId,
1679 UInt32 endpoint, Char * desc, Bool create)
1680 {
1681 Int i = 0;
1682 Bool found = FALSE;
1683 rpmsg_dce_conn_object * obj = NULL;
1685 for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
1686 if (rpmsg_dce_state.objects[i] == NULL) {
1687 found = TRUE;
1688 break;
1689 }
1690 }
1692 if (found) {
1693 /* found a space to save this mq handle, allocate memory */
1694 obj = Memory_calloc (NULL, sizeof (rpmsg_dce_conn_object), 0x0, NULL);
1695 if (obj) {
1696 /* store the object in the module info */
1697 rpmsg_dce_state.objects[i] = obj;
1699 /* store the mq info in the object */
1700 obj->mq = handle;
1701 obj->procId = procId;
1702 obj->addr = endpoint;
1704 /* create a /dev/rpmsg-dce instance for users to open */
1705 obj->dev = _init_rpmsg_dce_device(i);
1706 if (obj->dev == NULL) {
1707 Osal_printf("Failed to create rpmsg-dce%d", i);
1708 Memory_free(NULL, obj, sizeof(rpmsg_dce_object));
1709 }
1710 }
1711 }
1712 }
/**
 * Callback passed to MessageQCopy_registerNotify for the "dCE_conn"
 * handle: records the remote conn endpoint address, which is later used
 * as the destination for disconnect requests (see rpmsg_dce_ocb_free).
 */
static
Void
_rpmsg_dce_conn_notify_cb (MessageQCopy_Handle handle, UInt16 procId,
                           UInt32 endpoint, Char * desc, Bool create)
{
    /* NOTE(review): a single module-wide endpoint is stored — if multiple
     * remote procs register, the last one wins; confirm single-remote
     * assumption. */
    rpmsg_dce_state.conn_remote_endpoint = endpoint;
}
1722 /**
1723 * Callback passed to MessageQCopy_create for the module.
1724 *
1725 * This callback is called when a message is received for the rpmsg-dce
1726 * module. This callback will never be called, since each client connection
 * gets its own endpoint for message passing.
1728 *
1729 * \param handle The local MessageQCopy handle.
1730 * \param data Data message
1731 * \param len Length of data message
1732 * \param priv Private information for the endpoint
1733 * \param src Remote endpoint sending this message
1734 * \param srcProc Remote proc ID sending this message
1735 *
1736 * \return None.
1737 */
static
Void
_rpmsg_dce_module_cb (MessageQCopy_Handle handle, void * data, int len,
                      void * priv, UInt32 src, UInt16 srcProc)
{
    /* Trace-only stub: per the block comment above, messages are expected
     * to arrive on per-connection endpoints, not the module endpoint, so
     * this should never fire in normal operation. */
    Osal_printf ("_rpmsg_dce_module_cb callback");
}
1747 /**
1748 * Callback passed to MessageQCopy_create for the module conn handler.
1749 *
1750 * This callback is called when a message is received for the rpmsg-dce
1751 * conn addr. This callback will never be called, since the remote endpoint
1752 * never sends a response.
1753 *
1754 * \param handle The local MessageQCopy handle.
1755 * \param data Data message
1756 * \param len Length of data message
1757 * \param priv Private information for the endpoint
1758 * \param src Remote endpoint sending this message
1759 * \param srcProc Remote proc ID sending this message
1760 *
1761 * \return None.
 */
static
Void
_rpmsg_dce_conn_cb (MessageQCopy_Handle handle, void * data, int len,
                    void * priv, UInt32 src, UInt16 srcProc)
{
    /* Trace the arrival, then post the module semaphore to wake whichever
     * thread is waiting on rpmsg_dce_state.sem (the waiter is outside this
     * chunk — presumably the connect path; confirm). */
    Osal_printf ("_rpmsg_dce_conn_cb callback");
    OsalSemaphore_post(rpmsg_dce_state.sem);
}
1771 /*!
1772 * @brief Module setup function.
1773 *
1774 * @sa rpmsg_dce_destroy
1775 */
Int
rpmsg_dce_setup (Void)
{
    UInt16 i;
    List_Params listparams;
    Int status = 0;
    Error_Block eb;
    pthread_attr_t thread_attr;
    struct sched_param sched_param;

    GT_0trace (curTrace, GT_ENTER, "rpmsg_dce_setup");

    Error_init(&eb);

    List_Params_init (&listparams);

    /* Gate protecting the module's shared event state. */
    rpmsg_dce_state.gateHandle = (IGateProvider_Handle)
                 GateSpinlock_create ((GateSpinlock_Handle) NULL, &eb);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (rpmsg_dce_state.gateHandle == NULL) {
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "_rpmsg_dce_setup",
                             status,
                             "Failed to create spinlock gate!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        /* Reset all per-client event-state slots. */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            rpmsg_dce_state.eventState [i].bufList = NULL;
            rpmsg_dce_state.eventState [i].dce = NULL;
            rpmsg_dce_state.eventState [i].refCount = 0;
            rpmsg_dce_state.eventState [i].head = NULL;
            rpmsg_dce_state.eventState [i].tail = NULL;
        }

        /* Notifier thread: explicit low real-time priority, round-robin. */
        pthread_attr_init(&thread_attr );
        sched_param.sched_priority = PRIORITY_REALTIME_LOW;
        pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(&thread_attr, SCHED_RR);
        pthread_attr_setschedparam(&thread_attr, &sched_param);

        /* run must be TRUE before the notifier thread starts; it is reset
         * to FALSE on any failure below so the thread can exit. */
        rpmsg_dce_state.run = TRUE;
        if (pthread_create(&rpmsg_dce_state.nt, &thread_attr, notifier_thread, NULL) == EOK) {
            pthread_setname_np(rpmsg_dce_state.nt, "rpmsg-dce-notifier");

            /* Initialize the driver mapping array. */
            Memory_set (&rpmsg_dce_state.objects,
                        0,
                        (sizeof (rpmsg_dce_conn_object *)
                         * MultiProc_MAXPROCESSORS));

            /* create a local handle and register for notifications with MessageQCopy */
            rpmsg_dce_state.mqHandle = MessageQCopy_create (
                                               MessageQCopy_ADDRANY,
                                               "dCE",
                                               _rpmsg_dce_module_cb,
                                               NULL,
                                               &rpmsg_dce_state.endpoint);
            if (rpmsg_dce_state.mqHandle == NULL) {
                /*! @retval DCE_FAIL Failed to create MessageQCopy handle! */
                status = -ENOMEM;
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "rpmsg_dce_setup",
                                     status,
                                     "Failed to create MessageQCopy handle!");
            }
            else {
                /* TBD: This could be replaced with a messageqcopy_open type call, one for
                 * each core */
                status = MessageQCopy_registerNotify (rpmsg_dce_state.mqHandle,
                                                      _rpmsg_dce_notify_cb);
                if (status < 0) {
                    MessageQCopy_delete (&rpmsg_dce_state.mqHandle);
                    /*! @retval DCE_FAIL Failed to register MQCopy handle! */
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "rpmsg_dce_setup",
                                         status,
                                         "Failed to register MQCopy handle!");
                }
                else {
                    /* Second endpoint dedicated to connect/disconnect
                     * control messages. */
                    rpmsg_dce_state.conn_handle = MessageQCopy_create (
                                                       MessageQCopy_ADDRANY,
                                                       "dCE_conn",
                                                       _rpmsg_dce_conn_cb,
                                                       NULL,
                                                       &rpmsg_dce_state.conn_endpoint);
                    if (rpmsg_dce_state.conn_handle == NULL) {
                        /*! @retval DCE_FAIL Failed to create MessageQCopy handle! */
                        status = -ENOMEM;
                        GT_setFailureReason (curTrace,
                                             GT_4CLASS,
                                             "rpmsg_dce_setup",
                                             status,
                                             "Failed to create MessageQCopy conn handle!");
                    }
                    else {
                        status = MessageQCopy_registerNotify (rpmsg_dce_state.conn_handle,
                                                              _rpmsg_dce_conn_notify_cb);
                        if (status < 0) {
                            /* Unwind both handles created so far. */
                            MessageQCopy_delete (&rpmsg_dce_state.conn_handle);
                            MessageQCopy_delete (&rpmsg_dce_state.mqHandle);
                            /*! @retval DCE_FAIL Failed to register MQCopy handle! */
                            status = -ENOMEM;
                            GT_setFailureReason (curTrace,
                                                 GT_4CLASS,
                                                 "rpmsg_dce_setup",
                                                 status,
                                                 "Failed to register MQCopy conn handle!");
                        }
                        else {
                            /* Binary semaphore posted by _rpmsg_dce_conn_cb
                             * when the remote side replies. */
                            rpmsg_dce_state.sem = OsalSemaphore_create(OsalSemaphore_Type_Binary);
                            if (rpmsg_dce_state.sem == NULL) {
                                //MessageQCopy_unregisterNotify();
                                MessageQCopy_delete (&rpmsg_dce_state.conn_handle);
                                //MessageQCopy_unregisterNotify();
                                MessageQCopy_delete (&rpmsg_dce_state.mqHandle);
                                /*! @retval DCE_FAIL Failed to register MQCopy handle! */
                                status = -ENOMEM;
                                GT_setFailureReason (curTrace,
                                                     GT_4CLASS,
                                                     "rpmsg_dce_setup",
                                                     status,
                                                     "Failed to register MQCopy handle!");
                            }
                        }
                    }
                }
            }
            if (status >= 0) {
                rpmsg_dce_state.isSetup = TRUE;
            }
            else {
                /* NOTE(review): run is cleared but the notifier thread is
                 * not joined here — presumably it exits on run == FALSE
                 * and is joined in rpmsg_dce_destroy; confirm. */
                rpmsg_dce_state.run = FALSE;
            }
        }
        else {
            /* Thread creation failed. */
            rpmsg_dce_state.run = FALSE;
        }
        pthread_attr_destroy(&thread_attr);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_0trace (curTrace, GT_LEAVE, "rpmsg_dce_setup");

    return status;
}
1927 /*!
1928 * @brief Module destroy function.
1929 *
1930 * @sa rpmsg_dce_setup
1931 */
Void
rpmsg_dce_destroy (Void)
{
    rpmsg_dce_EventPacket * packet;
    UInt32 i;
    List_Handle bufList;
    rpmsg_dce_object * dce = NULL;
    WaitingReaders_t * wr = NULL;
    struct _msg_info info;

    GT_0trace (curTrace, GT_ENTER, "_rpmsg_dce_destroy");

    /* Tear down every /dev/rpmsg-dce<i> device and its tracking object. */
    for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
        if (rpmsg_dce_state.objects[i]) {
            rpmsg_dce_conn_object * obj = rpmsg_dce_state.objects[i];
            _deinit_rpmsg_dce_device(obj->dev);
            Memory_free(NULL, obj, sizeof(rpmsg_dce_conn_object));
            rpmsg_dce_state.objects[i] = NULL;
        }
    }

    /* Drain every per-client event-state slot. */
    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        dce = NULL;
        if (rpmsg_dce_state.eventState [i].dce != NULL) {
            /* This is recovery. Need to mark dce structures as invalid */
            dce = rpmsg_dce_state.eventState[i].dce;
            MessageQCopy_delete(&dce->mq);
            dce->mq = NULL;
        }
        /* Detach the buffer list from the slot before draining it. */
        bufList = rpmsg_dce_state.eventState [i].bufList;

        rpmsg_dce_state.eventState [i].bufList = NULL;
        rpmsg_dce_state.eventState [i].dce = NULL;
        rpmsg_dce_state.eventState [i].refCount = 0;
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&rpmsg_dce_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                /* Check if rcvid is still valid */
                if (MsgInfo(wr->rcvid, &info) != -1) {
                    /* Recycle the entry, then reply EINTR outside the lock
                     * (MsgError can block; the lock is reacquired after). */
                    put_wr(wr);
                    pthread_mutex_unlock(&rpmsg_dce_state.lock);
                    MsgError(wr->rcvid, EINTR);
                    pthread_mutex_lock(&rpmsg_dce_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (dce) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(dce->notify, 1, 0)) {
                    iofunc_notify_trigger(dce->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }
            pthread_mutex_unlock(&rpmsg_dce_state.lock);

            /* Free event packets for any received but unprocessed events. */
            while (List_empty (bufList) != TRUE){
                packet = (rpmsg_dce_EventPacket *)
                              List_get (bufList);
                if (packet != NULL){
                    Memory_free (NULL, packet, sizeof(*packet));
                }
            }
            List_delete (&(bufList));
        }
    }

    /* Free the cached list */
    pthread_mutex_lock(&rpmsg_dce_state.lock);
    flush_uBuf();
    pthread_mutex_unlock(&rpmsg_dce_state.lock);

    if (rpmsg_dce_state.sem) {
        OsalSemaphore_delete(&rpmsg_dce_state.sem);
    }

    /* Release both module endpoints created in rpmsg_dce_setup. */
    //MessageQCopy_unregisterNotify();
    MessageQCopy_delete(&rpmsg_dce_state.conn_handle);

    //MessageQCopy_unregisterNotify();
    MessageQCopy_delete(&rpmsg_dce_state.mqHandle);

    if (rpmsg_dce_state.gateHandle != NULL) {
        GateSpinlock_delete ((GateSpinlock_Handle *)
                                       &(rpmsg_dce_state.gateHandle));
    }

    rpmsg_dce_state.isSetup = FALSE ;
    /* Signal the notifier thread to stop (it watches run under the lock),
     * then join it before freeing the remaining notify structures. */
    rpmsg_dce_state.run = FALSE;
    // run through and destroy the thread, and all outstanding
    // notify structures
    pthread_mutex_lock(&rpmsg_dce_state.lock);
    pthread_cond_signal(&rpmsg_dce_state.cond);
    pthread_mutex_unlock(&rpmsg_dce_state.lock);
    pthread_join(rpmsg_dce_state.nt, NULL);

    /* Drain any notify-list entries and their waiting readers that the
     * notifier thread left behind. */
    pthread_mutex_lock(&rpmsg_dce_state.lock);
    while (rpmsg_dce_state.head != NULL) {
        int index;
        WaitingReaders_t *item;
        index = dequeue_notify_list_item(rpmsg_dce_state.head);
        if (index < 0)
            break;
        item = dequeue_waiting_reader(index);
        while (item) {
            put_wr(item);
            item = dequeue_waiting_reader(index);
        }
    }
    rpmsg_dce_state.head = NULL ;
    rpmsg_dce_state.tail = NULL ;
    pthread_mutex_unlock(&rpmsg_dce_state.lock);

    GT_0trace (curTrace, GT_LEAVE, "_rpmsgDrv_destroy");
}
2047 /** ============================================================================
2048 * Internal functions
2049 * ============================================================================
2050 */