/*
 * @file rpmsg-omxdrv.c
 *
 * @brief devctl handler for OMX component.
 *
 *
 * @ver 02.00.00.46_alpha1
 *
 * ============================================================================
 *
 * Copyright (c) 2010-2011, Texas Instruments Incorporated
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * *  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * *  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * *  Neither the name of Texas Instruments Incorporated nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * Contact information for paper mail:
 * Texas Instruments
 * Post Office Box 655303
 * Dallas, Texas 75265
 * Contact information:
 * http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
 * DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
 * ============================================================================
 *
 */

/* Standard headers */
#include <ti/syslink/Std.h>

/* OSAL & Utils headers */
#include <ti/syslink/utils/List.h>
#include <ti/syslink/utils/String.h>
#include <ti/syslink/utils/Trace.h>
#include <ti/syslink/utils/Memory.h>
#include <ti/syslink/utils/IGateProvider.h>
#include <ti/syslink/utils/GateSpinlock.h>
#include <_MultiProc.h>

/* QNX specific header include */
#include <errno.h>
#include <unistd.h>
#include <sys/iofunc.h>
#include <sys/dispatch.h>
#include <sys/netmgr.h>
#include <devctl.h>

/* Module headers */
#include <ti/ipc/rpmsg_omx.h>
#include <ti/ipc/MessageQCopy.h>
#include <_MessageQCopy.h>
#include <_MessageQCopyDefs.h>
#include "OsalSemaphore.h"
#include "std_qnx.h"
#include <pthread.h>

#include <memmgr/tilermem.h>
#include <memmgr/tiler.h>

#include "rpmsg-omxdrv.h"
#include <rpmsg.h>

#define PRIORITY_REALTIME_LOW 29

extern int mem_offset64_peer(pid_t pid, const uintptr_t addr, size_t len,
                             off64_t *offset, size_t *contig_len);

static MsgList_t *nl_cache;
static int num_nl = 0;
static WaitingReaders_t *wr_cache;
static int num_wr = 0;

/*
 * Instead of constantly allocating and freeing the notifier structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in rpmsg-omxdrv.h.
 */

static MsgList_t *get_nl(void)
{
    MsgList_t *item;
    item = nl_cache;
    if (item != NULL) {
        nl_cache = nl_cache->next;
        num_nl--;
    } else {
        item = Memory_alloc(NULL, sizeof(MsgList_t), 0, NULL);
    }
    return(item);
}

static void put_nl(MsgList_t *item)
{
    if (num_nl >= CACHE_NUM) {
        Memory_free(NULL, item, sizeof(*item));
    } else {
        item->next = nl_cache;
        nl_cache = item;
        num_nl++;
    }
    return;
}

static WaitingReaders_t *get_wr(void)
{
    WaitingReaders_t *item;
    item = wr_cache;
    if (item != NULL) {
        wr_cache = wr_cache->next;
        num_wr--;
    } else {
        item = Memory_alloc(NULL, sizeof(WaitingReaders_t), 0, NULL);
    }
    return(item);
}

static void put_wr(WaitingReaders_t *item)
{
    if (num_wr >= CACHE_NUM) {
        Memory_free(NULL, item, sizeof(*item));
    } else {
        item->next = wr_cache;
        wr_cache = item;
        num_wr++;
    }
    return;
}
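
/*
 * Usage sketch for the free-list caches above (illustrative only, not part
 * of the driver logic): every get_*() is balanced by a put_*(), and both are
 * called while holding the lock that protects the list the item lives on:
 *
 *     pthread_mutex_lock(&rpmsg_omx_state.lock);
 *     item = get_wr();              // pop from cache, or Memory_alloc
 *     ...queue item, wait for delivery, dequeue...
 *     put_wr(item);                 // recycled until CACHE_NUM is reached
 *     pthread_mutex_unlock(&rpmsg_omx_state.lock);
 */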

typedef enum RPC_OMX_MAP_INFO_TYPE
{
    RPC_OMX_MAP_INFO_NONE      = 0,
    RPC_OMX_MAP_INFO_ONE_BUF   = 1,
    RPC_OMX_MAP_INFO_TWO_BUF   = 2,
    RPC_OMX_MAP_INFO_THREE_BUF = 3,
    RPC_OMX_MAP_INFO_MAX       = 0x7FFFFFFF
} RPC_OMX_MAP_INFO_TYPE;

/* structure to hold rpmsg-omx device information */
typedef struct named_device {
    iofunc_mount_t          mattr;
    iofunc_attr_t           cattr;
    int                     resmgr_id;
    pthread_mutex_t         mutex;
    iofunc_funcs_t          mfuncs;
    resmgr_connect_funcs_t  cfuncs;
    resmgr_io_funcs_t       iofuncs;
    char                    device_name[_POSIX_PATH_MAX];
} named_device_t;

/* rpmsg-omx device structure */
typedef struct rpmsg_omx_dev {
    dispatch_t    * dpp;
    thread_pool_t * tpool;
    named_device_t  rpmsg_omx;
} rpmsg_omx_dev_t;

/*!
 *  @brief  Remote connection object
 */
typedef struct rpmsg_omx_conn_object {
    rpmsg_omx_dev_t   * dev;
    MessageQCopy_Handle mq;
    UInt32              addr;
    UInt16              procId;
    ProcMgr_Handle      procH;
} rpmsg_omx_conn_object;

/*!
 *  @brief  omx instance object
 */
typedef struct rpmsg_omx_object_tag {
    MessageQCopy_Handle     mq;
    rpmsg_omx_conn_object * conn;
    UInt32                  addr;
    UInt32                  remoteAddr;
    UInt16                  procId;
    pid_t                   pid;
    Int                     state;
    iofunc_notify_t         notify[3];
} rpmsg_omx_object;

/*!
 *  @brief  Structure of Event callback argument passed to register function.
 */
typedef struct rpmsg_omx_EventCbck_tag {
    List_Elem          element;
    /*!< List element header */
    rpmsg_omx_object * omx;
    /*!< User omx info pointer. Passed back to user callback function */
    UInt32             pid;
    /*!< Process Identifier for user process. */
} rpmsg_omx_EventCbck;

/*!
 *  @brief  Keeps the information related to Event.
 */
typedef struct rpmsg_omx_EventState_tag {
    List_Handle        bufList;
    /*!< Head of received event list. */
    UInt32             pid;
    /*!< User process ID. */
    rpmsg_omx_object * omx;
    /*!< User omx comp. */
    UInt32             refCount;
    /*!< Reference count, used when multiple Notify_registerEvent are called
         from same process space (multi threads/processes). */
    WaitingReaders_t * head;
    /*!< Waiting readers head. */
    WaitingReaders_t * tail;
    /*!< Waiting readers tail. */
} rpmsg_omx_EventState;

/*!
 *  @brief  Per-connection information
 */
typedef struct rpmsg_omx_ocb {
    iofunc_ocb_t       hdr;
    pid_t              pid;
    rpmsg_omx_object * omx;
} rpmsg_omx_ocb_t;

typedef struct rpmsg_omx_name {
    char name[RPMSG_NAME_SIZE];
} rpmsg_omx_name_t;

#define RPMSG_OMX_MODULE_NAME "rpmsg-omx"

/*!
 *  @brief  rpmsg-omx Module state object
 */
typedef struct rpmsg_omx_ModuleObject_tag {
    Bool                 isSetup;
    /*!< Indicates whether the module has been already setup */
    UInt32               openRefCount;
    /*!< Open reference count. */
    IGateProvider_Handle gateHandle;
    /*!< Handle of gate to be used for local thread safety */
    rpmsg_omx_EventState eventState [MAX_PROCESSES];
    /*!< List for all user processes registered. */
    rpmsg_omx_conn_object * objects [MultiProc_MAXPROCESSORS];
    /*!< List of all remote connections. */
    MessageQCopy_Handle  mqHandle;
    /*!< Local mq handle associated with this module */
    UInt32               endpoint;
    /*!< Local endpoint associated with the mq handle */
    OsalSemaphore_Handle sem;
    /*!< Handle to semaphore used for omx instance connection notifications */
    pthread_t            nt;
    /*!< notifier thread */
    pthread_mutex_t      lock;
    /*!< protection between notifier and event */
    pthread_cond_t       cond;
    /*!< protection between notifier and event */
    MsgList_t *          head;
    /*!< list head */
    MsgList_t *          tail;
    /*!< list tail */
    int                  run;
    /*!< notifier thread must keep running */
} rpmsg_omx_ModuleObject;

/*!
 *  @brief  Structure of Event Packet read from notify kernel-side.
 */
typedef struct rpmsg_omx_EventPacket_tag {
    List_Elem          element;
    /*!< List element header */
    UInt32             pid;
    /*!< Process identifier */
    rpmsg_omx_object * obj;
    /*!< Pointer to the channel associated with this callback */
    UInt8              data[MessageQCopy_BUFSIZE];
    /*!< Data associated with event. */
    UInt32             len;
    /*!< Length of the data associated with event. */
    UInt32             src;
    /*!< Src endpoint associated with event. */
    struct rpmsg_omx_EventPacket * next;
    struct rpmsg_omx_EventPacket * prev;
} rpmsg_omx_EventPacket;

/*
 * Instead of constantly allocating and freeing the uBuf structures
 * we just cache a few of them, and recycle them instead.
 * The cache count is set with CACHE_NUM in rpmsg-omxdrv.h.
 */
static rpmsg_omx_EventPacket *uBuf_cache;
static int num_uBuf = 0;

static void flush_uBuf(void)
{
    rpmsg_omx_EventPacket *uBuf = NULL;

    while(uBuf_cache) {
        num_uBuf--;
        uBuf = uBuf_cache;
        uBuf_cache = (rpmsg_omx_EventPacket *)uBuf_cache->next;
        Memory_free(NULL, uBuf, sizeof(*uBuf));
    }
}

static rpmsg_omx_EventPacket *get_uBuf(void)
{
    rpmsg_omx_EventPacket *uBuf;
    uBuf = uBuf_cache;
    if (uBuf != NULL) {
        uBuf_cache = (rpmsg_omx_EventPacket *)uBuf_cache->next;
        num_uBuf--;
    } else {
        uBuf = Memory_alloc(NULL, sizeof(rpmsg_omx_EventPacket), 0, NULL);
    }
    return(uBuf);
}

static void put_uBuf(rpmsg_omx_EventPacket * uBuf)
{
    if (num_uBuf >= CACHE_NUM) {
        Memory_free(NULL, uBuf, sizeof(*uBuf));
    } else {
        uBuf->next = (struct rpmsg_omx_EventPacket *)uBuf_cache;
        uBuf_cache = uBuf;
        num_uBuf++;
    }
    return;
}

/** ============================================================================
 *  Globals
 *  ============================================================================
 */
/*!
 *  @var    rpmsg_omx_state
 *
 *  @brief  rpmsg-omx state object variable
 */
static rpmsg_omx_ModuleObject rpmsg_omx_state =
{
    .gateHandle = NULL,
    .isSetup = FALSE,
    .openRefCount = 0,
    .nt = 0,
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .cond = PTHREAD_COND_INITIALIZER,
    .head = NULL,
    .tail = NULL,
    .run = 0
};

extern dispatch_t * syslink_dpp;

static MsgList_t *find_nl(int index)
{
    MsgList_t *item = NULL;
    item = rpmsg_omx_state.head;
    while (item) {
        if (item->index == index)
            return(item);
        item = item->next;
    }
    return(item);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to enqueue a notify list item.
 *
 *  @param   index    Index of the client process associated with the item.
 *
 *  @sa      find_nl
 *           get_nl
 */
static int enqueue_notify_list(int index)
{
    MsgList_t *item;
    item = find_nl(index);
    if (item == NULL) {
        item = get_nl();
        if (item == NULL) {
            return(-1);
        }
        item->next = NULL;
        item->index = index;
        item->num_events = 1;
        if (rpmsg_omx_state.head == NULL) {
            rpmsg_omx_state.head = item;
            rpmsg_omx_state.tail = item;
            item->prev = NULL;
        }
        else {
            item->prev = rpmsg_omx_state.tail;
            rpmsg_omx_state.tail->next = item;
            rpmsg_omx_state.tail = item;
        }
    }
    else {
        item->num_events++;
    }
    return(0);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to dequeue a notify list item.
 *
 *  @param   item     The item to remove.
 *
 *  @sa      put_nl
 */
static inline int dequeue_notify_list_item(MsgList_t *item)
{
    int index;
    if (item == NULL) {
        return(-1);
    }
    index = item->index;
    item->num_events--;
    if (item->num_events > 0) {
        return(index);
    }
    if (rpmsg_omx_state.head == item) {
        // removing head
        rpmsg_omx_state.head = item->next;
        if (rpmsg_omx_state.head != NULL) {
            rpmsg_omx_state.head->prev = NULL;
        }
        else {
            // removing head and tail
            rpmsg_omx_state.tail = NULL;
        }
    }
    else {
        item->prev->next = item->next;
        if (item->next != NULL) {
            item->next->prev = item->prev;
        }
        else {
            // removing tail
            rpmsg_omx_state.tail = item->prev;
        }
    }
    put_nl(item);
    return(index);
}

/* we have the right locks when calling this function */
/*!
 *  @brief   Function to add a waiting reader to the list.
 *
 *  @param   index    Index of the client process waiting reader to add.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      None
 */
static int enqueue_waiting_reader(int index, int rcvid)
{
    WaitingReaders_t *item;
    item = get_wr();
    if (item == NULL) {
        return(-1);
    }
    item->rcvid = rcvid;
    item->next = NULL;
    if (rpmsg_omx_state.eventState [index].head == NULL) {
        rpmsg_omx_state.eventState [index].head = item;
        rpmsg_omx_state.eventState [index].tail = item;
    }
    else {
        rpmsg_omx_state.eventState [index].tail->next = item;
        rpmsg_omx_state.eventState [index].tail = item;
    }
    return(EOK);
}

/* we have the right locks when calling this function */
/* caller frees item */
/*!
 *  @brief   Function to remove a waiting reader from the list.
 *
 *  @param   index    Index of the client process waiting reader to dequeue.
 *
 *  @sa      None
 */
static WaitingReaders_t *dequeue_waiting_reader(int index)
{
    WaitingReaders_t *item = NULL;
    if (rpmsg_omx_state.eventState [index].head) {
        item = rpmsg_omx_state.eventState [index].head;
        rpmsg_omx_state.eventState [index].head = rpmsg_omx_state.eventState [index].head->next;
        if (rpmsg_omx_state.eventState [index].head == NULL) {
            rpmsg_omx_state.eventState [index].tail = NULL;
        }
    }
    return(item);
}

/*!
 *  @brief   Function to find a specified waiting reader and remove it
 *           from the list. The caller owns (and frees) the returned item.
 *
 *  @param   index    Index of the client process waiting for the message.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      None
 */

static WaitingReaders_t *find_waiting_reader(int index, int rcvid)
{
    WaitingReaders_t *item = NULL;
    WaitingReaders_t *prev = NULL;
    if (rpmsg_omx_state.eventState [index].head) {
        item = rpmsg_omx_state.eventState [index].head;
        while (item) {
            if (item->rcvid == rcvid) {
                /* remove item from list, fixing up head and tail */
                if (prev)
                    prev->next = item->next;
                if (item == rpmsg_omx_state.eventState [index].head)
                    rpmsg_omx_state.eventState [index].head = item->next;
                if (item == rpmsg_omx_state.eventState [index].tail)
                    rpmsg_omx_state.eventState [index].tail = prev;
                break;
            }
            else {
                prev = item;
                item = item->next;
            }
        }
    }
    return item;
}

/*!
 *  @brief   Function used to check if there is a waiting reader with an
 *           event (message) ready to be delivered.
 *
 *  @param   index    Index of the client process waiting for the message.
 *  @param   item     Pointer to the waiting reader.
 *
 *  @sa      dequeue_notify_list_item
 *           dequeue_waiting_reader
 */

static int find_available_reader_and_event(int *index, WaitingReaders_t **item)
{
    MsgList_t *temp;
    if (rpmsg_omx_state.head == NULL) {
        return(0);
    }
    temp = rpmsg_omx_state.head;
    while (temp) {
        if (rpmsg_omx_state.eventState [temp->index].head) {
            // event and reader found
            if (dequeue_notify_list_item(temp) >= 0) {
                *index = temp->index;
                *item = dequeue_waiting_reader(temp->index);
            }
            else {
                /* error occurred, return 0 as item has not been set */
                return(0);
            }
            return(1);
        }
        temp = temp->next;
    }
    return(0);
}

/*!
 *  @brief   Function used to deliver the notification to the client that
 *           it has received a message.
 *
 *  @param   index    Index of the client process receiving the message.
 *  @param   rcvid    Receive ID of the client process that was passed
 *                    when the client called read().
 *
 *  @sa      put_uBuf
 */

static void deliver_notification(int index, int rcvid)
{
    int err = EOK;
    rpmsg_omx_EventPacket * uBuf = NULL;
    struct omx_msg_hdr * hdr = NULL;

    uBuf = (rpmsg_omx_EventPacket *) List_get (rpmsg_omx_state.eventState [index].bufList);

    /* Let the check remain at run-time. */
    if (uBuf != NULL) {
        /* Only dereference the packet after the NULL check. */
        hdr = (struct omx_msg_hdr *)uBuf->data;
        err = MsgReply(rcvid, hdr->len, hdr->data, hdr->len);
        if (err == -1)
            perror("deliver_notification: MsgReply");
        /* Free the processed event callback packet. */
        put_uBuf(uBuf);
    }
    else {
        MsgReply(rcvid, EOK, NULL, 0);
    }
    return;
}

/*!
 *  @brief   Thread used for notifying waiting readers of messages.
 *
 *  @param   arg      Thread-specific private arg.
 *
 *  @sa      find_available_reader_and_event
 *           deliver_notification
 *           put_wr
 */
static void *notifier_thread(void *arg)
{
    int status;
    int index;
    WaitingReaders_t *item = NULL;
    pthread_mutex_lock(&rpmsg_omx_state.lock);
    while (rpmsg_omx_state.run) {
        status = find_available_reader_and_event(&index, &item);
        if ( (status == 0) || (item == NULL) ) {
            status = pthread_cond_wait(&rpmsg_omx_state.cond, &rpmsg_omx_state.lock);
            if ((status != EOK) && (status != EINTR)) {
                // unrecoverable error waiting on the condvar
                break;
            }
            status = find_available_reader_and_event(&index, &item);
            if ( (status == 0) || (item == NULL) ) {
                continue;
            }
        }
        pthread_mutex_unlock(&rpmsg_omx_state.lock);
        // we have unlocked, and now we have an event to deliver
        // we deliver one event at a time, relock, check and continue
        deliver_notification(index, item->rcvid);
        pthread_mutex_lock(&rpmsg_omx_state.lock);
        put_wr(item);
    }
    pthread_mutex_unlock(&rpmsg_omx_state.lock);
    return(NULL);
}
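
/*
 * Message delivery, in brief (a descriptive sketch of the surrounding code,
 * not new behavior):
 *
 *   _rpmsg_omx_cb (MessageQCopy callback, per-client endpoint)
 *       -> _rpmsg_omx_addBufByPid: queue the packet on eventState[i].bufList
 *            - waiting reader present: deliver_notification() immediately
 *            - otherwise: enqueue_notify_list(i) + pthread_cond_signal
 *   notifier_thread
 *       -> find_available_reader_and_event: pair a queued event with a
 *          blocked read() (rcvid), then MsgReply via deliver_notification.
 */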

static
Int
_rpmsg_omx_connect(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_omx_ocb_t *ocb)
{
    Int status = EOK;
    struct omx_conn_req * cargs = (struct omx_conn_req *)(_DEVCTL_DATA (msg->i));
    struct omx_msg_hdr * hdr = NULL;
    rpmsg_omx_object * omx = ocb->omx;
    UInt8 buf[sizeof(struct omx_conn_req) + sizeof(struct omx_msg_hdr)];

    if (omx->state == OMX_CONNECTED) {
        GT_0trace(curTrace, GT_4CLASS, "Already connected.");
        status = (EISCONN);
    }
    else if (ctp->info.msglen - sizeof(msg->i) < sizeof (struct omx_conn_req)) {
        status = (EINVAL);
    }
    else if (String_nlen(cargs->name, 47) == -1) {
        status = (EINVAL);
    }
    else {
        hdr = (struct omx_msg_hdr *)buf;
        hdr->type = OMX_CONN_REQ;
        hdr->len = sizeof(struct omx_conn_req);
        Memory_copy(hdr->data, cargs, sizeof(struct omx_conn_req));

        status = MessageQCopy_send (omx->conn->procId, // remote procid
                                    MultiProc_self(),  // local procid
                                    omx->conn->addr,   // remote server
                                    omx->addr,         // local address
                                    buf,               // connect msg
                                    sizeof(buf),       // msg size
                                    TRUE);             // wait for available bufs
        if (status != MessageQCopy_S_SUCCESS) {
            GT_0trace(curTrace, GT_4CLASS, "Failed to send connect message.");
            status = (EIO);
        }
        else {
            status = OsalSemaphore_pend(rpmsg_omx_state.sem, 5000);
            if (omx->state == OMX_CONNECTED) {
                msg->o.ret_val = EOK;
                status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o)));
            }
            else if (omx->state == OMX_FAIL) {
                GT_0trace(curTrace, GT_4CLASS, "Connect request rejected by remote server.");
                status = (ENXIO);
            }
            else if (status < 0) {
                GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
                status = (EIO);
            }
            else {
                status = (ETIMEDOUT);
            }
        }
    }

    return status;
}
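
/*
 * Client-side sketch of the connect handshake handled above (illustrative
 * only; the device node and server name are examples, and the exact struct
 * definitions come from ti/ipc/rpmsg_omx.h):
 *
 *     int fd = open("/dev/rpmsg-omx1", O_RDWR);       // hypothetical node
 *     struct omx_conn_req req;
 *     strncpy(req.name, "OMX", sizeof(req.name));     // remote server name
 *     if (devctl(fd, OMX_IOCCONNECT, &req, sizeof(req), NULL) != EOK)
 *         ...failed: EISCONN, EINVAL, EIO, ENXIO, or ETIMEDOUT...
 */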

static
Int
_rpmsg_omx_disconnect(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_omx_ocb_t *ocb)
{
    Int status = EOK;
    struct omx_msg_hdr * hdr = NULL;
    rpmsg_omx_object * omx = ocb->omx;
    UInt8 buf[sizeof(struct omx_disc_req) + sizeof(struct omx_msg_hdr)];
    struct omx_disc_req * dreq = NULL;

    if (omx->state != OMX_CONNECTED) {
        GT_0trace(curTrace, GT_4CLASS, "Already disconnected.");
        status = (ENOTCONN);
    }
    else {
        hdr = (struct omx_msg_hdr *)buf;
        hdr->type = OMX_DISCONNECT;
        hdr->len = sizeof(struct omx_disc_req);
        dreq = (struct omx_disc_req *)hdr->data;
        dreq->addr = omx->remoteAddr;

        status = MessageQCopy_send (omx->conn->procId, // remote procid
                                    MultiProc_self(),  // local procid
                                    omx->conn->addr,   // remote server
                                    omx->addr,         // local address
                                    buf,               // disconnect msg
                                    sizeof(buf),       // msg size
                                    TRUE);             // wait for available bufs
        if (status != MessageQCopy_S_SUCCESS) {
            GT_0trace(curTrace, GT_4CLASS, "Failed to send disconnect message.");
            status = (EIO);
        }
        else {
            /* There will be no response, so don't wait. */
            omx->state = OMX_UNCONNECTED;
        }
    }

    return status;
}

Int
rpmsg_omx_devctl(resmgr_context_t *ctp, io_devctl_t *msg, IOFUNC_OCB_T *i_ocb)
{
    Int status = 0;
    rpmsg_omx_ocb_t *ocb = (rpmsg_omx_ocb_t *)i_ocb;

    if ((status = iofunc_devctl_default(ctp, msg, &ocb->hdr)) != _RESMGR_DEFAULT)
        return(_RESMGR_ERRNO(status));
    status = 0;

    switch (msg->i.dcmd)
    {
        case OMX_IOCCONNECT:
            status = _rpmsg_omx_connect (ctp, msg, ocb);
            break;
        default:
            status = (ENOSYS);
            break;
    }

    return status;
}

/*!
 *  @brief   Attach a client to the rpmsg-omx user support framework.
 *
 *  @param   omx      OMX instance object for the client
 *
 *  @sa      _rpmsg_omx_detach
 */
static
Int
_rpmsg_omx_attach (rpmsg_omx_object * omx)
{
    Int32         status  = EOK;
    Bool          flag    = FALSE;
    Bool          isInit  = FALSE;
    List_Object * bufList = NULL;
    IArg          key     = 0;
    List_Params   listparams;
    UInt32        i;

    GT_1trace (curTrace, GT_ENTER, "_rpmsg_omx_attach", omx);

    key = IGateProvider_enter (rpmsg_omx_state.gateHandle);
    for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
        if (rpmsg_omx_state.eventState [i].omx == omx) {
            rpmsg_omx_state.eventState [i].refCount++;
            isInit = TRUE;
            status = EOK;
            break;
        }
    }

    if (isInit == FALSE) {
        List_Params_init (&listparams);
        bufList = List_create (&listparams) ;
        /* Search for an available slot for user process. */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            if (rpmsg_omx_state.eventState [i].omx == NULL) {
                rpmsg_omx_state.eventState [i].omx = omx;
                rpmsg_omx_state.eventState [i].refCount = 1;
                rpmsg_omx_state.eventState [i].bufList = bufList;
                flag = TRUE;
                break;
            }
        }

        /* No free slots found. Let this check remain at run-time,
         * since it is dependent on user environment.
         */
        if (flag != TRUE) {
            /*! @retval Notify_E_RESOURCE Maximum number of
                supported user clients have already been registered. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_rpmsg_omx_attach",
                                 status,
                                 "Maximum number of supported user"
                                 " clients have already been "
                                 "registered.");
            if (bufList != NULL) {
                List_delete (&bufList);
            }
        }
    }
    IGateProvider_leave (rpmsg_omx_state.gateHandle, key);

    GT_1trace (curTrace, GT_LEAVE, "_rpmsg_omx_attach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed. */
    return status ;
}

/*!
 *  @brief   This function adds a data buffer to a registered process.
 *
 *  @param   omx      OMX object associated with the client
 *  @param   src      Source address (endpoint) sending the data
 *  @param   pid      Process ID associated with the client
 *  @param   data     Data to be added
 *  @param   len      Length of data to be added
 *
 *  @sa
 */
Int
_rpmsg_omx_addBufByPid (rpmsg_omx_object *omx,
                        UInt32            src,
                        UInt32            pid,
                        void *            data,
                        UInt32            len)
{
    Int32                   status = EOK;
    Bool                    flag   = FALSE;
    rpmsg_omx_EventPacket * uBuf   = NULL;
    IArg                    key;
    UInt32                  i;
    WaitingReaders_t       *item;
    MsgList_t              *msgItem;

    GT_5trace (curTrace,
               GT_ENTER,
               "_rpmsg_omx_addBufByPid",
               omx,
               src,
               pid,
               data,
               len);

    GT_assert (curTrace, (rpmsg_omx_state.isSetup == TRUE));

    key = IGateProvider_enter (rpmsg_omx_state.gateHandle);
    /* Find the registration for this callback */
    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_omx_state.eventState [i].omx == omx) {
            flag = TRUE;
            break;
        }
    }
    IGateProvider_leave (rpmsg_omx_state.gateHandle, key);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (flag != TRUE) {
        /*! @retval ENOMEM Could not find a registered handler
                           for this process. */
        status = -ENOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "_rpmsg_omx_addBufByPid",
                             status,
                             "Could not find a registered handler "
                             "for this process!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        /* Allocate memory for the buf */
        pthread_mutex_lock(&rpmsg_omx_state.lock);
        uBuf = get_uBuf();
        pthread_mutex_unlock(&rpmsg_omx_state.lock);

#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (uBuf == NULL) {
            /*! @retval Notify_E_MEMORY Failed to allocate memory for event
                        packet for received callback. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_rpmsg_omx_addBufByPid",
                                 status,
                                 "Failed to allocate memory for event"
                                 " packet for received callback!");
        }
        else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            List_elemClear (&(uBuf->element));
            GT_assert (curTrace,
                       (rpmsg_omx_state.eventState [i].bufList != NULL));

            if (data) {
                Memory_copy(uBuf->data, data, len);
            }
            uBuf->len = len;

            List_put (rpmsg_omx_state.eventState [i].bufList,
                      &(uBuf->element));
            pthread_mutex_lock(&rpmsg_omx_state.lock);
            item = dequeue_waiting_reader(i);
            if (item) {
                // there is a waiting reader
                deliver_notification(i, item->rcvid);
                put_wr(item);
                pthread_mutex_unlock(&rpmsg_omx_state.lock);
                status = EOK;
            }
            else {
                if (enqueue_notify_list(i) < 0) {
                    pthread_mutex_unlock(&rpmsg_omx_state.lock);
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "_rpmsg_omx_addBufByPid",
                                         status,
                                         "Failed to allocate memory for notifier");
                }
                else {
                    msgItem = find_nl(i);
                    /* TODO: omx could be NULL in some cases */
                    if (omx && msgItem) {
                        if (IOFUNC_NOTIFY_INPUT_CHECK(omx->notify, msgItem->num_events, 0)) {
                            iofunc_notify_trigger(omx->notify, msgItem->num_events, IOFUNC_NOTIFY_INPUT);
                        }
                    }
                    status = EOK;
                    pthread_cond_signal(&rpmsg_omx_state.cond);
                    pthread_mutex_unlock(&rpmsg_omx_state.lock);
                }
            }
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        }
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_1trace (curTrace, GT_LEAVE, "_rpmsg_omx_addBufByPid", status);

    return status;
}

/*!
 *  @brief   This function implements the callback registered with
 *           MessageQCopy_create for each client. This function
 *           adds the message from the remote proc to a list
 *           where it is routed to the appropriate waiting reader.
 *
 *  @param   handle   MessageQCopy handle on which the message arrived
 *  @param   data     Payload of the received message
 *  @param   len      Length of the payload
 *  @param   priv     Private data registered for the endpoint (the omx object)
 *  @param   src      Remote endpoint that sent the message
 *  @param   srcProc  Remote processor ID that sent the message
 *
 *  @sa
 */
Void
_rpmsg_omx_cb (MessageQCopy_Handle handle, void * data, int len, void * priv, UInt32 src, UInt16 srcProc)
{
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    Int32 status = 0;
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    rpmsg_omx_object * omx = NULL;
    struct omx_msg_hdr * msg_hdr = NULL;
    struct omx_conn_rsp * reply;

    GT_6trace (curTrace,
               GT_ENTER,
               "_rpmsg_omx_cb",
               handle,
               data,
               len,
               priv,
               src,
               srcProc);

    omx = (rpmsg_omx_object *) priv;
    msg_hdr = (struct omx_msg_hdr *)data;

    switch (msg_hdr->type) {
        case OMX_CONN_RSP:
            reply = (struct omx_conn_rsp *)msg_hdr->data;
            omx->remoteAddr = reply->addr;
            if (reply->status != OMX_SUCCESS) {
                omx->state = OMX_FAIL;
            }
            else {
                omx->state = OMX_CONNECTED;
            }
            /* post the semaphore to have the ioctl reply */
            OsalSemaphore_post(rpmsg_omx_state.sem);
            break;
        case OMX_RAW_MSG:
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            status =
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            _rpmsg_omx_addBufByPid (omx,
                                    src,
                                    omx->pid,
                                    data,
                                    len);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            if (status < 0) {
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "_rpmsg_omx_cb",
                                     status,
                                     "Failed to add callback packet for pid");
            }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
            break;
        default:
            break;
    }

    GT_0trace (curTrace, GT_LEAVE, "_rpmsg_omx_cb");
}

/**
 * Handler for ocb_calloc() requests.
 *
 * Special handler for ocb_calloc() requests that we export for control. An
 * open request from the client will result in a call to our special ocb_calloc
 * handler. This function attaches the client's pid using _rpmsg_omx_attach
 * and allocates client-specific information. This function also creates an
 * endpoint for the client to communicate with the OMX server on the
 * remote core.
 *
 * \param ctp       Thread's associated context information.
 * \param device    Device attributes structure.
 *
 * \return Pointer to an iofunc_ocb_t OCB structure.
 */

IOFUNC_OCB_T *
rpmsg_omx_ocb_calloc (resmgr_context_t * ctp, IOFUNC_ATTR_T * device)
{
    rpmsg_omx_ocb_t *ocb = NULL;
    rpmsg_omx_object *obj = NULL;
    struct _msg_info cl_info;
    rpmsg_omx_dev_t * dev = NULL;
    int i = 0;
    Bool found = FALSE;
    char path1[20];
    char path2[20];

    GT_2trace (curTrace, GT_ENTER, "rpmsg_omx_ocb_calloc",
               ctp, device);

    /* Allocate the OCB */
    ocb = (rpmsg_omx_ocb_t *) calloc (1, sizeof (rpmsg_omx_ocb_t));
    if (ocb == NULL){
        errno = ENOMEM;
        return (NULL);
    }

    ocb->pid = ctp->info.pid;

    /* Allocate memory for the rpmsg object. */
    obj = Memory_calloc (NULL, sizeof (rpmsg_omx_object), 0u, NULL);
    if (obj == NULL) {
        errno = ENOMEM;
        free(ocb);
        return (NULL);
    }
    else {
        ocb->omx = obj;
        IOFUNC_NOTIFY_INIT(obj->notify);
        obj->state = OMX_UNCONNECTED;
        /* determine conn and procId for communication based on which device was opened */
        MsgInfo(ctp->rcvid, &cl_info);
        resmgr_pathname(ctp->id, 0, path1, sizeof(path1));
        for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
            if (rpmsg_omx_state.objects[i] != NULL) {
                dev = rpmsg_omx_state.objects[i]->dev;
                resmgr_pathname(dev->rpmsg_omx.resmgr_id, 0, path2, sizeof(path2));
                if (!strcmp(path1, path2)) {
                    found = TRUE;
                    break;
                }
            }
        }
        if (found) {
            obj->conn = rpmsg_omx_state.objects[i];
            obj->procId = obj->conn->procId;
            obj->pid = ctp->info.pid;
            obj->mq = MessageQCopy_create (MessageQCopy_ADDRANY, NULL, _rpmsg_omx_cb, obj, &obj->addr);
            if (obj->mq == NULL) {
                errno = ENOMEM;
                /* obj was allocated with Memory_calloc, so release it with Memory_free */
                Memory_free(NULL, obj, sizeof(rpmsg_omx_object));
                free(ocb);
                return (NULL);
            }
            else {
                if (_rpmsg_omx_attach (ocb->omx) < 0) {
                    errno = ENOMEM;
                    MessageQCopy_delete (&obj->mq);
                    Memory_free(NULL, obj, sizeof(rpmsg_omx_object));
                    free(ocb);
                    return (NULL);
                }
            }
        }
    }

    GT_1trace (curTrace, GT_LEAVE, "rpmsg_omx_ocb_calloc", ocb);

    return (IOFUNC_OCB_T *)(ocb);
}

/*!
 *  @brief   Detach a client from the rpmsg-omx user support framework.
 *
 *  @param   omx      OMX instance object for the client
 *
 *  @sa      _rpmsg_omx_attach
 */
static
Int
_rpmsg_omx_detach (rpmsg_omx_object * omx)
{
    Int32 status = EOK;
    Bool flag = FALSE;
    List_Object * bufList = NULL;
    UInt32 i;
    IArg key;
    MsgList_t * item;
    WaitingReaders_t * wr = NULL;
    struct _msg_info info;

    GT_1trace (curTrace, GT_ENTER, "_rpmsg_omx_detach", omx);

    key = IGateProvider_enter (rpmsg_omx_state.gateHandle);

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_omx_state.eventState [i].omx == omx) {
            if (rpmsg_omx_state.eventState [i].refCount == 1) {
                rpmsg_omx_state.eventState [i].refCount = 0;

                flag = TRUE;
                break;
            }
            else {
                rpmsg_omx_state.eventState [i].refCount--;
                status = EOK;
                break;
            }
        }
    }
    IGateProvider_leave (rpmsg_omx_state.gateHandle, key);

    if (flag == TRUE) {
        key = IGateProvider_enter (rpmsg_omx_state.gateHandle);
        /* Last client being unregistered for this process. */
        rpmsg_omx_state.eventState [i].omx = NULL;

        /* Store in local variable to delete outside lock. */
        bufList = rpmsg_omx_state.eventState [i].bufList;

        rpmsg_omx_state.eventState [i].bufList = NULL;

        IGateProvider_leave (rpmsg_omx_state.gateHandle, key);
    }

    if (flag != TRUE) {
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (i == MAX_PROCESSES) {
            /*! @retval Notify_E_NOTFOUND The specified user process was
                        not found registered with Notify Driver module. */
            status = -ENOMEM;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "_rpmsg_omx_detach",
                                 status,
                                 "The specified user process was not found"
                                 " registered with rpmsg Driver module.");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }
    else {
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&rpmsg_omx_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                /* Check if rcvid is still valid */
                if (MsgInfo(wr->rcvid, &info) != -1) {
                    put_wr(wr);
                    pthread_mutex_unlock(&rpmsg_omx_state.lock);
                    MsgError(wr->rcvid, EINTR);
                    pthread_mutex_lock(&rpmsg_omx_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (omx) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(omx->notify, 1, 0)) {
                    iofunc_notify_trigger(omx->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }

            /* Free event packets for any received but unprocessed events. */
            while ((item = find_nl(i)) != NULL) {
                if (dequeue_notify_list_item(item) >= 0) {
                    rpmsg_omx_EventPacket * uBuf = NULL;

                    uBuf = (rpmsg_omx_EventPacket *) List_get (bufList);

                    /* Let the check remain at run-time. */
                    if (uBuf != NULL) {
                        put_uBuf(uBuf);
                    }
                }
            }
            pthread_mutex_unlock(&rpmsg_omx_state.lock);

            /* Last client being unregistered with Notify module. */
            List_delete (&bufList);
        }
    }

    GT_1trace (curTrace, GT_LEAVE, "_rpmsg_omx_detach", status);

    /*! @retval Notify_S_SUCCESS Operation successfully completed */
    return status;
}

/**
 * Handler for ocb_free() requests.
 *
 * Special handler for ocb_free() requests that we export for control. A
 * close request from the client will result in a call to our special ocb_free
 * handler. This function detaches the client's pid using _rpmsg_omx_detach
 * and frees any client-specific information that was allocated.
 *
 * \param i_ocb     OCB associated with client's session.
 *
 * \return None.
 */

void
rpmsg_omx_ocb_free (IOFUNC_OCB_T * i_ocb)
{
    rpmsg_omx_ocb_t * ocb = (rpmsg_omx_ocb_t *)i_ocb;
    rpmsg_omx_object *obj;

    if (ocb && ocb->omx) {
        obj = ocb->omx;
        if (obj->state == OMX_CONNECTED) {
            /* Need to disconnect this device */
            _rpmsg_omx_disconnect(NULL, NULL, ocb);
        }
        _rpmsg_omx_detach(ocb->omx);
        if (obj->mq) {
            MessageQCopy_delete (&obj->mq);
            obj->mq = NULL;
        }
        /* obj was allocated with Memory_calloc in rpmsg_omx_ocb_calloc */
        Memory_free (NULL, obj, sizeof(rpmsg_omx_object));
        free (ocb);
    }
}

/**
 * Handler for close_ocb() requests.
 *
 * This function removes the notification entries associated with the current
 * client.
 *
 * \param ctp       Thread's associated context information.
 * \param reserved  This argument must be NULL.
 * \param ocb       OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 */

Int
rpmsg_omx_close_ocb (resmgr_context_t *ctp, void *reserved, RESMGR_OCB_T *ocb)
{
    rpmsg_omx_ocb_t * omx_ocb = (rpmsg_omx_ocb_t *)ocb;
    iofunc_notify_remove(ctp, omx_ocb->omx->notify);
    return (iofunc_close_ocb_default(ctp, reserved, ocb));
}

/**
 * Handler for read() requests.
 *
 * Handles special read() requests that we export for control. A read
 * request will get a message from the remote processor that is associated
 * with the client that is calling read().
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual read() message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 * \retval EAGAIN   Call is non-blocking and no messages available.
 * \retval ENOMEM   Not enough memory to perform the read.
 */

int rpmsg_omx_read(resmgr_context_t *ctp, io_read_t *msg, RESMGR_OCB_T *ocb)
{
    Int status;
    rpmsg_omx_ocb_t * omx_ocb = (rpmsg_omx_ocb_t *)ocb;
    rpmsg_omx_object * omx = omx_ocb->omx;
    Bool flag = FALSE;
    Int retVal = EOK;
    UInt32 i;
    MsgList_t * item;
    Int nonblock;

    if ((status = iofunc_read_verify(ctp, msg, ocb, &nonblock)) != EOK)
        return (status);

    if (omx->state != OMX_CONNECTED) {
        return (ENOTCONN);
    }

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_omx_state.eventState [i].omx == omx) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_omx_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&rpmsg_omx_state.lock);
            item = find_nl(i);
            if (dequeue_notify_list_item(item) < 0) {
                if (nonblock) {
                    pthread_mutex_unlock(&rpmsg_omx_state.lock);
                    return EAGAIN;
                }
                else {
                    retVal = enqueue_waiting_reader(i, ctp->rcvid);
                    if (retVal == EOK) {
                        pthread_cond_signal(&rpmsg_omx_state.cond);
                        pthread_mutex_unlock(&rpmsg_omx_state.lock);
                        return(_RESMGR_NOREPLY);
                    }
                    retVal = ENOMEM;
                    pthread_mutex_unlock(&rpmsg_omx_state.lock);
                }
            }
            else {
                deliver_notification(i, ctp->rcvid);
                pthread_mutex_unlock(&rpmsg_omx_state.lock);
                return(_RESMGR_NOREPLY);
            }
        }
    }

    /*! @retval Number-of-bytes-read Number of bytes read. */
    return retVal;
}
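
/*
 * Client-side sketch of the read path above (illustrative only): a blocking
 * read() returns exactly one queued OMX message per call, with the driver's
 * omx_msg_hdr already stripped, so the buffer should be sized for the
 * largest expected payload:
 *
 *     char msg[MessageQCopy_BUFSIZE];
 *     ssize_t n = read(fd, msg, sizeof(msg));   // blocks until a message
 *     if (n > 0)
 *         ...n bytes of payload from the remote core...
 */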

/**
 * Unblock read calls
 *
 * This function checks if the client is blocked on a read call and if so,
 * unblocks the client.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The pulse message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EINTR    The client has been unblocked.
 * \retval other    The client has not been unblocked or the client was not
 *                  blocked.
 */

int rpmsg_omx_read_unblock(resmgr_context_t *ctp, io_pulse_t *msg, iofunc_ocb_t *ocb)
{
    UInt32 i;
    Bool flag = FALSE;
    WaitingReaders_t * wr;
    rpmsg_omx_ocb_t * omx_ocb = (rpmsg_omx_ocb_t *)ocb;
    rpmsg_omx_object * omx = omx_ocb->omx;

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_omx_state.eventState [i].omx == omx) {
            flag = TRUE;
            break;
        }
    }

    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_omx_state.eventState [i].bufList != NULL) {
            pthread_mutex_lock(&rpmsg_omx_state.lock);
            wr = find_waiting_reader(i, ctp->rcvid);
            if (wr) {
                put_wr(wr);
                pthread_mutex_unlock(&rpmsg_omx_state.lock);
                return (EINTR);
            }
            pthread_mutex_unlock(&rpmsg_omx_state.lock);
        }
    }

    return _RESMGR_NOREPLY;
}

/**
 * Handler for unblock() requests.
 *
 * Handles unblock request for the client which is requesting to no longer be
 * blocked on the rpmsg-omx driver.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The pulse message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EINTR    The rcvid has been unblocked.
 */

int rpmsg_omx_unblock(resmgr_context_t *ctp, io_pulse_t *msg, RESMGR_OCB_T *ocb)
{
    int status = _RESMGR_NOREPLY;
    struct _msg_info info;

    /*
     * Try to run the default unblock for this message.
     */
    if ((status = iofunc_unblock_default(ctp, msg, ocb)) != _RESMGR_DEFAULT) {
        return status;
    }

    /*
     * Check if rcvid is still valid and still has an unblock
     * request pending.
     */
    if (MsgInfo(ctp->rcvid, &info) == -1 ||
        !(info.flags & _NTO_MI_UNBLOCK_REQ)) {
        return _RESMGR_NOREPLY;
    }

    if (rpmsg_omx_read_unblock(ctp, msg, ocb) != _RESMGR_NOREPLY) {
        return _RESMGR_ERRNO(EINTR);
    }

    return _RESMGR_ERRNO(EINTR);
}

uint32_t
_rpmsg_omx_pa2da(ProcMgr_Handle handle, uint32_t pa)
{
    Int status = 0;
    uint32_t da;

    if (pa >= TILER_MEM_8BIT && pa < TILER_MEM_END) {
        /* Addresses in the TILER range are passed through unchanged. */
        return pa;
    }
    else {
        status = ProcMgr_translateAddr(handle, (Ptr *)&da,
                                       ProcMgr_AddrType_SlaveVirt,
                                       (Ptr)pa, ProcMgr_AddrType_MasterPhys);
        if (status >= 0)
            return da;
        else
            return 0;
    }
}

int
_rpmsg_omx_map(ProcMgr_Handle handle, char *data, uint32_t bytes, pid_t pid)
{
    int status = EOK;
    struct omx_packet *packet = (struct omx_packet *)data;
    char *map_info = NULL;
    RPC_OMX_MAP_INFO_TYPE type;
    int i = 0;
    int buf_offset = 0;
    uint32_t *buffer = NULL;
    off64_t phys_addr;
    uint32_t ipu_addr;
    uint32_t msg_size;
    size_t phys_len = 0;

    if (bytes <= sizeof(struct omx_packet)) {
        msg_size = 0;
    }
    else {
        msg_size = bytes - sizeof(struct omx_packet);
    }
    if (msg_size < sizeof(RPC_OMX_MAP_INFO_TYPE))
        return (-EINVAL);

    type = *(RPC_OMX_MAP_INFO_TYPE *)(packet->data);

    if (type == RPC_OMX_MAP_INFO_NONE)
        return EOK;
    if (type != RPC_OMX_MAP_INFO_ONE_BUF &&
        type != RPC_OMX_MAP_INFO_TWO_BUF &&
        type != RPC_OMX_MAP_INFO_THREE_BUF) {
        return (-EINVAL);
    }

    map_info = (char *)((uint32_t)packet->data);

    if (msg_size < sizeof(int) + sizeof(RPC_OMX_MAP_INFO_TYPE))
        return (-EINVAL);

    buf_offset = *(int *)((uint32_t)map_info + sizeof(RPC_OMX_MAP_INFO_TYPE));
    if (buf_offset < 0 || (buf_offset + (sizeof(*buffer) * type)) > msg_size)
        return (-EINVAL);

    map_info = (char *)((uint32_t)map_info + buf_offset);

    for (i = 0; i < type; i++) {
        buffer = (uint32_t *)((uint32_t)map_info + (i * sizeof(*buffer)));
        if (*buffer) {
            /* currently only Tiler buffers are supported */
            status = mem_offset64_peer(pid, (uintptr_t)((uint32_t)*buffer), 4, &phys_addr, &phys_len);
            if (status >= 0) {
                if ((ipu_addr = _rpmsg_omx_pa2da(handle, (uint32_t)phys_addr)) != 0)
                    *buffer = ipu_addr;
                else {
                    status = -EINVAL;
                    break;
                }
            }
            else {
                status = -EINVAL;
                break;
            }
        }
    }

    return status;
}
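
/*
 * Layout of the map info parsed by _rpmsg_omx_map() above, reconstructed
 * from the parsing code (a sketch for reference only; the authoritative
 * definitions live in the OMX RPC headers):
 *
 *     packet->data:
 *         [0]               RPC_OMX_MAP_INFO_TYPE  (number of buffers)
 *         [4]               int buf_offset         (from start of data)
 *         ...
 *         [buf_offset]      uint32_t buffer[0]     (user virtual address)
 *         [buf_offset + 4]  uint32_t buffer[1]     (if TWO/THREE_BUF)
 *         [buf_offset + 8]  uint32_t buffer[2]     (if THREE_BUF)
 *
 * Each non-NULL buffer pointer is translated in place: user virtual ->
 * physical (mem_offset64_peer) -> remote-core device address
 * (_rpmsg_omx_pa2da) before the message is forwarded to the remote core.
 */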

/**
 * Handler for write() requests.
 *
 * Handles special write() requests that we export for control. A write()
 * request will send a message to the remote processor which is associated with
 * the client.
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual write() message.
 * \param io_ocb  OCB associated with client's session.
 *
 * \return POSIX errno value.
 *
 * \retval EOK      Success.
 * \retval ENOMEM   Not enough memory to perform the write.
 * \retval EIO      MessageQCopy_send failed.
 * \retval EINVAL   msg->i.bytes is negative.
 */

int
rpmsg_omx_write(resmgr_context_t *ctp, io_write_t *msg, RESMGR_OCB_T *io_ocb)
{
    int status;
    char buf[MessageQCopy_BUFSIZE];
    int bytes;
    rpmsg_omx_ocb_t * ocb = (rpmsg_omx_ocb_t *)io_ocb;
    rpmsg_omx_object * omx = ocb->omx;
    struct omx_msg_hdr * msg_hdr = NULL;

    if ((status = iofunc_write_verify(ctp, msg, io_ocb, NULL)) != EOK) {
        return (status);
    }

    /* Clamp the payload so header + payload fit in one MessageQCopy buffer. */
    bytes = ((int64_t) msg->i.nbytes) + sizeof(struct omx_msg_hdr) > MessageQCopy_BUFSIZE ?
            MessageQCopy_BUFSIZE - sizeof(struct omx_msg_hdr) : msg->i.nbytes;
    if (bytes < 0) {
        return EINVAL;
    }
    _IO_SET_WRITE_NBYTES (ctp, bytes);

    msg_hdr = (struct omx_msg_hdr *)buf;

    status = resmgr_msgread(ctp, msg_hdr->data, bytes, sizeof(msg->i));
    if (status != bytes) {
        return (errno);
    }

    /* check that we're in the correct state */
    if (omx->state != OMX_CONNECTED) {
        return (ENOTCONN);
    }

    status = _rpmsg_omx_map(omx->conn->procH, msg_hdr->data, bytes, ctp->info.pid);
    if (status < 0) {
        return -status;
    }

    msg_hdr->type = OMX_RAW_MSG;
    msg_hdr->len = bytes;

    status = MessageQCopy_send(omx->conn->procId, MultiProc_self(),
                               omx->remoteAddr, omx->addr, buf,
                               bytes + sizeof(struct omx_msg_hdr), TRUE);
    if (status < 0) {
        return (EIO);
    }

    return(EOK);
}
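
/*
 * Client-side sketch of the write path above (illustrative only): the client
 * writes an omx_packet; the driver prepends its own omx_msg_hdr, patches any
 * buffer pointers via _rpmsg_omx_map(), and forwards the result to the
 * remote endpoint:
 *
 *     struct omx_packet *pkt = (struct omx_packet *)buf;
 *     ...fill in pkt fields and map info...
 *     if (write(fd, pkt, pkt_len) < 0)
 *         ...ENOTCONN, EINVAL (bad map info), or EIO...
 */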

/**
 * Handler for notify() requests.
 *
 * Handles special notify() requests that we export for control. A notify
 * request results from the client calling select().
 *
 * \param ctp     Thread's associated context information.
 * \param msg     The actual notify() message.
 * \param ocb     OCB associated with client's session.
 *
 * \return POSIX errno value.
 */

Int rpmsg_omx_notify( resmgr_context_t *ctp, io_notify_t *msg, RESMGR_OCB_T *ocb)
{
    rpmsg_omx_ocb_t * omx_ocb = (rpmsg_omx_ocb_t *)ocb;
    rpmsg_omx_object * omx = omx_ocb->omx;
    int trig;
    int i = 0;
    Bool flag = FALSE;
    MsgList_t * item = NULL;
    int status = EOK;

    trig = _NOTIFY_COND_OUTPUT; /* clients can give us data */

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        if (rpmsg_omx_state.eventState [i].omx == omx) {
            flag = TRUE;
            break;
        }
    }

    pthread_mutex_lock(&rpmsg_omx_state.lock);
    /* Let the check remain at run-time. */
    if (flag == TRUE) {
        /* Let the check remain at run-time for handling any run-time
         * race conditions.
         */
        if (rpmsg_omx_state.eventState [i].bufList != NULL) {
            item = find_nl(i);
            if (item && item->num_events > 0) {
                trig |= _NOTIFY_COND_INPUT;
            }
        }
    }
    status = iofunc_notify(ctp, msg, omx_ocb->omx->notify, trig, NULL, NULL);
    pthread_mutex_unlock(&rpmsg_omx_state.lock);
    return status;
}
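
/*
 * Client-side sketch of the notify path above (illustrative only): the fd
 * becomes readable once _rpmsg_omx_addBufByPid() triggers the
 * IOFUNC_NOTIFY_INPUT event, so a standard select() loop works:
 *
 *     fd_set rfds;
 *     FD_ZERO(&rfds);
 *     FD_SET(fd, &rfds);
 *     if (select(fd + 1, &rfds, NULL, NULL, NULL) > 0 && FD_ISSET(fd, &rfds))
 *         read(fd, msg, sizeof(msg));  // will not block: a message is queued
 */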

/**
 * Detaches an rpmsg-omx resource manager device name.
 *
 * \param dev     The device to detach.
 *
 * \return None.
 */

static
Void
_deinit_rpmsg_omx_device (rpmsg_omx_dev_t * dev)
{
    resmgr_detach(syslink_dpp, dev->rpmsg_omx.resmgr_id, _RESMGR_DETACH_CLOSE);

    pthread_mutex_destroy(&dev->rpmsg_omx.mutex);

    free (dev);

    return;
}

/**
 * Initializes and attaches rpmsg-omx resource manager functions to an
 * rpmsg-omx device name.
 *
 * \param name    The name to use for the device under /dev.
 *
 * \return Pointer to the created rpmsg_omx_dev_t device.
 */

static
rpmsg_omx_dev_t *
_init_rpmsg_omx_device (char * name)
{
    iofunc_attr_t   * attr;
    resmgr_attr_t     resmgr_attr;
    rpmsg_omx_dev_t * dev = NULL;

    dev = malloc(sizeof(*dev));
    if (dev == NULL) {
        return NULL;
    }

    memset(&resmgr_attr, 0, sizeof resmgr_attr);
    resmgr_attr.nparts_max = 10;
    resmgr_attr.msg_max_size = 2048;
    memset(&dev->rpmsg_omx.mattr, 0, sizeof(iofunc_mount_t));
    dev->rpmsg_omx.mattr.flags = ST_NOSUID | ST_NOEXEC;
    dev->rpmsg_omx.mattr.conf = IOFUNC_PC_CHOWN_RESTRICTED |
                                IOFUNC_PC_NO_TRUNC |
                                IOFUNC_PC_SYNC_IO;
    dev->rpmsg_omx.mattr.funcs = &dev->rpmsg_omx.mfuncs;
    memset(&dev->rpmsg_omx.mfuncs, 0, sizeof(iofunc_funcs_t));
    dev->rpmsg_omx.mfuncs.nfuncs = _IOFUNC_NFUNCS;
    dev->rpmsg_omx.mfuncs.ocb_calloc = rpmsg_omx_ocb_calloc;
    dev->rpmsg_omx.mfuncs.ocb_free = rpmsg_omx_ocb_free;
    iofunc_func_init(_RESMGR_CONNECT_NFUNCS, &dev->rpmsg_omx.cfuncs,
                     _RESMGR_IO_NFUNCS, &dev->rpmsg_omx.iofuncs);
    iofunc_attr_init(attr = &dev->rpmsg_omx.cattr, S_IFCHR | 0777, NULL, NULL);
    dev->rpmsg_omx.iofuncs.devctl = rpmsg_omx_devctl;
    dev->rpmsg_omx.iofuncs.notify = rpmsg_omx_notify;
    dev->rpmsg_omx.iofuncs.close_ocb = rpmsg_omx_close_ocb;
    dev->rpmsg_omx.iofuncs.read = rpmsg_omx_read;
    dev->rpmsg_omx.iofuncs.write = rpmsg_omx_write;
    dev->rpmsg_omx.iofuncs.unblock = rpmsg_omx_read_unblock;
    attr->mount = &dev->rpmsg_omx.mattr;
    iofunc_time_update(attr);
    pthread_mutex_init(&dev->rpmsg_omx.mutex, NULL);

    snprintf (dev->rpmsg_omx.device_name, _POSIX_PATH_MAX, "/dev/%s", name);
    if (-1 == (dev->rpmsg_omx.resmgr_id =
                   resmgr_attach(syslink_dpp, &resmgr_attr,
                                 dev->rpmsg_omx.device_name, _FTYPE_ANY, 0,
                                 &dev->rpmsg_omx.cfuncs,
                                 &dev->rpmsg_omx.iofuncs, attr))) {
        pthread_mutex_destroy(&dev->rpmsg_omx.mutex);
        free(dev);
        return(NULL);
    }

    return(dev);
}

/**
 * Callback passed to MessageQCopy_registerNotify.
 *
 * This callback is called when a remote processor creates a MessageQCopy
 * handle with the same name as the local MessageQCopy handle and then
 * calls NameMap_register to notify the HOST of the handle.
 *
 * \param handle    The remote handle.
 * \param procId    The remote proc ID of the remote handle.
 * \param endpoint  The endpoint address of the remote handle.
 * \param desc      Name advertised by the remote handle (used for the
 *                  device name).
 * \param create    Create/delete indicator for the remote handle.
 *
 * \return None.
 */

static
Void
_rpmsg_omx_notify_cb (MessageQCopy_Handle handle, UInt16 procId,
                      UInt32 endpoint, Char * desc, Bool create)
{
    Int status = 0, i = 0;
    Bool found = FALSE;
    rpmsg_omx_conn_object * obj = NULL;

    for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
        if (rpmsg_omx_state.objects[i] == NULL) {
            found = TRUE;
            break;
        }
    }

    if (found) {
        /* found a space to save this mq handle, allocate memory */
        obj = Memory_calloc (NULL, sizeof (rpmsg_omx_conn_object), 0x0, NULL);
        if (obj) {
            /* store the object in the module info */
            rpmsg_omx_state.objects[i] = obj;

            /* store the mq info in the object */
            obj->mq = handle;
            obj->procId = procId;
            status = ProcMgr_open(&obj->procH, obj->procId);
            if (status < 0) {
                Osal_printf("Failed to open handle to proc %d", procId);
                /* release the slot so it does not point at freed memory */
                rpmsg_omx_state.objects[i] = NULL;
                Memory_free(NULL, obj, sizeof(rpmsg_omx_conn_object));
            }
            else {
                obj->addr = endpoint;

                /* create a /dev/rpmsg-omx instance for users to open */
                obj->dev = _init_rpmsg_omx_device(desc);
                if (obj->dev == NULL) {
                    Osal_printf("Failed to create %s", desc);
                    ProcMgr_close(&obj->procH);
                    rpmsg_omx_state.objects[i] = NULL;
                    Memory_free(NULL, obj, sizeof(rpmsg_omx_conn_object));
                }
            }
        }
    }
}

/**
 * Callback passed to MessageQCopy_create for the module.
 *
 * This callback is called when a message is received for the rpmsg-omx
 * module. This callback will never be called, since each client connection
 * gets its own endpoint for message passing.
 *
 * \param handle    The local MessageQCopy handle.
 * \param data      Data message
 * \param len       Length of data message
 * \param priv      Private information for the endpoint
 * \param src       Remote endpoint sending this message
 * \param srcProc   Remote proc ID sending this message
 *
 * \return None.
 */

static
Void
_rpmsg_omx_module_cb (MessageQCopy_Handle handle, void * data, int len,
                      void * priv, UInt32 src, UInt16 srcProc)
{
    Osal_printf ("_rpmsg_omx_module_cb callback");
}

/*!
 *  @brief   Module setup function.
 *
 *  @sa      rpmsg_omx_destroy
 */
Int
rpmsg_omx_setup (Void)
{
    UInt16 i;
    List_Params listparams;
    Int status = 0;
    Error_Block eb;
    pthread_attr_t thread_attr;
    struct sched_param sched_param;

    GT_0trace (curTrace, GT_ENTER, "rpmsg_omx_setup");

    Error_init(&eb);

    List_Params_init (&listparams);
    rpmsg_omx_state.gateHandle = (IGateProvider_Handle)
                                 GateSpinlock_create ((GateSpinlock_Handle) NULL, &eb);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (rpmsg_omx_state.gateHandle == NULL) {
        status = OMX_NOMEM;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "rpmsg_omx_setup",
                             status,
                             "Failed to create spinlock gate!");
    }
    else {
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        for (i = 0 ; i < MAX_PROCESSES ; i++) {
            rpmsg_omx_state.eventState [i].bufList = NULL;
            rpmsg_omx_state.eventState [i].omx = NULL;
            rpmsg_omx_state.eventState [i].refCount = 0;
            rpmsg_omx_state.eventState [i].head = NULL;
            rpmsg_omx_state.eventState [i].tail = NULL;
        }

        pthread_attr_init(&thread_attr);
        sched_param.sched_priority = PRIORITY_REALTIME_LOW;
        pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(&thread_attr, SCHED_RR);
        pthread_attr_setschedparam(&thread_attr, &sched_param);

        rpmsg_omx_state.run = TRUE;
        if (pthread_create(&rpmsg_omx_state.nt, &thread_attr, notifier_thread, NULL) == EOK) {
            pthread_setname_np(rpmsg_omx_state.nt, "rpmsg-omx-notifier");
            /* Initialize the driver mapping array. */
            Memory_set (&rpmsg_omx_state.objects,
                        0,
                        (sizeof (rpmsg_omx_conn_object *)
                         * MultiProc_MAXPROCESSORS));
            /* create a local handle and register for notifications with MessageQCopy */
            rpmsg_omx_state.mqHandle = MessageQCopy_create (
                                           MessageQCopy_ADDRANY,
                                           RPMSG_OMX_MODULE_NAME,
                                           _rpmsg_omx_module_cb,
                                           NULL,
                                           &rpmsg_omx_state.endpoint);
            if (rpmsg_omx_state.mqHandle == NULL) {
                /*! @retval OMX_FAIL Failed to create MessageQCopy handle! */
                status = -ENOMEM;
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "rpmsg_omx_setup",
                                     status,
                                     "Failed to create MessageQCopy handle!");
            }
            else {
                /* TBD: This could be replaced with a messageqcopy_open type call, one for
                 * each core */
                status = MessageQCopy_registerNotify (rpmsg_omx_state.mqHandle,
                                                      _rpmsg_omx_notify_cb);
                if (status < 0) {
                    MessageQCopy_delete (&rpmsg_omx_state.mqHandle);
                    /*! @retval OMX_FAIL Failed to register MQCopy handle! */
                    status = -ENOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "rpmsg_omx_setup",
                                         status,
                                         "Failed to register MQCopy handle!");
                }
            }
            if (status >= 0){
                rpmsg_omx_state.sem = OsalSemaphore_create(OsalSemaphore_Type_Binary);
                if (rpmsg_omx_state.sem == NULL) {
                    //MessageQCopy_unregisterNotify();
                    /*! @retval OMX_NOMEM Failed to create the connection semaphore! */
                    status = OMX_NOMEM;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "rpmsg_omx_setup",
                                         status,
                                         "Failed to create the connection semaphore!");
                }
            }
            if (status >= 0) {
                rpmsg_omx_state.isSetup = TRUE;
            }
            else {
                MessageQCopy_delete (&rpmsg_omx_state.mqHandle);
                rpmsg_omx_state.run = FALSE;
            }
        }
        else {
            rpmsg_omx_state.run = FALSE;
        }
        pthread_attr_destroy(&thread_attr);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
    }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_0trace (curTrace, GT_LEAVE, "rpmsg_omx_setup");
    return status;
}

/*!
 *  @brief   Module destroy function.
 *
 *  @sa      rpmsg_omx_setup
 */
Void
rpmsg_omx_destroy (Void)
{
    rpmsg_omx_EventPacket * packet;
    UInt32 i;
    List_Handle bufList;
    rpmsg_omx_object * omx = NULL;
    WaitingReaders_t * wr = NULL;
    struct _msg_info info;

    GT_0trace (curTrace, GT_ENTER, "rpmsg_omx_destroy");

    for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
        if (rpmsg_omx_state.objects[i]) {
            rpmsg_omx_conn_object * obj = rpmsg_omx_state.objects[i];
            _deinit_rpmsg_omx_device(obj->dev);
            ProcMgr_close(&obj->procH);
            Memory_free(NULL, obj, sizeof(rpmsg_omx_conn_object));
            rpmsg_omx_state.objects[i] = NULL;
        }
    }

    for (i = 0 ; i < MAX_PROCESSES ; i++) {
        omx = NULL;
        if (rpmsg_omx_state.eventState [i].omx != NULL) {
            /* This is recovery. Need to mark omx structures as invalid */
            omx = rpmsg_omx_state.eventState[i].omx;
            MessageQCopy_delete(&omx->mq);
            omx->mq = NULL;
        }
        bufList = rpmsg_omx_state.eventState [i].bufList;

        rpmsg_omx_state.eventState [i].bufList = NULL;
        rpmsg_omx_state.eventState [i].omx = NULL;
        rpmsg_omx_state.eventState [i].refCount = 0;
        if (bufList != NULL) {
            /* Dequeue waiting readers and reply to them */
            pthread_mutex_lock(&rpmsg_omx_state.lock);
            while ((wr = dequeue_waiting_reader(i)) != NULL) {
                /* Check if rcvid is still valid */
                if (MsgInfo(wr->rcvid, &info) != -1) {
                    put_wr(wr);
                    pthread_mutex_unlock(&rpmsg_omx_state.lock);
                    MsgError(wr->rcvid, EINTR);
                    pthread_mutex_lock(&rpmsg_omx_state.lock);
                }
            }
            /* Check for pending ionotify/select calls */
            if (omx) {
                if (IOFUNC_NOTIFY_INPUT_CHECK(omx->notify, 1, 0)) {
                    iofunc_notify_trigger(omx->notify, 1, IOFUNC_NOTIFY_INPUT);
                }
            }
            pthread_mutex_unlock(&rpmsg_omx_state.lock);

            /* Free event packets for any received but unprocessed events. */
            while (List_empty (bufList) != TRUE){
                packet = (rpmsg_omx_EventPacket *)
                         List_get (bufList);
                if (packet != NULL){
                    Memory_free (NULL, packet, sizeof(*packet));
                }
            }
            List_delete (&(bufList));
        }
    }

    /* Free the cached list */
    flush_uBuf();

    if (rpmsg_omx_state.sem) {
        OsalSemaphore_delete(&rpmsg_omx_state.sem);
    }

    if (rpmsg_omx_state.mqHandle) {
        //MessageQCopy_unregisterNotify();
        MessageQCopy_delete(&rpmsg_omx_state.mqHandle);
    }

    if (rpmsg_omx_state.gateHandle != NULL) {
        GateSpinlock_delete ((GateSpinlock_Handle *)
                             &(rpmsg_omx_state.gateHandle));
    }

    rpmsg_omx_state.isSetup = FALSE;
    rpmsg_omx_state.run = FALSE;
    // run through and destroy the thread, and all outstanding
    // omx structures
    pthread_mutex_lock(&rpmsg_omx_state.lock);
    pthread_cond_signal(&rpmsg_omx_state.cond);
    pthread_mutex_unlock(&rpmsg_omx_state.lock);
    pthread_join(rpmsg_omx_state.nt, NULL);
    pthread_mutex_lock(&rpmsg_omx_state.lock);
    while (rpmsg_omx_state.head != NULL) {
        int index;
        WaitingReaders_t *item;
        index = dequeue_notify_list_item(rpmsg_omx_state.head);
        if (index < 0)
            break;
        item = dequeue_waiting_reader(index);
        while (item) {
            put_wr(item);
            item = dequeue_waiting_reader(index);
        }
    }
    rpmsg_omx_state.head = NULL;
    rpmsg_omx_state.tail = NULL;
    pthread_mutex_unlock(&rpmsg_omx_state.lock);

    GT_0trace (curTrace, GT_LEAVE, "rpmsg_omx_destroy");
}

/** ============================================================================
 *  Internal functions
 *  ============================================================================
 */