1 /*
2 * @file rpmsg-omxdrv.c
3 *
4 * @brief devctl handler for OMX component.
5 *
6 *
7 * @ver 02.00.00.46_alpha1
8 *
9 * ============================================================================
10 *
11 * Copyright (c) 2010-2011, Texas Instruments Incorporated
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * * Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 *
20 * * Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * * Neither the name of Texas Instruments Incorporated nor the names of
25 * its contributors may be used to endorse or promote products derived
26 * from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
30 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
32 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
33 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
34 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
35 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
36 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
37 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
38 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * Contact information for paper mail:
40 * Texas Instruments
41 * Post Office Box 655303
42 * Dallas, Texas 75265
43 * Contact information:
44 * http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
45 * DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
46 * ============================================================================
47 *
48 */
51 /* Standard headers */
52 #include <ti/syslink/Std.h>
54 /* OSAL & Utils headers */
55 #include <ti/syslink/utils/List.h>
56 #include <ti/syslink/utils/String.h>
57 #include <ti/syslink/utils/Trace.h>
58 #include <ti/syslink/utils/Memory.h>
59 #include <ti/syslink/utils/IGateProvider.h>
60 #include <ti/syslink/utils/GateSpinlock.h>
61 #include <_MultiProc.h>
63 /*QNX specific header include */
64 #include <errno.h>
65 #include <unistd.h>
66 #include <sys/iofunc.h>
67 #include <sys/dispatch.h>
68 #include <sys/netmgr.h>
69 #include <devctl.h>
71 /* Module headers */
72 #include <ti/ipc/rpmsg_omx.h>
73 #include <ti/ipc/MessageQCopy.h>
74 #include <_MessageQCopy.h>
75 #include <_MessageQCopyDefs.h>
76 #include "OsalSemaphore.h"
77 #include "std_qnx.h"
78 #include <pthread.h>
80 #include <memmgr/tilermem.h>
81 #include <memmgr/tiler.h>
83 #include "rpmsg-omxdrv.h"
84 #include <rpmsg.h>
86 #define PRIORITY_REALTIME_LOW 29
88 extern int mem_offset64_peer(pid_t pid, const uintptr_t addr, size_t len,
89 off64_t *offset, size_t *contig_len);
91 static MsgList_t *nl_cache;
92 static int num_nl = 0;
93 static WaitingReaders_t *wr_cache;
94 static int num_wr = 0;
96 /*
97 * Instead of constantly allocating and freeing the notifier structures
98 * we just cache a few of them, and recycle them instead.
99 * The cache count is set with CACHE_NUM in rpmsg-omxdrv.h.
100 */
102 static MsgList_t *get_nl()
103 {
104 MsgList_t *item;
105 item = nl_cache;
106 if (item != NULL) {
107 nl_cache = nl_cache->next;
108 num_nl--;
109 } else {
110 item = Memory_alloc(NULL, sizeof(MsgList_t), 0, NULL);
111 }
112 return(item);
113 }
115 static void put_nl(MsgList_t *item)
116 {
117 if (num_nl >= CACHE_NUM) {
118 Memory_free(NULL, item, sizeof(*item));
119 } else {
120 item->next = nl_cache;
121 nl_cache = item;
122 num_nl++;
123 }
124 return;
125 }
127 static WaitingReaders_t *get_wr()
128 {
129 WaitingReaders_t *item;
130 item = wr_cache;
131 if (item != NULL) {
132 wr_cache = wr_cache->next;
133 num_wr--;
134 } else {
135 item = Memory_alloc(NULL, sizeof(WaitingReaders_t), 0, NULL);
136 }
137 return(item);
138 }
140 static void put_wr(WaitingReaders_t *item)
141 {
142 if (num_wr >= CACHE_NUM) {
143 Memory_free(NULL, item, sizeof(*item));
144 } else {
145 item->next = wr_cache;
146 wr_cache = item;
147 num_wr++;
148 }
149 return;
150 }
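/*
 * The helpers above implement a small free-list cache: instead of calling the
 * allocator for every MsgList_t / WaitingReaders_t, released items are pushed
 * onto a singly linked list and reused, and are only really freed once
 * CACHE_NUM items are already cached.  The sketch below shows the same
 * recycling pattern in isolation (illustrative only; it is not part of this
 * driver and uses plain malloc/free instead of Memory_alloc/Memory_free):
 *
 *     #include <stdlib.h>
 *
 *     #define CACHE_NUM 10
 *
 *     typedef struct node { struct node *next; int payload; } node_t;
 *
 *     static node_t *cache = NULL;
 *     static int     num_cached = 0;
 *
 *     static node_t *get_node(void)
 *     {
 *         node_t *item = cache;
 *         if (item != NULL) {            // reuse a cached node
 *             cache = cache->next;
 *             num_cached--;
 *         } else {                       // cache empty: fall back to malloc
 *             item = malloc(sizeof(*item));
 *         }
 *         return item;
 *     }
 *
 *     static void put_node(node_t *item)
 *     {
 *         if (num_cached >= CACHE_NUM) { // cache full: really free it
 *             free(item);
 *         } else {                       // otherwise push it back for reuse
 *             item->next = cache;
 *             cache = item;
 *             num_cached++;
 *         }
 *     }
 */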
152 typedef enum RPC_OMX_MAP_INFO_TYPE
153 {
154 RPC_OMX_MAP_INFO_NONE = 0,
155 RPC_OMX_MAP_INFO_ONE_BUF = 1,
156 RPC_OMX_MAP_INFO_TWO_BUF = 2,
157 RPC_OMX_MAP_INFO_THREE_BUF = 3,
158 RPC_OMX_MAP_INFO_MAX = 0x7FFFFFFF
159 } RPC_OMX_MAP_INFO_TYPE;
161 /* structure to hold rpmsg-omx device information */
162 typedef struct named_device {
163 iofunc_mount_t mattr;
164 iofunc_attr_t cattr;
165 int resmgr_id;
166 pthread_mutex_t mutex;
167 iofunc_funcs_t mfuncs;
168 resmgr_connect_funcs_t cfuncs;
169 resmgr_io_funcs_t iofuncs;
170 char device_name[_POSIX_PATH_MAX];
171 } named_device_t;
173 /* rpmsg-omx device structure */
174 typedef struct rpmsg_omx_dev {
175 dispatch_t * dpp;
176 thread_pool_t * tpool;
177 named_device_t rpmsg_omx;
178 } rpmsg_omx_dev_t;
180 /*!
181 * @brief Remote connection object
182 */
183 typedef struct rpmsg_omx_conn_object {
184 rpmsg_omx_dev_t * dev;
185 MessageQCopy_Handle mq;
186 UInt32 addr;
187 UInt16 procId;
188 ProcMgr_Handle procH;
189 } rpmsg_omx_conn_object;
191 /*!
192 * @brief omx instance object
193 */
194 typedef struct rpmsg_omx_object_tag {
195 MessageQCopy_Handle mq;
196 rpmsg_omx_conn_object * conn;
197 UInt32 addr;
198 UInt32 remoteAddr;
199 UInt16 procId;
200 pid_t pid;
201 Int state;
202 iofunc_notify_t notify[3];
203 } rpmsg_omx_object;
205 /*!
 *  @brief  Structure of Event callback argument passed to the register function.
 */
207 */
208 typedef struct rpmsg_omx_EventCbck_tag {
209 List_Elem element;
210 /*!< List element header */
211 rpmsg_omx_object * omx;
212 /*!< User omx info pointer. Passed back to user callback function */
213 UInt32 pid;
214 /*!< Process Identifier for user process. */
215 } rpmsg_omx_EventCbck ;
217 /*!
218 * @brief Keeps the information related to Event.
219 */
220 typedef struct rpmsg_omx_EventState_tag {
221 List_Handle bufList;
222 /*!< Head of received event list. */
223 UInt32 pid;
224 /*!< User process ID. */
225 rpmsg_omx_object * omx;
226 /*!< User omx comp. */
227 UInt32 refCount;
228 /*!< Reference count, used when multiple Notify_registerEvent are called
229 from same process space (multi threads/processes). */
230 WaitingReaders_t * head;
231 /*!< Waiting readers head. */
232 WaitingReaders_t * tail;
233 /*!< Waiting readers tail. */
234 } rpmsg_omx_EventState;
236 /*!
237 * @brief Per-connection information
238 */
239 typedef struct rpmsg_omx_ocb {
240 iofunc_ocb_t hdr;
241 pid_t pid;
242 rpmsg_omx_object * omx;
243 } rpmsg_omx_ocb_t;
245 typedef struct rpmsg_omx_name {
246 char name[RPMSG_NAME_SIZE];
} rpmsg_omx_name_t;
249 #define RPMSG_OMX_MODULE_NAME "rpmsg-omx"
251 /*!
252 * @brief rpmsg-omx Module state object
253 */
254 typedef struct rpmsg_omx_ModuleObject_tag {
255 Bool isSetup;
256 /*!< Indicates whether the module has been already setup */
257 Bool openRefCount;
258 /*!< Open reference count. */
259 IGateProvider_Handle gateHandle;
260 /*!< Handle of gate to be used for local thread safety */
261 rpmsg_omx_EventState eventState [MAX_PROCESSES];
262 /*!< List for all user processes registered. */
263 rpmsg_omx_conn_object * objects [MultiProc_MAXPROCESSORS];
264 /*!< List of all remote connections. */
265 MessageQCopy_Handle mqHandle;
266 /*!< Local mq handle associated with this module */
267 UInt32 endpoint;
268 /*!< Local endpoint associated with the mq handle */
269 OsalSemaphore_Handle sem;
270 /*!< Handle to semaphore used for omx instance connection notifications */
271 pthread_t nt;
272 /*!< notifier thread */
273 pthread_mutex_t lock;
274 /*!< protection between notifier and event */
275 pthread_cond_t cond;
276 /*!< protection between notifier and event */
277 MsgList_t *head;
278 /*!< list head */
279 MsgList_t *tail;
280 /*!< list tail */
281 int run;
282 /*!< notifier thread must keep running */
283 } rpmsg_omx_ModuleObject;
285 /*!
286 * @brief Structure of Event Packet read from notify kernel-side.
287 */
288 typedef struct rpmsg_omx_EventPacket_tag {
289 List_Elem element;
290 /*!< List element header */
291 UInt32 pid;
    /*!< Process identifier */
293 rpmsg_omx_object * obj;
294 /*!< Pointer to the channel associated with this callback */
295 UInt8 data[MessageQCopy_BUFSIZE];
296 /*!< Data associated with event. */
297 UInt32 len;
298 /*!< Length of the data associated with event. */
299 UInt32 src;
300 /*!< Src endpoint associated with event. */
301 struct rpmsg_omx_EventPacket * next;
302 struct rpmsg_omx_EventPacket * prev;
303 } rpmsg_omx_EventPacket ;
306 /*
307 * Instead of constantly allocating and freeing the uBuf structures
308 * we just cache a few of them, and recycle them instead.
309 * The cache count is set with CACHE_NUM in rpmsg-omxdrv.h.
310 */
311 static rpmsg_omx_EventPacket *uBuf_cache;
312 static int num_uBuf = 0;
314 static void flush_uBuf()
315 {
316 rpmsg_omx_EventPacket *uBuf = NULL;
318 while(uBuf_cache) {
319 num_uBuf--;
320 uBuf = uBuf_cache;
321 uBuf_cache = (rpmsg_omx_EventPacket *)uBuf_cache->next;
322 Memory_free(NULL, uBuf, sizeof(*uBuf));
323 }
324 }
326 static rpmsg_omx_EventPacket *get_uBuf()
327 {
328 rpmsg_omx_EventPacket *uBuf;
329 uBuf = uBuf_cache;
330 if (uBuf != NULL) {
331 uBuf_cache = (rpmsg_omx_EventPacket *)uBuf_cache->next;
332 num_uBuf--;
333 } else {
334 uBuf = Memory_alloc(NULL, sizeof(rpmsg_omx_EventPacket), 0, NULL);
335 }
336 return(uBuf);
337 }
339 static void put_uBuf(rpmsg_omx_EventPacket * uBuf)
340 {
341 if (num_uBuf >= CACHE_NUM) {
342 Memory_free(NULL, uBuf, sizeof(*uBuf));
343 } else {
344 uBuf->next = (struct rpmsg_omx_EventPacket *)uBuf_cache;
345 uBuf_cache = uBuf;
346 num_uBuf++;
347 }
348 return;
349 }
352 /** ============================================================================
353 * Globals
354 * ============================================================================
355 */
356 /*!
357 * @var rpmsg_omx_state
358 *
359 * @brief rpmsg-omx state object variable
360 */
361 static rpmsg_omx_ModuleObject rpmsg_omx_state =
362 {
363 .gateHandle = NULL,
364 .isSetup = FALSE,
365 .openRefCount = 0,
366 .nt = 0,
367 .lock = PTHREAD_MUTEX_INITIALIZER,
368 .cond = PTHREAD_COND_INITIALIZER,
369 .head = NULL,
370 .tail = NULL,
371 .run = 0
372 };
374 extern dispatch_t * syslink_dpp;
377 static MsgList_t *find_nl(int index)
378 {
379 MsgList_t *item=NULL;
380 item = rpmsg_omx_state.head;
381 while (item) {
382 if (item->index == index)
383 return(item);
384 item = item->next;
385 }
386 return(item);
387 }
389 /* we have the right locks when calling this function */
390 /*!
391 * @brief Function to enqueue a notify list item.
392 *
393 * @param index Index of the client process associated with the item.
394 *
395 * @sa find_nl
396 * get_nl
397 */
398 static int enqueue_notify_list(int index)
399 {
400 MsgList_t *item;
401 item = find_nl(index);
402 if (item == NULL) {
403 item = get_nl();
404 if (item == NULL) {
405 return(-1);
406 }
407 item->next = NULL;
408 item->index = index;
409 item->num_events=1;
410 if (rpmsg_omx_state.head == NULL) {
411 rpmsg_omx_state.head = item;
412 rpmsg_omx_state.tail = item;
413 item->prev = NULL;
414 }
415 else {
416 item->prev = rpmsg_omx_state.tail;
417 rpmsg_omx_state.tail->next = item;
418 rpmsg_omx_state.tail = item;
419 }
420 }
421 else {
422 item->num_events++;
423 }
424 return(0);
425 }
427 /* we have the right locks when calling this function */
428 /*!
429 * @brief Function to dequeue a notify list item.
430 *
431 * @param item The item to remove.
432 *
433 * @sa put_nl
434 */
435 static inline int dequeue_notify_list_item(MsgList_t *item)
436 {
437 int index;
438 if (item == NULL) {
439 return(-1);
440 }
441 index = item->index;
442 item->num_events--;
443 if (item->num_events > 0) {
444 return(index);
445 }
446 if (rpmsg_omx_state.head == item) {
447 // removing head
448 rpmsg_omx_state.head = item->next;
449 if (rpmsg_omx_state.head != NULL) {
450 rpmsg_omx_state.head->prev = NULL;
451 }
452 else {
453 // removing head and tail
454 rpmsg_omx_state.tail = NULL;
455 }
456 }
457 else {
458 item->prev->next = item->next;
459 if (item->next != NULL) {
460 item->next->prev = item->prev;
461 }
462 else {
463 // removing tail
464 rpmsg_omx_state.tail = item->prev;
465 }
466 }
467 put_nl(item);
468 return(index);
469 }
471 /* we have the right locks when calling this function */
472 /*!
473 * @brief Function to add a waiting reader to the list.
474 *
475 * @param index Index of the client process waiting reader to add.
476 * @param rcvid Receive ID of the client process that was passed
477 * when the client called read().
478 *
479 * @sa None
480 */
481 static int enqueue_waiting_reader(int index, int rcvid)
482 {
483 WaitingReaders_t *item;
484 item = get_wr();
485 if (item == NULL) {
486 return(-1);
487 }
488 item->rcvid = rcvid;
489 item->next = NULL;
490 if (rpmsg_omx_state.eventState [index].head == NULL) {
491 rpmsg_omx_state.eventState [index].head = item;
492 rpmsg_omx_state.eventState [index].tail = item;
493 }
494 else {
495 rpmsg_omx_state.eventState [index].tail->next = item;
496 rpmsg_omx_state.eventState [index].tail = item;
497 }
498 return(EOK);
499 }
501 /* we have the right locks when calling this function */
502 /* caller frees item */
503 /*!
504 * @brief Function to remove a waiting reader from the list.
505 *
506 * @param index Index of the client process waiting reader to dequeue.
507 *
508 * @sa None
509 */
510 static WaitingReaders_t *dequeue_waiting_reader(int index)
511 {
512 WaitingReaders_t *item = NULL;
513 if (rpmsg_omx_state.eventState [index].head) {
514 item = rpmsg_omx_state.eventState [index].head;
515 rpmsg_omx_state.eventState [index].head = rpmsg_omx_state.eventState [index].head->next;
516 if (rpmsg_omx_state.eventState [index].head == NULL) {
517 rpmsg_omx_state.eventState [index].tail = NULL;
518 }
519 }
520 return(item);
521 }
523 /*!
 *  @brief  Function to find a specified waiting reader and remove it from the list.
525 *
526 * @param index Index of the client process waiting for the message.
527 * @param rcvid Receive ID of the client process that was passed
528 * when the client called read().
529 *
530 * @sa None
531 */
533 static WaitingReaders_t *find_waiting_reader(int index, int rcvid)
534 {
535 WaitingReaders_t *item = NULL;
536 WaitingReaders_t *prev = NULL;
537 if (rpmsg_omx_state.eventState [index].head) {
538 item = rpmsg_omx_state.eventState [index].head;
539 while (item) {
540 if (item->rcvid == rcvid) {
541 /* remove item from list */
542 if (prev)
543 prev->next = item->next;
544 if (item == rpmsg_omx_state.eventState [index].head)
545 rpmsg_omx_state.eventState [index].head = item->next;
546 break;
547 }
548 else {
549 prev = item;
550 item = item->next;
551 }
552 }
553 }
554 return item;
555 }
557 /*!
558 * @brief Function used to check if there is a waiting reader with an
559 * event (message) ready to be delivered.
560 *
561 * @param index Index of the client process waiting for the message.
562 * @param item Pointer to the waiting reader.
563 *
564 * @sa dequeue_notify_list_item
565 * dequeue_waiting_reader
566 */
568 static int find_available_reader_and_event(int *index, WaitingReaders_t **item)
569 {
570 MsgList_t *temp;
571 if (rpmsg_omx_state.head == NULL) {
572 return(0);
573 }
574 temp = rpmsg_omx_state.head;
575 while (temp) {
576 if (rpmsg_omx_state.eventState [temp->index].head) {
577 // event and reader found
578 if (dequeue_notify_list_item(temp) >= 0) {
579 *index = temp->index;
580 *item = dequeue_waiting_reader(temp->index);
581 }
582 else {
583 /* error occurred, return 0 as item has not been set */
584 return(0);
585 }
586 return(1);
587 }
588 temp = temp->next;
589 }
590 return(0);
591 }
593 /*!
594 * @brief Function used to deliver the notification to the client that
595 * it has received a message.
596 *
 *  @param  index    Index of the client process receiving the message.
598 * @param rcvid Receive ID of the client process that was passed
599 * when the client called read().
600 *
601 * @sa put_uBuf
602 */
604 static void deliver_notification(int index, int rcvid)
605 {
606 int err = EOK;
607 rpmsg_omx_EventPacket * uBuf = NULL;
608 struct omx_msg_hdr * hdr = NULL;
    uBuf = (rpmsg_omx_EventPacket *) List_get (rpmsg_omx_state.eventState [index].bufList);

    /* Let the check remain at run-time; only dereference uBuf once it is
     * known to be valid. */
    if (uBuf != NULL) {
        hdr = (struct omx_msg_hdr *)uBuf->data;
        err = MsgReply(rcvid, hdr->len, hdr->data, hdr->len);
616 if (err == -1)
617 perror("deliver_notification: MsgReply");
618 /* Free the processed event callback packet. */
619 put_uBuf(uBuf);
620 }
621 else {
622 MsgReply(rcvid, EOK, NULL, 0);
623 }
624 return;
625 }
627 /*!
628 * @brief Thread used for notifying waiting readers of messages.
629 *
630 * @param arg Thread-specific private arg.
631 *
632 * @sa find_available_reader_and_event
633 * deliver_notification
634 * put_wr
635 */
636 static void *notifier_thread(void *arg)
637 {
638 int status;
639 int index;
640 WaitingReaders_t *item = NULL;
641 pthread_mutex_lock(&rpmsg_omx_state.lock);
642 while (rpmsg_omx_state.run) {
643 status = find_available_reader_and_event(&index, &item);
644 if ( (status == 0) || (item == NULL) ) {
645 status = pthread_cond_wait(&rpmsg_omx_state.cond, &rpmsg_omx_state.lock);
646 if ((status != EOK) && (status != EINTR)) {
                // unexpected error from pthread_cond_wait; exit the notifier loop
648 break;
649 }
650 status = find_available_reader_and_event(&index, &item);
651 if ( (status == 0) || (item == NULL) ) {
652 continue;
653 }
654 }
655 pthread_mutex_unlock(&rpmsg_omx_state.lock);
656 // we have unlocked, and now we have an event to deliver
657 // we deliver one event at a time, relock, check and continue
658 deliver_notification(index, item->rcvid);
659 pthread_mutex_lock(&rpmsg_omx_state.lock);
660 put_wr(item);
661 }
662 pthread_mutex_unlock(&rpmsg_omx_state.lock);
663 return(NULL);
664 }
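/*
 * The notifier thread above is the consumer half of a classic
 * mutex/condition-variable handoff: producers (_rpmsg_omx_addBufByPid and
 * rpmsg_omx_read) queue work on the notify list and signal
 * rpmsg_omx_state.cond, while this thread sleeps in pthread_cond_wait and
 * drains one event per wakeup, delivering it outside the lock.  A minimal,
 * self-contained sketch of that handoff (illustrative only, not driver code):
 *
 *     #include <pthread.h>
 *
 *     static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *     static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
 *     static int pending = 0;        // stands in for the notify list
 *     static int running = 1;
 *
 *     static void *consumer(void *arg)
 *     {
 *         pthread_mutex_lock(&lock);
 *         while (running) {
 *             while (pending == 0 && running)   // re-check after every wakeup
 *                 pthread_cond_wait(&cond, &lock);
 *             if (!running)
 *                 break;
 *             pending--;
 *             pthread_mutex_unlock(&lock);      // deliver outside the lock
 *             // ... deliver one event here ...
 *             pthread_mutex_lock(&lock);
 *         }
 *         pthread_mutex_unlock(&lock);
 *         return NULL;
 *     }
 *
 *     static void producer(void)
 *     {
 *         pthread_mutex_lock(&lock);
 *         pending++;                            // queue one event
 *         pthread_cond_signal(&cond);
 *         pthread_mutex_unlock(&lock);
 *     }
 */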
667 static
668 Int
669 _rpmsg_omx_connect(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_omx_ocb_t *ocb)
670 {
671 Int status = EOK;
672 struct omx_conn_req * cargs = (struct omx_conn_req *)(_DEVCTL_DATA (msg->i));
673 struct omx_msg_hdr * hdr = NULL;
674 rpmsg_omx_object * omx = ocb->omx;
675 UInt8 buf[sizeof(struct omx_conn_req) + sizeof(struct omx_msg_hdr)];
677 if (omx->state == OMX_CONNECTED) {
678 GT_0trace(curTrace, GT_4CLASS, "Already connected.");
679 status = (EISCONN);
680 }
681 else if (ctp->info.msglen - sizeof(msg->i) < sizeof (struct omx_conn_req)) {
682 status = (EINVAL);
683 }
684 else if (String_nlen(cargs->name, 47) == -1) {
685 status = (EINVAL);
686 }
687 else {
688 hdr = (struct omx_msg_hdr *)buf;
689 hdr->type = OMX_CONN_REQ;
690 hdr->flags = 0;
691 hdr->len = sizeof(struct omx_conn_req);
692 Memory_copy(hdr->data, cargs, sizeof(struct omx_conn_req));
694 status = MessageQCopy_send (omx->conn->procId, // remote procid
695 MultiProc_self(), // local procid
696 omx->conn->addr, // remote server
697 omx->addr, // local address
698 buf, // connect msg
699 sizeof(buf), // msg size
700 TRUE); // wait for available bufs
701 if (status != MessageQCopy_S_SUCCESS) {
702 GT_0trace(curTrace, GT_4CLASS, "Failed to send connect message.");
703 status = (EIO);
704 }
705 else {
706 status = OsalSemaphore_pend(rpmsg_omx_state.sem, 5000);
707 if (omx->state == OMX_CONNECTED) {
708 msg->o.ret_val = EOK;
709 status = (_RESMGR_PTR(ctp, &msg->o, sizeof(msg->o)));
710 }
711 else if (omx->state == OMX_FAIL) {
712 GT_0trace(curTrace, GT_4CLASS, "Failed to connect message.");
713 status = (ENXIO);
714 }
715 else if (status < 0) {
716 GT_0trace(curTrace, GT_4CLASS, "Semaphore pend failed.");
717 status = (EIO);
718 }
719 else {
720 status = (ETIMEDOUT);
721 }
722 }
723 }
725 return status;
726 }
729 static
730 Int
731 _rpmsg_omx_disconnect(resmgr_context_t *ctp, io_devctl_t *msg, rpmsg_omx_ocb_t *ocb)
732 {
733 Int status = EOK;
734 struct omx_msg_hdr * hdr = NULL;
735 rpmsg_omx_object * omx = ocb->omx;
736 UInt8 buf[sizeof(struct omx_disc_req) + sizeof(struct omx_msg_hdr)];
737 struct omx_disc_req * dreq = NULL;
739 if (omx->state != OMX_CONNECTED) {
740 GT_0trace(curTrace, GT_4CLASS, "Already disconnected.");
741 status = (ENOTCONN);
742 }
743 else {
744 hdr = (struct omx_msg_hdr *)buf;
745 hdr->type = OMX_DISCONNECT;
746 hdr->flags = 0;
        hdr->len = sizeof(struct omx_disc_req);
748 dreq = (struct omx_disc_req *)hdr->data;
749 dreq->addr = omx->remoteAddr;
751 status = MessageQCopy_send (omx->conn->procId, // remote procid
752 MultiProc_self(), // local procid
753 omx->conn->addr, // remote server
754 omx->addr, // local address
755 buf, // connect msg
756 sizeof(buf), // msg size
757 TRUE); // wait for available bufs
758 if (status != MessageQCopy_S_SUCCESS) {
759 GT_0trace(curTrace, GT_4CLASS, "Failed to send disconnect message.");
760 status = (EIO);
761 }
762 else {
763 /* There will be no response, so don't wait. */
764 omx->state = OMX_UNCONNECTED;
765 }
766 }
768 return status;
769 }
772 Int
773 rpmsg_omx_devctl(resmgr_context_t *ctp, io_devctl_t *msg, IOFUNC_OCB_T *i_ocb)
774 {
775 Int status = 0;
776 rpmsg_omx_ocb_t *ocb = (rpmsg_omx_ocb_t *)i_ocb;
778 if ((status = iofunc_devctl_default(ctp, msg, &ocb->hdr)) != _RESMGR_DEFAULT)
779 return(_RESMGR_ERRNO(status));
780 status = 0;
782 switch (msg->i.dcmd)
783 {
784 case OMX_IOCCONNECT:
785 status = _rpmsg_omx_connect (ctp, msg, ocb);
786 break;
787 default:
788 status = (ENOSYS);
789 break;
790 }
792 return status;
793 }
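/*
 * From user space, a client opens one of the /dev/rpmsg-omx* devices created
 * by _init_rpmsg_omx_device() and then issues OMX_IOCCONNECT to bind to a
 * service on the remote core.  A hedged sketch of that call sequence follows
 * (illustrative only: the device path is an example, and the exact layout of
 * struct omx_conn_req comes from ti/ipc/rpmsg_omx.h and may differ between
 * IPC releases):
 *
 *     #include <fcntl.h>
 *     #include <unistd.h>
 *     #include <string.h>
 *     #include <errno.h>
 *     #include <devctl.h>
 *     #include <ti/ipc/rpmsg_omx.h>   // OMX_IOCCONNECT, struct omx_conn_req
 *
 *     int omx_connect(const char *dev, const char *service)
 *     {
 *         struct omx_conn_req req;
 *         int fd = open(dev, O_RDWR);  // e.g. "/dev/rpmsg-omx1"
 *         if (fd < 0)
 *             return -1;
 *         memset(&req, 0, sizeof(req));
 *         strncpy(req.name, service, sizeof(req.name) - 1);
 *         if (devctl(fd, OMX_IOCCONNECT, &req, sizeof(req), NULL) != EOK) {
 *             close(fd);
 *             return -1;
 *         }
 *         return fd;                   // connected; ready for read()/write()
 *     }
 */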
796 /*!
797 * @brief Attach a process to rpmsg-omx user support framework.
798 *
 *  @param  omx    OMX instance object for the attaching process
800 *
801 * @sa _rpmsg_omx_detach
802 */
803 static
804 Int
805 _rpmsg_omx_attach (rpmsg_omx_object * omx)
806 {
807 Int32 status = EOK;
808 Bool flag = FALSE;
809 Bool isInit = FALSE;
810 List_Object * bufList = NULL;
811 IArg key = 0;
812 List_Params listparams;
813 UInt32 i;
815 GT_1trace (curTrace, GT_ENTER, "_rpmsg_omx_attach", omx);
817 key = IGateProvider_enter (rpmsg_omx_state.gateHandle);
818 for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
819 if (rpmsg_omx_state.eventState [i].omx == omx) {
820 rpmsg_omx_state.eventState [i].refCount++;
821 isInit = TRUE;
822 status = EOK;
823 break;
824 }
825 }
827 if (isInit == FALSE) {
828 List_Params_init (&listparams);
829 bufList = List_create (&listparams) ;
830 /* Search for an available slot for user process. */
831 for (i = 0 ; i < MAX_PROCESSES ; i++) {
832 if (rpmsg_omx_state.eventState [i].omx == NULL) {
833 rpmsg_omx_state.eventState [i].omx = omx;
834 rpmsg_omx_state.eventState [i].refCount = 1;
835 rpmsg_omx_state.eventState [i].bufList = bufList;
836 flag = TRUE;
837 break;
838 }
839 }
841 /* No free slots found. Let this check remain at run-time,
842 * since it is dependent on user environment.
843 */
844 if (flag != TRUE) {
845 /*! @retval Notify_E_RESOURCE Maximum number of
846 supported user clients have already been registered. */
847 status = -ENOMEM;
848 GT_setFailureReason (curTrace,
849 GT_4CLASS,
850 "rpmsgDrv_attach",
851 status,
852 "Maximum number of supported user"
853 " clients have already been "
854 "registered.");
855 if (bufList != NULL) {
856 List_delete (&bufList);
857 }
858 }
859 }
860 IGateProvider_leave (rpmsg_omx_state.gateHandle, key);
862 GT_1trace (curTrace, GT_LEAVE, "rpmsgDrv_attach", status);
864 /*! @retval Notify_S_SUCCESS Operation successfully completed. */
865 return status ;
866 }
869 /*!
870 * @brief This function adds a data to a registered process.
871 *
 *  @param  omx    OMX object associated with the client
873 * @param src Source address (endpoint) sending the data
874 * @param pid Process ID associated with the client
875 * @param data Data to be added
876 * @param len Length of data to be added
877 *
878 * @sa
879 */
880 Int
881 _rpmsg_omx_addBufByPid (rpmsg_omx_object *omx,
882 UInt32 src,
883 UInt32 pid,
884 void * data,
885 UInt32 len)
886 {
887 Int32 status = EOK;
888 Bool flag = FALSE;
889 rpmsg_omx_EventPacket * uBuf = NULL;
890 IArg key;
891 UInt32 i;
892 WaitingReaders_t *item;
893 MsgList_t *msgItem;
895 GT_5trace (curTrace,
896 GT_ENTER,
897 "_rpmsg_omx_addBufByPid",
898 omx,
899 src,
900 pid,
901 data,
902 len);
904 GT_assert (curTrace, (rpmsg_omx_state.isSetup == TRUE));
906 key = IGateProvider_enter (rpmsg_omx_state.gateHandle);
907 /* Find the registration for this callback */
908 for (i = 0 ; i < MAX_PROCESSES ; i++) {
909 if (rpmsg_omx_state.eventState [i].omx == omx) {
910 flag = TRUE;
911 break;
912 }
913 }
914 IGateProvider_leave (rpmsg_omx_state.gateHandle, key);
916 #if !defined(SYSLINK_BUILD_OPTIMIZE)
917 if (flag != TRUE) {
918 /*! @retval ENOMEM Could not find a registered handler
919 for this process. */
920 status = -ENOMEM;
921 GT_setFailureReason (curTrace,
922 GT_4CLASS,
923 "_rpmsgDrv_addBufByPid",
924 status,
925 "Could not find a registered handler "
926 "for this process.!");
927 }
928 else {
929 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
930 /* Allocate memory for the buf */
931 pthread_mutex_lock(&rpmsg_omx_state.lock);
932 uBuf = get_uBuf();
933 pthread_mutex_unlock(&rpmsg_omx_state.lock);
935 #if !defined(SYSLINK_BUILD_OPTIMIZE)
936 if (uBuf == NULL) {
937 /*! @retval Notify_E_MEMORY Failed to allocate memory for event
938 packet for received callback. */
939 status = -ENOMEM;
940 GT_setFailureReason (curTrace,
941 GT_4CLASS,
942 "_rpmsgDrv_addBufByPid",
943 status,
944 "Failed to allocate memory for event"
945 " packet for received callback.!");
946 }
947 else {
948 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
949 List_elemClear (&(uBuf->element));
950 GT_assert (curTrace,
951 (rpmsg_omx_state.eventState [i].bufList != NULL));
953 if (data) {
954 Memory_copy(uBuf->data, data, len);
955 }
956 uBuf->len = len;
958 List_put (rpmsg_omx_state.eventState [i].bufList,
959 &(uBuf->element));
960 pthread_mutex_lock(&rpmsg_omx_state.lock);
961 item = dequeue_waiting_reader(i);
962 if (item) {
963 // there is a waiting reader
964 deliver_notification(i, item->rcvid);
965 put_wr(item);
966 pthread_mutex_unlock(&rpmsg_omx_state.lock);
967 status = EOK;
968 }
969 else {
970 if (enqueue_notify_list(i) < 0) {
971 pthread_mutex_unlock(&rpmsg_omx_state.lock);
972 status = -ENOMEM;
973 GT_setFailureReason (curTrace,
974 GT_4CLASS,
975 "_rpmsgDrv_addBufByPid",
976 status,
977 "Failed to allocate memory for notifier");
978 }
979 else {
980 msgItem = find_nl(i);
981 /* TODO: omx could be NULL in some cases */
982 if (omx && msgItem) {
983 if (IOFUNC_NOTIFY_INPUT_CHECK(omx->notify, msgItem->num_events, 0)) {
984 iofunc_notify_trigger(omx->notify, msgItem->num_events, IOFUNC_NOTIFY_INPUT);
985 }
986 }
987 status = EOK;
988 pthread_cond_signal(&rpmsg_omx_state.cond);
989 pthread_mutex_unlock(&rpmsg_omx_state.lock);
990 }
991 }
992 #if !defined(SYSLINK_BUILD_OPTIMIZE)
993 }
994 }
995 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
997 GT_1trace (curTrace, GT_LEAVE, "_rpmsgDrv_addBufByPid", status);
999 return status;
1000 }
1003 /*!
1004 * @brief This function implements the callback registered with
1005 * MessageQCopy_create for each client. This function
1006 * adds the message from the remote proc to a list
1007 * where it is routed to the appropriate waiting reader.
1008 *
 *  @param  handle     MessageQCopy handle on which the message arrived
 *  @param  data       Payload of the received message
 *  @param  len        Length of the received payload
 *  @param  priv       Private argument (the rpmsg_omx_object for this client)
 *  @param  src        Remote endpoint address that sent the message
 *  @param  srcProc    Remote processor ID that sent the message
1014 *
1015 * @sa
1016 */
1017 Void
1018 _rpmsg_omx_cb (MessageQCopy_Handle handle, void * data, int len, void * priv, UInt32 src, UInt16 srcProc)
1019 {
1020 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1021 Int32 status = 0;
1022 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1023 rpmsg_omx_object * omx = NULL;
1024 struct omx_msg_hdr * msg_hdr = NULL;
1025 struct omx_conn_rsp * reply;
1027 GT_6trace (curTrace,
1028 GT_ENTER,
1029 "_rpmsg_omx_cb",
1030 handle,
1031 data,
1032 len,
1033 priv,
1034 src,
1035 srcProc);
1037 omx = (rpmsg_omx_object *) priv;
1038 msg_hdr = (struct omx_msg_hdr *)data;
1040 switch (msg_hdr->type) {
1041 case OMX_CONN_RSP:
1042 reply = (struct omx_conn_rsp *)msg_hdr->data;
1043 omx->remoteAddr = reply->addr;
1044 if (reply->status != OMX_SUCCESS) {
1045 omx->state = OMX_FAIL;
1046 }
1047 else {
1048 omx->state = OMX_CONNECTED;
1049 }
1050 /* post the semaphore to have the ioctl reply */
1051 OsalSemaphore_post(rpmsg_omx_state.sem);
1052 break;
1053 case OMX_RAW_MSG:
1054 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1055 status =
1056 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1057 _rpmsg_omx_addBufByPid (omx,
1058 src,
1059 omx->pid,
1060 data,
1061 len);
1062 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1063 if (status < 0) {
1064 GT_setFailureReason (curTrace,
1065 GT_4CLASS,
1066 "_rpmsg_omx_cb",
1067 status,
1068 "Failed to add callback packet for pid");
1069 }
1070 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1071 break;
1072 default:
1073 break;
1074 }
1076 GT_0trace (curTrace, GT_LEAVE, "_rpmsg_omx_cb");
1077 }
1079 /**
1080 * Handler for ocb_calloc() requests.
1081 *
1082 * Special handler for ocb_calloc() requests that we export for control. An
1083 * open request from the client will result in a call to our special ocb_calloc
 * handler. This function attaches the client's pid using _rpmsg_omx_attach
 * and allocates client-specific information. It also creates an endpoint
 * for the client to communicate with the OMX server on the remote core.
1088 *
1089 * \param ctp Thread's associated context information.
1090 * \param device Device attributes structure.
1091 *
1092 * \return Pointer to an iofunc_ocb_t OCB structure.
1093 */
1095 IOFUNC_OCB_T *
1096 rpmsg_omx_ocb_calloc (resmgr_context_t * ctp, IOFUNC_ATTR_T * device)
1097 {
1098 rpmsg_omx_ocb_t *ocb = NULL;
1099 rpmsg_omx_object *obj = NULL;
1100 struct _msg_info cl_info;
1101 rpmsg_omx_dev_t * dev = NULL;
1102 int i = 0;
1103 Bool found = FALSE;
1104 char path1[20];
1105 char path2[20];
1107 GT_2trace (curTrace, GT_ENTER, "rpmsg_omx_ocb_calloc",
1108 ctp, device);
1110 /* Allocate the OCB */
1111 ocb = (rpmsg_omx_ocb_t *) calloc (1, sizeof (rpmsg_omx_ocb_t));
1112 if (ocb == NULL){
1113 errno = ENOMEM;
1114 return (NULL);
1115 }
1117 ocb->pid = ctp->info.pid;
1119 /* Allocate memory for the rpmsg object. */
1120 obj = Memory_calloc (NULL, sizeof (rpmsg_omx_object), 0u, NULL);
1121 if (obj == NULL) {
1122 errno = ENOMEM;
1123 free(ocb);
1124 return (NULL);
1125 }
1126 else {
1127 ocb->omx = obj;
1128 IOFUNC_NOTIFY_INIT(obj->notify);
1129 obj->state = OMX_UNCONNECTED;
1130 /* determine conn and procId for communication based on which device was opened */
1131 MsgInfo(ctp->rcvid, &cl_info);
1132 resmgr_pathname(ctp->id, 0, path1, sizeof(path1));
1133 for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
1134 if (rpmsg_omx_state.objects[i] != NULL) {
1135 dev = rpmsg_omx_state.objects[i]->dev;
1136 resmgr_pathname(dev->rpmsg_omx.resmgr_id, 0, path2, sizeof(path2));
1137 if (!strcmp(path1, path2)) {
1138 found = TRUE;
1139 break;
1140 }
1141 }
1142 }
1143 if (found) {
1144 obj->conn = rpmsg_omx_state.objects[i];
1145 obj->procId = obj->conn->procId;
1146 obj->pid = ctp->info.pid;
1147 obj->mq = MessageQCopy_create (MessageQCopy_ADDRANY, NULL, _rpmsg_omx_cb, obj, &obj->addr);
1148 if (obj->mq == NULL) {
1149 errno = ENOMEM;
1150 free(obj);
1151 free(ocb);
1152 return (NULL);
1153 }
1154 else {
1155 if (_rpmsg_omx_attach (ocb->omx) < 0) {
1156 errno = ENOMEM;
1157 MessageQCopy_delete (&obj->mq);
1158 free(obj);
1159 free(ocb);
1160 return (NULL);
1161 }
1162 }
1163 }
1164 }
1166 GT_1trace (curTrace, GT_LEAVE, "rpmsg_omx_ocb_calloc", ocb);
1168 return (IOFUNC_OCB_T *)(ocb);
1169 }
1172 /*!
1173 * @brief Detach a process from rpmsg-omx user support framework.
1174 *
 *  @param  omx    OMX instance object for the detaching process
1176 *
1177 * @sa _rpmsg_omx_attach
1178 */
1179 static
1180 Int
1181 _rpmsg_omx_detach (rpmsg_omx_object * omx)
1182 {
1183 Int32 status = EOK;
1184 Int32 tmpStatus = EOK;
1185 Bool flag = FALSE;
1186 List_Object * bufList = NULL;
1187 UInt32 i;
1188 IArg key;
1189 MsgList_t * item;
1190 WaitingReaders_t * wr = NULL;
1191 struct _msg_info info;
1193 GT_1trace (curTrace, GT_ENTER, "rpmsg_omx_detach", omx);
1195 key = IGateProvider_enter (rpmsg_omx_state.gateHandle);
1197 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1198 if (rpmsg_omx_state.eventState [i].omx == omx) {
1199 if (rpmsg_omx_state.eventState [i].refCount == 1) {
1200 rpmsg_omx_state.eventState [i].refCount = 0;
1202 flag = TRUE;
1203 break;
1204 }
1205 else {
1206 rpmsg_omx_state.eventState [i].refCount--;
1207 status = EOK;
1208 break;
1209 }
1210 }
1211 }
1212 IGateProvider_leave (rpmsg_omx_state.gateHandle, key);
1214 if (flag == TRUE) {
1215 key = IGateProvider_enter (rpmsg_omx_state.gateHandle);
1216 /* Last client being unregistered for this process. */
1217 rpmsg_omx_state.eventState [i].omx = NULL;
1219 /* Store in local variable to delete outside lock. */
1220 bufList = rpmsg_omx_state.eventState [i].bufList;
1222 rpmsg_omx_state.eventState [i].bufList = NULL;
1224 IGateProvider_leave (rpmsg_omx_state.gateHandle, key);
1225 }
1227 if (flag != TRUE) {
1228 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1229 if (i == MAX_PROCESSES) {
1230 /*! @retval Notify_E_NOTFOUND The specified user process was
1231 not found registered with Notify Driver module. */
1232 status = -ENOMEM;
1233 GT_setFailureReason (curTrace,
1234 GT_4CLASS,
1235 "rpmsg_omx_detach",
1236 status,
1237 "The specified user process was not found"
1238 " registered with rpmsg Driver module.");
1239 }
1240 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1241 }
1242 else {
1243 if (bufList != NULL) {
1244 /* Dequeue waiting readers and reply to them */
1245 pthread_mutex_lock(&rpmsg_omx_state.lock);
1246 while ((wr = dequeue_waiting_reader(i)) != NULL) {
1247 /* Check if rcvid is still valid */
1248 if (MsgInfo(wr->rcvid, &info) != -1) {
1249 put_wr(wr);
1250 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1251 MsgError(wr->rcvid, EINTR);
1252 pthread_mutex_lock(&rpmsg_omx_state.lock);
1253 }
1254 }
1255 /* Check for pending ionotify/select calls */
1256 if (omx) {
1257 if (IOFUNC_NOTIFY_INPUT_CHECK(omx->notify, 1, 0)) {
1258 iofunc_notify_trigger(omx->notify, 1, IOFUNC_NOTIFY_INPUT);
1259 }
1260 }
1262 /* Free event packets for any received but unprocessed events. */
1263 while ((item = find_nl(i)) != NULL) {
1264 if (dequeue_notify_list_item(item) >= 0) {
1265 rpmsg_omx_EventPacket * uBuf = NULL;
1267 uBuf = (rpmsg_omx_EventPacket *) List_get (bufList);
1269 /* Let the check remain at run-time. */
1270 if (uBuf != NULL) {
1271 put_uBuf(uBuf);
1272 }
1273 }
1274 }
1275 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1277 /* Last client being unregistered with Notify module. */
1278 List_delete (&bufList);
1279 }
1281 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1282 if ((tmpStatus < 0) && (status >= 0)) {
1283 status = tmpStatus;
1284 GT_setFailureReason (curTrace,
1285 GT_4CLASS,
1286 "rpmsg_omx_detach",
1287 status,
1288 "Failed to delete termination semaphore!");
1289 }
1290 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1291 }
1293 GT_1trace (curTrace, GT_LEAVE, "rpmsg_omx_detach", status);
1295 /*! @retval Notify_S_SUCCESS Operation successfully completed */
1296 return status;
1297 }
1299 /**
1300 * Handler for ocb_free() requests.
1301 *
1302 * Special handler for ocb_free() requests that we export for control. A
1303 * close request from the client will result in a call to our special ocb_free
 * handler. This function detaches the client's pid using _rpmsg_omx_detach
1305 * and frees any client-specific information that was allocated.
1306 *
1307 * \param i_ocb OCB associated with client's session.
1308 *
 * \return None.
1312 */
1314 void
1315 rpmsg_omx_ocb_free (IOFUNC_OCB_T * i_ocb)
1316 {
1317 rpmsg_omx_ocb_t * ocb = (rpmsg_omx_ocb_t *)i_ocb;
1318 rpmsg_omx_object *obj;
1320 if (ocb && ocb->omx) {
1321 obj = ocb->omx;
1322 if (obj->state == OMX_CONNECTED) {
1323 /* Need to disconnect this device */
1324 _rpmsg_omx_disconnect(NULL, NULL, ocb);
1325 }
1326 _rpmsg_omx_detach(ocb->omx);
1327 if (obj->mq) {
1328 MessageQCopy_delete (&obj->mq);
1329 obj->mq = NULL;
1330 }
1331 free (obj);
1332 free (ocb);
1333 }
1334 }
1336 /**
1337 * Handler for close_ocb() requests.
1338 *
1339 * This function removes the notification entries associated with the current
1340 * client.
1341 *
1342 * \param ctp Thread's associated context information.
1343 * \param reserved This argument must be NULL.
1344 * \param ocb OCB associated with client's session.
1345 *
1346 * \return POSIX errno value.
1347 *
1348 * \retval EOK Success.
1349 */
1351 Int
1352 rpmsg_omx_close_ocb (resmgr_context_t *ctp, void *reserved, RESMGR_OCB_T *ocb)
1353 {
1354 rpmsg_omx_ocb_t * omx_ocb = (rpmsg_omx_ocb_t *)ocb;
1355 iofunc_notify_remove(ctp, omx_ocb->omx->notify);
1356 return (iofunc_close_ocb_default(ctp, reserved, ocb));
1357 }
1359 /**
1360 * Handler for read() requests.
1361 *
1362 * Handles special read() requests that we export for control. A read
1363 * request will get a message from the remote processor that is associated
1364 * with the client that is calling read().
1365 *
1366 * \param ctp Thread's associated context information.
1367 * \param msg The actual read() message.
1368 * \param ocb OCB associated with client's session.
1369 *
1370 * \return POSIX errno value.
1371 *
1372 * \retval EOK Success.
1373 * \retval EAGAIN Call is non-blocking and no messages available.
 * \retval ENOMEM      Not enough memory to perform the read.
1375 */
1377 int rpmsg_omx_read(resmgr_context_t *ctp, io_read_t *msg, RESMGR_OCB_T *ocb)
1378 {
1379 Int status;
1380 rpmsg_omx_ocb_t * omx_ocb = (rpmsg_omx_ocb_t *)ocb;
1381 rpmsg_omx_object * omx = omx_ocb->omx;
1382 Bool flag = FALSE;
1383 Int retVal = EOK;
1384 UInt32 i;
1385 MsgList_t * item;
1386 Int nonblock;
1388 if ((status = iofunc_read_verify(ctp, msg, ocb, &nonblock)) != EOK)
1389 return (status);
1391 if (omx->state != OMX_CONNECTED) {
1392 return (ENOTCONN);
1393 }
1395 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1396 if (rpmsg_omx_state.eventState [i].omx == omx) {
1397 flag = TRUE;
1398 break;
1399 }
1400 }
1402 /* Let the check remain at run-time. */
1403 if (flag == TRUE) {
1404 /* Let the check remain at run-time for handling any run-time
1405 * race conditions.
1406 */
1407 if (rpmsg_omx_state.eventState [i].bufList != NULL) {
1408 pthread_mutex_lock(&rpmsg_omx_state.lock);
1409 item = find_nl(i);
1410 if (dequeue_notify_list_item(item) < 0) {
1411 if (nonblock) {
1412 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1413 return EAGAIN;
1414 }
1415 else {
1416 retVal = enqueue_waiting_reader(i, ctp->rcvid);
1417 if (retVal == EOK) {
1418 pthread_cond_signal(&rpmsg_omx_state.cond);
1419 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1420 return(_RESMGR_NOREPLY);
1421 }
1422 retVal = ENOMEM;
1423 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1424 }
1425 }
1426 else {
1427 deliver_notification(i, ctp->rcvid);
1428 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1429 return(_RESMGR_NOREPLY);
1430 }
1431 }
1432 }
    /*! @retval retVal  EOK or an error code; message data itself is returned via MsgReply. */
1435 return retVal;
1436 }
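/*
 * A connected client receives messages from the remote OMX server simply by
 * calling read() on the same file descriptor; the payload handed back by
 * deliver_notification() is the omx_msg_hdr data area.  Minimal sketch
 * (illustrative only; MessageQCopy_BUFSIZE bounds the largest possible
 * message):
 *
 *     #include <unistd.h>
 *
 *     // Blocks until a message arrives, or returns -1 with errno set
 *     // (e.g. EAGAIN if the descriptor was opened with O_NONBLOCK).
 *     ssize_t omx_recv(int fd, void *buf, size_t len)
 *     {
 *         return read(fd, buf, len);
 *     }
 */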
1438 /**
1439 * Unblock read calls
1440 *
1441 * This function checks if the client is blocked on a read call and if so,
1442 * unblocks the client.
1443 *
1444 * \param ctp Thread's associated context information.
1445 * \param msg The pulse message.
1446 * \param ocb OCB associated with client's session.
1447 *
1448 * \return POSIX errno value.
1449 *
1450 * \retval EINTR The client has been unblocked.
1451 * \retval other The client has not been unblocked or the client was not
1452 * blocked.
1453 */
1455 int rpmsg_omx_read_unblock(resmgr_context_t *ctp, io_pulse_t *msg, iofunc_ocb_t *ocb)
1456 {
1457 UInt32 i;
1458 Bool flag = FALSE;
1459 WaitingReaders_t * wr;
1460 rpmsg_omx_ocb_t * omx_ocb = (rpmsg_omx_ocb_t *)ocb;
1461 rpmsg_omx_object * omx = omx_ocb->omx;
1463 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1464 if (rpmsg_omx_state.eventState [i].omx == omx) {
1465 flag = TRUE;
1466 break;
1467 }
1468 }
1470 /* Let the check remain at run-time. */
1471 if (flag == TRUE) {
1472 /* Let the check remain at run-time for handling any run-time
1473 * race conditions.
1474 */
1475 if (rpmsg_omx_state.eventState [i].bufList != NULL) {
1476 pthread_mutex_lock(&rpmsg_omx_state.lock);
1477 wr = find_waiting_reader(i, ctp->rcvid);
1478 if (wr) {
1479 put_wr(wr);
1480 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1481 return (EINTR);
1482 }
1483 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1484 }
1485 }
1487 return _RESMGR_NOREPLY;
1488 }
1490 /**
1491 * Handler for unblock() requests.
1492 *
1493 * Handles unblock request for the client which is requesting to no longer be
1494 * blocked on the rpmsg-omx driver.
1495 *
1496 * \param ctp Thread's associated context information.
1497 * \param msg The pulse message.
1498 * \param ocb OCB associated with client's session.
1499 *
1500 * \return POSIX errno value.
1501 *
1502 * \retval EINTR The rcvid has been unblocked.
1503 */
1505 int rpmsg_omx_unblock(resmgr_context_t *ctp, io_pulse_t *msg, RESMGR_OCB_T *ocb)
1506 {
1507 int status = _RESMGR_NOREPLY;
1508 struct _msg_info info;
1510 /*
1511 * Try to run the default unblock for this message.
1512 */
1513 if ((status = iofunc_unblock_default(ctp,msg,ocb)) != _RESMGR_DEFAULT) {
1514 return status;
1515 }
1517 /*
1518 * Check if rcvid is still valid and still has an unblock
1519 * request pending.
1520 */
1521 if (MsgInfo(ctp->rcvid, &info) == -1 ||
1522 !(info.flags & _NTO_MI_UNBLOCK_REQ)) {
1523 return _RESMGR_NOREPLY;
1524 }
1526 if (rpmsg_omx_read_unblock(ctp, msg, ocb) != _RESMGR_NOREPLY) {
1527 return _RESMGR_ERRNO(EINTR);
1528 }
1530 return _RESMGR_ERRNO(EINTR);
1531 }
1534 uint32_t
1535 _rpmsg_omx_pa2da(ProcMgr_Handle handle, uint32_t pa)
1536 {
1537 Int status = 0;
1538 uint32_t da;
1540 if (pa >= TILER_MEM_8BIT && pa < TILER_MEM_END) {
1541 return pa;
1542 }
1543 else {
1544 status = ProcMgr_translateAddr(handle, (Ptr *)&da,
1545 ProcMgr_AddrType_SlaveVirt,
1546 (Ptr)pa, ProcMgr_AddrType_MasterPhys);
1547 if (status >= 0)
1548 return da;
1549 else
1550 return 0;
1551 }
1552 }
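/*
 * _rpmsg_omx_map() below validates and translates the buffer pointers that a
 * client embeds in an outgoing message.  The layout it expects inside the
 * omx_packet data area (offsets relative to packet->data; the map-info type
 * field is normally 4 bytes) is:
 *
 *     +0            RPC_OMX_MAP_INFO_TYPE  map_info   (number of buffers, 0..3)
 *     +4            int                    buf_offset (offset of the pointer array)
 *     +buf_offset   uint32_t               buffer[0]  \
 *     +buf_offset+4 uint32_t               buffer[1]   } map_info entries,
 *     +buf_offset+8 uint32_t               buffer[2]  /  translated in place
 *
 * Each buffer entry is a client virtual address (currently only TILER
 * allocations are supported); it is converted to a physical address with
 * mem_offset64_peer() and then to a device address with _rpmsg_omx_pa2da()
 * before the message is forwarded to the remote core.
 */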
1554 int
1555 _rpmsg_omx_map(ProcMgr_Handle handle, char *data, uint32_t bytes, pid_t pid)
1556 {
1557 int status = EOK;
1558 struct omx_packet *packet = (struct omx_packet *)data;
1559 char *map_info = NULL;
1560 RPC_OMX_MAP_INFO_TYPE type;
1561 int i = 0;
1562 int buf_offset = 0;
1563 uint32_t *buffer = NULL;
1564 off64_t phys_addr;
1565 uint32_t ipu_addr;
1566 uint32_t msg_size;
1567 size_t phys_len = 0;
1569 if (bytes <= sizeof(struct omx_packet)) {
1570 msg_size = 0;
1571 }
1572 else {
1573 msg_size = bytes - sizeof(struct omx_packet);
1574 }
1575 if (msg_size < sizeof(RPC_OMX_MAP_INFO_TYPE))
1576 return (-EINVAL);
1578 type = *(RPC_OMX_MAP_INFO_TYPE *)(packet->data);
1580 if (type == RPC_OMX_MAP_INFO_NONE)
1581 return EOK;
1582 if (type != RPC_OMX_MAP_INFO_ONE_BUF &&
1583 type != RPC_OMX_MAP_INFO_TWO_BUF &&
1584 type != RPC_OMX_MAP_INFO_THREE_BUF) {
1585 return (-EINVAL);
1586 }
1588 map_info = (char *)((uint32_t)packet->data);
1590 if (msg_size < sizeof(int) + sizeof(RPC_OMX_MAP_INFO_TYPE))
1591 return (-EINVAL);
1593 buf_offset = *(int *)((uint32_t)map_info + sizeof(RPC_OMX_MAP_INFO_TYPE));
1594 if (buf_offset < 0 || (buf_offset + (sizeof(*buffer) * type)) > msg_size)
1595 return (-EINVAL);
1597 map_info = (char *)((uint32_t)map_info + buf_offset);
1599 for (i = 0; i < type; i++) {
1600 buffer = (uint32_t *)((uint32_t)map_info + (i * sizeof(*buffer)));
1601 if (*buffer) {
1602 /* currently only Tiler buffers are supported */
1603 status = mem_offset64_peer(pid, (uintptr_t)((uint32_t)*buffer), 4, &phys_addr, &phys_len);
1604 if (status >= 0) {
1605 if ((ipu_addr = _rpmsg_omx_pa2da(handle, (uint32_t)phys_addr)) != 0)
1606 *buffer = ipu_addr;
1607 else {
1608 status = -EINVAL;
1609 break;
1610 }
1611 }
1612 else {
1613 status = -EINVAL;
1614 break;
1615 }
1616 }
1617 }
1619 return status;
1620 }
1622 /**
1623 * Handler for write() requests.
1624 *
1625 * Handles special write() requests that we export for control. A write()
1626 * request will send a message to the remote processor which is associated with
1627 * the client.
1628 *
1629 * \param ctp Thread's associated context information.
1630 * \param msg The actual write() message.
1631 * \param io_ocb OCB associated with client's session.
1632 *
1633 * \return POSIX errno value.
1634 *
1635 * \retval EOK Success.
 * \retval ENOMEM      Not enough memory to perform the write.
1637 * \retval EIO MessageQCopy_send failed.
1638 * \retval EINVAL msg->i.bytes is negative.
1639 */
1641 int
1642 rpmsg_omx_write(resmgr_context_t *ctp, io_write_t *msg, RESMGR_OCB_T *io_ocb)
1643 {
1644 int status;
1645 char buf[MessageQCopy_BUFSIZE];
1646 int bytes;
1647 rpmsg_omx_ocb_t * ocb = (rpmsg_omx_ocb_t *)io_ocb;
1648 rpmsg_omx_object * omx = ocb->omx;
1649 struct omx_msg_hdr * msg_hdr = NULL;
1651 if ((status = iofunc_write_verify(ctp, msg, io_ocb, NULL)) != EOK) {
1652 return (status);
1653 }
1655 bytes = ((int64_t) msg->i.nbytes) + sizeof(struct omx_msg_hdr) > MessageQCopy_BUFSIZE ?
1656 MessageQCopy_BUFSIZE - sizeof(struct omx_msg_hdr) : msg->i.nbytes;
1657 if (bytes < 0) {
1658 return EINVAL;
1659 }
1660 _IO_SET_WRITE_NBYTES (ctp, bytes);
1662 msg_hdr = (struct omx_msg_hdr *)buf;
1664 status = resmgr_msgread(ctp, msg_hdr->data, bytes, sizeof(msg->i));
1665 if (status != bytes) {
1666 return (errno);
1667 }
1669 /* check that we're in the correct state */
1670 if (omx->state != OMX_CONNECTED) {
1671 return (ENOTCONN);
1672 }
1674 status = _rpmsg_omx_map(omx->conn->procH, msg_hdr->data, bytes, ctp->info.pid);
1675 if (status < 0) {
1676 return -status;
1677 }
1679 msg_hdr->type = OMX_RAW_MSG;
1680 msg_hdr->flags = 0;
1681 msg_hdr->len = bytes;
1683 status = MessageQCopy_send(omx->conn->procId, MultiProc_self(),
1684 omx->remoteAddr, omx->addr, buf,
1685 bytes + sizeof(struct omx_msg_hdr), TRUE);
1686 if (status < 0) {
1687 return (EIO);
1688 }
1690 return(EOK);
1691 }
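/*
 * Sending a message to the remote OMX server from user space is a plain
 * write() of an omx_packet; the driver prepends the rpmsg-omx header and
 * performs the buffer translation described above.  Hedged sketch
 * (illustrative only; struct omx_packet comes from ti/ipc/rpmsg_omx.h and its
 * exact fields may differ between IPC releases):
 *
 *     #include <unistd.h>
 *
 *     // 'pkt' points to a fully populated omx_packet of 'len' bytes,
 *     // including any map-info section expected by _rpmsg_omx_map().
 *     int omx_send(int fd, const void *pkt, size_t len)
 *     {
 *         ssize_t n = write(fd, pkt, len);
 *         return (n == (ssize_t)len) ? 0 : -1;
 *     }
 */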
1695 /**
1696 * Handler for notify() requests.
1697 *
1698 * Handles special notify() requests that we export for control. A notify
1699 * request results from the client calling select().
1700 *
1701 * \param ctp Thread's associated context information.
1702 * \param msg The actual notify() message.
1703 * \param ocb OCB associated with client's session.
1704 *
1705 * \return POSIX errno value.
1706 */
1708 Int rpmsg_omx_notify( resmgr_context_t *ctp, io_notify_t *msg, RESMGR_OCB_T *ocb)
1709 {
1710 rpmsg_omx_ocb_t * omx_ocb = (rpmsg_omx_ocb_t *)ocb;
1711 rpmsg_omx_object * omx = omx_ocb->omx;
1712 int trig;
1713 int i = 0;
1714 Bool flag = FALSE;
1715 MsgList_t * item = NULL;
1716 int status = EOK;
1718 trig = _NOTIFY_COND_OUTPUT; /* clients can give us data */
1720 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1721 if (rpmsg_omx_state.eventState [i].omx == omx) {
1722 flag = TRUE;
1723 break;
1724 }
1725 }
1727 pthread_mutex_lock(&rpmsg_omx_state.lock);
1728 /* Let the check remain at run-time. */
1729 if (flag == TRUE) {
1730 /* Let the check remain at run-time for handling any run-time
1731 * race conditions.
1732 */
1733 if (rpmsg_omx_state.eventState [i].bufList != NULL) {
1734 item = find_nl(i);
1735 if (item && item->num_events > 0) {
1736 trig |= _NOTIFY_COND_INPUT;
1737 }
1738 }
1739 }
1740 status = iofunc_notify(ctp, msg, omx_ocb->omx->notify, trig, NULL, NULL);
1741 pthread_mutex_unlock(&rpmsg_omx_state.lock);
1742 return status;
1743 }
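/*
 * Because rpmsg_omx_notify() participates in the io-notify protocol, clients
 * can multiplex several rpmsg-omx descriptors (or mix them with other file
 * descriptors) using select()/poll() instead of blocking in read().  Minimal
 * sketch (illustrative only):
 *
 *     #include <sys/select.h>
 *     #include <unistd.h>
 *
 *     // Waits until 'fd' has a message ready, then reads it.
 *     ssize_t omx_wait_and_recv(int fd, void *buf, size_t len)
 *     {
 *         fd_set rfds;
 *         FD_ZERO(&rfds);
 *         FD_SET(fd, &rfds);
 *         if (select(fd + 1, &rfds, NULL, NULL, NULL) <= 0)
 *             return -1;
 *         return read(fd, buf, len);
 *     }
 */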
1745 /**
 * Detaches an rpmsg-omx resource manager device name.
1747 *
1748 * \param dev The device to detach.
1749 *
 * \return None.
1751 */
1753 static
1754 Void
1755 _deinit_rpmsg_omx_device (rpmsg_omx_dev_t * dev)
1756 {
1757 resmgr_detach(syslink_dpp, dev->rpmsg_omx.resmgr_id, _RESMGR_DETACH_CLOSE);
1759 pthread_mutex_destroy(&dev->rpmsg_omx.mutex);
1761 free (dev);
1763 return;
1764 }
1766 /**
 * Initializes and attaches rpmsg-omx resource manager functions to an
 * rpmsg-omx device name.
 *
 * \param name The name to use for the device (appended to "/dev/").
 *
 * \return Pointer to the created rpmsg_omx_dev_t device.
1773 */
1775 static
1776 rpmsg_omx_dev_t *
1777 _init_rpmsg_omx_device (char * name)
1778 {
1779 iofunc_attr_t * attr;
1780 resmgr_attr_t resmgr_attr;
1781 rpmsg_omx_dev_t * dev = NULL;
1783 dev = malloc(sizeof(*dev));
1784 if (dev == NULL) {
1785 return NULL;
1786 }
1788 memset(&resmgr_attr, 0, sizeof resmgr_attr);
1789 resmgr_attr.nparts_max = 10;
1790 resmgr_attr.msg_max_size = 2048;
1791 memset(&dev->rpmsg_omx.mattr, 0, sizeof(iofunc_mount_t));
1792 dev->rpmsg_omx.mattr.flags = ST_NOSUID | ST_NOEXEC;
1793 dev->rpmsg_omx.mattr.conf = IOFUNC_PC_CHOWN_RESTRICTED |
1794 IOFUNC_PC_NO_TRUNC |
1795 IOFUNC_PC_SYNC_IO;
1796 dev->rpmsg_omx.mattr.funcs = &dev->rpmsg_omx.mfuncs;
1797 memset(&dev->rpmsg_omx.mfuncs, 0, sizeof(iofunc_funcs_t));
1798 dev->rpmsg_omx.mfuncs.nfuncs = _IOFUNC_NFUNCS;
1799 dev->rpmsg_omx.mfuncs.ocb_calloc = rpmsg_omx_ocb_calloc;
1800 dev->rpmsg_omx.mfuncs.ocb_free = rpmsg_omx_ocb_free;
1801 iofunc_func_init(_RESMGR_CONNECT_NFUNCS, &dev->rpmsg_omx.cfuncs,
1802 _RESMGR_IO_NFUNCS, &dev->rpmsg_omx.iofuncs);
1803 iofunc_attr_init(attr = &dev->rpmsg_omx.cattr, S_IFCHR | 0777, NULL, NULL);
1804 dev->rpmsg_omx.iofuncs.devctl = rpmsg_omx_devctl;
1805 dev->rpmsg_omx.iofuncs.notify = rpmsg_omx_notify;
1806 dev->rpmsg_omx.iofuncs.close_ocb = rpmsg_omx_close_ocb;
1807 dev->rpmsg_omx.iofuncs.read = rpmsg_omx_read;
1808 dev->rpmsg_omx.iofuncs.write = rpmsg_omx_write;
1809 dev->rpmsg_omx.iofuncs.unblock = rpmsg_omx_read_unblock;
1810 attr->mount = &dev->rpmsg_omx.mattr;
1811 iofunc_time_update(attr);
1812 pthread_mutex_init(&dev->rpmsg_omx.mutex, NULL);
1814 snprintf (dev->rpmsg_omx.device_name, _POSIX_PATH_MAX, "/dev/%s", name);
1815 if (-1 == (dev->rpmsg_omx.resmgr_id =
1816 resmgr_attach(syslink_dpp, &resmgr_attr,
1817 dev->rpmsg_omx.device_name, _FTYPE_ANY, 0,
1818 &dev->rpmsg_omx.cfuncs,
1819 &dev->rpmsg_omx.iofuncs, attr))) {
1820 pthread_mutex_destroy(&dev->rpmsg_omx.mutex);
1821 free(dev);
1822 return(NULL);
1823 }
1825 return(dev);
1826 }
1828 /**
1829 * Callback passed to MessageQCopy_registerNotify.
1830 *
1831 * This callback is called when a remote processor creates a MessageQCopy
1832 * handle with the same name as the local MessageQCopy handle and then
1833 * calls NameMap_register to notify the HOST of the handle.
1834 *
 * \param handle   The remote handle.
 * \param procId   The remote proc ID of the remote handle.
 * \param endpoint The endpoint address of the remote handle.
 * \param desc     The name announced by the remote handle, used as the local
 *                 device name.
 * \param create   TRUE if the remote endpoint was created, FALSE if deleted.
 *
1839 * \return None.
1840 */
1842 static
1843 Void
1844 _rpmsg_omx_notify_cb (MessageQCopy_Handle handle, UInt16 procId,
1845 UInt32 endpoint, Char * desc, Bool create)
1846 {
1847 Int status = 0, i = 0;
1848 Bool found = FALSE;
1849 rpmsg_omx_conn_object * obj = NULL;
1851 for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
1852 if (rpmsg_omx_state.objects[i] == NULL) {
1853 found = TRUE;
1854 break;
1855 }
1856 }
1858 if (found) {
1859 /* found a space to save this mq handle, allocate memory */
1860 obj = Memory_calloc (NULL, sizeof (rpmsg_omx_conn_object), 0x0, NULL);
1861 if (obj) {
1862 /* store the object in the module info */
1863 rpmsg_omx_state.objects[i] = obj;
1865 /* store the mq info in the object */
1866 obj->mq = handle;
1867 obj->procId = procId;
1868 status = ProcMgr_open(&obj->procH, obj->procId);
1869 if (status < 0) {
1870 Osal_printf("Failed to open handle to proc %d", procId);
                Memory_free(NULL, obj, sizeof(rpmsg_omx_conn_object));
                rpmsg_omx_state.objects[i] = NULL;
1872 }
1873 else {
1874 obj->addr = endpoint;
1876 /* create a /dev/rpmsg-omx instance for users to open */
1877 obj->dev = _init_rpmsg_omx_device(desc);
1878 if (obj->dev == NULL) {
1879 Osal_printf("Failed to create %s", desc);
1880 ProcMgr_close(&obj->procH);
                    Memory_free(NULL, obj, sizeof(rpmsg_omx_conn_object));
                    rpmsg_omx_state.objects[i] = NULL;
1882 }
1883 }
1884 }
1885 }
1886 }
1888 /**
1889 * Callback passed to MessageQCopy_create for the module.
1890 *
 * This callback is called when a message is received for the rpmsg-omx
 * module. In practice it is never invoked, since each client connection
 * gets its own endpoint for message passing.
1894 *
1895 * \param handle The local MessageQCopy handle.
1896 * \param data Data message
1897 * \param len Length of data message
1898 * \param priv Private information for the endpoint
1899 * \param src Remote endpoint sending this message
1900 * \param srcProc Remote proc ID sending this message
1901 *
1902 * \return None.
1903 */
1905 static
1906 Void
1907 _rpmsg_omx_module_cb (MessageQCopy_Handle handle, void * data, int len,
1908 void * priv, UInt32 src, UInt16 srcProc)
1909 {
1910 Osal_printf ("_rpmsg_omx_module_cb callback");
1911 }
1914 /*!
1915 * @brief Module setup function.
1916 *
1917 * @sa rpmsg_omx_destroy
1918 */
1919 Int
1920 rpmsg_omx_setup (Void)
1921 {
1922 UInt16 i;
1923 List_Params listparams;
1924 Int status = 0;
1925 Error_Block eb;
1926 pthread_attr_t thread_attr;
1927 struct sched_param sched_param;
1929 GT_0trace (curTrace, GT_ENTER, "rpmsg_omx_setup");
1931 Error_init(&eb);
1933 List_Params_init (&listparams);
1934 rpmsg_omx_state.gateHandle = (IGateProvider_Handle)
1935 GateSpinlock_create ((GateSpinlock_Handle) NULL, &eb);
1936 #if !defined(SYSLINK_BUILD_OPTIMIZE)
1937 if (rpmsg_omx_state.gateHandle == NULL) {
1938 status = OMX_NOMEM;
1939 GT_setFailureReason (curTrace,
1940 GT_4CLASS,
1941 "_rpmsg_omx_setup",
1942 status,
1943 "Failed to create spinlock gate!");
1944 }
1945 else {
1946 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
1947 for (i = 0 ; i < MAX_PROCESSES ; i++) {
1948 rpmsg_omx_state.eventState [i].bufList = NULL;
1949 rpmsg_omx_state.eventState [i].omx = NULL;
1950 rpmsg_omx_state.eventState [i].refCount = 0;
1951 rpmsg_omx_state.eventState [i].head = NULL;
1952 rpmsg_omx_state.eventState [i].tail = NULL;
1953 }
1955 pthread_attr_init(&thread_attr);
1956 sched_param.sched_priority = PRIORITY_REALTIME_LOW;
1957 pthread_attr_setinheritsched(&thread_attr, PTHREAD_EXPLICIT_SCHED);
1958 pthread_attr_setschedpolicy(&thread_attr, SCHED_RR);
1959 pthread_attr_setschedparam(&thread_attr, &sched_param);
1961 rpmsg_omx_state.run = TRUE;
1962 if (pthread_create(&rpmsg_omx_state.nt, &thread_attr, notifier_thread, NULL) == EOK) {
1963 pthread_setname_np(rpmsg_omx_state.nt, "rpmsg-omx-notifier");
1964 /* Initialize the driver mapping array. */
1965 Memory_set (&rpmsg_omx_state.objects,
1966 0,
1967 (sizeof (rpmsg_omx_conn_object *)
1968 * MultiProc_MAXPROCESSORS));
1969 /* create a local handle and register for notifications with MessageQCopy */
1970 rpmsg_omx_state.mqHandle = MessageQCopy_create (
1971 MessageQCopy_ADDRANY,
1972 RPMSG_OMX_MODULE_NAME,
1973 _rpmsg_omx_module_cb,
1974 NULL,
1975 &rpmsg_omx_state.endpoint);
1976 if (rpmsg_omx_state.mqHandle == NULL) {
1977 /*! @retval OMX_FAIL Failed to create MessageQCopy handle! */
1978 status = -ENOMEM;
1979 GT_setFailureReason (curTrace,
1980 GT_4CLASS,
1981 "rpmsg_omx_setup",
1982 status,
1983 "Failed to create MessageQCopy handle!");
1984 }
1985 else {
1986 /* TBD: This could be replaced with a messageqcopy_open type call, one for
1987 * each core */
1988 status = MessageQCopy_registerNotify (rpmsg_omx_state.mqHandle,
1989 _rpmsg_omx_notify_cb);
1990 if (status < 0) {
1991 MessageQCopy_delete (&rpmsg_omx_state.mqHandle);
1992 /*! @retval OMX_FAIL Failed to register MQCopy handle! */
1993 status = -ENOMEM;
1994 GT_setFailureReason (curTrace,
1995 GT_4CLASS,
1996 "rpmsg_omx_setup",
1997 status,
1998 "Failed to register MQCopy handle!");
1999 }
2000 }
2001 if (status >= 0){
2002 rpmsg_omx_state.sem = OsalSemaphore_create(OsalSemaphore_Type_Binary);
2003 if (rpmsg_omx_state.sem == NULL) {
2004 //MessageQCopy_unregisterNotify();
            /*! @retval OMX_NOMEM Failed to create semaphore! */
2006 status = OMX_NOMEM;
2007 GT_setFailureReason (curTrace,
2008 GT_4CLASS,
2009 "rpmsg_omx_setup",
2010 status,
2011 "Failed to register MQCopy handle!");
2012 }
2013 }
2014 if (status >= 0) {
2015 rpmsg_omx_state.isSetup = TRUE;
2016 }
2017 else {
2018 MessageQCopy_delete (&rpmsg_omx_state.mqHandle);
2019 rpmsg_omx_state.run = FALSE;
2020 }
2021 }
2022 else {
2023 rpmsg_omx_state.run = FALSE;
2024 }
2025 pthread_attr_destroy(&thread_attr);
2026 #if !defined(SYSLINK_BUILD_OPTIMIZE)
2027 }
2028 #endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
2030 GT_0trace (curTrace, GT_LEAVE, "rpmsg_omx_setup");
2031 return status;
2032 }
2035 /*!
2036 * @brief Module destroy function.
2037 *
2038 * @sa rpmsg_omx_setup
2039 */
2040 Void
2041 rpmsg_omx_destroy (Void)
2042 {
2043 rpmsg_omx_EventPacket * packet;
2044 UInt32 i;
2045 List_Handle bufList;
2046 rpmsg_omx_object * omx = NULL;
2047 WaitingReaders_t * wr = NULL;
2048 struct _msg_info info;
2050 GT_0trace (curTrace, GT_ENTER, "_rpmsg_omx_destroy");
2052 for (i = 0; i < MultiProc_MAXPROCESSORS; i++) {
2053 if (rpmsg_omx_state.objects[i]) {
2054 rpmsg_omx_conn_object * obj = rpmsg_omx_state.objects[i];
2055 _deinit_rpmsg_omx_device(obj->dev);
2056 ProcMgr_close(&obj->procH);
2057 Memory_free(NULL, obj, sizeof(rpmsg_omx_conn_object));
2058 rpmsg_omx_state.objects[i] = NULL;
2059 }
2060 }
2062 for (i = 0 ; i < MAX_PROCESSES ; i++) {
2063 omx = NULL;
2064 if (rpmsg_omx_state.eventState [i].omx != NULL) {
2065 /* This is recovery. Need to mark omx structures as invalid */
2066 omx = rpmsg_omx_state.eventState[i].omx;
2067 MessageQCopy_delete(&omx->mq);
2068 omx->mq = NULL;
2069 }
2070 bufList = rpmsg_omx_state.eventState [i].bufList;
2072 rpmsg_omx_state.eventState [i].bufList = NULL;
2073 rpmsg_omx_state.eventState [i].omx = NULL;
2074 rpmsg_omx_state.eventState [i].refCount = 0;
2075 if (bufList != NULL) {
2076 /* Dequeue waiting readers and reply to them */
2077 pthread_mutex_lock(&rpmsg_omx_state.lock);
2078 while ((wr = dequeue_waiting_reader(i)) != NULL) {
2079 /* Check if rcvid is still valid */
2080 if (MsgInfo(wr->rcvid, &info) != -1) {
2081 put_wr(wr);
2082 pthread_mutex_unlock(&rpmsg_omx_state.lock);
2083 MsgError(wr->rcvid, EINTR);
2084 pthread_mutex_lock(&rpmsg_omx_state.lock);
2085 }
2086 }
2087 /* Check for pending ionotify/select calls */
2088 if (omx) {
2089 if (IOFUNC_NOTIFY_INPUT_CHECK(omx->notify, 1, 0)) {
2090 iofunc_notify_trigger(omx->notify, 1, IOFUNC_NOTIFY_INPUT);
2091 }
2092 }
2093 pthread_mutex_unlock(&rpmsg_omx_state.lock);
2095 /* Free event packets for any received but unprocessed events. */
2096 while (List_empty (bufList) != TRUE){
2097 packet = (rpmsg_omx_EventPacket *)
2098 List_get (bufList);
2099 if (packet != NULL){
2100 Memory_free (NULL, packet, sizeof(*packet));
2101 }
2102 }
2103 List_delete (&(bufList));
2104 }
2105 }
2107 /* Free the cached list */
2108 flush_uBuf();
2110 if (rpmsg_omx_state.sem) {
2111 OsalSemaphore_delete(&rpmsg_omx_state.sem);
2112 }
2114 if (rpmsg_omx_state.mqHandle) {
2115 //MessageQCopy_unregisterNotify();
2116 MessageQCopy_delete(&rpmsg_omx_state.mqHandle);
2117 }
2119 if (rpmsg_omx_state.gateHandle != NULL) {
2120 GateSpinlock_delete ((GateSpinlock_Handle *)
2121 &(rpmsg_omx_state.gateHandle));
2122 }
2124 rpmsg_omx_state.isSetup = FALSE ;
2125 rpmsg_omx_state.run = FALSE;
2126 // run through and destroy the thread, and all outstanding
2127 // omx structures
2128 pthread_mutex_lock(&rpmsg_omx_state.lock);
2129 pthread_cond_signal(&rpmsg_omx_state.cond);
2130 pthread_mutex_unlock(&rpmsg_omx_state.lock);
2131 pthread_join(rpmsg_omx_state.nt, NULL);
2132 pthread_mutex_lock(&rpmsg_omx_state.lock);
2133 while (rpmsg_omx_state.head != NULL) {
2134 int index;
2135 WaitingReaders_t *item;
2136 index = dequeue_notify_list_item(rpmsg_omx_state.head);
2137 if (index < 0)
2138 break;
2139 item = dequeue_waiting_reader(index);
2140 while (item) {
2141 put_wr(item);
2142 item = dequeue_waiting_reader(index);
2143 }
2144 }
2145 rpmsg_omx_state.head = NULL ;
2146 rpmsg_omx_state.tail = NULL ;
2147 pthread_mutex_unlock(&rpmsg_omx_state.lock);
2149 GT_0trace (curTrace, GT_LEAVE, "_rpmsgDrv_destroy");
2150 }
2153 /** ============================================================================
2154 * Internal functions
2155 * ============================================================================
2156 */