[ipc/ipcdev.git] / qnx / src / ipc3x_dev / ti / syslink / ipc / hlos / knl / transports / virtio / VirtQueue.c
1 /*
2 * Copyright (c) 2011-2014, Texas Instruments Incorporated
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * * Neither the name of Texas Instruments Incorporated nor the names of
17 * its contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
27 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32 /** ============================================================================
33 * @file VirtQueue.c
34 *
35 * @brief Virtio Queue implementation for BIOS
36 *
37 * Differences between BIOS version and Linux kernel (include/linux/virtio.h):
38 * - Renamed module from virtio.h to VirtQueue_Object.h to match the API prefixes;
39 * - BIOS (XDC) types and CamelCasing used;
40 * - virtio_device concept removed (i.e, assumes no containing device);
41 * - simplified scatterlist from Linux version;
42 * - VirtQueue_Objects are created statically here, so just added a VirtQueue_Object_init()
43 * fxn to take the place of the Virtio vring_new_virtqueue() API;
44 * - The notify function is implicit in the implementation, and not provided
45 * by the client, as it is in Linux virtio.
46 *
47 * All VirtQueue operations can be called in any context.
48 *
49 * The virtio header should be included in an application as follows:
50 * @code
51 * #include <ti/ipc/rpmsg/VirtQueue.h>
52 * @endcode
53 *
54 */
56 #include <ti/syslink/Std.h>
57 #include <ti/syslink/utils/Memory.h>
58 #include <ti/syslink/utils/Trace.h>
60 //#include <ti/sysbios/hal/Hwi.h>
61 //#include <ti/sysbios/knl/Semaphore.h>
62 //#include <ti/sysbios/knl/Clock.h>
63 //#include <ti/sysbios/BIOS.h>
64 //#include <ti/sysbios/hal/Cache.h>
66 #include <_ArchIpcInt.h>
67 #include <ArchIpcInt.h>
68 #include "VirtQueue.h"
70 #include <ti/ipc/MultiProc.h>
71 #include <ti/syslink/ProcMgr.h>
73 #include <ti/syslink/utils/String.h>
75 #include "virtio_ring.h"
76 #include "_rpmsg.h"
77 #include <ipu_pm.h>
79 /* Used for defining the size of the virtqueue registry */
80 #define NUM_QUEUES 2
/*
 * Per-queue state for one direction of a vring-based transport.
 * One object is registered per (procId, id%2) slot in queueRegistry.
 */
typedef struct VirtQueue_Object {
    /* Id for this VirtQueue_Object; id%2 selects the registry slot */
    UInt16 id;

    /* The function to call when buffers are consumed (can be NULL) */
    VirtQueue_callback callback;

    /* Shared state: vring descriptor/avail/used rings in shared memory */
    struct vring vring;

    /* Number of free buffers (decremented by addAvailBuf, incremented
     * by getUsedBuf) */
    UInt16 num_free;

    /* Last available index; updated by VirtQueue_getAvailBuf */
    UInt16 last_avail_idx;

    /* Last used index; updated by VirtQueue_getUsedBuf
     * (NOTE(review): original comment said "available" — this field
     * tracks the used ring) */
    UInt16 last_used_idx;

    /* Remote processor this queue kicks via mailbox interrupt */
    UInt16 procId;

    /* Interrupt Id for kicking remote processor (from coreIntId[]) */
    UInt16 intId;

    /* Local virtual address for vring struct */
    UInt32 vaddr;

    /* Physical address for vring struct; paired with vaddr for
     * PA<->VA translation of buffer addresses */
    UInt32 paddr;

    /* Private arg from user, passed back to callback */
    void * arg;
} VirtQueue_Object;
117 static UInt numQueues = 0;
118 static struct VirtQueue_Object *queueRegistry[MultiProc_MAXPROCESSORS][NUM_QUEUES];
120 static UInt32 coreIntId[MultiProc_MAXPROCESSORS];
122 static inline Void * mapPAtoVA(VirtQueue_Handle vq, UInt pa)
123 {
124 UInt offset = vq->paddr - pa;
125 return (Void *)(vq->vaddr - offset);
126 }
128 static inline UInt mapVAtoPA(VirtQueue_Handle vq, Void * va)
129 {
130 UInt offset = vq->vaddr - (UInt)va;
131 return (UInt)(vq->paddr - offset);
132 }
134 /*!
135 * ======== VirtQueue_cb ========
136 */
137 Void VirtQueue_cb(Void *buf, VirtQueue_Handle vq)
138 {
139 if (vq/* && vq->cb_enabled*/) {
140 /* Call the registered vq callback */
141 vq->callback(vq, vq->arg);
142 }
143 }
145 /*!
146 * ======== VirtQueue_kick ========
147 */
/*
 * Notify the remote processor that buffers were added to the avail ring.
 * Honors the remote side's VRING_USED_F_NO_NOTIFY suppression flag, and
 * sends the queue id as the mailbox payload so the remote ISR can
 * demultiplex (see VirtQueue_ISR's default case for the mirror logic).
 */
Void VirtQueue_kick(VirtQueue_Handle vq)
{
    /* For now, simply interrupt remote processor */
    if (vq->vring.used->flags & VRING_USED_F_NO_NOTIFY) {
        /* Remote side asked not to be notified; skip the interrupt */
        GT_0trace(curTrace, GT_3CLASS,
                "VirtQueue_kick: no kick because of VRING_USED_F_NO_NOTIFY");
        return;
    }

    GT_2trace(curTrace, GT_2CLASS,
            "VirtQueue_kick: Sending interrupt to proc %d with payload 0x%x",
            vq->procId, vq->id);

#if defined (SYSLINK_USE_IPU_PM)
    /* Wake the remote core out of hibernation before kicking it */
    ipu_pm_restore_ctx(vq->procId);
#endif
    ArchIpcInt_sendInterrupt(vq->procId, vq->intId, vq->id);
}
167 /*!
168 * ======== VirtQueue_addUsedBuf ========
169 */
170 Int VirtQueue_addUsedBuf(VirtQueue_Handle vq, Int16 head, UInt32 len)
171 {
172 struct vring_used_elem *used;
173 Int status = 0;
175 if ((head > vq->vring.num) || (head < 0)) {
176 status = -1;
177 GT_setFailureReason (curTrace,
178 GT_4CLASS,
179 "VirtQueue_addUsedBuf",
180 status,
181 "head is invalid!");
182 }
183 else {
184 /*
185 * The virtqueue contains a ring of used buffers. Get a pointer to the
186 * next entry in that used ring.
187 */
188 used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
189 used->id = head;
190 used->len = len;
192 vq->vring.used->idx++;
193 }
195 return status;
196 }
198 /*!
199 * ======== VirtQueue_addUsedBufAddr ========
200 */
201 Int VirtQueue_addUsedBufAddr(VirtQueue_Handle vq, Void *buf, UInt32 len)
202 {
203 struct vring_used_elem *used = NULL;
204 UInt16 head = 0;
205 Int status = 0;
207 if ((head > vq->vring.num) || (head < 0)) {
208 status = -1;
209 GT_setFailureReason (curTrace,
210 GT_4CLASS,
211 "VirtQueue_addUsedBuf",
212 status,
213 "head is invalid!");
214 }
215 else {
216 /*
217 * The virtqueue contains a ring of used buffers. Get a pointer to the
218 * next entry in that used ring.
219 */
220 head = vq->vring.used->idx % vq->vring.num;
221 vq->vring.desc[head].addr = mapVAtoPA(vq, buf);
222 vq->vring.desc[head].len = len;
223 vq->vring.desc[head].flags = 0;
224 used = &vq->vring.used->ring[head];
225 used->id = head;
226 used->len = len;
228 vq->vring.used->idx++;
229 }
231 return status;
232 }
234 /*!
235 * ======== VirtQueue_addAvailBuf ========
236 */
237 Int VirtQueue_addAvailBuf(VirtQueue_Handle vq, Void *buf, UInt32 len, Int16 head)
238 {
239 UInt16 avail;
241 if (vq->num_free == 0) {
242 /* There's no more space */
243 GT_setFailureReason (curTrace,
244 GT_4CLASS,
245 "VirtQueue_addAvailBuf",
246 (-1), // TODO: Make this a valid error code
247 "no more space!");
249 }
250 else {
251 vq->num_free--;
253 avail = vq->vring.avail->idx % vq->vring.num;
254 vq->vring.avail->ring[avail] = head;
256 vq->vring.desc[head].addr = mapVAtoPA(vq, buf);
257 vq->vring.desc[head].len = len;
258 vq->vring.desc[head].flags = 0;
260 vq->vring.avail->idx++;
261 }
263 return (vq->num_free);
264 }
266 /*!
267 * ======== VirtQueue_getUsedBuf ========
268 */
269 Int16 VirtQueue_getUsedBuf(VirtQueue_Object *vq, Void **buf)
270 {
271 UInt16 head;
273 /* There's nothing available? */
274 if (vq->last_used_idx == vq->vring.used->idx) {
275 /* We need to know about added buffers */
276 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
278 return (-1);
279 }
281 /* No need to know be kicked about added buffers anymore */
282 //vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; // disabling for now, since there seems to be a race condition where an M3->A9 message is not detected because the interrupt isn't sent.
284 head = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].id;
285 vq->last_used_idx++;
286 vq->num_free++;
288 *buf = mapPAtoVA(vq, vq->vring.desc[head].addr);
290 return (head);
291 }
293 /*!
294 * ======== VirtQueue_getAvailBuf ========
295 */
/*
 * Fetch the next buffer the remote side has made available.
 * Returns the descriptor index (also used as the buffer handle for
 * VirtQueue_addUsedBuf), or -1 when the avail ring is empty.
 * The clear / re-check / set dance on VRING_USED_F_NO_NOTIFY below is
 * intentional and order-sensitive — do not reorder.
 */
Int16 VirtQueue_getAvailBuf(VirtQueue_Handle vq, Void **buf)
{
    UInt16 head;

    GT_6trace(curTrace, GT_2CLASS, "getAvailBuf vq: 0x%x %d %d %d 0x%x 0x%x",
            (IArg)vq, vq->last_avail_idx, vq->vring.avail->idx, vq->vring.num,
            (IArg)&vq->vring.avail, (IArg)vq->vring.avail);

    /* Clear flag here to avoid race condition with remote processor.
     * This is a negative flag, clearing it means that we want to
     * receive an interrupt when a buffer has been added to the pool.
     */
    vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

    /* There's nothing available? */
    if (vq->last_avail_idx == vq->vring.avail->idx) {
        /* We need to know about added buffers */
        return (-1);
    }

    /* No need to be kicked about added buffers anymore: there is work
     * queued, so suppress further notifications until it's drained */
    vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;

    /*
     * Grab the next descriptor number they're advertising, and increment
     * the index we've seen.
     */
    head = vq->vring.avail->ring[vq->last_avail_idx++ % vq->vring.num];

    /* Translate the descriptor's physical address to a local VA */
    *buf = mapPAtoVA(vq, vq->vring.desc[head].addr);

    return (head);
}
329 /*!
330 * ======== VirtQueue_disableCallback ========
331 */
332 Void VirtQueue_disableCallback(VirtQueue_Handle vq)
333 {
334 //TODO
335 GT_0trace(curTrace, GT_3CLASS, "VirtQueue_disableCallback not supported.");
336 }
338 /*!
339 * ======== VirtQueue_enableCallback ========
340 */
341 Bool VirtQueue_enableCallback(VirtQueue_Handle vq)
342 {
343 GT_0trace(curTrace, GT_3CLASS, "VirtQueue_enableCallback not supported.");
345 //TODO
346 return (FALSE);
347 }
349 /*!
350 * @brief This function implements the interrupt service routine for the
351 * interrupt received from the remote cores.
352 *
353 * @param refData object to be handled in ISR
354 *
355 * @sa VirtQueue_cb
356 */
/*
 * Mailbox interrupt service routine for messages from a remote core.
 * 'arg' carries the remote procId (registered in VirtQueue_startup).
 * Small payload values are out-of-band rpmsg mailbox control messages;
 * anything else is treated as a virtqueue index notification.
 * Always returns TRUE (interrupt handled).
 */
static
Bool
VirtQueue_ISR (UInt32 msg, Void * arg)
{
    UInt32 procId = (UInt32)arg;
    ProcMgr_Handle handle = NULL;
    Int status = 0;

    GT_2trace (curTrace, GT_ENTER, "_VirtQueue_ISR", msg, arg);

    /* Interrupt clear is done by ArchIpcInt. */

    switch(msg) {
        case (UInt)RP_MBOX_ECHO_REPLY:
            /* Remote answered our echo request; informational only */
            Osal_printf ("Echo reply from %s",
                         MultiProc_getName(procId));
            break;

        case (UInt)RP_MBOX_CRASH:
            /* Remote core crashed: flag its ProcMgr state as Error */
            Osal_printf ("Crash notification for %s",
                         MultiProc_getName(procId));
            status = ProcMgr_open(&handle, procId);
            if (status >= 0) {
                ProcMgr_setState(handle, ProcMgr_State_Error);
                ProcMgr_close(&handle);
            }
            else {
                Osal_printf("Failed to open ProcMgr handle");
            }
            break;

        case (UInt)RP_MBOX_BOOTINIT_DONE:
            /* Remote finished boot-time initialization */
            Osal_printf ("Got BootInit done from %s",
                         MultiProc_getName(procId));
            // TODO: What to do with this message?
            break;

        case (UInt)RP_MBOX_HIBERNATION_ACK:
            /* Remote acknowledged a hibernation request */
            Osal_printf ("Got Hibernation ACK from %s",
                         MultiProc_getName(procId));
            break;

        case (UInt)RP_MBOX_HIBERNATION_CANCEL:
            /* Remote refused/cancelled a hibernation request */
            Osal_printf ("Got Hibernation CANCEL from %s",
                         MultiProc_getName(procId));
            break;

        default:
            /*
             * If the message isn't one of the above, it's a virtqueue
             * message: the payload is the queue id sent by the remote
             * side's kick (see VirtQueue_kick), and id%2 selects the
             * registry slot.
             */
            if (msg%2 < NUM_QUEUES) {
                /* This message is for us! */
                VirtQueue_cb((void *)msg, queueRegistry[procId][msg%2]);
            }
            break;
    }

    return TRUE;
}
419 /*!
420 * ======== VirtQueue_create ========
421 */
422 VirtQueue_Handle VirtQueue_create (VirtQueue_callback callback, UInt16 procId,
423 UInt16 id, UInt32 vaddr, UInt32 paddr,
424 UInt32 num, UInt32 align, Void *arg)
425 {
426 VirtQueue_Object *vq = NULL;
428 vq = Memory_alloc(NULL, sizeof(VirtQueue_Object), 0, NULL);
429 if (!vq) {
430 return (NULL);
431 }
433 vq->callback = callback;
434 vq->id = id;
435 numQueues++;
436 vq->procId = procId;
437 vq->intId = coreIntId[procId];
438 vq->last_avail_idx = 0;
439 vq->arg = arg;
441 /* init the vring */
442 vring_init(&(vq->vring), num, (void *)vaddr, align);
444 vq->num_free = num;
445 vq->last_used_idx = 0;
446 vq->vaddr = vaddr;
447 vq->paddr = paddr;
449 vq->vring.avail->idx = 0;
450 vq->vring.used->idx = 0;
452 /* Initialize the flags */
453 vq->vring.avail->flags = 0;
454 vq->vring.used->flags = 0;
456 /* Store the VirtQueue locally */
457 if (queueRegistry[procId][vq->id%2] == NULL)
458 queueRegistry[procId][vq->id%2] = vq;
459 else {
460 Osal_printf ("VirtQueue ID %d already created", id);
461 Memory_free(NULL, vq, sizeof(VirtQueue_Object));
462 vq = NULL;
463 }
465 return (vq);
466 }
468 /*!
469 * ======== VirtQueue_delete ========
470 */
471 Int VirtQueue_delete (VirtQueue_Handle * vq)
472 {
473 VirtQueue_Object * obj = (VirtQueue_Object *)(*vq);
474 /* Store the VirtQueue locally */
475 queueRegistry[obj->procId][obj->id%2] = NULL;
476 Memory_free(NULL, obj, sizeof(VirtQueue_Object));
477 *vq = NULL;
478 numQueues--;
480 return 0;
481 }
483 /*!
484 * ======== VirtQueue_startup ========
485 */
486 Void VirtQueue_startup(UInt16 procId, UInt32 intId, UInt32 paddr)
487 {
488 Int32 status = 0;
489 UInt32 arg = procId;
491 coreIntId[procId] = intId;
493 /* Register for interrupts with CORE0, CORE1 messages come through CORE0 */
494 status = ArchIpcInt_interruptRegister (procId,
495 intId,
496 VirtQueue_ISR, (Ptr)arg);
497 if (status >= 0) {
498 /* Notify the remote proc that the mbox is ready */
499 status = ArchIpcInt_sendInterrupt (procId,
500 intId,
501 RP_MBOX_READY);
502 }
503 }
505 /*!
 * ======== VirtQueue_destroy ========
507 */
508 Void VirtQueue_destroy(UInt16 procId)
509 {
510 Int32 status = 0;
511 Int i = 0;
513 for (i = 0; i < NUM_QUEUES; i++) {
514 if (queueRegistry[procId][i]) {
515 VirtQueue_delete(&queueRegistry[procId][i]);
516 queueRegistry[procId][i] = NULL;
517 }
518 }
520 /* Un-register for interrupts with CORE0 */
521 status = ArchIpcInt_interruptUnregister (procId);
522 if (status < 0) {
523 GT_setFailureReason (curTrace,
524 GT_4CLASS,
525 "VirtQueue_destroy",
526 status,
527 "ArchIpcInt_interruptUnregister failed");
528 }
529 }