1 /*
2 * Copyright (c) 2011-2013, Texas Instruments Incorporated
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * * Neither the name of Texas Instruments Incorporated nor the names of
17 * its contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
27 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32 /** ============================================================================
33 * @file VirtQueue.c
34 *
35 * @brief Virtio Queue implementation for BIOS
36 *
37 * Differences between BIOS version and Linux kernel (include/linux/virtio.h):
38 * - Renamed module from virtio.h to VirtQueue_Object.h to match the API prefixes;
39 * - BIOS (XDC) types and CamelCasing used;
40 * - virtio_device concept removed (i.e, assumes no containing device);
41 * - simplified scatterlist from Linux version;
42 * - VirtQueue_Objects are created statically here, so just added a VirtQueue_Object_init()
43 * fxn to take the place of the Virtio vring_new_virtqueue() API;
44 * - The notify function is implicit in the implementation, and not provided
45 * by the client, as it is in Linux virtio.
46 *
47 * All VirtQueue operations can be called in any context.
48 *
49 * The virtio header should be included in an application as follows:
50 * @code
51 * #include <ti/ipc/family/omap54xx/VirtQueue.h>
52 * @endcode
53 *
54 */
56 #include <xdc/std.h>
57 #include <xdc/runtime/System.h>
58 #include <xdc/runtime/Error.h>
59 #include <xdc/runtime/Memory.h>
60 #include <xdc/runtime/Log.h>
61 #include <xdc/runtime/Diags.h>
63 #include <ti/sysbios/hal/Hwi.h>
64 #include <ti/sysbios/knl/Clock.h>
65 #include <ti/sysbios/gates/GateAll.h>
66 #include <ti/sysbios/BIOS.h>
67 #include <ti/sysbios/hal/Cache.h>
69 #include <ti/ipc/MultiProc.h>
71 #include <ti/ipc/rpmsg/virtio_ring.h>
72 #include <ti/pm/IpcPower.h>
73 #include <string.h>
75 #include <ti/ipc/rpmsg/_VirtQueue.h>
77 #include "InterruptProxy.h"
78 #include "VirtQueue.h"
81 /* Used for defining the size of the virtqueue registry */
82 #define NUM_QUEUES 4
84 /* Predefined device addresses */
85 #ifndef DSPC674
86 #define IPC_MEM_VRING0 0xA0000000
87 #define IPC_MEM_VRING1 0xA0004000
88 #else
89 #define IPC_MEM_VRING0 0x9FB00000
90 #define IPC_MEM_VRING1 0x9FB04000
91 #endif
92 #define IPC_MEM_VRING2 0xA0008000
93 #define IPC_MEM_VRING3 0xA000c000
95 /*
96 * Sizes of the virtqueues (expressed in number of buffers supported,
97 * and must be power of two)
98 */
99 #define VQ0_SIZE 256
100 #define VQ1_SIZE 256
101 #define VQ2_SIZE 256
102 #define VQ3_SIZE 256
104 /*
105 * enum - Predefined Mailbox Messages
106 *
107 * @RP_MSG_MBOX_READY: informs the M3's that we're up and running. will be
108 * followed by another mailbox message that carries the A9's virtual address
109 * of the shared buffer. This would allow the A9's drivers to send virtual
110 * addresses of the buffers.
111 *
112 * @RP_MSG_MBOX_STATE_CHANGE: informs the receiver that there is an inbound
113 * message waiting in its own receive-side vring. please note that currently
114 * this message is optional: alternatively, one can explicitly send the index
115 * of the triggered virtqueue itself. the preferred approach will be decided
116 * as we progress and experiment with those design ideas.
117 *
118 * @RP_MSG_MBOX_CRASH: this message indicates that the BIOS side is unhappy
119 *
120 * @RP_MBOX_ECHO_REQUEST: this message requests the remote processor to reply
121 * with RP_MBOX_ECHO_REPLY
122 *
123 * @RP_MBOX_ECHO_REPLY: this is a reply that is sent when RP_MBOX_ECHO_REQUEST
124 * is received.
125 *
126 * @RP_MBOX_ABORT_REQUEST: tells the M3 to crash on demand
127 *
 * @RP_MSG_BOOTINIT_DONE: this message indicates the BIOS side has reached a
129 * certain state during the boot process. This message is used to inform the
130 * host that the basic BIOS initialization is done, and lets the host use this
131 * notification to perform certain actions.
132 */
/*
 * Values live in the 0xFFFFFFxx range so they can never collide with
 * virtqueue indices (0..NUM_QUEUES-1), which share the same mailbox.
 * See the block comment above for the detailed semantics of each message.
 */
enum {
    RP_MSG_MBOX_READY         = (Int)0xFFFFFF00,  /* slave is up and running */
    RP_MSG_MBOX_STATE_CHANGE  = (Int)0xFFFFFF01,  /* inbound vring message pending */
    RP_MSG_MBOX_CRASH         = (Int)0xFFFFFF02,  /* BIOS side has crashed */
    RP_MBOX_ECHO_REQUEST      = (Int)0xFFFFFF03,  /* reply with ECHO_REPLY */
    RP_MBOX_ECHO_REPLY        = (Int)0xFFFFFF04,  /* answer to ECHO_REQUEST */
    RP_MBOX_ABORT_REQUEST     = (Int)0xFFFFFF05,  /* crash on demand */
    RP_MSG_FLUSH_CACHE        = (Int)0xFFFFFF06,  /* write back all cached data */
    RP_MSG_BOOTINIT_DONE      = (Int)0xFFFFFF07,  /* basic BIOS init complete */
    RP_MSG_HIBERNATION        = (Int)0xFFFFFF10,  /* request suspend (cancellable) */
    RP_MSG_HIBERNATION_FORCE  = (Int)0xFFFFFF11,  /* request suspend (mandatory) */
    RP_MSG_HIBERNATION_ACK    = (Int)0xFFFFFF12,  /* suspend request acknowledged */
    RP_MSG_HIBERNATION_CANCEL = (Int)0xFFFFFF13   /* suspend request refused */
};
148 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
149 #define RP_MSG_NUM_BUFS (VQ0_SIZE) /* must be power of two */
150 #define RP_MSG_BUF_SIZE (512)
151 #define RP_MSG_BUFS_SPACE (RP_MSG_NUM_BUFS * RP_MSG_BUF_SIZE * 2)
153 #define PAGE_SIZE (4096)
154 /*
155 * The alignment to use between consumer and producer parts of vring.
156 * Note: this is part of the "wire" protocol. If you change this, you need
157 * to update your BIOS image as well
158 */
159 #define RP_MSG_VRING_ALIGN (4096)
161 /* With 256 buffers, our vring will occupy 3 pages */
162 #define RP_MSG_RING_SIZE ((DIV_ROUND_UP(vring_size(RP_MSG_NUM_BUFS, \
163 RP_MSG_VRING_ALIGN), PAGE_SIZE)) * PAGE_SIZE)
165 /* The total IPC space needed to communicate with a remote processor */
166 #define RPMSG_IPC_MEM (RP_MSG_BUFS_SPACE + 2 * RP_MSG_RING_SIZE)
168 #define ID_SYSM3_TO_A9 ID_SELF_TO_A9
169 #define ID_A9_TO_SYSM3 ID_A9_TO_SELF
170 #define ID_DSP_TO_A9 ID_SELF_TO_A9
171 #define ID_A9_TO_DSP ID_A9_TO_SELF
172 #define ID_APPM3_TO_A9 2
173 #define ID_A9_TO_APPM3 3
typedef struct VirtQueue_Object {
    /* Id for this VirtQueue_Object; also the mailbox payload used by
     * VirtQueue_isr to look the queue up in queueRegistry */
    UInt16 id;

    /* The function to call when buffers are consumed (can be NULL) */
    VirtQueue_callback callback;

    /* Shared state (descriptor table, avail ring, used ring) */
    struct vring vring;

    /* Number of free buffers */
    UInt16 num_free;

    /* Last available index; updated by VirtQueue_getAvailBuf */
    UInt16 last_avail_idx;

    /* Last used index; updated by VirtQueue_getUsedBuf */
    UInt16 last_used_idx;

    /* Will eventually be used to kick remote processor */
    UInt16 procId;

    /* Gate to protect from multiple threads */
    GateAll_Handle gateH;
} VirtQueue_Object;
201 static struct VirtQueue_Object *queueRegistry[NUM_QUEUES] = {NULL};
203 static UInt16 hostProcId;
204 #ifndef SMP
205 static UInt16 dspProcId;
206 static UInt16 sysm3ProcId;
207 static UInt16 appm3ProcId;
208 #endif
210 #if defined(M3_ONLY) && !defined(SMP)
211 extern Void OffloadM3_init();
212 extern Int OffloadM3_processSysM3Tasks(UArg msg);
213 #endif
215 static inline Void * mapPAtoVA(UInt pa)
216 {
217 return (Void *)((pa & 0x000fffffU) | IPC_MEM_VRING0);
218 }
220 static inline UInt mapVAtoPA(Void * va)
221 {
222 return ((UInt)va & 0x000fffffU) | 0x9cf00000U;
223 }
225 /*!
226 * ======== VirtQueue_kick ========
227 */
228 Void VirtQueue_kick(VirtQueue_Handle vq)
229 {
230 /* For now, simply interrupt remote processor */
231 if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
232 Log_print0(Diags_USER1,
233 "VirtQueue_kick: no kick because of VRING_AVAIL_F_NO_INTERRUPT\n");
234 return;
235 }
237 Log_print2(Diags_USER1,
238 "VirtQueue_kick: Sending interrupt to proc %d with payload 0x%x\n",
239 (IArg)vq->procId, (IArg)vq->id);
240 InterruptProxy_intSend(vq->procId, vq->id);
241 }
243 /*!
244 * ======== VirtQueue_addUsedBuf ========
245 */
246 Int VirtQueue_addUsedBuf(VirtQueue_Handle vq, Int16 head, Int len)
247 {
248 struct vring_used_elem *used;
249 IArg key;
251 key = GateAll_enter(vq->gateH);
252 if ((head > vq->vring.num) || (head < 0)) {
253 GateAll_leave(vq->gateH, key);
254 Error_raise(NULL, Error_E_generic, 0, 0);
255 }
257 /*
258 * The virtqueue contains a ring of used buffers. Get a pointer to the
259 * next entry in that used ring.
260 */
261 used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
262 used->id = head;
263 used->len = len;
265 vq->vring.used->idx++;
266 GateAll_leave(vq->gateH, key);
268 return (0);
269 }
271 /*!
272 * ======== VirtQueue_addAvailBuf ========
273 */
274 Int VirtQueue_addAvailBuf(VirtQueue_Object *vq, Void *buf)
275 {
276 UInt16 avail;
277 IArg key;
279 if (vq->num_free == 0) {
280 /* There's no more space */
281 Error_raise(NULL, Error_E_generic, 0, 0);
282 }
284 vq->num_free--;
286 key = GateAll_enter(vq->gateH);
287 avail = vq->vring.avail->idx++ % vq->vring.num;
289 vq->vring.desc[avail].addr = mapVAtoPA(buf);
290 vq->vring.desc[avail].len = RP_MSG_BUF_SIZE;
291 GateAll_leave(vq->gateH, key);
293 return (vq->num_free);
294 }
296 /*!
297 * ======== VirtQueue_getUsedBuf ========
298 */
299 Void *VirtQueue_getUsedBuf(VirtQueue_Object *vq)
300 {
301 UInt16 head;
302 Void *buf;
303 IArg key;
305 key = GateAll_enter(vq->gateH);
306 /* There's nothing available? */
307 if (vq->last_used_idx == vq->vring.used->idx) {
308 buf = NULL;
309 }
310 else {
311 head = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].id;
312 vq->last_used_idx++;
314 buf = mapPAtoVA(vq->vring.desc[head].addr);
315 }
316 GateAll_leave(vq->gateH, key);
318 return (buf);
319 }
321 /*!
322 * ======== VirtQueue_getAvailBuf ========
323 */
324 Int16 VirtQueue_getAvailBuf(VirtQueue_Handle vq, Void **buf, Int *len)
325 {
326 Int16 head;
327 IArg key;
329 key = GateAll_enter(vq->gateH);
330 Log_print6(Diags_USER1, "getAvailBuf vq: 0x%x %d %d %d 0x%x 0x%x\n",
331 (IArg)vq, vq->last_avail_idx, vq->vring.avail->idx, vq->vring.num,
332 (IArg)&vq->vring.avail, (IArg)vq->vring.avail);
334 /* There's nothing available? */
335 if (vq->last_avail_idx == vq->vring.avail->idx) {
336 /* We need to know about added buffers */
337 vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
338 head = (-1);
339 }
340 else {
341 /*
342 * Grab the next descriptor number they're advertising, and increment
343 * the index we've seen.
344 */
345 head = vq->vring.avail->ring[vq->last_avail_idx++ % vq->vring.num];
347 *buf = mapPAtoVA(vq->vring.desc[head].addr);
348 *len = vq->vring.desc[head].len;
349 }
350 GateAll_leave(vq->gateH, key);
352 return (head);
353 }
/*!
 * ======== VirtQueue_disableCallback ========
 * Stub: disabling the consumed-buffer callback is not implemented yet;
 * this currently only logs the call and has no effect on 'vq'.
 */
Void VirtQueue_disableCallback(VirtQueue_Object *vq)
{
    //TODO
    Log_print0(Diags_USER1, "VirtQueue_disableCallback called.");
}
/*!
 * ======== VirtQueue_enableCallback ========
 * Stub: enabling the consumed-buffer callback is not implemented yet; this
 * logs the call and always returns FALSE (callback was not enabled).
 */
Bool VirtQueue_enableCallback(VirtQueue_Object *vq)
{
    Log_print0(Diags_USER1, "VirtQueue_enableCallback called.");

    //TODO
    return (FALSE);
}
/*!
 * ======== VirtQueue_isr ========
 * Mailbox interrupt handler. The incoming payload 'msg' is either one of
 * the predefined RP_MSG/RP_MBOX control values (0xFFFFFFxx range) or a
 * virtqueue index used to dispatch the queue's callback.
 *
 * Note 'arg' is ignored: it is the Hwi argument, not the mailbox argument.
 */
Void VirtQueue_isr(UArg msg)
{
    VirtQueue_Object *vq;

    Log_print1(Diags_USER1, "VirtQueue_isr received msg = 0x%x\n", msg);

#ifndef SMP
    /* Control messages from the host are only handled by SysM3 and DSP;
     * AppM3 falls through to the forwarding/dispatch logic below */
    if (MultiProc_self() == sysm3ProcId || MultiProc_self() == dspProcId) {
#endif
        switch(msg) {
            case (UInt)RP_MSG_MBOX_READY:
                return;

            case (UInt)RP_MBOX_ECHO_REQUEST:
                /* Liveness ping from the host: answer immediately */
                InterruptProxy_intSend(hostProcId, (UInt)(RP_MBOX_ECHO_REPLY));
                return;

            case (UInt)RP_MBOX_ABORT_REQUEST:
                {
                    /* Suppress Coverity Error: FORWARD_NULL: */
                    // coverity[assign_zero]
                    Fxn f = (Fxn)0x0;
                    Log_print0(Diags_USER1, "Crash on demand ...\n");
                    /* Deliberate NULL-pointer call: crash on host's request */
                    // coverity[var_deref_op]
                    f();
                }
                return;

            case (UInt)RP_MSG_FLUSH_CACHE:
                Cache_wbAll();
                return;

#ifndef DSPC674
            case (UInt)RP_MSG_HIBERNATION:
                /* Refuse the (cancellable) suspend request when we can't
                 * hibernate right now */
                if (IpcPower_canHibernate() == FALSE) {
                    InterruptProxy_intSend(hostProcId,
                            (UInt)RP_MSG_HIBERNATION_CANCEL);
                    return;
                }

                /* Fall through */
            case (UInt)RP_MSG_HIBERNATION_FORCE:
#ifndef SMP
                /* Core0 should notify Core1 */
                if (MultiProc_self() == sysm3ProcId) {
                    InterruptProxy_intSend(appm3ProcId,
                            (UInt)(RP_MSG_HIBERNATION));
                }
#endif
                /* Ack request */
                InterruptProxy_intSend(hostProcId,
                        (UInt)RP_MSG_HIBERNATION_ACK);

                IpcPower_suspend();
                return;
#endif
            default:
#if defined(M3_ONLY) && !defined(SMP)
                /* Check and process any Inter-M3 Offload messages */
                if (OffloadM3_processSysM3Tasks(msg))
                    return;
#endif

                /*
                 * If the message isn't one of the above, it's either part of
                 * the 2-message synchronization sequence or it's a virtqueue
                 * message
                 */
                break;
        }
#ifndef SMP
    }
#ifndef DSPC674
    /* AppM3 path: high-valued control messages (0xFFFFFFxx) only matter
     * for hibernation; everything else in that range is ignored */
    else if (msg & 0xFFFF0000) {
        if (msg == (UInt)RP_MSG_HIBERNATION) {
            IpcPower_suspend();
        }
        return;
    }

    /* SysM3 relays AppM3's virtqueue kicks, since the host's mailbox only
     * reaches Core0 */
    if (MultiProc_self() == sysm3ProcId && (msg == ID_A9_TO_APPM3 || msg == ID_APPM3_TO_A9)) {
        InterruptProxy_intSend(appm3ProcId, (UInt)msg);
    }
    else {
#endif
#endif
        /* Don't let unknown messages to pass as a virtqueue index */
        if (msg >= NUM_QUEUES) {
            /* Adding print here deliberately, we should never see this */
            System_printf("VirtQueue_isr: Invalid mailbox message 0x%x "
                    "received\n", msg);
            return;
        }

        /* Dispatch to the registered virtqueue's callback, if any */
        vq = queueRegistry[msg];
        if (vq) {
            vq->callback(vq);
        }
#ifndef SMP
#ifndef DSPC674
    }
#endif
#endif
}
483 /*!
484 * ======== VirtQueue_create ========
485 */
486 VirtQueue_Handle VirtQueue_create(UInt16 remoteProcId, VirtQueue_Params *params,
487 Error_Block *eb)
488 {
489 VirtQueue_Object *vq;
490 Void *vringAddr;
492 vq = Memory_alloc(NULL, sizeof(VirtQueue_Object), 0, eb);
493 if (NULL == vq) {
494 return (NULL);
495 }
497 /* Create the thread protection gate */
498 vq->gateH = GateAll_create(NULL, eb);
499 if (Error_check(eb)) {
500 Log_error0("VirtQueue_create: could not create gate object");
501 Memory_free(NULL, vq, sizeof(VirtQueue_Object));
502 return (NULL);
503 }
505 vq->callback = params->callback;
506 vq->id = params->vqId;
507 vq->procId = remoteProcId;
508 vq->last_avail_idx = 0;
510 #ifndef SMP
511 if (MultiProc_self() == appm3ProcId) {
512 /* vqindices that belong to AppM3 should be big so they don't
513 * collide with SysM3's virtqueues */
514 vq->id += 2;
515 }
516 #endif
518 switch (vq->id) {
519 /* IPC transport vrings */
520 case ID_SELF_TO_A9:
521 /* IPU/DSP -> A9 */
522 vringAddr = (struct vring *) IPC_MEM_VRING0;
523 break;
524 case ID_A9_TO_SELF:
525 /* A9 -> IPU/DSP */
526 vringAddr = (struct vring *) IPC_MEM_VRING1;
527 break;
528 #ifndef SMP
529 case ID_APPM3_TO_A9:
530 /* APPM3 -> A9 */
531 vringAddr = (struct vring *) IPC_MEM_VRING2;
532 break;
533 case ID_A9_TO_APPM3:
534 /* A9 -> APPM3 */
535 vringAddr = (struct vring *) IPC_MEM_VRING3;
536 break;
537 #endif
538 default:
539 GateAll_delete(&vq->gateH);
540 Memory_free(NULL, vq, sizeof(VirtQueue_Object));
541 return (NULL);
542 }
544 Log_print3(Diags_USER1,
545 "vring: %d 0x%x (0x%x)\n", vq->id, (IArg)vringAddr,
546 RP_MSG_RING_SIZE);
548 /* See coverity related comment in vring_init() */
549 // coverity[overrun-call]
550 vring_init(&(vq->vring), RP_MSG_NUM_BUFS, vringAddr, RP_MSG_VRING_ALIGN);
552 /*
553 * Don't trigger a mailbox message every time MPU makes another buffer
554 * available
555 */
556 if (vq->procId == hostProcId) {
557 vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
558 }
560 queueRegistry[vq->id] = vq;
562 return (vq);
563 }
/*!
 * ======== VirtQueue_startup ========
 * One-time module startup: resolve the MultiProc ids used by the ISR,
 * bring up power management, start the inter-M3 offload service where
 * applicable, and finally hook VirtQueue_isr into the mailbox interrupt.
 * Must run before any VirtQueue_create() call so hostProcId is valid.
 */
Void VirtQueue_startup()
{
    hostProcId = MultiProc_getId("HOST");
#ifndef SMP
    dspProcId = MultiProc_getId("DSP");
    sysm3ProcId = MultiProc_getId("CORE0");
    appm3ProcId = MultiProc_getId("CORE1");
#endif

#ifndef DSPC674
    /* Initialize the IpcPower module */
    IpcPower_init();
#endif

#if defined(M3_ONLY) && !defined(SMP)
    if (MultiProc_self() == sysm3ProcId) {
        OffloadM3_init();
    }
#endif

    /* Registering last ensures the ISR never runs before the ids above are set */
    InterruptProxy_intRegister(VirtQueue_isr);
}
/*!
 * ======== VirtQueue_postCrashToMailbox ========
 * Notify the peer of a local crash by posting RP_MSG_MBOX_CRASH to the
 * mailbox of processor 0.
 * NOTE(review): assumes the HOST has MultiProc id 0; consider using
 * hostProcId (set in VirtQueue_startup) instead — confirm before changing.
 */
Void VirtQueue_postCrashToMailbox(Void)
{
    InterruptProxy_intSend(0, (UInt)RP_MSG_MBOX_CRASH);
}
599 #define CACHE_WB_TICK_PERIOD 5
601 /*!
602 * ======== VirtQueue_cacheWb ========
603 *
604 * Used for flushing SysMin trace buffer.
605 */
606 Void VirtQueue_cacheWb()
607 {
608 static UInt32 oldticks = 0;
609 UInt32 newticks;
611 newticks = Clock_getTicks();
612 if (newticks - oldticks < (UInt32)CACHE_WB_TICK_PERIOD) {
613 /* Don't keep flushing cache */
614 return;
615 }
617 oldticks = newticks;
619 /* Flush the cache */
620 Cache_wbAll();
621 }