diff --git a/packages/ti/ipc/family/omap54xx/VirtQueue.c b/packages/ti/ipc/family/omap54xx/VirtQueue.c
index c055ada73ba895e589029bb95bfd5877a942ed79..b43f5143e7a602bc903c8cdd807056ffa6727432 100644 (file)
/*
- * Copyright (c) 2011-2013, Texas Instruments Incorporated
+ * Copyright (c) 2011-2014, Texas Instruments Incorporated
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <ti/sysbios/hal/Hwi.h>
#include <ti/sysbios/knl/Clock.h>
#include <ti/sysbios/gates/GateHwi.h>
-#include <ti/sysbios/BIOS.h>
#include <ti/sysbios/hal/Cache.h>
#include <ti/ipc/MultiProc.h>
VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK)
/* Used for defining the size of the virtqueue registry */
-#define NUM_QUEUES 4
+#define NUM_QUEUES 2
/* Predefined device addresses */
-#ifndef DSPC674
#define IPC_MEM_VRING0 0xA0000000
#define IPC_MEM_VRING1 0xA0004000
-#else
-#define IPC_MEM_VRING0 0x9FB00000
-#define IPC_MEM_VRING1 0x9FB04000
-#endif
-#define IPC_MEM_VRING2 0xA0008000
-#define IPC_MEM_VRING3 0xA000c000
/*
- * Sizes of the virtqueues (expressed in number of buffers supported,
+ * Size of the virtqueues (expressed in number of buffers supported,
* and must be power of two)
*/
-#define VQ0_SIZE 256
-#define VQ1_SIZE 256
-#define VQ2_SIZE 256
-#define VQ3_SIZE 256
+#define VQ_SIZE 256
/*
* enum - Predefined Mailbox Messages
*
- * @RP_MSG_MBOX_READY: informs the M3's that we're up and running. will be
- * followed by another mailbox message that carries the A9's virtual address
- * of the shared buffer. This would allow the A9's drivers to send virtual
+ * @RP_MSG_MBOX_READY: informs the slave that we're up and running. Will be
+ * followed by another mailbox message that carries the HOST's virtual address
+ * of the shared buffer. This would allow the HOST's drivers to send virtual
* addresses of the buffers.
*
* @RP_MSG_MBOX_STATE_CHANGE: informs the receiver that there is an inbound
};
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#define RP_MSG_NUM_BUFS (VQ0_SIZE) /* must be power of two */
+#define RP_MSG_NUM_BUFS (VQ_SIZE) /* must be power of two */
#define RP_MSG_BUF_SIZE (512)
#define RP_MSG_BUFS_SPACE (RP_MSG_NUM_BUFS * RP_MSG_BUF_SIZE * 2)
/* The total IPC space needed to communicate with a remote processor */
#define RPMSG_IPC_MEM (RP_MSG_BUFS_SPACE + 2 * RP_MSG_RING_SIZE)
-#define ID_SYSM3_TO_A9 ID_SELF_TO_A9
-#define ID_A9_TO_SYSM3 ID_A9_TO_SELF
-#define ID_DSP_TO_A9 ID_SELF_TO_A9
-#define ID_A9_TO_DSP ID_A9_TO_SELF
-#define ID_APPM3_TO_A9 2
-#define ID_A9_TO_APPM3 3
-
typedef struct VirtQueue_Object {
/* Id for this VirtQueue_Object */
UInt16 id;
/* Last available index; updated by VirtQueue_getAvailBuf */
UInt16 last_avail_idx;
- /* Last available index; updated by VirtQueue_addUsedBuf */
- UInt16 last_used_idx;
-
/* Will eventually be used to kick remote processor */
UInt16 procId;
static struct VirtQueue_Object *queueRegistry[NUM_QUEUES] = {NULL};
static UInt16 hostProcId;
-#ifndef SMP
-static UInt16 dspProcId;
-static UInt16 sysm3ProcId;
-static UInt16 appm3ProcId;
-#endif
-
-#if defined(M3_ONLY) && !defined(SMP)
-extern Void OffloadM3_init();
-extern Int OffloadM3_processSysM3Tasks(UArg msg);
-#endif
/*!
* ======== _VirtQueue_init ========
return (Void *)((pa & 0x000fffffU) | IPC_MEM_VRING0);
}
-static inline UInt mapVAtoPA(Void * va)
-{
- return ((UInt)va & 0x000fffffU) | 0x9cf00000U;
-}
-
/*!
* ======== VirtQueue_kick ========
*/
return (0);
}
-/*!
- * ======== VirtQueue_addAvailBuf ========
- */
-Int VirtQueue_addAvailBuf(VirtQueue_Object *vq, Void *buf)
-{
- UInt16 avail;
- IArg key;
-
- if (vq->num_free == 0) {
- /* There's no more space */
- Error_raise(NULL, Error_E_generic, 0, 0);
- }
-
- vq->num_free--;
-
- key = GateHwi_enter(vq->gateH);
- avail = vq->vring.avail->idx++ % vq->vring.num;
-
- vq->vring.desc[avail].addr = mapVAtoPA(buf);
- vq->vring.desc[avail].len = RP_MSG_BUF_SIZE;
- GateHwi_leave(vq->gateH, key);
-
- return (vq->num_free);
-}
-
-/*!
- * ======== VirtQueue_getUsedBuf ========
- */
-Void *VirtQueue_getUsedBuf(VirtQueue_Object *vq)
-{
- UInt16 head;
- Void *buf;
- IArg key;
-
- key = GateHwi_enter(vq->gateH);
- /* There's nothing available? */
- if (vq->last_used_idx == vq->vring.used->idx) {
- buf = NULL;
- }
- else {
- head = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].id;
- vq->last_used_idx++;
-
- buf = mapPAtoVA(vq->vring.desc[head].addr);
- }
- GateHwi_leave(vq->gateH, key);
-
- return (buf);
-}
-
/*!
* ======== VirtQueue_getAvailBuf ========
*/
(IArg)vq, vq->last_avail_idx, vq->vring.avail->idx, vq->vring.num,
(IArg)&vq->vring.avail, (IArg)vq->vring.avail);
+ /* Clear flag here to avoid race condition with remote processor.
+ * This is a negative flag, clearing it means that we want to
+ * receive an interrupt when a buffer has been added to the pool.
+ */
+ vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
+
/* There's nothing available? */
if (vq->last_avail_idx == vq->vring.avail->idx) {
- /* We need to know about added buffers */
- vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
head = (-1);
}
else {
+ /* No need to be kicked about added buffers anymore */
+ vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
+
/*
* Grab the next descriptor number they're advertising, and increment
* the index we've seen.
Log_print1(Diags_USER1, "VirtQueue_isr received msg = 0x%x\n", msg);
-#ifndef SMP
- if (MultiProc_self() == sysm3ProcId || MultiProc_self() == dspProcId) {
-#endif
- switch(msg) {
- case (UInt)RP_MSG_MBOX_READY:
- return;
-
- case (UInt)RP_MBOX_ECHO_REQUEST:
- InterruptProxy_intSend(hostProcId, (UInt)(RP_MBOX_ECHO_REPLY));
- return;
+ switch(msg) {
+ case (UInt)RP_MSG_MBOX_READY:
+ return;
- case (UInt)RP_MBOX_ABORT_REQUEST:
- {
- /* Suppress Coverity Error: FORWARD_NULL: */
- /* coverity[assign_zero] */
- Fxn f = (Fxn)0x0;
- Log_print0(Diags_USER1, "Crash on demand ...\n");
- /* coverity[var_deref_op] */
- f();
- }
- return;
+ case (UInt)RP_MBOX_ECHO_REQUEST:
+ InterruptProxy_intSend(hostProcId, (UInt)(RP_MBOX_ECHO_REPLY));
+ return;
- case (UInt)RP_MSG_FLUSH_CACHE:
- Cache_wbAll();
- return;
+ case (UInt)RP_MBOX_ABORT_REQUEST:
+ {
+ /* Suppress Coverity Error: FORWARD_NULL: */
+ /* coverity[assign_zero] */
+ Fxn f = (Fxn)0x0;
+ Log_print0(Diags_USER1, "Crash on demand ...\n");
+ /* coverity[var_deref_op] */
+ f();
+ }
+ return;
-#ifndef DSPC674
- case (UInt)RP_MSG_HIBERNATION:
- if (IpcPower_canHibernate() == FALSE) {
- InterruptProxy_intSend(hostProcId,
- (UInt)RP_MSG_HIBERNATION_CANCEL);
- return;
- }
+ case (UInt)RP_MSG_FLUSH_CACHE:
+ Cache_wbAll();
+ return;
- /* Fall through */
- case (UInt)RP_MSG_HIBERNATION_FORCE:
-#ifndef SMP
- /* Core0 should notify Core1 */
- if (MultiProc_self() == sysm3ProcId) {
- InterruptProxy_intSend(appm3ProcId,
- (UInt)(RP_MSG_HIBERNATION));
- }
-#endif
- /* Ack request */
+ case (UInt)RP_MSG_HIBERNATION:
+ if (IpcPower_canHibernate() == FALSE) {
InterruptProxy_intSend(hostProcId,
- (UInt)RP_MSG_HIBERNATION_ACK);
- IpcPower_suspend();
+ (UInt)RP_MSG_HIBERNATION_CANCEL);
return;
-#endif
- default:
-#if defined(M3_ONLY) && !defined(SMP)
- /* Check and process any Inter-M3 Offload messages */
- if (OffloadM3_processSysM3Tasks(msg))
- return;
-#endif
-
- /*
- * If the message isn't one of the above, it's either part of the
- * 2-message synchronization sequence or it a virtqueue message
- */
- break;
- }
-#ifndef SMP
- }
-#ifndef DSPC674
- else if (msg & 0xFFFF0000) {
- if (msg == (UInt)RP_MSG_HIBERNATION) {
+ }
+
+ /* Fall through */
+ case (UInt)RP_MSG_HIBERNATION_FORCE:
+ /* Ack request */
+ InterruptProxy_intSend(hostProcId,
+ (UInt)RP_MSG_HIBERNATION_ACK);
IpcPower_suspend();
- }
- return;
+ return;
+
+ default:
+ /*
+ * If the message isn't one of the above, it's either part of the
+             * 2-message synchronization sequence or it's a virtqueue message
+ */
+ break;
}
- if (MultiProc_self() == sysm3ProcId && (msg == ID_A9_TO_APPM3 || msg == ID_APPM3_TO_A9)) {
- InterruptProxy_intSend(appm3ProcId, (UInt)msg);
+    /* Don't let unknown messages pass as a virtqueue index */
+ if (msg >= NUM_QUEUES) {
+ /* Adding print here deliberately, we should never see this */
+ System_printf("VirtQueue_isr: Invalid mailbox message 0x%x "
+ "received\n", msg);
+ return;
}
- else {
-#endif
-#endif
- /* Don't let unknown messages to pass as a virtqueue index */
- if (msg >= NUM_QUEUES) {
- /* Adding print here deliberately, we should never see this */
- System_printf("VirtQueue_isr: Invalid mailbox message 0x%x "
- "received\n", msg);
- return;
- }
- vq = queueRegistry[msg];
- if (vq) {
- vq->callback(vq);
- }
-#ifndef SMP
-#ifndef DSPC674
+ vq = queueRegistry[msg];
+ if (vq) {
+ vq->callback(vq);
}
-#endif
-#endif
}
@@ -555,34 +439,16 @@ VirtQueue_Handle VirtQueue_create(UInt16 remoteProcId, VirtQueue_Params *params,
vq->procId = remoteProcId;
vq->last_avail_idx = 0;
-#ifndef SMP
- if (MultiProc_self() == appm3ProcId) {
- /* vqindices that belong to AppM3 should be big so they don't
- * collide with SysM3's virtqueues */
- vq->id += 2;
- }
-#endif
-
switch (vq->id) {
/* IPC transport vrings */
- case ID_SELF_TO_A9:
- /* IPU/DSP -> A9 */
+ case ID_SELF_TO_HOST:
+ /* slave -> HOST */
vringAddr = (struct vring *) IPC_MEM_VRING0;
break;
- case ID_A9_TO_SELF:
- /* A9 -> IPU/DSP */
+ case ID_HOST_TO_SELF:
+ /* HOST -> slave */
vringAddr = (struct vring *) IPC_MEM_VRING1;
break;
-#ifndef SMP
- case ID_APPM3_TO_A9:
- /* APPM3 -> A9 */
- vringAddr = (struct vring *) IPC_MEM_VRING2;
- break;
- case ID_A9_TO_APPM3:
- /* A9 -> APPM3 */
- vringAddr = (struct vring *) IPC_MEM_VRING3;
- break;
-#endif
default:
GateHwi_delete(&vq->gateH);
Memory_free(NULL, vq, sizeof(VirtQueue_Object));
@@ -616,22 +482,9 @@ VirtQueue_Handle VirtQueue_create(UInt16 remoteProcId, VirtQueue_Params *params,
Void VirtQueue_startup()
{
hostProcId = MultiProc_getId("HOST");
-#ifndef SMP
- dspProcId = MultiProc_getId("DSP");
- sysm3ProcId = MultiProc_getId("CORE0");
- appm3ProcId = MultiProc_getId("CORE1");
-#endif
-#ifndef DSPC674
/* Initilize the IpcPower module */
IpcPower_init();
-#endif
-
-#if defined(M3_ONLY) && !defined(SMP)
- if (MultiProc_self() == sysm3ProcId) {
- OffloadM3_init();
- }
-#endif
/*
* Wait for HLOS (Virtio device) to indicate that priming of host's receive