1 /*
2 * Copyright (c) 2011-2015 Texas Instruments Incorporated - http://www.ti.com
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * * Neither the name of Texas Instruments Incorporated nor the names of
17 * its contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
27 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
33 /** ============================================================================
34 * @file VirtQueue.c
35 *
36 * @brief Virtio Queue implementation for BIOS
37 *
38 * Differences between BIOS version and Linux kernel (include/linux/virtio.h):
39 * - Renamed module from virtio.h to VirtQueue.h to match the API prefixes
40 * - XDC Standard Types and CamelCasing used
41 * - virtio_device concept removed (i.e, assumes no containing device)
42 * - simplified scatterlist from Linux version
43 * - VirtQueue objects are created statically, added VirtQueue_Instance_init()
44 * fxn to take the place of the Virtio vring_new_virtqueue() API
45 * - The notify function is implicit in the implementation, and not provided
46 * by the client, as it is in Linux virtio
47 *
48 * All VirtQueue operations can be called in any context.
49 */
51 #include <xdc/std.h>
52 #include <xdc/runtime/System.h>
53 #include <xdc/runtime/Assert.h>
54 #include <xdc/runtime/Error.h>
55 #include <xdc/runtime/Memory.h>
56 #include <xdc/runtime/Log.h>
57 #include <xdc/runtime/Diags.h>
58 #include <xdc/runtime/SysMin.h>
59 #include <ti/sysbios/gates/GateAll.h>
61 #include <ti/sysbios/knl/Clock.h>
62 #include <ti/sysbios/family/c66/Cache.h>
63 #include <ti/sysbios/knl/Swi.h>
65 #include <ti/ipc/family/tci6638/Interrupt.h>
66 #include <ti/ipc/remoteproc/Resource.h>
68 #include <ti/ipc/MultiProc.h>
70 #include "package/internal/VirtQueue.xdc.h"
72 #include <string.h>
74 #include <ti/ipc/rpmsg/_VirtQueue.h>
75 #include <ti/ipc/rpmsg/virtio_ring.h>
/* Used for defining the size of the virtqueue registry */
#define NUM_QUEUES              2

/* Round n up to the next whole multiple of d (integer arithmetic) */
#define DIV_ROUND_UP(n,d)       (((n) + (d) - 1) / (d))

/* Space for all message buffers: per-direction count, two directions */
#define RP_MSG_BUFS_SPACE       (VirtQueue_RP_MSG_NUM_BUFS * RPMSG_BUF_SIZE * 2)

/* With 256 buffers, our vring will occupy 3 pages */
/* One vring's size, rounded up to a whole number of pages */
#define RP_MSG_RING_SIZE \
    ((DIV_ROUND_UP(vring_size(VirtQueue_RP_MSG_NUM_BUFS, \
                              VirtQueue_RP_MSG_VRING_ALIGN), \
                   VirtQueue_PAGE_SIZE)) * VirtQueue_PAGE_SIZE)

/* The total IPC space needed to communicate with a remote processor */
#define RPMSG_IPC_MEM           (RP_MSG_BUFS_SPACE + 2 * RP_MSG_RING_SIZE)

/* Vring ids: by convention the even id is the self->host direction */
#define ID_SELF_TO_HOST         0
#define ID_HOST_TO_SELF         1

/* C66x control register holding this core's number (0-based) */
extern volatile cregister UInt DNUM;

/* Registry of the vrings serviced by VirtQueue_isr(), indexed by vq id */
static VirtQueue_Object *queueRegistry[NUM_QUEUES] = {NULL};
99 static inline Void * mapPAtoVA(UInt pa)
100 {
101 return (Void *)(pa | 0x80000000);
102 }
104 static inline UInt mapVAtoPA(Void * va)
105 {
106 return ((UInt)va & 0x000fffffU) | 0xe1000000U;
107 }
109 /*
110 * ======== VirtQueue_init ========
111 */
112 Void VirtQueue_init()
113 {
114 extern cregister volatile UInt DNUM;
115 UInt16 clusterId;
116 UInt16 procId;
118 /* VirtQueue_init() must be called before MultiProcSetup_init().
119 * Check the xdc_runtime_Startup_firstFxns__A array in the XDC
120 * generated code. Abort if the procId has already been set; we
121 * must set it!
122 */
123 if (MultiProc_self() != MultiProc_INVALIDID) {
124 System_abort("VirtQueue_init(): MultiProc_self already set!");
125 return;
126 }
128 /* clusterId is needed to support single image loading */
129 clusterId = MultiProc_getBaseIdOfCluster();
131 /* compute local procId, add one to account for HOST processor */
132 procId = clusterId + DNUM + 1;
134 /* set the local procId */
135 MultiProc_setLocalId(procId);
136 }
/*
 * ======== VirtQueue_Instance_init ========
 * Creates one virtqueue object bound to a shared vring.
 *
 * vq           - object to initialize (allocated by the XDC runtime)
 * remoteProcId - MultiProc id of the peer processor
 * params       - supplies vqId, callback and swiHandle
 * eb           - error block; checked after each allocating call
 *
 * Returns 0 in all paths; failure is reported through eb/Error_raise.
 */
Int VirtQueue_Instance_init(VirtQueue_Object *vq, UInt16 remoteProcId,
                            const VirtQueue_Params *params, Error_Block *eb)
{
    void *vringAddr = NULL;
    UInt32 marValue;

    /* Cache the trace buffer pointer from the resource table */
    VirtQueue_module->traceBufPtr = Resource_getTraceBufPtr();

    /* create the thread protection gate */
    vq->gateH = GateAll_create(NULL, eb);
    if (Error_check(eb)) {
        Log_error0("VirtQueue_create: could not create gate object");
        Error_raise(NULL, Error_E_generic, 0, 0);
        return(0);
    }

    /* NOTE(review): allocation failure is only caught by this Assert;
     * in a build with asserts disabled a NULL vringPtr would be
     * dereferenced by vring_init() below — confirm intended policy. */
    vq->vringPtr = Memory_calloc(NULL, sizeof(struct vring), 0, eb);
    Assert_isTrue((vq->vringPtr != NULL), NULL);

    vq->callback = params->callback;
    vq->id = params->vqId;
    vq->procId = remoteProcId;
    vq->last_avail_idx = 0;
    vq->last_used_idx = 0;
    vq->num_free = VirtQueue_RP_MSG_NUM_BUFS;
    vq->swiHandle = params->swiHandle;

    switch (vq->id) {
        case ID_SELF_TO_HOST:
        case ID_HOST_TO_SELF:
            /* Device address of this vring comes from the resource table */
            vringAddr = (struct vring *)Resource_getVringDA(vq->id);
            Assert_isTrue(vringAddr != NULL, NULL);

            /* Add per core offset: must match on host side: */
            vringAddr = (struct vring *)((UInt32)vringAddr +
                        (DNUM * VirtQueue_VRING_OFFSET));

            /* Also, assert that the vring address is non-cached: */
            marValue = Cache_getMar((Ptr)vringAddr);
            Log_print1(Diags_USER1, "Vring cache is %s",
                       (IArg)(marValue & 0x1 ? "enabled" : "disabled"));
            Assert_isTrue(!(marValue & 0x1), NULL);
            break ;

        default:
            /* Unknown vq id: undo the gate and vring allocations and fail */
            Log_error1("VirtQueue_create: invalid vq->id: %d", vq->id);
            GateAll_delete(&vq->gateH);
            Memory_free(NULL, vq->vringPtr, sizeof(struct vring));
            Error_raise(NULL, Error_E_generic, 0, 0);
            return(0);
    }

    Log_print3(Diags_USER1, "vring: %d 0x%x (0x%x)", vq->id, (IArg)vringAddr,
               RP_MSG_RING_SIZE);

    /* Lay the local vring descriptor over the shared memory region */
    vring_init(vq->vringPtr, VirtQueue_RP_MSG_NUM_BUFS, vringAddr,
               VirtQueue_RP_MSG_VRING_ALIGN);

    /* Register so VirtQueue_isr() can dispatch to this queue's callback */
    queueRegistry[vq->id] = vq;

    return(0);
}
202 /*
203 * ======== VirtQueue_kick ========
204 */
205 Void VirtQueue_kick(VirtQueue_Handle vq)
206 {
207 struct vring *vring = vq->vringPtr;
208 Interrupt_IntInfo intInfo;
210 /* For now, simply interrupt remote processor */
211 if (vring->avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
212 Log_print0(Diags_USER1, "VirtQueue_kick: no kick because of "
213 "VRING_AVAIL_F_NO_INTERRUPT");
214 return;
215 }
217 Log_print2(Diags_USER1, "VirtQueue_kick: Sending interrupt to proc %d "
218 "with payload 0x%x", (IArg)vq->procId, (IArg)vq->id);
220 intInfo.localIntId = Interrupt_SRCS_BITPOS_CORE0;
221 Interrupt_intSend(vq->procId, &intInfo, vq->id);
222 }
224 /*
225 * ======== VirtQueue_addUsedBuf ========
226 */
227 Int VirtQueue_addUsedBuf(VirtQueue_Handle vq, Int16 head, Int len)
228 {
229 struct vring_used_elem *used;
230 struct vring *vring = vq->vringPtr;
231 IArg key;
233 key = GateAll_enter(vq->gateH);
234 if ((head > vring->num) || (head < 0)) {
235 Error_raise(NULL, Error_E_generic, 0, 0);
236 }
237 else {
238 /*
239 * The virtqueue contains a ring of used buffers. Get a pointer to the
240 * next entry in that used ring.
241 */
242 used = &vring->used->ring[vring->used->idx % vring->num];
243 used->id = head;
244 used->len = len;
246 vring->used->idx++;
247 }
248 GateAll_leave(vq->gateH, key);
250 return (0);
251 }
253 /*
254 * ======== VirtQueue_addAvailBuf ========
255 */
256 Int VirtQueue_addAvailBuf(VirtQueue_Object *vq, Void *buf)
257 {
258 UInt16 avail;
259 struct vring *vring = vq->vringPtr;
260 IArg key;
262 key = GateAll_enter(vq->gateH);
263 if (vq->num_free == 0) {
264 /* There's no more space */
265 Error_raise(NULL, Error_E_generic, 0, 0);
266 }
267 else {
268 vq->num_free--;
270 avail = vring->avail->idx++ % vring->num;
272 vring->desc[avail].addr = mapVAtoPA(buf);
273 vring->desc[avail].len = RPMSG_BUF_SIZE;
274 }
275 GateAll_leave(vq->gateH, key);
277 return (vq->num_free);
278 }
280 /*
281 * ======== VirtQueue_getUsedBuf ========
282 */
283 Void *VirtQueue_getUsedBuf(VirtQueue_Object *vq)
284 {
285 UInt16 head;
286 Void *buf;
287 struct vring *vring = vq->vringPtr;
288 IArg key;
290 key = GateAll_enter(vq->gateH);
291 /* There's nothing available? */
292 if (vq->last_used_idx == vring->used->idx) {
293 buf = NULL;
294 }
295 else {
296 head = vring->used->ring[vq->last_used_idx % vring->num].id;
297 vq->last_used_idx++;
298 vq->num_free++;
300 buf = mapPAtoVA(vring->desc[head].addr);
301 }
302 GateAll_leave(vq->gateH, key);
304 return (buf);
305 }
/*
 * ======== VirtQueue_getAvailBuf ========
 * Fetch the next buffer the host has made available.
 *
 * vq  - virtqueue handle
 * buf - out: virtual address of the buffer
 * len - out: buffer length from the descriptor
 *
 * Returns the descriptor index, or -1 when no buffer is available
 * (in which case *buf and *len are untouched).
 *
 * NOTE: the order of the flag operations below is deliberate; do not
 * reorder them relative to the avail-index comparison.
 */
Int16 VirtQueue_getAvailBuf(VirtQueue_Handle vq, Void **buf, Int *len)
{
    Int16 head;
    struct vring *vring = vq->vringPtr;
    IArg key;

    key = GateAll_enter(vq->gateH);
    Log_print6(Diags_USER1, "getAvailBuf vq: 0x%x %d %d %d 0x%x 0x%x",
               (IArg)vq, (IArg)vq->last_avail_idx, (IArg)vring->avail->idx,
               (IArg)vring->num, (IArg)&vring->avail, (IArg)vring->avail);

    /* Clear flag here to avoid race condition with remote processor.
     * This is a negative flag, clearing it means that we want to
     * receive an interrupt when a buffer has been added to the pool.
     */
    vring->used->flags &= ~VRING_USED_F_NO_NOTIFY;

    /* There's nothing available? */
    if (vq->last_avail_idx == vring->avail->idx) {
        /* Leave notifications enabled so the host kicks us on next add */
        head = (-1);
    }
    else {
        /* No need to be kicked about added buffers anymore */
        vring->used->flags |= VRING_USED_F_NO_NOTIFY;

        /*
         * Grab the next descriptor number they're advertising, and increment
         * the index we've seen.
         */
        head = vring->avail->ring[vq->last_avail_idx++ % vring->num];

        /* Translate the descriptor's physical address for local use */
        *buf = mapPAtoVA(vring->desc[head].addr);
        *len = vring->desc[head].len;
    }
    GateAll_leave(vq->gateH, key);

    return (head);
}
349 /*
350 * ======== VirtQueue_isr ========
351 * Note 'msg' is ignored: it is only used where there is a mailbox payload.
352 */
353 Void VirtQueue_isr(UArg msg)
354 {
355 VirtQueue_Object *vq;
357 Log_print1(Diags_USER1, "VirtQueue_isr received msg = 0x%x", msg);
359 vq = queueRegistry[0];
360 if (vq) {
361 vq->callback(vq);
362 }
363 vq = queueRegistry[1];
364 if (vq) {
365 vq->callback(vq);
366 }
367 }
369 /*
370 * ======== VirtQueue_startup ========
371 */
372 Void VirtQueue_startup(UInt16 remoteProcId, Bool isHost)
373 {
374 Interrupt_IntInfo intInfo;
376 intInfo.intVectorId = Interrupt_DSPINT;
377 intInfo.localIntId = Interrupt_SRCS_BITPOS_HOST;
380 /*
381 * Wait for first kick from host, which happens to coincide with the
382 * priming of host's receive buffers, indicating host is ready to send.
383 * Since interrupt is cleared, we throw away this first kick, which is
384 * OK since we don't process this in the ISR anyway.
385 */
386 Log_print0(Diags_USER1, "VirtQueue_startup: Polling for host int...");
387 while (!Interrupt_checkAndClear(remoteProcId, &intInfo));
389 Interrupt_intRegister(remoteProcId, &intInfo, (Fxn)VirtQueue_isr, 0);
391 Log_print0(Diags_USER1, "Passed VirtQueue_startup");
392 }
/* By convention, Host VirtQueues are the even-numbered ids in the pair */
395 Bool VirtQueue_isSlave(VirtQueue_Handle vq)
396 {
397 return (vq->id & 0x1);
398 }
400 Bool VirtQueue_isHost(VirtQueue_Handle vq)
401 {
402 return (~(vq->id & 0x1));
403 }
405 UInt16 VirtQueue_getId(VirtQueue_Handle vq)
406 {
407 return (vq->id);
408 }
410 Swi_Handle VirtQueue_getSwiHandle(VirtQueue_Handle vq)
411 {
412 return (vq->swiHandle);
413 }
/* Minimum interval, in Clock ticks, between trace-buffer cache flushes */
#define CACHE_WB_TICK_PERIOD 5
417 /*
418 * ======== VirtQueue_cacheWb ========
419 *
420 * Used for flushing SysMin trace buffer.
421 */
422 Void VirtQueue_cacheWb()
423 {
424 static UInt32 oldticks = 0;
425 UInt32 newticks;
427 newticks = Clock_getTicks();
428 if (newticks - oldticks < (UInt32)CACHE_WB_TICK_PERIOD) {
429 /* Don't keep flushing cache */
430 return;
431 }
433 oldticks = newticks;
435 /* Flush the cache of the SysMin buffer only: */
436 Assert_isTrue((VirtQueue_module->traceBufPtr != NULL), NULL);
437 Cache_wb(VirtQueue_module->traceBufPtr, SysMin_bufSize, Cache_Type_ALL,
438 FALSE);
439 }