1 /*
2 * Copyright (c) 2011-2019 Texas Instruments Incorporated - http://www.ti.com
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * * Neither the name of Texas Instruments Incorporated nor the names of
17 * its contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
27 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
33 /** ============================================================================
34 * @file VirtQueue.c
35 *
36 * @brief Virtio Queue implementation for BIOS
37 *
38 * Differences between BIOS version and Linux kernel (include/linux/virtio.h):
39 * - Renamed module from virtio.h to VirtQueue.h to match the API prefixes
40 * - XDC Standard Types and CamelCasing used
41 * - virtio_device concept removed (i.e., assumes no containing device)
42 * - simplified scatterlist from Linux version
43 * - VirtQueue objects are created statically, added VirtQueue_Instance_init()
44 * fxn to take the place of the Virtio vring_new_virtqueue() API
45 * - The notify function is implicit in the implementation, and not provided
46 * by the client, as it is in Linux virtio
47 *
48 * All VirtQueue operations can be called in any context.
49 */
51 #include <xdc/std.h>
52 #include <xdc/runtime/System.h>
53 #include <xdc/runtime/Assert.h>
54 #include <xdc/runtime/Error.h>
55 #include <xdc/runtime/Memory.h>
56 #include <xdc/runtime/Log.h>
57 #include <xdc/runtime/Diags.h>
58 #include <ti/sysbios/gates/GateAll.h>
60 #include <ti/sysbios/knl/Clock.h>
61 #include <ti/sysbios/family/c66/Cache.h>
62 #include <ti/sysbios/knl/Swi.h>
64 #include <ti/ipc/remoteproc/Resource.h>
66 #include <ti/ipc/MultiProc.h>
68 #include "package/internal/VirtQueue.xdc.h"
70 #include <string.h>
72 #include <ti/ipc/rpmsg/_VirtQueue.h>
73 #include <ti/ipc/rpmsg/virtio_ring.h>
/* Used for defining the size of the virtqueue registry */
#define NUM_QUEUES 2
/* Integer ceiling division, used to round the vring size up to whole pages */
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* Buffer pool size: NUM_BUFS buffers of RPMSG_BUF_SIZE in each direction (x2) */
#define RP_MSG_BUFS_SPACE (VirtQueue_RP_MSG_NUM_BUFS * RPMSG_BUF_SIZE * 2)
/* With 256 buffers, our vring will occupy 3 pages */
#define RP_MSG_RING_SIZE \
((DIV_ROUND_UP(vring_size(VirtQueue_RP_MSG_NUM_BUFS, \
VirtQueue_RP_MSG_VRING_ALIGN), \
VirtQueue_PAGE_SIZE)) * VirtQueue_PAGE_SIZE)
/* The total IPC space needed to communicate with a remote processor */
#define RPMSG_IPC_MEM (RP_MSG_BUFS_SPACE + 2 * RP_MSG_RING_SIZE)
/* VirtQueue ids for the two directions of the rpmsg vring pair */
#define ID_SELF_TO_HOST 0
#define ID_HOST_TO_SELF 1
/* C66x control register holding this DSP core's number (0-based) */
extern volatile cregister UInt DNUM;
/* Registry of the two local VirtQueues, indexed by vq->id; filled in by
 * VirtQueue_Instance_init() and scanned by VirtQueue_isr(). */
static VirtQueue_Object *queueRegistry[NUM_QUEUES] = {NULL};
97 static inline Void * mapPAtoVA(UInt pa)
98 {
99 return (Void *)(pa | 0x80000000);
100 }
102 static inline UInt mapVAtoPA(Void * va)
103 {
104 return ((UInt)va & 0x000fffffU) | 0xe1000000U;
105 }
107 /*
108 * ======== VirtQueue_init ========
109 */
110 Void VirtQueue_init()
111 {
112 extern cregister volatile UInt DNUM;
113 UInt16 clusterId;
114 UInt16 procId;
116 /* VirtQueue_init() must be called before MultiProcSetup_init().
117 * Check the xdc_runtime_Startup_firstFxns__A array in the XDC
118 * generated code. Abort if the procId has already been set; we
119 * must set it!
120 */
121 if (MultiProc_self() != MultiProc_INVALIDID) {
122 System_abort("VirtQueue_init(): MultiProc_self already set!");
123 return;
124 }
126 /* clusterId is needed to support single image loading */
127 clusterId = MultiProc_getBaseIdOfCluster();
129 /* compute local procId, add one to account for HOST processor */
130 procId = clusterId + DNUM + 1;
132 /* set the local procId */
133 MultiProc_setLocalId(procId);
134 }
136 /*
137 * ======== VirtQueue_Instance_init ========
138 */
/*
 * ======== VirtQueue_Instance_init ========
 * Initialize one VirtQueue object for the vring identified by params->vqId
 * (ID_SELF_TO_HOST or ID_HOST_TO_SELF), wire it to the vring memory
 * published in the resource table, and register it for ISR dispatch.
 *
 * vq           - object to initialize
 * remoteProcId - MultiProc id of the peer (the host)
 * params       - supplies vqId, callback, and swiHandle
 * eb           - error block; gate/allocation failures are reported here
 *
 * Returns 0 in all cases; failures are signaled via Error_raise().
 */
Int VirtQueue_Instance_init(VirtQueue_Object *vq, UInt16 remoteProcId,
const VirtQueue_Params *params, Error_Block *eb)
{
void *vringAddr = NULL;
/* marValue is only needed by the Log/Assert diagnostics below, so it is
 * only declared when at least one of them is compiled in. */
#if !defined(xdc_runtime_Assert_DISABLE_ALL) \
|| !defined(xdc_runtime_Log_DISABLE_ALL)
UInt32 marValue;
#endif
/* Cache the trace buffer pointer for VirtQueue_cacheWb() */
VirtQueue_module->traceBufPtr = Resource_getTraceBufPtr();
/* create the thread protection gate */
vq->gateH = GateAll_create(NULL, eb);
if (Error_check(eb)) {
Log_error0("VirtQueue_create: could not create gate object");
Error_raise(NULL, Error_E_generic, 0, 0);
return(0);
}
/* Allocate the local vring bookkeeping struct (not the ring memory itself,
 * which lives at the resource-table address looked up below). */
vq->vringPtr = Memory_calloc(NULL, sizeof(struct vring), 0, eb);
Assert_isTrue((vq->vringPtr != NULL), NULL);
vq->callback = params->callback;
vq->id = params->vqId;
vq->procId = remoteProcId;
vq->last_avail_idx = 0;
vq->last_used_idx = 0;
vq->num_free = VirtQueue_RP_MSG_NUM_BUFS;
vq->swiHandle = params->swiHandle;
switch (vq->id) {
case ID_SELF_TO_HOST:
case ID_HOST_TO_SELF:
/* Look up the device (virtual) address of this vring from the
 * resource table. */
vringAddr = (struct vring *)Resource_getVringDA(vq->id);
Assert_isTrue(vringAddr != NULL, NULL);
#ifdef PER_CORE_VRING_PATCH
/* Add per core offset: must match on host side: */
vringAddr = (struct vring *)((UInt32)vringAddr +
(DNUM * VirtQueue_VRING_OFFSET));
#endif
/* Also, assert that the vring address is non-cached: */
#if !defined(xdc_runtime_Assert_DISABLE_ALL) \
|| !defined(xdc_runtime_Log_DISABLE_ALL)
marValue = Cache_getMar((Ptr)vringAddr);
#endif
/* MAR bit 0 set means the region is cacheable, which would break the
 * uncached shared-memory protocol with the host. */
Log_print1(Diags_USER1, "Vring cache is %s",
(IArg)(marValue & 0x1 ? "enabled" : "disabled"));
Assert_isTrue(!(marValue & 0x1), NULL);
break ;
default:
/* Unknown vq id: undo the gate and vring allocation before failing */
Log_error1("VirtQueue_create: invalid vq->id: %d", vq->id);
GateAll_delete(&vq->gateH);
Memory_free(NULL, vq->vringPtr, sizeof(struct vring));
Error_raise(NULL, Error_E_generic, 0, 0);
return(0);
}
Log_print3(Diags_USER1, "vring: %d 0x%x (0x%x)", vq->id, (IArg)vringAddr,
RP_MSG_RING_SIZE);
/* Compute desc/avail/used pointers into the shared ring memory */
vring_init(vq->vringPtr, VirtQueue_RP_MSG_NUM_BUFS, vringAddr,
VirtQueue_RP_MSG_VRING_ALIGN);
/* Make this queue visible to VirtQueue_isr() */
queueRegistry[vq->id] = vq;
return(0);
}
207 /*
208 * ======== VirtQueue_kick ========
209 */
210 Void VirtQueue_kick(VirtQueue_Handle vq)
211 {
212 struct vring *vring = vq->vringPtr;
214 /* For now, simply interrupt remote processor */
215 if (vring->avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
216 Log_print0(Diags_USER1, "VirtQueue_kick: no kick because of "
217 "VRING_AVAIL_F_NO_INTERRUPT");
218 return;
219 }
221 Log_print2(Diags_USER1, "VirtQueue_kick: Sending interrupt to proc %d "
222 "with payload 0x%x", (IArg)vq->procId, (IArg)vq->id);
224 VirtQueue_InterruptProxy_intSend(vq->procId, NULL, 0);
225 }
227 /*
228 * ======== VirtQueue_addUsedBuf ========
229 */
230 Int VirtQueue_addUsedBuf(VirtQueue_Handle vq, Int16 head, Int len)
231 {
232 struct vring_used_elem *used;
233 struct vring *vring = vq->vringPtr;
234 IArg key;
236 key = GateAll_enter(vq->gateH);
237 if (((unsigned int)head > vring->num) || (head < 0)) {
238 Error_raise(NULL, Error_E_generic, 0, 0);
239 }
240 else {
241 /*
242 * The virtqueue contains a ring of used buffers. Get a pointer to the
243 * next entry in that used ring.
244 */
245 used = &vring->used->ring[vring->used->idx % vring->num];
246 used->id = head;
247 used->len = len;
249 vring->used->idx++;
250 }
251 GateAll_leave(vq->gateH, key);
253 return (0);
254 }
256 /*
257 * ======== VirtQueue_addAvailBuf ========
258 */
259 Int VirtQueue_addAvailBuf(VirtQueue_Object *vq, Void *buf)
260 {
261 UInt16 avail;
262 struct vring *vring = vq->vringPtr;
263 IArg key;
265 key = GateAll_enter(vq->gateH);
266 if (vq->num_free == 0) {
267 /* There's no more space */
268 Error_raise(NULL, Error_E_generic, 0, 0);
269 }
270 else {
271 vq->num_free--;
273 avail = vring->avail->idx++ % vring->num;
275 vring->desc[avail].addr = mapVAtoPA(buf);
276 vring->desc[avail].len = RPMSG_BUF_SIZE;
277 }
278 GateAll_leave(vq->gateH, key);
280 return (vq->num_free);
281 }
283 /*
284 * ======== VirtQueue_getUsedBuf ========
285 */
286 Void *VirtQueue_getUsedBuf(VirtQueue_Object *vq)
287 {
288 UInt16 head;
289 Void *buf;
290 struct vring *vring = vq->vringPtr;
291 IArg key;
293 key = GateAll_enter(vq->gateH);
294 /* There's nothing available? */
295 if (vq->last_used_idx == vring->used->idx) {
296 buf = NULL;
297 }
298 else {
299 head = vring->used->ring[vq->last_used_idx % vring->num].id;
300 vq->last_used_idx++;
301 vq->num_free++;
303 buf = mapPAtoVA(vring->desc[head].addr);
304 }
305 GateAll_leave(vq->gateH, key);
307 return (buf);
308 }
310 /*
311 * ======== VirtQueue_getAvailBuf ========
312 */
/*
 * ======== VirtQueue_getAvailBuf ========
 * Fetch the next buffer the remote processor has made available.
 *
 * vq  - the virtqueue
 * buf - out: local virtual address of the buffer
 * len - out: buffer length from the descriptor
 *
 * Returns the descriptor index (head) of the buffer, or -1 if the avail
 * ring is empty. *buf and *len are only written on success.
 */
Int16 VirtQueue_getAvailBuf(VirtQueue_Handle vq, Void **buf, Int *len)
{
Int16 head;
struct vring *vring = vq->vringPtr;
IArg key;
key = GateAll_enter(vq->gateH);
Log_print6(Diags_USER1, "getAvailBuf vq: 0x%x %d %d %d 0x%x 0x%x",
(IArg)vq, (IArg)vq->last_avail_idx, (IArg)vring->avail->idx,
(IArg)vring->num, (IArg)&vring->avail, (IArg)vring->avail);
/* Clear flag here to avoid race condition with remote processor.
 * This is a negative flag, clearing it means that we want to
 * receive an interrupt when a buffer has been added to the pool.
 */
vring->used->flags &= ~VRING_USED_F_NO_NOTIFY;
/* There's nothing available? */
if (vq->last_avail_idx == vring->avail->idx) {
head = (-1);
}
else {
/* No need to be kicked about added buffers anymore */
vring->used->flags |= VRING_USED_F_NO_NOTIFY;
/*
 * Grab the next descriptor number they're advertising, and increment
 * the index we've seen.
 */
head = vring->avail->ring[vq->last_avail_idx++ % vring->num];
*buf = mapPAtoVA(vring->desc[head].addr);
*len = vring->desc[head].len;
}
GateAll_leave(vq->gateH, key);
return (head);
}
352 /*
353 * ======== VirtQueue_isr ========
354 * Note 'msg' is ignored: it is only used where there is a mailbox payload.
355 */
356 static Void VirtQueue_isr(UArg msg)
357 {
358 VirtQueue_Object *vq;
360 Log_print1(Diags_USER1, "VirtQueue_isr received msg = 0x%x", msg);
362 vq = queueRegistry[0];
363 if (vq) {
364 vq->callback(vq);
365 }
366 vq = queueRegistry[1];
367 if (vq) {
368 vq->callback(vq);
369 }
370 }
372 /*
373 * ======== VirtQueue_startup ========
374 */
/*
 * ======== VirtQueue_startup ========
 * Synchronize with the host, then register the VirtQueue ISR.
 *
 * procId - MultiProc id of the host processor
 * isHost - unused in this implementation (kept for API compatibility)
 */
Void VirtQueue_startup(UInt16 procId, Bool isHost)
{
/* Wait for first kick from host, which happens to coincide with the
 * priming of host's receive buffers, indicating host is ready to send.
 * Since interrupt is cleared, we throw away this first kick, which is
 * OK since we don't process this in the ISR anyway.
 */
Log_print0(Diags_USER1, "VirtQueue_startup: Polling for host int...");
while (!VirtQueue_InterruptProxy_intClear(procId, NULL));
/* Only now is it safe to take interrupts: hook up the ISR */
VirtQueue_InterruptProxy_intRegister(procId, NULL, (Fxn)VirtQueue_isr, 0);
Log_print0(Diags_USER1, "Passed VirtQueue_startup");
}
/* By convention, Host VirtQueues are the even-numbered queue of each pair */
391 Bool VirtQueue_isSlave(VirtQueue_Handle vq)
392 {
393 return (vq->id & 0x1);
394 }
396 Bool VirtQueue_isHost(VirtQueue_Handle vq)
397 {
398 return (~(vq->id & 0x1));
399 }
401 UInt16 VirtQueue_getId(VirtQueue_Handle vq)
402 {
403 return (vq->id);
404 }
406 Swi_Handle VirtQueue_getSwiHandle(VirtQueue_Handle vq)
407 {
408 return (vq->swiHandle);
409 }
/* Minimum number of Clock ticks between successive trace-buffer flushes */
#define CACHE_WB_TICK_PERIOD 5
/*
 * ======== VirtQueue_cacheWb ========
 *
 * Used for flushing SysMin trace buffer.
 * Rate-limited: does nothing if called again within CACHE_WB_TICK_PERIOD
 * ticks of the previous flush.
 */
Void VirtQueue_cacheWb()
{
/* Tick count of the last flush; persists across calls */
static UInt32 oldticks = 0;
UInt32 newticks;
newticks = Clock_getTicks();
/* Unsigned subtraction handles tick-counter wraparound correctly */
if (newticks - oldticks < (UInt32)CACHE_WB_TICK_PERIOD) {
/* Don't keep flushing cache */
return;
}
oldticks = newticks;
/* Flush the cache of the SysMin buffer only: */
Assert_isTrue((VirtQueue_module->traceBufPtr != NULL), NULL);
Cache_wb(VirtQueue_module->traceBufPtr, Resource_getTraceBufSize(), Cache_Type_ALL,
FALSE);
}
435 }