[processor-sdk/performance-audio-sr.git] / ipc_3_43_00_00_eng / packages / ti / ipc / family / tci6638 / VirtQueue.c
1 /*
2 * Copyright (c) 2011-2015 Texas Instruments Incorporated - http://www.ti.com
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * * Neither the name of Texas Instruments Incorporated nor the names of
17 * its contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
27 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
33 /** ============================================================================
34 * @file VirtQueue.c
35 *
36 * @brief Virtio Queue implementation for BIOS
37 *
38 * Differences between BIOS version and Linux kernel (include/linux/virtio.h):
39 * - Renamed module from virtio.h to VirtQueue.h to match the API prefixes
40 * - XDC Standard Types and CamelCasing used
41 * - virtio_device concept removed (i.e, assumes no containing device)
42 * - simplified scatterlist from Linux version
43 * - VirtQueue objects are created statically, added VirtQueue_Instance_init()
44 * fxn to take the place of the Virtio vring_new_virtqueue() API
45 * - The notify function is implicit in the implementation, and not provided
46 * by the client, as it is in Linux virtio
47 *
48 * All VirtQueue operations can be called in any context.
49 */
51 #include <xdc/std.h>
52 #include <xdc/runtime/System.h>
53 #include <xdc/runtime/Assert.h>
54 #include <xdc/runtime/Error.h>
55 #include <xdc/runtime/Memory.h>
56 #include <xdc/runtime/Log.h>
57 #include <xdc/runtime/Diags.h>
58 #include <xdc/runtime/SysMin.h>
59 #include <ti/sysbios/gates/GateAll.h>
61 #include <ti/sysbios/knl/Clock.h>
62 #include <ti/sysbios/family/c66/Cache.h>
63 #include <ti/sysbios/knl/Swi.h>
65 #include <ti/ipc/remoteproc/Resource.h>
67 #include <ti/ipc/MultiProc.h>
69 #include "package/internal/VirtQueue.xdc.h"
71 #include <string.h>
73 #include <ti/ipc/rpmsg/_VirtQueue.h>
74 #include <ti/ipc/rpmsg/virtio_ring.h>
/* Used for defining the size of the virtqueue registry */
#define NUM_QUEUES              2

/* Integer ceiling division: round n up to the next multiple of d */
#define DIV_ROUND_UP(n,d)       (((n) + (d) - 1) / (d))

/* Total buffer space: RP_MSG_NUM_BUFS fixed-size buffers per direction,
 * two directions */
#define RP_MSG_BUFS_SPACE       (VirtQueue_RP_MSG_NUM_BUFS * RPMSG_BUF_SIZE * 2)

/* With 256 buffers, our vring will occupy 3 pages */
/* One vring's footprint, rounded up to a whole number of pages */
#define RP_MSG_RING_SIZE \
        ((DIV_ROUND_UP(vring_size(VirtQueue_RP_MSG_NUM_BUFS, \
                                  VirtQueue_RP_MSG_VRING_ALIGN), \
                       VirtQueue_PAGE_SIZE)) * VirtQueue_PAGE_SIZE)

/* The total IPC space needed to communicate with a remote processor */
#define RPMSG_IPC_MEM           (RP_MSG_BUFS_SPACE + 2 * RP_MSG_RING_SIZE)

/* Vring ids: by convention the self->host direction is the even id */
#define ID_SELF_TO_HOST         0
#define ID_HOST_TO_SELF         1

/* C66x core-number control register; used to derive the local procId */
extern volatile cregister UInt DNUM;

/* Registry of the two local virtqueues, indexed by vq->id.  Populated by
 * VirtQueue_Instance_init() and walked by VirtQueue_isr() on each host
 * interrupt. */
static VirtQueue_Object *queueRegistry[NUM_QUEUES] = {NULL};
98 static inline Void * mapPAtoVA(UInt pa)
99 {
100 return (Void *)(pa | 0x80000000);
101 }
103 static inline UInt mapVAtoPA(Void * va)
104 {
105 return ((UInt)va & 0x000fffffU) | 0xe1000000U;
106 }
108 /*
109 * ======== VirtQueue_init ========
110 */
111 Void VirtQueue_init()
112 {
113 extern cregister volatile UInt DNUM;
114 UInt16 clusterId;
115 UInt16 procId;
117 /* VirtQueue_init() must be called before MultiProcSetup_init().
118 * Check the xdc_runtime_Startup_firstFxns__A array in the XDC
119 * generated code. Abort if the procId has already been set; we
120 * must set it!
121 */
122 if (MultiProc_self() != MultiProc_INVALIDID) {
123 System_abort("VirtQueue_init(): MultiProc_self already set!");
124 return;
125 }
127 /* clusterId is needed to support single image loading */
128 clusterId = MultiProc_getBaseIdOfCluster();
130 /* compute local procId, add one to account for HOST processor */
131 procId = clusterId + DNUM + 1;
133 /* set the local procId */
134 MultiProc_setLocalId(procId);
135 }
137 /*
138 * ======== VirtQueue_Instance_init ========
139 */
140 Int VirtQueue_Instance_init(VirtQueue_Object *vq, UInt16 remoteProcId,
141 const VirtQueue_Params *params, Error_Block *eb)
142 {
143 void *vringAddr = NULL;
144 #if !defined(xdc_runtime_Assert_DISABLE_ALL) \
145 || !defined(xdc_runtime_Log_DISABLE_ALL)
146 UInt32 marValue;
147 #endif
149 VirtQueue_module->traceBufPtr = Resource_getTraceBufPtr();
151 /* create the thread protection gate */
152 vq->gateH = GateAll_create(NULL, eb);
153 if (Error_check(eb)) {
154 Log_error0("VirtQueue_create: could not create gate object");
155 Error_raise(NULL, Error_E_generic, 0, 0);
156 return(0);
157 }
159 vq->vringPtr = Memory_calloc(NULL, sizeof(struct vring), 0, eb);
160 Assert_isTrue((vq->vringPtr != NULL), NULL);
162 vq->callback = params->callback;
163 vq->id = params->vqId;
164 vq->procId = remoteProcId;
165 vq->last_avail_idx = 0;
166 vq->last_used_idx = 0;
167 vq->num_free = VirtQueue_RP_MSG_NUM_BUFS;
168 vq->swiHandle = params->swiHandle;
170 switch (vq->id) {
171 case ID_SELF_TO_HOST:
172 case ID_HOST_TO_SELF:
173 vringAddr = (struct vring *)Resource_getVringDA(vq->id);
174 Assert_isTrue(vringAddr != NULL, NULL);
175 #ifdef PER_CORE_VRING_PATCH
176 /* Add per core offset: must match on host side: */
177 vringAddr = (struct vring *)((UInt32)vringAddr +
178 (DNUM * VirtQueue_VRING_OFFSET));
179 #endif
180 /* Also, assert that the vring address is non-cached: */
181 #if !defined(xdc_runtime_Assert_DISABLE_ALL) \
182 || !defined(xdc_runtime_Log_DISABLE_ALL)
183 marValue = Cache_getMar((Ptr)vringAddr);
184 #endif
185 Log_print1(Diags_USER1, "Vring cache is %s",
186 (IArg)(marValue & 0x1 ? "enabled" : "disabled"));
187 Assert_isTrue(!(marValue & 0x1), NULL);
188 break ;
190 default:
191 Log_error1("VirtQueue_create: invalid vq->id: %d", vq->id);
192 GateAll_delete(&vq->gateH);
193 Memory_free(NULL, vq->vringPtr, sizeof(struct vring));
194 Error_raise(NULL, Error_E_generic, 0, 0);
195 return(0);
196 }
198 Log_print3(Diags_USER1, "vring: %d 0x%x (0x%x)", vq->id, (IArg)vringAddr,
199 RP_MSG_RING_SIZE);
201 vring_init(vq->vringPtr, VirtQueue_RP_MSG_NUM_BUFS, vringAddr,
202 VirtQueue_RP_MSG_VRING_ALIGN);
204 queueRegistry[vq->id] = vq;
205 return(0);
206 }
208 /*
209 * ======== VirtQueue_kick ========
210 */
211 Void VirtQueue_kick(VirtQueue_Handle vq)
212 {
213 struct vring *vring = vq->vringPtr;
215 /* For now, simply interrupt remote processor */
216 if (vring->avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
217 Log_print0(Diags_USER1, "VirtQueue_kick: no kick because of "
218 "VRING_AVAIL_F_NO_INTERRUPT");
219 return;
220 }
222 Log_print2(Diags_USER1, "VirtQueue_kick: Sending interrupt to proc %d "
223 "with payload 0x%x", (IArg)vq->procId, (IArg)vq->id);
225 VirtQueue_InterruptProxy_intSend(vq->procId, NULL, 0);
226 }
228 /*
229 * ======== VirtQueue_addUsedBuf ========
230 */
231 Int VirtQueue_addUsedBuf(VirtQueue_Handle vq, Int16 head, Int len)
232 {
233 struct vring_used_elem *used;
234 struct vring *vring = vq->vringPtr;
235 IArg key;
237 key = GateAll_enter(vq->gateH);
238 if ((head > vring->num) || (head < 0)) {
239 Error_raise(NULL, Error_E_generic, 0, 0);
240 }
241 else {
242 /*
243 * The virtqueue contains a ring of used buffers. Get a pointer to the
244 * next entry in that used ring.
245 */
246 used = &vring->used->ring[vring->used->idx % vring->num];
247 used->id = head;
248 used->len = len;
250 vring->used->idx++;
251 }
252 GateAll_leave(vq->gateH, key);
254 return (0);
255 }
/*
 * ======== VirtQueue_addAvailBuf ========
 * Hand one receive buffer to the host by publishing its physical address
 * in the descriptor table.  Returns the remaining free-buffer count.
 *
 * NOTE(review): avail->idx is incremented before the descriptor is filled
 * in; this statement order is part of the shared-memory protocol with the
 * host side and is deliberately left untouched here.
 */
Int VirtQueue_addAvailBuf(VirtQueue_Object *vq, Void *buf)
{
    UInt16 avail;
    struct vring *vring = vq->vringPtr;
    IArg key;

    key = GateAll_enter(vq->gateH);
    if (vq->num_free == 0) {
        /* There's no more space */
        Error_raise(NULL, Error_E_generic, 0, 0);
    }
    else {
        vq->num_free--;

        /* claim the next slot; idx wraps modulo the ring size */
        avail = vring->avail->idx++ % vring->num;

        /* all rpmsg buffers are fixed-size */
        vring->desc[avail].addr = mapVAtoPA(buf);
        vring->desc[avail].len = RPMSG_BUF_SIZE;
    }
    GateAll_leave(vq->gateH, key);

    /* NOTE(review): read after leaving the gate — callers should treat
     * this as a hint, not an exact count */
    return (vq->num_free);
}
284 /*
285 * ======== VirtQueue_getUsedBuf ========
286 */
287 Void *VirtQueue_getUsedBuf(VirtQueue_Object *vq)
288 {
289 UInt16 head;
290 Void *buf;
291 struct vring *vring = vq->vringPtr;
292 IArg key;
294 key = GateAll_enter(vq->gateH);
295 /* There's nothing available? */
296 if (vq->last_used_idx == vring->used->idx) {
297 buf = NULL;
298 }
299 else {
300 head = vring->used->ring[vq->last_used_idx % vring->num].id;
301 vq->last_used_idx++;
302 vq->num_free++;
304 buf = mapPAtoVA(vring->desc[head].addr);
305 }
306 GateAll_leave(vq->gateH, key);
308 return (buf);
309 }
/*
 * ======== VirtQueue_getAvailBuf ========
 * Dequeue the next buffer the host has made available.  On success, stores
 * the buffer's local virtual address in *buf and its length in *len, and
 * returns the descriptor index ("head").  Returns -1 when the avail ring
 * is empty (*buf and *len are left untouched in that case).
 *
 * NOTE(review): the VRING_USED_F_NO_NOTIFY clear/set sequence around the
 * empty check is a deliberate race-avoidance handshake with the host; the
 * statement order must not be changed.
 */
Int16 VirtQueue_getAvailBuf(VirtQueue_Handle vq, Void **buf, Int *len)
{
    Int16 head;
    struct vring *vring = vq->vringPtr;
    IArg key;

    key = GateAll_enter(vq->gateH);
    Log_print6(Diags_USER1, "getAvailBuf vq: 0x%x %d %d %d 0x%x 0x%x",
            (IArg)vq, (IArg)vq->last_avail_idx, (IArg)vring->avail->idx,
            (IArg)vring->num, (IArg)&vring->avail, (IArg)vring->avail);

    /* Clear flag here to avoid race condition with remote processor.
     * This is a negative flag, clearing it means that we want to
     * receive an interrupt when a buffer has been added to the pool.
     */
    vring->used->flags &= ~VRING_USED_F_NO_NOTIFY;

    /* There's nothing available? */
    if (vq->last_avail_idx == vring->avail->idx) {
        head = (-1);
    }
    else {
        /* No need to be kicked about added buffers anymore */
        vring->used->flags |= VRING_USED_F_NO_NOTIFY;

        /*
         * Grab the next descriptor number they're advertising, and increment
         * the index we've seen.
         */
        head = vring->avail->ring[vq->last_avail_idx++ % vring->num];

        /* translate the descriptor's physical address for local use */
        *buf = mapPAtoVA(vring->desc[head].addr);
        *len = vring->desc[head].len;
    }
    GateAll_leave(vq->gateH, key);

    return (head);
}
353 /*
354 * ======== VirtQueue_isr ========
355 * Note 'msg' is ignored: it is only used where there is a mailbox payload.
356 */
357 static Void VirtQueue_isr(UArg msg)
358 {
359 VirtQueue_Object *vq;
361 Log_print1(Diags_USER1, "VirtQueue_isr received msg = 0x%x", msg);
363 vq = queueRegistry[0];
364 if (vq) {
365 vq->callback(vq);
366 }
367 vq = queueRegistry[1];
368 if (vq) {
369 vq->callback(vq);
370 }
371 }
373 /*
374 * ======== VirtQueue_startup ========
375 */
376 Void VirtQueue_startup(UInt16 procId, Bool isHost)
377 {
379 /* Wait for first kick from host, which happens to coincide with the
380 * priming of host's receive buffers, indicating host is ready to send.
381 * Since interrupt is cleared, we throw away this first kick, which is
382 * OK since we don't process this in the ISR anyway.
383 */
384 Log_print0(Diags_USER1, "VirtQueue_startup: Polling for host int...");
385 while (!VirtQueue_InterruptProxy_intClear(procId, NULL));
387 VirtQueue_InterruptProxy_intRegister(procId, NULL, (Fxn)VirtQueue_isr, 0);
388 Log_print0(Diags_USER1, "Passed VirtQueue_startup");
389 }
391 /* By convention, Host VirtQueues host are the even number in the pair */
392 Bool VirtQueue_isSlave(VirtQueue_Handle vq)
393 {
394 return (vq->id & 0x1);
395 }
397 Bool VirtQueue_isHost(VirtQueue_Handle vq)
398 {
399 return (~(vq->id & 0x1));
400 }
402 UInt16 VirtQueue_getId(VirtQueue_Handle vq)
403 {
404 return (vq->id);
405 }
407 Swi_Handle VirtQueue_getSwiHandle(VirtQueue_Handle vq)
408 {
409 return (vq->swiHandle);
410 }
412 #define CACHE_WB_TICK_PERIOD 5
414 /*
415 * ======== VirtQueue_cacheWb ========
416 *
417 * Used for flushing SysMin trace buffer.
418 */
419 Void VirtQueue_cacheWb()
420 {
421 static UInt32 oldticks = 0;
422 UInt32 newticks;
424 newticks = Clock_getTicks();
425 if (newticks - oldticks < (UInt32)CACHE_WB_TICK_PERIOD) {
426 /* Don't keep flushing cache */
427 return;
428 }
430 oldticks = newticks;
432 /* Flush the cache of the SysMin buffer only: */
433 Assert_isTrue((VirtQueue_module->traceBufPtr != NULL), NULL);
434 Cache_wb(VirtQueue_module->traceBufPtr, SysMin_bufSize, Cache_Type_ALL,
435 FALSE);
436 }