1 /*-
2 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
27 #include "openamp/virtqueue.h"
29 /* Prototype for internal functions. */
30 static void vq_ring_init(struct virtqueue *);
31 static void vq_ring_update_avail(struct virtqueue *, uint16_t);
32 static uint16_t vq_ring_add_buffer(struct virtqueue *, struct vring_desc *,
33 uint16_t, struct llist *, int, int);
34 static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
35 static void vq_ring_free_chain(struct virtqueue *, uint16_t);
36 static int vq_ring_must_notify_host(struct virtqueue *vq);
37 static void vq_ring_notify_host(struct virtqueue *vq);
38 static int virtqueue_nused(struct virtqueue *vq);
/**
 * virtqueue_create - Creates new VirtIO queue
 *
 * @param virt_dev - Pointer to VirtIO device
 * @param id - VirtIO queue ID , must be unique
 * @param name - Name of VirtIO queue
 * @param ring - Pointer to vring_alloc_info control block
 * @param callback - Pointer to callback function, invoked
 *                   when message is available on VirtIO queue
 * @param notify - Pointer to notify function, used to notify
 *                 other side that there is job available for it
 * @param v_queue - Created VirtIO queue.
 *
 * @return - Function status
 */
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
		     char *name, struct vring_alloc_info *ring,
		     void (*callback) (struct virtqueue * vq),
		     void (*notify) (struct virtqueue * vq),
		     struct virtqueue **v_queue)
{

	struct virtqueue *vq = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint32_t vq_size = 0;

	VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
	/* num_descs must be a power of two: index arithmetic throughout this
	 * file relies on masking with (num_descs - 1). */
	VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
		     ERROR_VRING_ALIGN);

	//TODO : Error check for indirect buffer addition

	if (status == VQUEUE_SUCCESS) {

		/* One vq_desc_extra bookkeeping slot per ring descriptor is
		 * carved out at the tail of the virtqueue structure. */
		vq_size = sizeof(struct virtqueue)
		    + (ring->num_descs) * sizeof(struct vq_desc_extra);
		vq = (struct virtqueue *)env_allocate_memory(vq_size);

		if (vq == VQ_NULL) {
			return (ERROR_NO_MEM);
		}

		env_memset(vq, 0x00, vq_size);

		vq->vq_dev = virt_dev;
		env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
		vq->vq_queue_index = id;
		vq->vq_alignment = ring->align;
		vq->vq_nentries = ring->num_descs;
		/* All descriptors start out free. */
		vq->vq_free_cnt = vq->vq_nentries;
		vq->callback = callback;
		vq->notify = notify;

		//TODO : Whether we want to support indirect addition or not.
		vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
		vq->vq_ring_mem = (void *)ring->phy_addr;

		/* Initialize vring control block in virtqueue. */
		vq_ring_init(vq);

		/* Disable callbacks - will be enabled by the application
		 * once initialization is completed.
		 */
		virtqueue_disable_cb(vq);

		*v_queue = vq;

		//TODO : Need to add cleanup in case of error used with the indirect buffer addition
		//TODO: do we need to save the new queue in db based on its id
	}

	return (status);
}
/**
 * virtqueue_add_buffer() - Enqueues new buffer in vring for consumption
 *                          by other side. Readable buffers are always
 *                          inserted before writable buffers
 *
 * @param vq - Pointer to VirtIO queue control block.
 * @param buffer - Pointer to buffer list
 * @param readable - Number of readable buffers
 * @param writable - Number of writable buffers
 * @param cookie - Pointer to hold call back data
 *
 * @return - Function status
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct llist *buffer,
			 int readable, int writable, void *cookie)
{

	struct vq_desc_extra *dxp = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint16_t head_idx;
	uint16_t idx;
	int needed;

	/* Total descriptors required for this chain. */
	needed = readable + writable;

	VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

	//TODO: Add parameters validation for indirect buffer addition

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {

		//TODO : Indirect buffer addition support

		VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

		/* Take descriptors from the head of the free chain; the
		 * cookie is stored so virtqueue_get_buffer() can hand it
		 * back once the other side has consumed the buffers. */
		head_idx = vq->vq_desc_head_idx;
		VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
		dxp = &vq->vq_descx[head_idx];

		VQASSERT(vq, (dxp->cookie == VQ_NULL),
			 "cookie already exists for index");

		dxp->cookie = cookie;
		dxp->ndescs = needed;

		/* Enqueue buffer onto the ring. */
		idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, buffer,
					 readable, writable);

		/* vq_ring_add_buffer() returns the index just past the end
		 * of the chain; it becomes the new free-list head. */
		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt -= needed;

		if (vq->vq_free_cnt == 0)
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		else
			VQ_RING_ASSERT_VALID_IDX(vq, idx);

		/*
		 * Update vring_avail control block fields so that other
		 * side can get buffer using it.
		 */
		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return (status);
}
/**
 * virtqueue_add_single_buffer - Enqueues single buffer in vring
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param cookie - Pointer to hold call back data
 * @param buffer_addr - Address of buffer
 * @param len - Length of buffer
 * @param writable - If buffer writable
 * @param has_next - If buffers for subsequent call are
 *                   to be chained
 *
 * @return - Function status
 */
int virtqueue_add_single_buffer(struct virtqueue *vq, void *cookie,
				void *buffer_addr, uint32_t len, int writable,
				boolean has_next)
{

	struct vq_desc_extra *dxp;
	struct vring_desc *dp;
	uint16_t head_idx;
	uint16_t idx;
	int status = VQUEUE_SUCCESS;

	VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {

		VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

		/* Claim the descriptor at the head of the free chain. */
		head_idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[head_idx];

		dxp->cookie = cookie;
		dxp->ndescs = 1;
		idx = head_idx;

		/* Fill in the descriptor; addr holds the translated
		 * (physical) address of the buffer. */
		dp = &vq->vq_ring.desc[idx];
		dp->addr = env_map_vatopa(buffer_addr);
		dp->len = len;
		dp->flags = 0;
		/* Free-list successor; becomes the new head below. */
		idx = dp->next;

		if (has_next)
			dp->flags |= VRING_DESC_F_NEXT;
		if (writable)
			dp->flags |= VRING_DESC_F_WRITE;

		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt--;

		if (vq->vq_free_cnt == 0)
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		else
			VQ_RING_ASSERT_VALID_IDX(vq, idx);

		/* Publish the chain head in the avail ring. */
		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return (status);
}
/**
 * virtqueue_get_buffer - Returns used buffers from VirtIO queue
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param len - Length of consumed buffer
 *
 * @return - Pointer to used buffer
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t * len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	/* Nothing to return when no new entries are in the used ring. */
	if ((vq == VQ_NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx))
		return (VQ_NULL);

	VQUEUE_BUSY(vq);

	/* Masking works because the ring size is a power of two. */
	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	/* Read the used element only after observing the new used->idx. */
	env_rmb();

	desc_idx = (uint16_t) uep->id;
	if (len != VQ_NULL)
		*len = uep->len;

	/* Return the descriptor chain to the free list. */
	vq_ring_free_chain(vq, desc_idx);

	/* Hand back the cookie stored at enqueue time and clear the slot. */
	cookie = vq->vq_descx[desc_idx].cookie;
	vq->vq_descx[desc_idx].cookie = VQ_NULL;

	VQUEUE_IDLE(vq);

	return (cookie);
}
293 /**
294 * virtqueue_free - Frees VirtIO queue resources
295 *
296 * @param vq - Pointer to VirtIO queue control block
297 *
298 */
299 void virtqueue_free(struct virtqueue *vq)
300 {
302 if (vq != VQ_NULL) {
304 if (vq->vq_free_cnt != vq->vq_nentries) {
305 env_print
306 ("\r\nWARNING %s: freeing non-empty virtqueue\r\n",
307 vq->vq_name);
308 }
309 //TODO : Need to free indirect buffers here
311 if (vq->vq_ring_mem != VQ_NULL) {
312 vq->vq_ring_size = 0;
313 vq->vq_ring_mem = VQ_NULL;
314 }
316 env_free_memory(vq);
317 }
318 }
320 /**
321 * virtqueue_get_available_buffer - Returns buffer available for use in the
322 * VirtIO queue
323 *
324 * @param vq - Pointer to VirtIO queue control block
325 * @param avail_idx - Pointer to index used in vring desc table
326 * @param len - Length of buffer
327 *
328 * @return - Pointer to available buffer
329 */
330 void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t * avail_idx,
331 uint32_t * len)
332 {
334 uint16_t head_idx = 0;
335 void *buffer;
337 if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
338 return (VQ_NULL);
339 }
341 VQUEUE_BUSY(vq);
343 head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);
344 *avail_idx = vq->vq_ring.avail->ring[head_idx];
346 env_rmb();
348 buffer = env_map_patova(vq->vq_ring.desc[*avail_idx].addr);
349 *len = vq->vq_ring.desc[*avail_idx].len;
351 VQUEUE_IDLE(vq);
353 return (buffer);
354 }
356 /**
357 * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue
358 *
359 * @param vq - Pointer to VirtIO queue control block
360 * @param head_idx - Index of vring desc containing used buffer
361 * @param len - Length of buffer
362 *
363 * @return - Function status
364 */
365 int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
366 uint32_t len)
367 {
369 struct vring_used_elem *used_desc = VQ_NULL;
370 uint16_t used_idx;
372 if ((head_idx > vq->vq_nentries) || (head_idx < 0)) {
373 return (ERROR_VRING_NO_BUFF);
374 }
376 VQUEUE_BUSY(vq);
378 used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1);
379 used_desc = &(vq->vq_ring.used->ring[used_idx]);
380 used_desc->id = head_idx;
381 used_desc->len = len;
383 env_wmb();
385 vq->vq_ring.used->idx++;
387 VQUEUE_IDLE(vq);
389 return (VQUEUE_SUCCESS);
390 }
/**
 * virtqueue_enable_cb - Enables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Non-zero if entries are already pending and the caller
 *           should process them
 */
int virtqueue_enable_cb(struct virtqueue *vq)
{
	/* Re-arm interrupts with no extra descriptor threshold. */
	int need_processing = vq_ring_enable_interrupt(vq, 0);

	return need_processing;
}
/**
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_disable_cb(struct virtqueue *vq)
{

	VQUEUE_BUSY(vq);

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		/* With EVENT_IDX, interrupts are suppressed by pushing the
		 * event index far behind the consumer index so the
		 * notification threshold is never reached. */
		vring_used_event(&vq->vq_ring) =
		    vq->vq_used_cons_idx - vq->vq_nentries - 1;
	} else {
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	}

	VQUEUE_IDLE(vq);
}
/**
 * virtqueue_kick - Notifies other side that there is buffer available for it.
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_kick(struct virtqueue *vq)
{

	VQUEUE_BUSY(vq);

	/* Ensure updated avail->idx is visible to host. */
	env_mb();

	/* Only notify when the other side has not suppressed notifications. */
	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);

	/* All queued chains have now been announced. */
	vq->vq_queued_cnt = 0;

	VQUEUE_IDLE(vq);
}
/**
 * virtqueue_dump - Dumps important virtqueue fields , use for debugging purposes
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_dump(struct virtqueue *vq)
{

	if (vq == VQ_NULL)
		return;

	/* Snapshot of all index/flag state shared with the other side. */
	env_print("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
		  "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
		  "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
		  vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
		  virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
		  vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
		  vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
		  vq->vq_ring.used->flags);
}
468 /**
469 * virtqueue_get_desc_size - Returns vring descriptor size
470 *
471 * @param vq - Pointer to VirtIO queue control block
472 *
473 * @return - Descriptor length
474 */
475 uint32_t virtqueue_get_desc_size(struct virtqueue * vq)
476 {
477 uint16_t head_idx = 0;
478 uint16_t avail_idx = 0;
479 uint32_t len = 0;
481 if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
482 return (VQ_NULL);
483 }
485 VQUEUE_BUSY(vq);
487 head_idx = vq->vq_available_idx & (vq->vq_nentries - 1);
488 avail_idx = vq->vq_ring.avail->ring[head_idx];
489 len = vq->vq_ring.desc[avail_idx].len;
491 VQUEUE_IDLE(vq);
493 return (len);
494 }
496 /**************************************************************************
497 * Helper Functions *
498 **************************************************************************/
/**
 *
 * vq_ring_add_buffer
 *
 * Walks the free descriptor chain starting at head_idx and fills one
 * descriptor per buffer-list node. Returns the index following the last
 * descriptor written (the new free-list head).
 */
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
				   struct vring_desc *desc, uint16_t head_idx,
				   struct llist *buffer, int readable,
				   int writable)
{

	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	/* NOTE: the loop increment advances idx to dp->next, so on exit idx
	 * points one past the last descriptor that was filled. */
	for (i = 0, idx = head_idx; (i < needed && buffer != VQ_NULL);
	     i++, idx = dp->next, buffer = buffer->next) {

		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
			 "premature end of free desc chain");

		/* Fill the descriptor from the list node; attr carries the
		 * buffer length, addr must be physical. */
		dp = &desc[idx];
		dp->addr = env_map_vatopa(buffer->data);
		dp->len = buffer->attr;
		dp->flags = 0;

		/* Chain all but the final descriptor. */
		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;

		/* Readable buffers are inserted into vring before the writable buffers. */
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}
/**
 *
 * vq_ring_free_chain
 *
 * Returns the descriptor chain starting at desc_idx to the free list and
 * credits the freed descriptors back to vq_free_cnt.
 */
static void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	/* Credit the whole chain back at once; ndescs was recorded when the
	 * chain was enqueued. */
	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	/* Walk to the tail of the chain, counting ndescs down so we can
	 * verify the recorded length matches the linked descriptors. */
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, (dxp->ndescs == 0),
		 "failed to free entire desc chain, remaining");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
579 /**
580 *
581 * vq_ring_init
582 *
583 */
584 static void vq_ring_init(struct virtqueue *vq)
585 {
586 struct vring *vr;
587 unsigned char *ring_mem;
588 int i, size;
590 ring_mem = vq->vq_ring_mem;
591 size = vq->vq_nentries;
592 vr = &vq->vq_ring;
594 vring_init(vr, size, ring_mem, vq->vq_alignment);
596 for (i = 0; i < size - 1; i++)
597 vr->desc[i].next = i + 1;
598 vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
599 }
/**
 *
 * vq_ring_update_avail
 *
 * Publishes the chain starting at desc_idx in the avail ring so the
 * other side can pick it up.
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	/* The ring entry must be visible before the index update. */
	env_wmb();

	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}
628 /**
629 *
630 * vq_ring_enable_interrupt
631 *
632 */
633 static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
634 {
636 /*
637 * Enable interrupts, making sure we get the latest index of
638 * what's already been consumed.
639 */
640 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
641 vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
642 } else {
643 vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
644 }
646 env_mb();
648 /*
649 * Enough items may have already been consumed to meet our threshold
650 * since we last checked. Let our caller know so it processes the new
651 * entries.
652 */
653 if (virtqueue_nused(vq) > ndesc) {
654 return (1);
655 }
657 return (0);
658 }
660 /**
661 *
662 * virtqueue_interrupt
663 *
664 */
665 void virtqueue_notification(struct virtqueue *vq)
666 {
668 if (vq->callback != VQ_NULL)
669 vq->callback(vq);
670 }
/**
 *
 * vq_ring_must_notify_host
 *
 * Decides whether the host must be kicked for the chains queued since
 * the last notification. Returns non-zero when a kick is required.
 */
static int vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		/* EVENT_IDX: kick only if the host's requested event index
		 * falls inside the window of indices published since the
		 * last notify (vring_need_event handles the wrap math). */
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	/* Legacy scheme: kick unless the host suppressed notifications. */
	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
692 /**
693 *
694 * vq_ring_notify_host
695 *
696 */
697 static void vq_ring_notify_host(struct virtqueue *vq)
698 {
700 if (vq->notify != VQ_NULL)
701 vq->notify(vq);
702 }
704 /**
705 *
706 * virtqueue_nused
707 *
708 */
709 static int virtqueue_nused(struct virtqueue *vq)
710 {
711 uint16_t used_idx, nused;
713 used_idx = vq->vq_ring.used->idx;
715 nused = (uint16_t) (used_idx - vq->vq_used_cons_idx);
716 VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
718 return (nused);
719 }