1 /*-\r
2 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>\r
3 * All rights reserved.\r
4 *\r
5 * Redistribution and use in source and binary forms, with or without\r
6 * modification, are permitted provided that the following conditions\r
7 * are met:\r
8 * 1. Redistributions of source code must retain the above copyright\r
9 * notice unmodified, this list of conditions, and the following\r
10 * disclaimer.\r
11 * 2. Redistributions in binary form must reproduce the above copyright\r
12 * notice, this list of conditions and the following disclaimer in the\r
13 * documentation and/or other materials provided with the distribution.\r
14 *\r
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\r
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\r
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\r
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\r
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\r
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\r
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
25 */\r
26 \r
27 #include "virtqueue.h"\r
28 \r
29 /* Prototype for internal functions. */\r
30 static void vq_ring_init(struct virtqueue *);\r
31 static void vq_ring_update_avail(struct virtqueue *, uint16_t);\r
32 static uint16_t vq_ring_add_buffer(struct virtqueue *, struct vring_desc *,\r
33 uint16_t, struct llist *, int, int);\r
34 static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);\r
35 static void vq_ring_free_chain(struct virtqueue *, uint16_t);\r
36 static int vq_ring_must_notify_host(struct virtqueue *vq);\r
37 static void vq_ring_notify_host(struct virtqueue *vq);\r
38 static int virtqueue_nused(struct virtqueue *vq);\r
39 \r
40 /**\r
41 * virtqueue_create - Creates new VirtIO queue\r
42 *\r
43 * @param device - Pointer to VirtIO device\r
44 * @param id - VirtIO queue ID , must be unique\r
45 * @param name - Name of VirtIO queue\r
46 * @param ring - Pointer to vring_alloc_info control block\r
47 * @param callback - Pointer to callback function, invoked\r
48 * when message is available on VirtIO queue\r
49 * @param notify - Pointer to notify function, used to notify\r
50 * other side that there is job available for it\r
51 * @param v_queue - Created VirtIO queue.\r
52 *\r
53 * @return - Function status\r
54 */\r
55 int virtqueue_create(struct virtio_device *virt_dev, unsigned short id, char *name,\r
56 struct vring_alloc_info *ring, void (*callback)(struct virtqueue *vq),\r
57 void (*notify)(struct virtqueue *vq),\r
58 struct virtqueue **v_queue) {\r
59 \r
60 struct virtqueue *vq = VQ_NULL;\r
61 int status = VQUEUE_SUCCESS;\r
62 uint32_t vq_size = 0;\r
63 \r
64 VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);\r
65 VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);\r
66 VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,\r
67 ERROR_VRING_ALIGN);\r
68 \r
69 //TODO : Error check for indirect buffer addition\r
70 \r
71 if (status == VQUEUE_SUCCESS) {\r
72 \r
73 vq_size = sizeof(struct virtqueue)\r
74 + (ring->num_descs) * sizeof(struct vq_desc_extra);\r
75 vq = (struct virtqueue *) env_allocate_memory(vq_size);\r
76 \r
77 if (vq == VQ_NULL) {\r
78 return (ERROR_NO_MEM);\r
79 }\r
80 \r
81 env_memset(vq, 0x00, vq_size);\r
82 \r
83 vq->vq_dev = virt_dev;\r
84 env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);\r
85 vq->vq_queue_index = id;\r
86 vq->vq_alignment = ring->align;\r
87 vq->vq_nentries = ring->num_descs;\r
88 vq->vq_free_cnt = vq->vq_nentries;\r
89 vq->callback = callback;\r
90 vq->notify = notify;\r
91 \r
92 //TODO : Whether we want to support indirect addition or not.\r
93 vq->vq_ring_size = vring_size(ring->num_descs, ring->align);\r
94 vq->vq_ring_mem = (void *) ring->phy_addr;\r
95 \r
96 /* Initialize vring control block in virtqueue. */\r
97 vq_ring_init(vq);\r
98 \r
99 /* Disable callbacks - will be enabled by the application\r
100 * once initialization is completed.\r
101 */\r
102 virtqueue_disable_cb(vq);\r
103 \r
104 *v_queue = vq;\r
105 \r
106 //TODO : Need to add cleanup in case of error used with the indirect buffer addition\r
107 //TODO: do we need to save the new queue in db based on its id\r
108 }\r
109 \r
110 return (status);\r
111 }\r
112 \r
/**
 * virtqueue_add_buffer() - Enqueues new buffer in vring for consumption
 *                          by other side. Readable buffers are always
 *                          inserted before writable buffers
 *
 * @param vq       - Pointer to VirtIO queue control block.
 * @param buffer   - Pointer to buffer list
 * @param readable - Number of readable buffers
 * @param writable - Number of writable buffers
 * @param cookie   - Pointer to hold call back data
 *
 * @return         - Function status
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct llist *buffer,
		int readable, int writable, void *cookie) {

	struct vq_desc_extra *dxp = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint16_t head_idx;
	uint16_t idx;
	int needed;

	/* Total descriptors required: readable entries precede writable ones. */
	needed = readable + writable;

	/* The parameter checks record a failure in 'status'; execution then
	 * reaches the status test below rather than returning immediately. */
	VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

	//TODO: Add parameters validation for indirect buffer addition

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {

		//TODO : Indirect buffer addition support

		VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

		/* Claim the head of the free descriptor list and remember the
		 * caller's cookie so it can be handed back on completion. */
		head_idx = vq->vq_desc_head_idx;
		VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
		dxp = &vq->vq_descx[head_idx];

		VQASSERT(vq, (dxp->cookie == VQ_NULL), "cookie already exists for index");

		dxp->cookie = cookie;
		dxp->ndescs = needed;

		/* Enqueue buffer onto the ring. */
		idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, buffer,
				readable, writable);

		/* The returned index is the new head of the free list. */
		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt -= needed;

		if (vq->vq_free_cnt == 0)
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		else
			VQ_RING_ASSERT_VALID_IDX(vq, idx);

		/*
		 * Update vring_avail control block fields so that other
		 * side can get buffer using it.
		 */
		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return (status);
}
183 \r
/**
 * virtqueue_add_single_buffer - Enqueues single buffer in vring
 *
 * @param vq          - Pointer to VirtIO queue control block
 * @param cookie      - Pointer to hold call back data
 * @param buffer_addr - Address of buffer
 * @param len         - Length of buffer
 * @param writable    - If buffer writable
 * @param has_next    - If buffers for subsequent call are
 *                      to be chained
 *
 * @return            - Function status
 */
int virtqueue_add_single_buffer(struct virtqueue *vq, void *cookie,
		void *buffer_addr, uint_t len, int writable, boolean has_next) {

	struct vq_desc_extra *dxp;
	struct vring_desc *dp;
	uint16_t head_idx;
	uint16_t idx;
	int status = VQUEUE_SUCCESS;

	/* Parameter checks record a failure in 'status'; the work below is
	 * then skipped by the status test. */
	VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {

		VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

		/* Take the descriptor at the head of the free list. */
		head_idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[head_idx];

		dxp->cookie = cookie;
		dxp->ndescs = 1;
		idx = head_idx;

		/* Fill the descriptor; 'next' is captured before the flags are
		 * written so the free-list successor is preserved. */
		dp = &vq->vq_ring.desc[idx];
		dp->addr = env_map_vatopa(buffer_addr);
		dp->len = len;
		dp->flags = 0;
		idx = dp->next;

		if (has_next)
			dp->flags |= VRING_DESC_F_NEXT;
		if (writable)
			dp->flags |= VRING_DESC_F_WRITE;

		/* Advance the free list past the consumed descriptor. */
		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt--;

		if (vq->vq_free_cnt == 0)
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		else
			VQ_RING_ASSERT_VALID_IDX(vq, idx);

		/* Publish the chain head in the avail ring. */
		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return (status);
}
248 \r
249 /**\r
250 * virtqueue_get_buffer - Returns used buffers from VirtIO queue\r
251 *\r
252 * @param vq - Pointer to VirtIO queue control block\r
253 * @param len - Length of conumed buffer\r
254 *\r
255 * @return - Pointer to used buffer\r
256 */\r
257 void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len) {\r
258 struct vring_used_elem *uep;\r
259 void *cookie;\r
260 uint16_t used_idx, desc_idx;\r
261 \r
262 if ((vq == VQ_NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx))\r
263 return (VQ_NULL);\r
264 \r
265 VQUEUE_BUSY(vq);\r
266 \r
267 used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);\r
268 uep = &vq->vq_ring.used->ring[used_idx];\r
269 \r
270 env_rmb();\r
271 \r
272 desc_idx = (uint16_t) uep->id;\r
273 if (len != VQ_NULL)\r
274 *len = uep->len;\r
275 \r
276 vq_ring_free_chain(vq, desc_idx);\r
277 \r
278 cookie = vq->vq_descx[desc_idx].cookie;\r
279 vq->vq_descx[desc_idx].cookie = VQ_NULL;\r
280 \r
281 VQUEUE_IDLE(vq);\r
282 \r
283 return (cookie);\r
284 }\r
285 \r
286 /**\r
287 * virtqueue_free - Frees VirtIO queue resources\r
288 *\r
289 * @param vq - Pointer to VirtIO queue control block\r
290 *\r
291 */\r
292 void virtqueue_free(struct virtqueue *vq) {\r
293 \r
294 if (vq != VQ_NULL) {\r
295 \r
296 if (vq->vq_free_cnt != vq->vq_nentries) {\r
297 env_print("\r\nWARNING %s: freeing non-empty virtqueue\r\n", vq->vq_name);\r
298 }\r
299 \r
300 //TODO : Need to free indirect buffers here\r
301 \r
302 if (vq->vq_ring_mem != VQ_NULL) {\r
303 vq->vq_ring_size = 0;\r
304 vq->vq_ring_mem = VQ_NULL;\r
305 }\r
306 \r
307 env_free_memory(vq);\r
308 }\r
309 }\r
310 \r
311 /**\r
312 * virtqueue_get_available_buffer - Returns buffer available for use in the\r
313 * VirtIO queue\r
314 *\r
315 * @param vq - Pointer to VirtIO queue control block\r
316 * @param avail_idx - Pointer to index used in vring desc table\r
317 * @param len - Length of buffer\r
318 *\r
319 * @return - Pointer to available buffer\r
320 */\r
321 void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,\r
322 uint32_t *len) {\r
323 \r
324 uint16_t head_idx = 0;\r
325 void *buffer;\r
326 \r
327 if (vq->vq_available_idx == vq->vq_ring.avail->idx) {\r
328 return (VQ_NULL);\r
329 }\r
330 \r
331 VQUEUE_BUSY(vq);\r
332 \r
333 head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);\r
334 *avail_idx = vq->vq_ring.avail->ring[head_idx];\r
335 \r
336 env_rmb();\r
337 \r
338 buffer = env_map_patova(vq->vq_ring.desc[*avail_idx].addr);\r
339 *len = vq->vq_ring.desc[*avail_idx].len;\r
340 \r
341 VQUEUE_IDLE(vq);\r
342 \r
343 return (buffer);\r
344 }\r
345 \r
346 /**\r
347 * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue\r
348 *\r
349 * @param vq - Pointer to VirtIO queue control block\r
350 * @param head_idx - Index of vring desc containing used buffer\r
351 * @param len - Length of buffer\r
352 *\r
353 * @return - Function status\r
354 */\r
355 int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,\r
356 uint_t len) {\r
357 \r
358 struct vring_used_elem *used_desc = VQ_NULL;\r
359 uint16_t used_idx;\r
360 \r
361 if ((head_idx > vq->vq_nentries) || (head_idx < 0)) {\r
362 return (ERROR_VRING_NO_BUFF);\r
363 }\r
364 \r
365 VQUEUE_BUSY(vq);\r
366 \r
367 used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1);\r
368 used_desc = &(vq->vq_ring.used->ring[used_idx]);\r
369 used_desc->id = head_idx;\r
370 used_desc->len = len;\r
371 \r
372 env_wmb();\r
373 \r
374 vq->vq_ring.used->idx++;\r
375 \r
376 VQUEUE_IDLE(vq);\r
377 \r
378 return (VQUEUE_SUCCESS);\r
379 }\r
380 \r
/**
 * virtqueue_enable_cb - Enables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return   - Non-zero if used entries are already pending and the caller
 *             should process them immediately
 */
int virtqueue_enable_cb(struct virtqueue *vq) {
	/* A threshold of zero re-arms interrupts for the very next used entry. */
	return vq_ring_enable_interrupt(vq, 0);
}
392 \r
/**
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_disable_cb(struct virtqueue *vq) {

	VQUEUE_BUSY(vq);

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		/* Event-index mode: park the used-event index far behind the
		 * consumer index so no event fires. */
		vring_used_event(&vq->vq_ring)= vq->vq_used_cons_idx - vq->vq_nentries
				- 1;
	} else {
		/* Legacy mode: request interrupt suppression via the avail flags. */
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	}

	VQUEUE_IDLE(vq);
}
412 \r
413 /**\r
414 * virtqueue_kick - Notifies other side that there is buffer available for it.\r
415 *\r
416 * @param vq - Pointer to VirtIO queue control block\r
417 */\r
418 void virtqueue_kick(struct virtqueue *vq) {\r
419 \r
420 VQUEUE_BUSY(vq);\r
421 \r
422 /* Ensure updated avail->idx is visible to host. */\r
423 env_mb();\r
424 \r
425 if (vq_ring_must_notify_host(vq))\r
426 vq_ring_notify_host(vq);\r
427 \r
428 vq->vq_queued_cnt = 0;\r
429 \r
430 VQUEUE_IDLE(vq);\r
431 }\r
432 \r
433 /**\r
434 * virtqueue_dump Dumps important virtqueue fields , use for debugging purposes\r
435 *\r
436 * @param vq - Pointer to VirtIO queue control block\r
437 */\r
438 void virtqueue_dump(struct virtqueue *vq) {\r
439 \r
440 if (vq == VQ_NULL)\r
441 return;\r
442 \r
443 env_print("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "\r
444 "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "\r
445 "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n", vq->vq_name,\r
446 vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),\r
447 vq->vq_queued_cnt, vq->vq_desc_head_idx, vq->vq_ring.avail->idx,\r
448 vq->vq_used_cons_idx, vq->vq_ring.used->idx,\r
449 vq->vq_ring.avail->flags, vq->vq_ring.used->flags);\r
450 }\r
451 \r
452 /**\r
453 * virtqueue_get_desc_size - Returns vring descriptor size\r
454 *\r
455 * @param vq - Pointer to VirtIO queue control block\r
456 *\r
457 * @return - Descriptor length\r
458 */\r
459 uint32_t virtqueue_get_desc_size(struct virtqueue *vq) {\r
460 uint16_t head_idx = 0;\r
461 uint16_t avail_idx = 0;\r
462 uint32_t len = 0;\r
463 \r
464 if (vq->vq_available_idx == vq->vq_ring.avail->idx) {\r
465 return (VQ_NULL);\r
466 }\r
467 \r
468 VQUEUE_BUSY(vq);\r
469 \r
470 head_idx = vq->vq_available_idx & (vq->vq_nentries - 1);\r
471 avail_idx = vq->vq_ring.avail->ring[head_idx];\r
472 len = vq->vq_ring.desc[avail_idx].len;\r
473 \r
474 VQUEUE_IDLE(vq);\r
475 \r
476 return (len);\r
477 }\r
478 /**************************************************************************\r
479 * Helper Functions *\r
480 **************************************************************************/\r
481 \r
/**
 * vq_ring_add_buffer
 *
 * Fills one descriptor per element of 'buffer', linking entries with
 * VRING_DESC_F_NEXT and marking the trailing 'writable' entries with
 * VRING_DESC_F_WRITE. Returns the index following the last descriptor
 * used, i.e. the new free-list head.
 */
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
		struct vring_desc *desc, uint16_t head_idx, struct llist *buffer,
		int readable, int writable) {

	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	/* Walk the free descriptor chain and the buffer list in lock step.
	 * The increment's 'idx = dp->next' follows the free chain set up by
	 * vq_ring_init; 'dp' is assigned in the body before it is read. */
	for (i = 0, idx = head_idx; (i < needed && buffer != VQ_NULL);
			i++, idx = dp->next, buffer = buffer->next) {

		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
				"premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = env_map_vatopa(buffer->data);
		dp->len = buffer->attr;
		dp->flags = 0;

		/* Every descriptor except the last links to its successor. */
		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;

		/* Readable buffers are inserted into vring before the writable buffers.*/
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}
518 \r
/**
 * vq_ring_free_chain
 *
 * Returns the descriptor chain starting at 'desc_idx' to the free list and
 * credits vq_free_cnt with the descriptor count recorded when the chain
 * was enqueued.
 */
static void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) {
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	/* Credit the whole chain back at once, then count descriptors off
	 * while walking it so the bookkeeping can be asserted below. */
	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		/* Follow NEXT links until 'dp' rests on the chain's tail. */
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, (dxp->ndescs == 0),
			"failed to free entire desc chain, remaining");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
557 \r
558 /**\r
559 *\r
560 * vq_ring_init\r
561 *\r
562 */\r
563 static void vq_ring_init(struct virtqueue *vq) {\r
564 struct vring *vr;\r
565 unsigned char *ring_mem;\r
566 int i, size;\r
567 \r
568 ring_mem = vq->vq_ring_mem;\r
569 size = vq->vq_nentries;\r
570 vr = &vq->vq_ring;\r
571 \r
572 vring_init(vr, size, ring_mem, vq->vq_alignment);\r
573 \r
574 for (i = 0; i < size - 1; i++)\r
575 vr->desc[i].next = i + 1;\r
576 vr->desc[i].next = VQ_RING_DESC_CHAIN_END;\r
577 }\r
578 \r
579 /**\r
580 *\r
581 * vq_ring_update_avail\r
582 *\r
583 */\r
584 static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx) {\r
585 uint16_t avail_idx;\r
586 \r
587 /*\r
588 * Place the head of the descriptor chain into the next slot and make\r
589 * it usable to the host. The chain is made available now rather than\r
590 * deferring to virtqueue_notify() in the hopes that if the host is\r
591 * currently running on another CPU, we can keep it processing the new\r
592 * descriptor.\r
593 */\r
594 avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);\r
595 vq->vq_ring.avail->ring[avail_idx] = desc_idx;\r
596 \r
597 env_wmb();\r
598 \r
599 vq->vq_ring.avail->idx++;\r
600 \r
601 /* Keep pending count until virtqueue_notify(). */\r
602 vq->vq_queued_cnt++;\r
603 }\r
604 \r
/**
 * vq_ring_enable_interrupt
 *
 * Re-enables interrupt/callback generation and reports whether more than
 * 'ndesc' used entries are already pending, so the caller knows to process
 * them instead of waiting for an interrupt.
 */
static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc) {

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		/* Event-index mode: request an event after 'ndesc' more entries. */
		vring_used_event(&vq->vq_ring)= vq->vq_used_cons_idx + ndesc;
	} else {
		/* Legacy mode: clear the interrupt-suppression flag. */
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	}

	env_mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc) {
		return (1);
	}

	return (0);
}
635 \r
636 /**\r
637 *\r
638 * virtqueue_interrupt\r
639 *\r
640 */\r
641 void virtqueue_notification(struct virtqueue *vq) {\r
642 \r
643 if (vq->callback != VQ_NULL)\r
644 vq->callback(vq);\r
645 }\r
646 \r
647 /**\r
648 *\r
649 * vq_ring_must_notify_host\r
650 *\r
651 */\r
652 static int vq_ring_must_notify_host(struct virtqueue *vq) {\r
653 uint16_t new_idx, prev_idx, event_idx;\r
654 \r
655 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {\r
656 new_idx = vq->vq_ring.avail->idx;\r
657 prev_idx = new_idx - vq->vq_queued_cnt;\r
658 event_idx = vring_avail_event(&vq->vq_ring);\r
659 \r
660 return (vring_need_event(event_idx, new_idx, prev_idx) != 0);\r
661 }\r
662 \r
663 return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);\r
664 }\r
665 \r
666 /**\r
667 *\r
668 * vq_ring_notify_host\r
669 *\r
670 */\r
671 static void vq_ring_notify_host(struct virtqueue *vq) {\r
672 \r
673 if (vq->notify != VQ_NULL)\r
674 vq->notify(vq);\r
675 }\r
676 \r
677 /**\r
678 *\r
679 * virtqueue_nused\r
680 *\r
681 */\r
682 static int virtqueue_nused(struct virtqueue *vq) {\r
683 uint16_t used_idx, nused;\r
684 \r
685 used_idx = vq->vq_ring.used->idx;\r
686 \r
687 nused = (uint16_t) (used_idx - vq->vq_used_cons_idx);\r
688 VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");\r
689 \r
690 return (nused);\r
691 }\r