/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include "openamp/virtqueue.h"
#include "metal/atomic.h"
#include "metal/dma.h"
#include "metal/io.h"
#include "metal/alloc.h"

/* Prototypes for internal functions. */
static void vq_ring_init(struct virtqueue *);
static void vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_add_buffer(struct virtqueue *, struct vring_desc *,
				   uint16_t, struct metal_sg *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);
static int vq_ring_must_notify_host(struct virtqueue *vq);
static void vq_ring_notify_host(struct virtqueue *vq);
static int virtqueue_nused(struct virtqueue *vq);

/**
 * virtqueue_create - Creates a new VirtIO queue
 *
 * @param virt_dev - Pointer to VirtIO device
 * @param id - VirtIO queue ID, must be unique
 * @param name - Name of VirtIO queue
 * @param ring - Pointer to vring_alloc_info control block
 * @param callback - Pointer to callback function, invoked
 *                   when a message is available on the VirtIO queue
 * @param notify - Pointer to notify function, used to notify
 *                 the other side that there is a job available for it
 * @param shm_io - Shared memory I/O region of the virtqueue
 * @param v_queue - Created VirtIO queue
 *
 * @return - Function status
 */
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
		     char *name, struct vring_alloc_info *ring,
		     void (*callback)(struct virtqueue *vq),
		     void (*notify)(struct virtqueue *vq),
		     struct metal_io_region *shm_io,
		     struct virtqueue **v_queue)
{

	struct virtqueue *vq = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint32_t vq_size = 0;

	VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
		     ERROR_VRING_ALIGN);

	/* TODO: Error check for indirect buffer addition. */

	if (status == VQUEUE_SUCCESS) {

		vq_size = sizeof(struct virtqueue)
		    + (ring->num_descs) * sizeof(struct vq_desc_extra);
		vq = (struct virtqueue *)metal_allocate_memory(vq_size);

		if (vq == VQ_NULL) {
			return (ERROR_NO_MEM);
		}

		memset(vq, 0x00, vq_size);

		vq->vq_dev = virt_dev;
		/* The trailing byte stays NUL from the memset above. */
		strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ - 1);
		vq->vq_queue_index = id;
		vq->vq_alignment = ring->align;
		vq->vq_nentries = ring->num_descs;
		vq->vq_free_cnt = vq->vq_nentries;
		vq->callback = callback;
		vq->notify = notify;
		vq->shm_io = shm_io;

		/* TODO: Whether we want to support indirect addition or not. */
		vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
		vq->vq_ring_mem = (void *)ring->vaddr;

		/* Initialize vring control block in virtqueue. */
		vq_ring_init(vq);

		/* Disable callbacks - will be enabled by the application
		 * once initialization is completed.
		 */
		virtqueue_disable_cb(vq);

		*v_queue = vq;

		/* TODO: Need to add cleanup in case of error when used with
		 * indirect buffer addition.
		 */
		/* TODO: Do we need to save the new queue in a db based on
		 * its id?
		 */
	}

	return (status);
}
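
/*
 * Usage sketch (hypothetical names, for illustration only): driver-side
 * setup, assuming vdev, ring_info, io, rx_callback and rx_notify come from
 * the device initialization code.
 *
 *	struct virtqueue *vq;
 *	int err;
 *
 *	err = virtqueue_create(vdev, 0, "rx_vq", &ring_info, rx_callback,
 *			       rx_notify, io, &vq);
 *	if (err != VQUEUE_SUCCESS)
 *		return err;
 *
 * Callbacks start out disabled; call virtqueue_enable_cb(vq) once the rest
 * of the initialization is complete.
 */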

/**
 * virtqueue_add_buffer() - Enqueues a new buffer in the vring for
 *                          consumption by the other side. Readable buffers
 *                          are always inserted before writable buffers.
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param sg - Pointer to buffer scatter/gather list
 * @param readable - Number of readable buffers
 * @param writable - Number of writable buffers
 * @param cookie - Pointer to hold callback data
 *
 * @return - Function status
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct metal_sg *sg,
			 int readable, int writable, void *cookie)
{

	struct vq_desc_extra *dxp = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint16_t head_idx;
	uint16_t idx;
	int needed;

	needed = readable + writable;

	VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

	/* TODO: Add parameter validation for indirect buffer addition. */

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {

		/* TODO: Indirect buffer addition support. */

		VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

		head_idx = vq->vq_desc_head_idx;
		VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
		dxp = &vq->vq_descx[head_idx];

		VQASSERT(vq, (dxp->cookie == VQ_NULL),
			 "cookie already exists for index");

		dxp->cookie = cookie;
		dxp->ndescs = needed;

		/* Enqueue buffer onto the ring. */
		idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, sg,
					 readable, writable);

		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt -= needed;

		if (vq->vq_free_cnt == 0) {
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		} else {
			VQ_RING_ASSERT_VALID_IDX(vq, idx);
		}

		/*
		 * Update the vring_avail control block fields so that the
		 * other side can get the buffer using it.
		 */
		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return (status);
}
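
/*
 * Usage sketch (hypothetical names): posting one readable buffer followed
 * by one writable buffer, assuming buf_io is the metal I/O region that
 * maps both buffers and req/resp point into that region:
 *
 *	struct metal_sg sg[2];
 *
 *	sg[0].virt = req;  sg[0].len = req_len;  sg[0].io = buf_io;
 *	sg[1].virt = resp; sg[1].len = resp_len; sg[1].io = buf_io;
 *
 *	if (virtqueue_add_buffer(vq, sg, 1, 1, req) == VQUEUE_SUCCESS)
 *		virtqueue_kick(vq);
 *
 * The cookie (here req) is handed back by virtqueue_get_buffer() once the
 * other side has consumed the chain.
 */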

/**
 * virtqueue_add_single_buffer - Enqueues a single buffer in the vring
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param cookie - Pointer to hold callback data
 * @param sg - metal scatter/gather struct element
 * @param writable - Whether the buffer is writable
 * @param has_next - Whether the buffers of the subsequent call are
 *                   to be chained to this one
 *
 * @return - Function status
 */
int virtqueue_add_single_buffer(struct virtqueue *vq, void *cookie,
				struct metal_sg *sg, int writable,
				boolean has_next)
{

	struct vq_desc_extra *dxp;
	struct vring_desc *dp;
	uint16_t head_idx;
	uint16_t idx;
	int status = VQUEUE_SUCCESS;

	VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {

		VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

		head_idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[head_idx];

		dxp->cookie = cookie;
		dxp->ndescs = 1;
		idx = head_idx;

		dp = &vq->vq_ring.desc[idx];
		dp->addr = metal_io_virt_to_phys(sg->io, sg->virt);
		dp->len = sg->len;
		dp->flags = 0;
		idx = dp->next;

		if (has_next)
			dp->flags |= VRING_DESC_F_NEXT;
		if (writable)
			dp->flags |= VRING_DESC_F_WRITE;

		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt--;

		if (vq->vq_free_cnt == 0) {
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		} else {
			VQ_RING_ASSERT_VALID_IDX(vq, idx);
		}

		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return (status);
}
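
/*
 * Usage sketch (hypothetical names): posting one standalone writable
 * buffer described by sg; pass a non-zero has_next only when the next call
 * is meant to continue the same descriptor chain:
 *
 *	if (virtqueue_add_single_buffer(vq, cookie, &sg, 1, 0) ==
 *	    VQUEUE_SUCCESS)
 *		virtqueue_kick(vq);
 */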

/**
 * virtqueue_get_buffer - Returns used buffers from the VirtIO queue
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param len - Length of the consumed buffer
 * @param idx - Index of the buffer
 *
 * @return - Pointer to used buffer
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if ((vq == VQ_NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx))
		return (VQ_NULL);

	VQUEUE_BUSY(vq);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	atomic_thread_fence(memory_order_seq_cst);

	desc_idx = (uint16_t)uep->id;
	if (len != VQ_NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	vq->vq_descx[desc_idx].cookie = VQ_NULL;

	if (idx != VQ_NULL)
		*idx = used_idx;
	VQUEUE_IDLE(vq);

	return (cookie);
}
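
/*
 * Usage sketch: draining all used buffers from the callback;
 * process_request() is a hypothetical application helper:
 *
 *	void *cookie;
 *	uint32_t len;
 *
 *	while ((cookie = virtqueue_get_buffer(vq, &len, VQ_NULL)) != VQ_NULL)
 *		process_request(cookie, len);
 */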

/**
 * virtqueue_get_buffer_length - Returns the length of the buffer in the
 *                               vring descriptor at the given index
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param idx - Index into the vring descriptor table
 *
 * @return - Buffer length
 */
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
	return vq->vq_ring.desc[idx].len;
}

/**
 * virtqueue_free - Frees VirtIO queue resources
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_free(struct virtqueue *vq)
{

	if (vq != VQ_NULL) {

		if (vq->vq_free_cnt != vq->vq_nentries) {
			openamp_print
			    ("\r\nWARNING %s: freeing non-empty virtqueue\r\n",
			     vq->vq_name);
		}
		/* TODO: Need to free indirect buffers here. */

		if (vq->vq_ring_mem != VQ_NULL) {
			vq->vq_ring_size = 0;
			vq->vq_ring_mem = VQ_NULL;
		}

		metal_free_memory(vq);
	}
}

/**
 * virtqueue_get_available_buffer - Returns the next buffer available for
 *                                  use in the VirtIO queue
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param avail_idx - Pointer to index used in vring desc table
 * @param len - Length of the buffer
 *
 * @return - Pointer to available buffer
 */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
				     uint32_t *len)
{

	uint16_t head_idx = 0;
	void *buffer;

	if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
		return (VQ_NULL);
	}

	VQUEUE_BUSY(vq);

	head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);
	*avail_idx = vq->vq_ring.avail->ring[head_idx];

	atomic_thread_fence(memory_order_seq_cst);

	buffer = metal_io_phys_to_virt(vq->shm_io,
				       vq->vq_ring.desc[*avail_idx].addr);
	*len = vq->vq_ring.desc[*avail_idx].len;

	VQUEUE_IDLE(vq);

	return (buffer);
}

/**
 * virtqueue_add_consumed_buffer - Returns a consumed buffer back to the
 *                                 VirtIO queue
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param head_idx - Index of the vring desc containing the used buffer
 * @param len - Length of the buffer
 *
 * @return - Function status
 */
int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
				  uint32_t len)
{

	struct vring_used_elem *used_desc = VQ_NULL;
	uint16_t used_idx;

	/* Valid descriptor indices are 0 .. vq_nentries - 1. */
	if (head_idx >= vq->vq_nentries) {
		return (ERROR_VRING_NO_BUFF);
	}

	VQUEUE_BUSY(vq);

	used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1);
	used_desc = &(vq->vq_ring.used->ring[used_idx]);
	used_desc->id = head_idx;
	used_desc->len = len;

	atomic_thread_fence(memory_order_seq_cst);

	vq->vq_ring.used->idx++;

	VQUEUE_IDLE(vq);

	return (VQUEUE_SUCCESS);
}
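
/*
 * Usage sketch: the consuming side typically pairs this call with
 * virtqueue_get_available_buffer(); handle_buffer() is a hypothetical
 * application handler:
 *
 *	uint16_t idx;
 *	uint32_t len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_available_buffer(vq, &idx, &len))
 *	       != VQ_NULL) {
 *		handle_buffer(buf, len);
 *		virtqueue_add_consumed_buffer(vq, idx, len);
 *	}
 *	virtqueue_kick(vq);
 */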

/**
 * virtqueue_enable_cb - Enables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Non-zero if used buffers are already pending and the caller
 *           should process them, 0 otherwise
 */
int virtqueue_enable_cb(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}
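
/*
 * A common pattern (sketch): drain the queue, re-enable callbacks, and
 * drain again whenever virtqueue_enable_cb() reports that more buffers
 * were consumed in the meantime; handle() is a hypothetical helper:
 *
 *	for (;;) {
 *		while ((cookie = virtqueue_get_buffer(vq, &len, VQ_NULL)))
 *			handle(cookie, len);
 *		if (!virtqueue_enable_cb(vq))
 *			break;
 *		virtqueue_disable_cb(vq);
 *	}
 */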

/**
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_disable_cb(struct virtqueue *vq)
{

	VQUEUE_BUSY(vq);

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) =
		    vq->vq_used_cons_idx - vq->vq_nentries - 1;
	} else {
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	}

	VQUEUE_IDLE(vq);
}

/**
 * virtqueue_kick - Notifies the other side that there are buffers
 *                  available for it
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_kick(struct virtqueue *vq)
{

	VQUEUE_BUSY(vq);

	/* Ensure updated avail->idx is visible to host. */
	atomic_thread_fence(memory_order_seq_cst);

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);

	vq->vq_queued_cnt = 0;

	VQUEUE_IDLE(vq);
}

/**
 * virtqueue_dump - Dumps important virtqueue fields; useful for debugging
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_dump(struct virtqueue *vq)
{

	if (vq == VQ_NULL)
		return;

	openamp_print("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
		      "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
		      "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
		      vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
		      virtqueue_nused(vq), vq->vq_queued_cnt,
		      vq->vq_desc_head_idx, vq->vq_ring.avail->idx,
		      vq->vq_used_cons_idx, vq->vq_ring.used->idx,
		      vq->vq_ring.avail->flags, vq->vq_ring.used->flags);
}

/**
 * virtqueue_get_desc_size - Returns the size of the buffer in the next
 *                           available vring descriptor
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Descriptor buffer length, or 0 if no buffer is available
 */
uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
{
	uint16_t head_idx = 0;
	uint16_t avail_idx = 0;
	uint32_t len = 0;

	if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
		return (0);
	}

	VQUEUE_BUSY(vq);

	head_idx = vq->vq_available_idx & (vq->vq_nentries - 1);
	avail_idx = vq->vq_ring.avail->ring[head_idx];
	len = vq->vq_ring.desc[avail_idx].len;

	VQUEUE_IDLE(vq);

	return (len);
}

/**************************************************************************
 *                           Helper Functions                             *
 **************************************************************************/

/**
 *
 * vq_ring_add_buffer
 *
 */
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
				   struct vring_desc *desc, uint16_t head_idx,
				   struct metal_sg *sg, int readable,
				   int writable)
{

	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	/* vq is only referenced by the VQASSERT macros. */
	(void)vq;

	needed = readable + writable;

	for (i = 0, idx = head_idx; i < needed; i++, idx = dp->next) {

		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
			 "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = metal_io_virt_to_phys(sg[i].io, sg[i].virt);
		dp->len = sg[i].len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;

		/*
		 * Readable buffers are inserted into the vring before the
		 * writable buffers.
		 */
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

/**
 *
 * vq_ring_free_chain
 *
 */
static void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0) {
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	}

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, (dxp->ndescs == 0),
		 "failed to free entire desc chain, remaining");

	/*
	 * We must append the existing free chain, if any, to the end of the
	 * newly freed chain. If the virtqueue was completely used, then the
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}

/**
 *
 * vq_ring_init
 *
 */
static void vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	unsigned char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	/* Chain all descriptors together into the initial free list. */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}
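
/*
 * For example, with size == 4 the descriptor table starts out as a single
 * free chain:
 *
 *	desc[0].next = 1, desc[1].next = 2, desc[2].next = 3,
 *	desc[3].next = VQ_RING_DESC_CHAIN_END
 *
 * vq_ring_add_buffer() consumes descriptors from the head of this chain
 * and vq_ring_free_chain() links them back in.
 */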

/**
 *
 * vq_ring_update_avail
 *
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	atomic_thread_fence(memory_order_seq_cst);

	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}
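
/*
 * Note that avail->idx is a free-running 16-bit counter; only accesses
 * into the ring itself are masked. For example, with 256 entries and
 * avail->idx == 300, the chain head lands in slot 300 & 255 == 44 and
 * avail->idx then advances to 301.
 */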

/**
 *
 * vq_ring_enable_interrupt
 *
 */
static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	} else {
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	}

	atomic_thread_fence(memory_order_seq_cst);

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc) {
		return (1);
	}

	return (0);
}

/**
 *
 * virtqueue_notification
 *
 */
void virtqueue_notification(struct virtqueue *vq)
{

	if (vq->callback != VQ_NULL)
		vq->callback(vq);
}

/**
 *
 * vq_ring_must_notify_host
 *
 */
static int vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
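
/*
 * vring_need_event() implements the VirtIO event-index test,
 *
 *	(uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - prev_idx)
 *
 * i.e. notify only when the other side's event index has been crossed
 * since the last notification. For example, with prev_idx == 4 and
 * new_idx == 6, an event_idx of 5 requires a notification, while an
 * event_idx of 7 does not.
 */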

/**
 *
 * vq_ring_notify_host
 *
 */
static void vq_ring_notify_host(struct virtqueue *vq)
{

	if (vq->notify != VQ_NULL)
		vq->notify(vq);
}

/**
 *
 * virtqueue_nused
 *
 */
static int virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}