author      Tuomas Tynkkynen        2018-10-15 04:21:01 -0500
committer   Simon Glass             2018-11-14 11:16:27 -0600
commit      c011641ec4fcb61d1335f61b413117c1b7d83e5e (patch)
tree        fbe2ad2f529009817ee9e90c5fec12da5a473da9
parent      8fb49b4c7a820461db7c11dce767f36fd6395cac (diff)
virtio: Add codes for virtual queue/ring management
This adds support for managing the virtual queue/ring, the channel
for high-performance I/O between host and guest.
Signed-off-by: Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
-rw-r--r--  drivers/virtio/Makefile       |   2
-rw-r--r--  drivers/virtio/virtio_ring.c  | 358
-rw-r--r--  include/virtio_ring.h         | 320
3 files changed, 679 insertions(+), 1 deletion(-)
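
The commit message above is terse, so a brief orientation may help: a virtio driver obtains a virtqueue from its transport, exposes scatter-gather buffers to the device with virtqueue_add(), notifies the device with virtqueue_kick(), and later collects completed buffers with virtqueue_get_buf(). The sketch below (not part of the patch) illustrates that call sequence for one device-readable and one device-writable buffer; the field order of struct virtio_sg and the bare polling loop are assumptions made for illustration.

/* Hypothetical driver-side sketch, not part of this patch */
static int send_and_receive(struct virtqueue *vq, void *out, size_t out_len,
			    void *in, size_t in_len)
{
	/* Assumed field order: { addr, length } */
	struct virtio_sg out_sg = { out, out_len };
	struct virtio_sg in_sg = { in, in_len };
	struct virtio_sg *sgs[] = { &out_sg, &in_sg };
	unsigned int len;
	int ret;

	/* One device-readable buffer followed by one device-writable buffer */
	ret = virtqueue_add(vq, sgs, 1, 1);
	if (ret)
		return ret;

	/* Tell the other side that new descriptors are available */
	virtqueue_kick(vq);

	/* Busy-wait until the device places the buffer on the used ring */
	while (!virtqueue_get_buf(vq, &len))
		;

	return len;
}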
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 23e7be7165..17d264a771 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -3,4 +3,4 @@
 # Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 # Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 
-obj-y += virtio-uclass.o
+obj-y += virtio-uclass.o virtio_ring.o
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
new file mode 100644
index 0000000000..0eeb3501c2
--- /dev/null
+++ b/drivers/virtio/virtio_ring.c
@@ -0,0 +1,358 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * virtio ring implementation
 */

#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>

int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
		  unsigned int out_sgs, unsigned int in_sgs)
{
	struct vring_desc *desc;
	unsigned int total_sg = out_sgs + in_sgs;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;

	WARN_ON(total_sg == 0);

	head = vq->free_head;

	desc = vq->vring.desc;
	i = head;
	descs_used = total_sg;

	if (vq->num_free < descs_used) {
		debug("Can't add buf len %i - avail = %i\n",
		      descs_used, vq->num_free);
		/*
		 * FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer. Presumably the
		 * host should service the ring ASAP.
		 */
		if (out_sgs)
			virtio_notify(vq->vdev, vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		struct virtio_sg *sg = sgs[n];

		desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);
		desc[i].addr = cpu_to_virtio64(vq->vdev, (u64)(size_t)sg->addr);
		desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);

		prev = i;
		i = virtio16_to_cpu(vq->vdev, desc[i].next);
	}
	for (; n < (out_sgs + in_sgs); n++) {
		struct virtio_sg *sg = sgs[n];

		desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT |
						VRING_DESC_F_WRITE);
		desc[i].addr = cpu_to_virtio64(vq->vdev,
					       (u64)(uintptr_t)sg->addr);
		desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);

		prev = i;
		i = virtio16_to_cpu(vq->vdev, desc[i].next);
	}
	/* Last one doesn't continue */
	desc[prev].flags &= cpu_to_virtio16(vq->vdev, ~VRING_DESC_F_NEXT);

	/* We're using some buffers from the free list. */
	vq->num_free -= descs_used;

	/* Update free pointer */
	vq->free_head = i;

	/*
	 * Put entry in available array (but don't update avail->idx
	 * until they do sync).
	 */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);

	/*
	 * Descriptors and available array need to be set before we expose the
	 * new available array entries.
	 */
	virtio_wmb();
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	/*
	 * This is very unlikely, but theoretically possible.
	 * Kick just in case.
	 */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(vq);

	return 0;
}

static bool virtqueue_kick_prepare(struct virtqueue *vq)
{
	u16 new, old;
	bool needs_kick;

	/*
	 * We need to expose available array entries before checking
	 * avail event.
	 */
	virtio_mb();

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
				vring_avail_event(&vq->vring)), new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
				VRING_USED_F_NO_NOTIFY));
	}

	return needs_kick;
}

void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtio_notify(vq->vdev, vq);
}

static void detach_buf(struct virtqueue *vq, unsigned int head)
{
	unsigned int i;
	__virtio16 nextflag = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		i = virtio16_to_cpu(vq->vdev, vq->vring.desc[i].next);
		vq->num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vdev,
						    vq->vring.used->idx);
}

void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
	unsigned int i;
	u16 last_used;

	if (!more_used(vq)) {
		debug("(%s.%d): No more buffers in queue\n",
		      vq->vdev->name, vq->index);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host */
	virtio_rmb();

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
	if (len) {
		*len = virtio32_to_cpu(vq->vdev,
				       vq->vring.used->ring[last_used].len);
		debug("(%s.%d): last used idx %u with len %u\n",
		      vq->vdev->name, vq->index, i, *len);
	}

	if (unlikely(i >= vq->vring.num)) {
		printf("(%s.%d): id %u out of range\n",
		       vq->vdev->name, vq->index, i);
		return NULL;
	}

	detach_buf(vq, i);
	vq->last_used_idx++;
	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(&vring_used_event(&vq->vring),
				cpu_to_virtio16(vq->vdev, vq->last_used_idx));

	return (void *)(uintptr_t)virtio64_to_cpu(vq->vdev,
						  vq->vring.desc[i].addr);
}

static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring vring,
					       struct udevice *udev)
{
	unsigned int i;
	struct virtqueue *vq;
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct udevice *vdev = uc_priv->vdev;

	vq = malloc(sizeof(*vq));
	if (!vq)
		return NULL;

	vq->vdev = vdev;
	vq->index = index;
	vq->num_free = vring.num;
	vq->vring = vring;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->list, &uc_priv->vqs);

	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* Tell other side not to bother us */
	vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
	if (!vq->event)
		vq->vring.avail->flags = cpu_to_virtio16(vdev,
				vq->avail_flags_shadow);

	/* Put everything in free lists */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);

	return vq;
}

struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
					 unsigned int vring_align,
					 struct udevice *udev)
{
	struct virtqueue *vq;
	void *queue = NULL;
	struct vring vring;

	/* We assume num is a power of 2 */
	if (num & (num - 1)) {
		printf("Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
	}
	if (!queue)
		return NULL;

	memset(queue, 0, vring_size(num, vring_align));
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, udev);
	if (!vq) {
		free(queue);
		return NULL;
	}
	debug("(%s): created vring @ %p for vq @ %p with num %u\n", udev->name,
	      queue, vq, num);

	return vq;
}

void vring_del_virtqueue(struct virtqueue *vq)
{
	free(vq->vring.desc);
	list_del(&vq->list);
	free(vq);
}

unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
{
	return vq->vring.num;
}

ulong virtqueue_get_desc_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc;
}

ulong virtqueue_get_avail_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc +
	       ((char *)vq->vring.avail - (char *)vq->vring.desc);
}

ulong virtqueue_get_used_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc +
	       ((char *)vq->vring.used - (char *)vq->vring.desc);
}

bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx)
{
	virtio_mb();

	return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx);
}

void virtqueue_dump(struct virtqueue *vq)
{
	unsigned int i;

	printf("virtqueue %p for dev %s:\n", vq, vq->vdev->name);
	printf("\tindex %u, phys addr %p num %u\n",
	       vq->index, vq->vring.desc, vq->vring.num);
	printf("\tfree_head %u, num_added %u, num_free %u\n",
	       vq->free_head, vq->num_added, vq->num_free);
	printf("\tlast_used_idx %u, avail_flags_shadow %u, avail_idx_shadow %u\n",
	       vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow);

	printf("Descriptor dump:\n");
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tdesc[%u] = { 0x%llx, len %u, flags %u, next %u }\n",
		       i, vq->vring.desc[i].addr, vq->vring.desc[i].len,
		       vq->vring.desc[i].flags, vq->vring.desc[i].next);
	}

	printf("Avail ring dump:\n");
	printf("\tflags %u, idx %u\n",
	       vq->vring.avail->flags, vq->vring.avail->idx);
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tavail[%u] = %u\n",
		       i, vq->vring.avail->ring[i]);
	}

	printf("Used ring dump:\n");
	printf("\tflags %u, idx %u\n",
	       vq->vring.used->flags, vq->vring.used->idx);
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tused[%u] = { %u, %u }\n", i,
		       vq->vring.used->ring[i].id, vq->vring.used->ring[i].len);
	}
}
diff --git a/include/virtio_ring.h b/include/virtio_ring.h
new file mode 100644
index 0000000000..6fc0593b14
--- /dev/null
+++ b/include/virtio_ring.h
@@ -0,0 +1,320 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * From Linux kernel include/uapi/linux/virtio_ring.h
 */

#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <virtio_types.h>

/* This marks a buffer as continuing via the next field */
#define VRING_DESC_F_NEXT		1
/* This marks a buffer as write-only (otherwise read-only) */
#define VRING_DESC_F_WRITE		2
/* This means the buffer contains a list of buffer descriptors */
#define VRING_DESC_F_INDIRECT		4

/*
 * The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
 * will still kick if it's out of buffers.
 */
#define VRING_USED_F_NO_NOTIFY		1

/*
 * The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer. It's unreliable, so it's simply an optimization.
 */
#define VRING_AVAIL_F_NO_INTERRUPT	1

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC	28

/*
 * The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field.
 *
 * The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field.
 */
#define VIRTIO_RING_F_EVENT_IDX		29

/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
	/* Address (guest-physical) */
	__virtio64 addr;
	/* Length */
	__virtio32 len;
	/* The flags as indicated above */
	__virtio16 flags;
	/* We chain unused descriptors via this, too */
	__virtio16 next;
};

struct vring_avail {
	__virtio16 flags;
	__virtio16 idx;
	__virtio16 ring[];
};

struct vring_used_elem {
	/* Index of start of used descriptor chain */
	__virtio32 id;
	/* Total length of the descriptor chain which was used (written to) */
	__virtio32 len;
};

struct vring_used {
	__virtio16 flags;
	__virtio16 idx;
	struct vring_used_elem ring[];
};

struct vring {
	unsigned int num;
	struct vring_desc *desc;
	struct vring_avail *avail;
	struct vring_used *used;
};

/**
 * virtqueue - a queue to register buffers for sending or receiving.
 *
 * @list: the chain of virtqueues for this device
 * @vdev: the virtio device this queue was created for
 * @index: the zero-based ordinal number for this queue
 * @num_free: number of elements we expect to be able to fit
 * @vring: actual memory layout for this queue
 * @event: host publishes avail event idx
 * @free_head: head of free buffer list
 * @num_added: number we've added since last sync
 * @last_used_idx: last used index we've seen
 * @avail_flags_shadow: last written value to avail->flags
 * @avail_idx_shadow: last written value to avail->idx in guest byte order
 */
struct virtqueue {
	struct list_head list;
	struct udevice *vdev;
	unsigned int index;
	unsigned int num_free;
	struct vring vring;
	bool event;
	unsigned int free_head;
	unsigned int num_added;
	u16 last_used_idx;
	u16 avail_flags_shadow;
	u16 avail_idx_shadow;
};

/*
 * Alignment requirements for vring elements.
 * When using pre-virtio 1.0 layout, these fall out naturally.
 */
#define VRING_AVAIL_ALIGN_SIZE		2
#define VRING_USED_ALIGN_SIZE		4
#define VRING_DESC_ALIGN_SIZE		16

/*
 * We publish the used event index at the end of the available ring,
 * and vice versa. They are at the end for backwards compatibility.
 */
#define vring_used_event(vr)	((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr)	(*(__virtio16 *)&(vr)->used->ring[(vr)->num])

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = p + num * sizeof(struct vring_desc);
	vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] +
		   sizeof(__virtio16) + align - 1) & ~(align - 1));
}

static inline unsigned int vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num +
		sizeof(__virtio16) * (3 + num) + align - 1) & ~(align - 1)) +
		sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}

/*
 * The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have just
 * incremented index from old to new_idx, should we trigger an event?
 */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/*
	 * Note: Xen has similar logic for notification hold-off
	 * in include/xen/interface/io/ring.h with req_event and req_prod
	 * corresponding to event_idx + 1 and new_idx respectively.
	 * Note also that req_event and req_prod in Xen start at 1,
	 * event indexes in virtio start at 0.
	 */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}

struct virtio_sg;

/**
 * virtqueue_add - expose buffers to other end
 *
 * @vq: the struct virtqueue we're talking about
 * @sgs: array of terminated scatterlists
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable
 *	    (after readable ones)
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
		  unsigned int out_sgs, unsigned int in_sgs);

/**
 * virtqueue_kick - update after add_buf
 *
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add() calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq);

/**
 * virtqueue_get_buf - get the next used buffer
 *
 * @vq: the struct virtqueue we're talking about
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written. This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the memory buffer
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);

/**
 * vring_create_virtqueue - create a virtqueue for a virtio device
 *
 * @index: the index of the queue
 * @num: number of elements of the queue
 * @vring_align:the alignment requirement of the descriptor ring
 * @udev: the virtio transport udevice
 * @return: the virtqueue pointer or NULL if failed
 *
 * This creates a virtqueue and allocates the descriptor ring for a virtio
 * device. The caller should query virtqueue_get_ring_size() to learn the
 * actual size of the ring.
 *
 * This API is supposed to be called by the virtio transport driver in the
 * virtio find_vqs() uclass method.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
					 unsigned int vring_align,
					 struct udevice *udev);

/**
 * vring_del_virtqueue - destroy a virtqueue
 *
 * @vq: the struct virtqueue we're talking about
 *
 * This destroys a virtqueue. If created with vring_create_virtqueue(),
 * this also frees the descriptor ring.
 *
 * This API is supposed to be called by the virtio transport driver in the
 * virtio del_vqs() uclass method.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/**
 * virtqueue_get_vring_size - get the size of the virtqueue's vring
 *
 * @vq: the struct virtqueue containing the vring of interest
 * @return: the size of the vring in a virtqueue.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *vq);

/**
 * virtqueue_get_desc_addr - get the vring descriptor table address
 *
 * @vq: the struct virtqueue containing the vring of interest
 * @return: the descriptor table address of the vring in a virtqueue.
 */
ulong virtqueue_get_desc_addr(struct virtqueue *vq);

/**
 * virtqueue_get_avail_addr - get the vring available ring address
 *
 * @vq: the struct virtqueue containing the vring of interest
 * @return: the available ring address of the vring in a virtqueue.
 */
ulong virtqueue_get_avail_addr(struct virtqueue *vq);

/**
 * virtqueue_get_used_addr - get the vring used ring address
 *
 * @vq: the struct virtqueue containing the vring of interest
 * @return: the used ring address of the vring in a virtqueue.
 */
ulong virtqueue_get_used_addr(struct virtqueue *vq);

/**
 * virtqueue_poll - query pending used buffers
 *
 * @vq: the struct virtqueue we're talking about
 * @last_used_idx: virtqueue last used index
 *
 * Returns "true" if there are pending used buffers in the queue.
 */
bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx);

/**
 * virtqueue_dump - dump the virtqueue for debugging
 *
 * @vq: the struct virtqueue we're talking about
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 */
void virtqueue_dump(struct virtqueue *vq);

/*
 * Barriers in virtio are tricky. Since we are not in a hypervisor/guest
 * scenario, having these as nops is enough to work as expected.
 */

static inline void virtio_mb(void)
{
}

static inline void virtio_rmb(void)
{
}

static inline void virtio_wmb(void)
{
}

static inline void virtio_store_mb(__virtio16 *p, __virtio16 v)
{
	WRITE_ONCE(*p, v);
}

#endif /* _LINUX_VIRTIO_RING_H */
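
For reference, the vring_init()/vring_size() helpers above imply a fixed memory layout: the descriptor table, then the available ring (including used_event), padded up to the requested alignment, then the used ring (including avail_event). A small, hypothetical stand-alone calculation (not part of the patch) for a 256-entry queue with 4096-byte alignment looks like this:

#include <stdio.h>

int main(void)
{
	unsigned int num = 256, align = 4096;
	unsigned int desc  = 16 * num;		/* descriptor table: 4096 bytes */
	unsigned int avail = 2 * (3 + num);	/* flags, idx, ring[num], used_event: 518 bytes */
	unsigned int head  = (desc + avail + align - 1) & ~(align - 1);	/* padded to 8192 */
	unsigned int used  = 2 * 3 + 8 * num;	/* flags, idx, avail_event, ring[num]: 2054 bytes */

	/* Matches vring_size(256, 4096) = 10246 */
	printf("total vring size: %u bytes\n", head + used);
	return 0;
}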