File: hw/virtio/dataplane/vring.c
Location: line 400, column 5
Description: The left operand of '<' is a garbage value
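The flagged read is the assert(ret < 0) at line 400 of vring_pop(). The path that triggers it: line 379 declares a second ret inside the indirect-descriptor branch, shadowing the function-scope ret from line 312, so when that branch's goto out (line 381) fires, the assert reads the outer, never-assigned ret. A minimal standalone sketch of the pattern, with hypothetical names (helper(), pop()) standing in for get_indirect() and vring_pop(); running clang --analyze on it reproduces the same diagnostic:

    #include <assert.h>

    static int helper(void) { return -1; }  /* stand-in for get_indirect() */

    static int pop(void)
    {
        int ret;                     /* outer ret, never assigned on this path */

        for (;;) {
            int ret = helper();      /* inner declaration shadows the outer ret */
            if (ret < 0) {
                goto out;            /* leaves the outer ret uninitialized */
            }
            break;
        }
        return 0;

    out:
        assert(ret < 0);             /* analyzer: left operand of '<' is garbage */
        return ret;
    }

    int main(void) { pop(); return 0; }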
  1 | /* Copyright 2012 Red Hat, Inc.
  2 |  * Copyright IBM, Corp. 2012
  3 |  *
  4 |  * Based on Linux 2.6.39 vhost code:
  5 |  * Copyright (C) 2009 Red Hat, Inc.
  6 |  * Copyright (C) 2006 Rusty Russell IBM Corporation
  7 |  *
  8 |  * Author: Michael S. Tsirkin <mst@redhat.com>
  9 |  *         Stefan Hajnoczi <stefanha@redhat.com>
 10 |  *
 11 |  * Inspiration, some code, and most witty comments come from
 12 |  * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 13 |  *
 14 |  * This work is licensed under the terms of the GNU GPL, version 2.
 15 |  */
 16 |
 17 | #include "trace.h"
 18 | #include "hw/hw.h"
 19 | #include "exec/memory.h"
 20 | #include "exec/address-spaces.h"
 21 | #include "hw/virtio/dataplane/vring.h"
 22 | #include "qemu/error-report.h"
 23 |
 24 | /* vring_map can be coupled with vring_unmap or (if you still have the
 25 |  * value returned in *mr) memory_region_unref.
 26 |  */
 27 | static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
 28 |                        bool is_write)
 29 | {
 30 |     MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);
 31 |
 32 |     if (!section.mr || int128_get64(section.size) < len) {
 33 |         goto out;
 34 |     }
 35 |     if (is_write && section.readonly) {
 36 |         goto out;
 37 |     }
 38 |     if (!memory_region_is_ram(section.mr)) {
 39 |         goto out;
 40 |     }
 41 |
 42 |     /* Ignore regions with dirty logging, we cannot mark them dirty */
 43 |     if (memory_region_is_logging(section.mr)) {
 44 |         goto out;
 45 |     }
 46 |
 47 |     *mr = section.mr;
 48 |     return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;
 49 |
 50 | out:
 51 |     memory_region_unref(section.mr);
 52 |     *mr = NULL;
 53 |     return NULL;
 54 | }
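The contract in the comment above (lines 24-26): on success, vring_map() returns a host pointer and leaves one reference held on *mr, which the caller must eventually drop, either by calling vring_unmap() on the returned pointer or memory_region_unref() on the saved region. A hypothetical caller, purely for illustration (read_guest_buffer() is not part of the file):

    static void read_guest_buffer(hwaddr phys, hwaddr len)
    {
        MemoryRegion *mr;
        void *ptr = vring_map(&mr, phys, len, false);

        if (!ptr) {
            return;              /* not RAM, too short, read-only, or logged */
        }
        /* ... read up to len bytes at ptr ... */
        vring_unmap(ptr, false); /* or, equivalently: memory_region_unref(mr); */
    }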
 55 |
 56 | static void vring_unmap(void *buffer, bool is_write)
 57 | {
 58 |     ram_addr_t addr;
 59 |     MemoryRegion *mr;
 60 |
 61 |     mr = qemu_ram_addr_from_host(buffer, &addr);
 62 |     memory_region_unref(mr);
 63 | }
 64 |
 65 | /* Map the guest's vring to host memory */
 66 | bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
 67 | {
 68 |     hwaddr vring_addr = virtio_queue_get_ring_addr(vdev, n);
 69 |     hwaddr vring_size = virtio_queue_get_ring_size(vdev, n);
 70 |     void *vring_ptr;
 71 |
 72 |     vring->broken = false;
 73 |
 74 |     vring_ptr = vring_map(&vring->mr, vring_addr, vring_size, true);
 75 |     if (!vring_ptr) {
 76 |         error_report("Failed to map vring "
 77 |                      "addr %#" HWADDR_PRIx " size %" HWADDR_PRIu,
 78 |                      vring_addr, vring_size);
 79 |         vring->broken = true;
 80 |         return false;
 81 |     }
 82 |
 83 |     vring_init(&vring->vr, virtio_queue_get_num(vdev, n), vring_ptr, 4096);
 84 |
 85 |     vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
 86 |     vring->last_used_idx = vring->vr.used->idx;
 87 |     vring->signalled_used = 0;
 88 |     vring->signalled_used_valid = false;
 89 |
 90 |     trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
 91 |                       vring->vr.desc, vring->vr.avail, vring->vr.used);
 92 |     return true;
 93 | }
 94 |
 95 | void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
 96 | {
 97 |     virtio_queue_set_last_avail_idx(vdev, n, vring->last_avail_idx);
 98 |     virtio_queue_invalidate_signalled_used(vdev, n);
 99 |
100 |     memory_region_unref(vring->mr);
101 | }
102 |
103 | /* Disable guest->host notifies */
104 | void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
105 | {
106 |     if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
107 |         vring->vr.used->flags |= VRING_USED_F_NO_NOTIFY;
108 |     }
109 | }
110 |
111 | /* Enable guest->host notifies
112 |  *
113 |  * Return true if the vring is empty, false if there are more requests.
114 |  */
115 | bool vring_enable_notification(VirtIODevice *vdev, Vring *vring)
116 | {
117 |     if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
118 |         vring_avail_event(&vring->vr) = vring->vr.avail->idx;
119 |     } else {
120 |         vring->vr.used->flags &= ~VRING_USED_F_NO_NOTIFY;
121 |     }
122 |     smp_mb(); /* ensure update is seen before reading avail_idx */
123 |     return !vring_more_avail(vring);
124 | }
125 |
126 | /* This is stolen from linux/drivers/vhost/vhost.c:vhost_notify() */
127 | bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
128 | {
129 |     uint16_t old, new;
130 |     bool v;
131 |     /* Flush out used index updates. This is paired
132 |      * with the barrier that the Guest executes when enabling
133 |      * interrupts. */
134 |     smp_mb();
135 |
136 |     if ((vdev->guest_features & VIRTIO_F_NOTIFY_ON_EMPTY) &&
137 |         unlikely(vring->vr.avail->idx == vring->last_avail_idx)) {
138 |         return true;
139 |     }
140 |
141 |     if (!(vdev->guest_features & VIRTIO_RING_F_EVENT_IDX)) {
142 |         return !(vring->vr.avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
143 |     }
144 |     old = vring->signalled_used;
145 |     v = vring->signalled_used_valid;
146 |     new = vring->signalled_used = vring->last_used_idx;
147 |     vring->signalled_used_valid = true;
148 |
149 |     if (unlikely(!v)) {
150 |         return true;
151 |     }
152 |
153 |     return vring_need_event(vring_used_event(&vring->vr), new, old);
154 | }
155 |
156 |
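With VIRTIO_RING_F_EVENT_IDX negotiated, the guest publishes a "used event" index and the host only interrupts when the used index crosses it. vring_need_event() reduces that to two unsigned 16-bit subtractions; for reference, the definition in Linux's include/uapi/linux/virtio_ring.h that this code relies on is equivalent to:

    /* Notify iff event_idx lies in the half-open window (old, new], mod 2^16. */
    static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx,
                                       uint16_t old)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }

For example, with old = 10, new = 12 and event_idx = 11, the check is (12 - 11 - 1) = 0 < (12 - 10) = 2, so the guest is notified.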
157 | static int get_desc(Vring *vring, VirtQueueElement *elem,
158 |                     struct vring_desc *desc)
159 | {
160 |     unsigned *num;
161 |     struct iovec *iov;
162 |     hwaddr *addr;
163 |     MemoryRegion *mr;
164 |
165 |     if (desc->flags & VRING_DESC_F_WRITE) {
166 |         num = &elem->in_num;
167 |         iov = &elem->in_sg[*num];
168 |         addr = &elem->in_addr[*num];
169 |     } else {
170 |         num = &elem->out_num;
171 |         iov = &elem->out_sg[*num];
172 |         addr = &elem->out_addr[*num];
173 |
174 |         /* If it's an output descriptor, they're all supposed
175 |          * to come before any input descriptors. */
176 |         if (unlikely(elem->in_num)) {
177 |             error_report("Descriptor has out after in");
178 |             return -EFAULT;
179 |         }
180 |     }
181 |
182 |     /* Stop for now if there are not enough iovecs available. */
183 |     if (*num >= VIRTQUEUE_MAX_SIZE) {
184 |         return -ENOBUFS;
185 |     }
186 |
187 |     /* TODO handle non-contiguous memory across region boundaries */
188 |     iov->iov_base = vring_map(&mr, desc->addr, desc->len,
189 |                               desc->flags & VRING_DESC_F_WRITE);
190 |     if (!iov->iov_base) {
191 |         error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
192 |                      (uint64_t)desc->addr, desc->len);
193 |         return -EFAULT;
194 |     }
195 |
196 |     /* The MemoryRegion is looked up again and unref'ed later, leave the
197 |      * ref in place. */
198 |     iov->iov_len = desc->len;
199 |     *addr = desc->addr;
200 |     *num += 1;
201 |     return 0;
202 | }
203 |
204 | /* This is stolen from linux/drivers/vhost/vhost.c. */
205 | static int get_indirect(Vring *vring, VirtQueueElement *elem,
206 |                         struct vring_desc *indirect)
207 | {
208 |     struct vring_desc desc;
209 |     unsigned int i = 0, count, found = 0;
210 |     int ret;
211 |
212 |     /* Sanity check */
213 |     if (unlikely(indirect->len % sizeof(desc))) {
214 |         error_report("Invalid length in indirect descriptor: "
215 |                      "len %#x not multiple of %#zx",
216 |                      indirect->len, sizeof(desc));
217 |         vring->broken = true;
218 |         return -EFAULT;
219 |     }
220 |
221 |     count = indirect->len / sizeof(desc);
222 |     /* Buffers are chained via a 16 bit next field, so
223 |      * we can have at most 2^16 of these. */
224 |     if (unlikely(count > USHRT_MAX + 1)) {
225 |         error_report("Indirect buffer length too big: %d", indirect->len);
226 |         vring->broken = true;
227 |         return -EFAULT;
228 |     }
229 |
230 |     do {
231 |         struct vring_desc *desc_ptr;
232 |         MemoryRegion *mr;
233 |
234 |         /* Translate indirect descriptor */
235 |         desc_ptr = vring_map(&mr,
236 |                              indirect->addr + found * sizeof(desc),
237 |                              sizeof(desc), false);
238 |         if (!desc_ptr) {
239 |             error_report("Failed to map indirect descriptor "
240 |                          "addr %#" PRIx64 " len %zu",
241 |                          (uint64_t)indirect->addr + found * sizeof(desc),
242 |                          sizeof(desc));
243 |             vring->broken = true;
244 |             return -EFAULT;
245 |         }
246 |         desc = *desc_ptr;
247 |         memory_region_unref(mr);
248 |
249 |         /* Ensure descriptor has been loaded before accessing fields */
250 |         barrier(); /* read_barrier_depends(); */
251 |
252 |         if (unlikely(++found > count)) {
253 |             error_report("Loop detected: last one at %u "
254 |                          "indirect size %u", i, count);
255 |             vring->broken = true;
256 |             return -EFAULT;
257 |         }
258 |
259 |         if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
260 |             error_report("Nested indirect descriptor");
261 |             vring->broken = true;
262 |             return -EFAULT;
263 |         }
264 |
265 |         ret = get_desc(vring, elem, &desc);
266 |         if (ret < 0) {
267 |             vring->broken |= (ret == -EFAULT);
268 |             return ret;
269 |         }
270 |         i = desc.next;
271 |     } while (desc.flags & VRING_DESC_F_NEXT);
272 |     return 0;
273 | }
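The count bound above follows from the descriptor layout: each struct vring_desc is 16 bytes, so an indirect table of indirect->len bytes holds indirect->len / 16 entries, and because chains are linked through a 16-bit next field, no more than 2^16 entries can ever be addressed. A self-contained restatement of that layout (the local type name is illustrative; the real one comes from the virtio headers):

    #include <stdint.h>

    struct vring_desc_sketch {   /* mirrors the virtio descriptor layout */
        uint64_t addr;           /* guest-physical buffer address */
        uint32_t len;            /* buffer length in bytes */
        uint16_t flags;          /* NEXT / WRITE / INDIRECT */
        uint16_t next;           /* 16-bit link: at most 2^16 descriptors */
    };

    _Static_assert(sizeof(struct vring_desc_sketch) == 16,
                   "count = indirect->len / 16");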
274 |
275 | void vring_free_element(VirtQueueElement *elem)
276 | {
277 |     int i;
278 |
279 |     /* This assumes that the iovecs, if changed, are never moved past
280 |      * the end of the valid area. This is true if iovec manipulations
281 |      * are done with iov_discard_front and iov_discard_back.
282 |      */
283 |     for (i = 0; i < elem->out_num; i++) {
284 |         vring_unmap(elem->out_sg[i].iov_base, false);
285 |     }
286 |
287 |     for (i = 0; i < elem->in_num; i++) {
288 |         vring_unmap(elem->in_sg[i].iov_base, true);
289 |     }
290 |
291 |     g_slice_free(VirtQueueElement, elem);
292 | }
293 |
294 | /* This looks in the virtqueue and for the first available buffer, and converts
295 |  * it to an iovec for convenient access. Since descriptors consist of some
296 |  * number of output then some number of input descriptors, it's actually two
297 |  * iovecs, but we pack them into one and note how many of each there were.
298 |  *
299 |  * This function returns the descriptor number found, or vq->num (which is
300 |  * never a valid descriptor number) if none was found. A negative code is
301 |  * returned on error.
302 |  *
303 |  * Stolen from linux/drivers/vhost/vhost.c.
304 |  */
305 | int vring_pop(VirtIODevice *vdev, Vring *vring,
306 |               VirtQueueElement **p_elem)
307 | {
308 |     struct vring_desc desc;
309 |     unsigned int i, head, found = 0, num = vring->vr.num;
310 |     uint16_t avail_idx, last_avail_idx;
311 |     VirtQueueElement *elem = NULL;
312 |     int ret;
313 |
314 |     /* If there was a fatal error then refuse operation */
315 |     if (vring->broken) {
316 |         ret = -EFAULT;
317 |         goto out;
318 |     }
319 |
320 |     /* Check it isn't doing very strange things with descriptor numbers. */
321 |     last_avail_idx = vring->last_avail_idx;
322 |     avail_idx = vring->vr.avail->idx;
323 |     barrier(); /* load indices now and not again later */
324 |
325 |     if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) {
326 |         error_report("Guest moved used index from %u to %u",
327 |                      last_avail_idx, avail_idx);
328 |         ret = -EFAULT;
329 |         goto out;
330 |     }
331 |
332 |     /* If there's nothing new since last we looked. */
333 |     if (avail_idx == last_avail_idx) {
334 |         ret = -EAGAIN;
335 |         goto out;
336 |     }
337 |
338 |     /* Only get avail ring entries after they have been exposed by guest. */
339 |     smp_rmb();
340 |
341 |     /* Grab the next descriptor number they're advertising, and increment
342 |      * the index we've seen. */
343 |     head = vring->vr.avail->ring[last_avail_idx % num];
344 |
345 |     elem = g_slice_new(VirtQueueElement);
346 |     elem->index = head;
347 |     elem->in_num = elem->out_num = 0;
348 |
349 |     /* If their number is silly, that's an error. */
350 |     if (unlikely(head >= num)) {
351 |         error_report("Guest says index %u > %u is available", head, num);
352 |         ret = -EFAULT;
353 |         goto out;
354 |     }
355 |
356 |     if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
357 |         vring_avail_event(&vring->vr) = vring->vr.avail->idx;
358 |     }
359 |
360 |     i = head;
361 |     do {
362 |         if (unlikely(i >= num)) {
363 |             error_report("Desc index is %u > %u, head = %u", i, num, head);
364 |             ret = -EFAULT;
365 |             goto out;
366 |         }
367 |         if (unlikely(++found > num)) {
368 |             error_report("Loop detected: last one at %u vq size %u head %u",
369 |                          i, num, head);
370 |             ret = -EFAULT;
371 |             goto out;
372 |         }
373 |         desc = vring->vr.desc[i];
374 |
375 |         /* Ensure descriptor is loaded before accessing fields */
376 |         barrier();
377 |
378 |         if (desc.flags & VRING_DESC_F_INDIRECT) {
379 |             int ret = get_indirect(vring, elem, &desc); /* BUG: shadows 'ret' from line 312 */
380 |             if (ret < 0) {
381 |                 goto out; /* outer 'ret' is still uninitialized here */
382 |             }
383 |             continue;
384 |         }
385 |
386 |         ret = get_desc(vring, elem, &desc);
387 |         if (ret < 0) {
388 |             goto out;
389 |         }
390 |
391 |         i = desc.next;
392 |     } while (desc.flags & VRING_DESC_F_NEXT);
393 |
394 |     /* On success, increment avail index. */
395 |     vring->last_avail_idx++;
396 |     *p_elem = elem;
397 |     return head;
398 |
399 | out:
400 |     assert(ret < 0); /* <-- reported: left operand of '<' is a garbage value */
401 |     if (ret == -EFAULT) {
402 |         vring->broken = true;
403 |     }
404 |     if (elem) {
405 |         vring_free_element(elem);
406 |     }
407 |     *p_elem = NULL;
408 |     return ret;
409 | }
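This loop is where the reported garbage value originates: the declaration at line 379 shadows the ret declared at line 312, so the goto out at line 381 reaches line 400 with the outer ret untouched. The minimal fix is to assign to the existing variable rather than declare a new one; a sketch of the likely change (not necessarily the upstream patch as merged):

    if (desc.flags & VRING_DESC_F_INDIRECT) {
        ret = get_indirect(vring, elem, &desc); /* assign; do not redeclare */
        if (ret < 0) {
            goto out;                           /* outer ret is now set */
        }
        continue;
    }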
410 |
411 | /* After we've used one of their buffers, we tell them about it.
412 |  *
413 |  * Stolen from linux/drivers/vhost/vhost.c.
414 |  */
415 | void vring_push(Vring *vring, VirtQueueElement *elem, int len)
416 | {
417 |     struct vring_used_elem *used;
418 |     unsigned int head = elem->index;
419 |     uint16_t new;
420 |
421 |     vring_free_element(elem);
422 |
423 |     /* Don't touch vring if a fatal error occurred */
424 |     if (vring->broken) {
425 |         return;
426 |     }
427 |
428 |     /* The virtqueue contains a ring of used buffers. Get a pointer to the
429 |      * next entry in that used ring. */
430 |     used = &vring->vr.used->ring[vring->last_used_idx % vring->vr.num];
431 |     used->id = head;
432 |     used->len = len;
433 |
434 |     /* Make sure buffer is written before we update index. */
435 |     smp_wmb();
436 |
437 |     new = vring->vr.used->idx = ++vring->last_used_idx;
438 |     if (unlikely((int16_t)(new - vring->signalled_used) < (uint16_t)1)) {
439 |         vring->signalled_used_valid = false;
440 |     }
441 | }
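The test at line 438 is modular-arithmetic shorthand: the 16-bit difference new - signalled_used, reinterpreted as signed, is less than 1 exactly when the used index failed to move past the cached signalled_used value (or lapped it), in which case the cache must be invalidated. A small standalone check of the same expression (function names here are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors line 438: true when 'new_idx' has not advanced past
     * 'signalled_used' by at least one step, modulo 2^16. */
    static int signalled_used_stale(uint16_t new_idx, uint16_t signalled_used)
    {
        return (int16_t)(new_idx - signalled_used) < 1;
    }

    int main(void)
    {
        assert(!signalled_used_stale(5, 4));      /* advanced by one: keep */
        assert(signalled_used_stale(4, 4));       /* no progress: invalidate */
        assert(signalled_used_stale(4, 5));       /* fell behind: invalidate */
        assert(!signalled_used_stale(0, 0xffff)); /* wrapped by one: keep */
        return 0;
    }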