libvhost-user: return early on virtqueue errors

vu_panic() is not guaranteed to exit the program. Return early when
errors are encountered.

Note that libvhost-user does not have an "unmap" operation for mapped
descriptors. Therefore it is correct to return without explicit cleanup.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200921113420.154378-2-stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
master
Stefan Hajnoczi 2020-09-21 12:34:19 +01:00 committed by Michael S. Tsirkin
parent 2bc9e0da57
commit fd40901c82
1 changed file with 17 additions and 8 deletions

View File

@@ -2416,7 +2416,7 @@ vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
}
}
static void
static bool
virtqueue_map_desc(VuDev *dev,
unsigned int *p_num_sg, struct iovec *iov,
unsigned int max_num_sg, bool is_write,
@@ -2428,7 +2428,7 @@ virtqueue_map_desc(VuDev *dev,
if (!sz) {
vu_panic(dev, "virtio: zero sized buffers are not allowed");
return;
return false;
}
while (sz) {
@@ -2436,13 +2436,13 @@ virtqueue_map_desc(VuDev *dev,
if (num_sg == max_num_sg) {
vu_panic(dev, "virtio: too many descriptors in indirect table");
return;
return false;
}
iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
if (iov[num_sg].iov_base == NULL) {
vu_panic(dev, "virtio: invalid address for buffers");
return;
return false;
}
iov[num_sg].iov_len = len;
num_sg++;
@@ -2451,6 +2451,7 @@ virtqueue_map_desc(VuDev *dev,
}
*p_num_sg = num_sg;
return true;
}
static void *
@@ -2488,6 +2489,7 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) {
if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) {
vu_panic(dev, "Invalid size for indirect buffer table");
return NULL;
}
/* loop over the indirect descriptor table */
@@ -2515,22 +2517,29 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
/* Collect all the descriptors */
do {
if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) {
virtqueue_map_desc(dev, &in_num, iov + out_num,
if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
VIRTQUEUE_MAX_SIZE - out_num, true,
ldq_le_p(&desc[i].addr), ldl_le_p(&desc[i].len));
ldq_le_p(&desc[i].addr),
ldl_le_p(&desc[i].len))) {
return NULL;
}
} else {
if (in_num) {
vu_panic(dev, "Incorrect order for descriptors");
return NULL;
}
virtqueue_map_desc(dev, &out_num, iov,
if (!virtqueue_map_desc(dev, &out_num, iov,
VIRTQUEUE_MAX_SIZE, false,
ldq_le_p(&desc[i].addr), ldl_le_p(&desc[i].len));
ldq_le_p(&desc[i].addr),
ldl_le_p(&desc[i].len))) {
return NULL;
}
}
/* If we've got too many, that implies a descriptor loop. */
if ((in_num + out_num) > max) {
vu_panic(dev, "Looped descriptor");
return NULL;
}
rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);