Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio: regression fix

Fixes a regression with migration and vsock. Fixing that
exposes some known issues in vhost-user cleanup, so this attempts
to fix those as well. More work on vhost-user is needed :)

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmOIWaEPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRp+RQH/2PVAjD/GA3zF5F3Z07vH51c55T6tluZ85c3
# 4u66SSkF5JR1hATCujYCtrt9V0mnqhmhhm4gJH5xcsynFjjyIXd2dDrTFRpCtRgn
# icXOmYCc9pCu8XsluJnWvY/5r/KEDxqmGVE8Kyhz551QjvsBkezhI9x9vhJZJLCn
# Xn1XQ/3jpUcQLwasu8AxZb0IDW8WdCtonbke6xIyMzOYGR2bnRdXlDXVVG1zJ/SZ
# eS3HUad71VekhfzWq0fx8yEJnfvbes9vo007y8rOGdHOcMneWGAie52W1dOBhclh
# Zt56zID55t1USEwlPxkZSj7UXNbVl7Uz/XU5ElN0yTesttP4Iq0=
# =ZkaX
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 01 Dec 2022 02:37:05 EST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  include/hw: VM state takes precedence in virtio_device_should_start
  hw/virtio: generalise CHR_EVENT_CLOSED handling
  hw/virtio: add started_vu status field to vhost-user-gpio
  vhost: enable vrings in vhost_dev_start() for vhost-user devices
  tests/qtests: override "force-legacy" for gpio virtio-mmio tests

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2022-12-04 11:00:26 -05:00
commit 42f3253c34
18 changed files with 207 additions and 83 deletions

@@ -94,7 +94,7 @@ cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto,
goto fail_notifiers;
}
r = vhost_dev_start(&crypto->dev, dev);
r = vhost_dev_start(&crypto->dev, dev, false);
if (r < 0) {
goto fail_start;
}
@@ -111,7 +111,7 @@ static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
VirtIODevice *dev)
{
vhost_dev_stop(&crypto->dev, dev);
vhost_dev_stop(&crypto->dev, dev, false);
vhost_dev_disable_notifiers(&crypto->dev, dev);
}

@@ -85,7 +85,7 @@ vhost_user_backend_start(VhostUserBackend *b)
}
b->dev.acked_features = b->vdev->guest_features;
ret = vhost_dev_start(&b->dev, b->vdev);
ret = vhost_dev_start(&b->dev, b->vdev, true);
if (ret < 0) {
error_report("Error start vhost dev");
goto err_guest_notifiers;
@@ -120,7 +120,7 @@ vhost_user_backend_stop(VhostUserBackend *b)
return;
}
vhost_dev_stop(&b->dev, b->vdev);
vhost_dev_stop(&b->dev, b->vdev, true);
if (k->set_guest_notifiers) {
ret = k->set_guest_notifiers(qbus->parent,

@@ -178,7 +178,7 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
}
s->dev.vq_index_end = s->dev.nvqs;
ret = vhost_dev_start(&s->dev, vdev);
ret = vhost_dev_start(&s->dev, vdev, true);
if (ret < 0) {
error_setg_errno(errp, -ret, "Error starting vhost");
goto err_guest_notifiers;
@@ -213,7 +213,7 @@ static void vhost_user_blk_stop(VirtIODevice *vdev)
return;
}
vhost_dev_stop(&s->dev, vdev);
vhost_dev_stop(&s->dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
if (ret < 0) {
@@ -369,17 +369,10 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
vhost_user_blk_stop(vdev);
vhost_dev_cleanup(&s->dev);
}
static void vhost_user_blk_chr_closed_bh(void *opaque)
{
DeviceState *dev = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserBlk *s = VHOST_USER_BLK(vdev);
vhost_user_blk_disconnect(dev);
/* Re-instate the event handler for new connections */
qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
NULL, opaque, NULL, true);
NULL, dev, NULL, true);
}
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
@@ -398,33 +391,9 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
if (!runstate_check(RUN_STATE_SHUTDOWN)) {
/*
* A close event may happen during a read/write, but vhost
* code assumes the vhost_dev remains setup, so delay the
* stop & clear.
*/
AioContext *ctx = qemu_get_current_aio_context();
qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
NULL, NULL, false);
aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);
/*
* Move vhost device to the stopped state. The vhost-user device
* will be clean up and disconnected in BH. This can be useful in
* the vhost migration code. If disconnect was caught there is an
* option for the general vhost code to get the dev state without
* knowing its type (in this case vhost-user).
*
* FIXME: this is sketchy to be reaching into vhost_dev
* now because we are forcing something that implies we
* have executed vhost_dev_stop() but that won't happen
* until vhost_user_blk_stop() gets called from the bh.
* Really this state check should be tracked locally.
*/
s->dev.started = false;
}
/* defer close until later to avoid circular close */
vhost_user_async_close(dev, &s->chardev, &s->dev,
vhost_user_blk_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:

@@ -259,7 +259,7 @@ static int vhost_net_start_one(struct vhost_net *net,
goto fail_notifiers;
}
r = vhost_dev_start(&net->dev, dev);
r = vhost_dev_start(&net->dev, dev, false);
if (r < 0) {
goto fail_start;
}
@@ -308,7 +308,7 @@ fail:
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
vhost_dev_stop(&net->dev, dev);
vhost_dev_stop(&net->dev, dev, false);
fail_start:
vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
@@ -329,7 +329,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
vhost_dev_stop(&net->dev, dev);
vhost_dev_stop(&net->dev, dev, false);
if (net->nc->info->stop) {
net->nc->info->stop(net->nc);
}
@@ -606,7 +606,7 @@ err_start:
assert(r >= 0);
}
vhost_dev_stop(&net->dev, vdev);
vhost_dev_stop(&net->dev, vdev, false);
return r;
}

@@ -68,7 +68,7 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
goto err_guest_notifiers;
}
ret = vhost_dev_start(&vsc->dev, vdev);
ret = vhost_dev_start(&vsc->dev, vdev, true);
if (ret < 0) {
error_report("Error start vhost dev");
goto err_guest_notifiers;
@@ -101,7 +101,7 @@ void vhost_scsi_common_stop(VHostSCSICommon *vsc)
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int ret = 0;
vhost_dev_stop(&vsc->dev, vdev);
vhost_dev_stop(&vsc->dev, vdev, true);
if (k->set_guest_notifiers) {
ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);

@@ -9,8 +9,8 @@ vhost_section(const char *name) "%s"
vhost_reject_section(const char *name, int d) "%s:%d"
vhost_iotlb_miss(void *dev, int step) "%p step %d"
vhost_dev_cleanup(void *dev) "%p"
vhost_dev_start(void *dev, const char *name) "%p:%s"
vhost_dev_stop(void *dev, const char *name) "%p:%s"
vhost_dev_start(void *dev, const char *name, bool vrings) "%p:%s vrings:%d"
vhost_dev_stop(void *dev, const char *name, bool vrings) "%p:%s vrings:%d"
# vhost-user.c

@@ -76,7 +76,7 @@ static void vuf_start(VirtIODevice *vdev)
}
fs->vhost_dev.acked_features = vdev->guest_features;
ret = vhost_dev_start(&fs->vhost_dev, vdev);
ret = vhost_dev_start(&fs->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost: %d", -ret);
goto err_guest_notifiers;
@@ -110,7 +110,7 @@ static void vuf_stop(VirtIODevice *vdev)
return;
}
vhost_dev_stop(&fs->vhost_dev, vdev);
vhost_dev_stop(&fs->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
if (ret < 0) {

@@ -81,11 +81,12 @@ static int vu_gpio_start(VirtIODevice *vdev)
*/
vhost_ack_features(&gpio->vhost_dev, feature_bits, vdev->guest_features);
ret = vhost_dev_start(&gpio->vhost_dev, vdev);
ret = vhost_dev_start(&gpio->vhost_dev, vdev, false);
if (ret < 0) {
error_report("Error starting vhost-user-gpio: %d", ret);
goto err_guest_notifiers;
}
gpio->started_vu = true;
/*
* guest_notifier_mask/pending not used yet, so just unmask
@@ -126,20 +127,16 @@ static void vu_gpio_stop(VirtIODevice *vdev)
struct vhost_dev *vhost_dev = &gpio->vhost_dev;
int ret;
if (!gpio->started_vu) {
return;
}
gpio->started_vu = false;
if (!k->set_guest_notifiers) {
return;
}
/*
* We can call vu_gpio_stop multiple times, for example from
* vm_state_notify and the final object finalisation. Check we
* aren't already stopped before doing so.
*/
if (!vhost_dev_is_started(vhost_dev)) {
return;
}
vhost_dev_stop(vhost_dev, vdev);
vhost_dev_stop(vhost_dev, vdev, false);
ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
if (ret < 0) {
@@ -236,6 +233,8 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp)
return 0;
}
static void vu_gpio_event(void *opaque, QEMUChrEvent event);
static void vu_gpio_disconnect(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -248,6 +247,11 @@ static void vu_gpio_disconnect(DeviceState *dev)
vu_gpio_stop(vdev);
vhost_dev_cleanup(&gpio->vhost_dev);
/* Re-instate the event handler for new connections */
qemu_chr_fe_set_handlers(&gpio->chardev,
NULL, NULL, vu_gpio_event,
NULL, dev, NULL, true);
}
static void vu_gpio_event(void *opaque, QEMUChrEvent event)
@@ -265,7 +269,9 @@ static void vu_gpio_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
vu_gpio_disconnect(dev);
/* defer close until later to avoid circular close */
vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev,
vu_gpio_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:

@@ -46,7 +46,7 @@ static void vu_i2c_start(VirtIODevice *vdev)
i2c->vhost_dev.acked_features = vdev->guest_features;
ret = vhost_dev_start(&i2c->vhost_dev, vdev);
ret = vhost_dev_start(&i2c->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost-user-i2c: %d", -ret);
goto err_guest_notifiers;
@@ -80,7 +80,7 @@ static void vu_i2c_stop(VirtIODevice *vdev)
return;
}
vhost_dev_stop(&i2c->vhost_dev, vdev);
vhost_dev_stop(&i2c->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false);
if (ret < 0) {

@@ -47,7 +47,7 @@ static void vu_rng_start(VirtIODevice *vdev)
}
rng->vhost_dev.acked_features = vdev->guest_features;
ret = vhost_dev_start(&rng->vhost_dev, vdev);
ret = vhost_dev_start(&rng->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost-user-rng: %d", -ret);
goto err_guest_notifiers;
@@ -81,7 +81,7 @@ static void vu_rng_stop(VirtIODevice *vdev)
return;
}
vhost_dev_stop(&rng->vhost_dev, vdev);
vhost_dev_stop(&rng->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false);
if (ret < 0) {

@@ -21,6 +21,7 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/runstate.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
@@ -2670,6 +2671,76 @@ void vhost_user_cleanup(VhostUserState *user)
user->chr = NULL;
}
typedef struct {
vu_async_close_fn cb;
DeviceState *dev;
CharBackend *cd;
struct vhost_dev *vhost;
} VhostAsyncCallback;
static void vhost_user_async_close_bh(void *opaque)
{
VhostAsyncCallback *data = opaque;
struct vhost_dev *vhost = data->vhost;
/*
* If the vhost_dev has been cleared in the meantime there is
* nothing left to do as some other path has completed the
* cleanup.
*/
if (vhost->vdev) {
data->cb(data->dev);
}
g_free(data);
}
/*
* We only schedule the work if the machine is running. If suspended
* we want to keep all the in-flight data as is for migration
* purposes.
*/
void vhost_user_async_close(DeviceState *d,
CharBackend *chardev, struct vhost_dev *vhost,
vu_async_close_fn cb)
{
if (!runstate_check(RUN_STATE_SHUTDOWN)) {
/*
* A close event may happen during a read/write, but vhost
* code assumes the vhost_dev remains setup, so delay the
* stop & clear.
*/
AioContext *ctx = qemu_get_current_aio_context();
VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1);
/* Save data for the callback */
data->cb = cb;
data->dev = d;
data->cd = chardev;
data->vhost = vhost;
/* Disable any further notifications on the chardev */
qemu_chr_fe_set_handlers(chardev,
NULL, NULL, NULL, NULL, NULL, NULL,
false);
aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data);
/*
* Move the vhost device to the stopped state. The vhost-user device
* will be cleaned up and disconnected in the BH. This can be useful
* in the vhost migration code: if a disconnect was caught there, the
* general vhost code can get the device state without knowing its
* type (in this case vhost-user).
*
* Note if the vhost device is fully cleared by the time we
* execute the bottom half we won't continue with the cleanup.
*/
vhost->started = false;
}
}
static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
{
if (!virtio_has_feature(dev->protocol_features,

@@ -70,7 +70,7 @@ int vhost_vsock_common_start(VirtIODevice *vdev)
}
vvc->vhost_dev.acked_features = vdev->guest_features;
ret = vhost_dev_start(&vvc->vhost_dev, vdev);
ret = vhost_dev_start(&vvc->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost: %d", -ret);
goto err_guest_notifiers;
@@ -105,7 +105,7 @@ void vhost_vsock_common_stop(VirtIODevice *vdev)
return;
}
vhost_dev_stop(&vvc->vhost_dev, vdev);
vhost_dev_stop(&vvc->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false);
if (ret < 0) {

@@ -1777,15 +1777,36 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
return 0;
}
static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
{
if (!hdev->vhost_ops->vhost_set_vring_enable) {
return 0;
}
/*
* For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
* been negotiated, the rings start directly in the enabled state, and
* .vhost_set_vring_enable callback will fail since
* VHOST_USER_SET_VRING_ENABLE is not supported.
*/
if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER &&
!virtio_has_feature(hdev->backend_features,
VHOST_USER_F_PROTOCOL_FEATURES)) {
return 0;
}
return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable);
}
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i, r;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
trace_vhost_dev_start(hdev, vdev->name);
trace_vhost_dev_start(hdev, vdev->name, vrings);
vdev->vhost_started = true;
hdev->started = true;
@@ -1830,10 +1851,16 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
goto fail_log;
}
}
if (vrings) {
r = vhost_dev_set_vring_enable(hdev, true);
if (r) {
goto fail_log;
}
}
if (hdev->vhost_ops->vhost_dev_start) {
r = hdev->vhost_ops->vhost_dev_start(hdev, true);
if (r) {
goto fail_log;
goto fail_start;
}
}
if (vhost_dev_has_iommu(hdev) &&
@@ -1848,6 +1875,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
}
}
return 0;
fail_start:
if (vrings) {
vhost_dev_set_vring_enable(hdev, false);
}
fail_log:
vhost_log_put(hdev, false);
fail_vq:
@@ -1866,18 +1897,21 @@ fail_features:
}
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
trace_vhost_dev_stop(hdev, vdev->name);
trace_vhost_dev_stop(hdev, vdev->name, vrings);
if (hdev->vhost_ops->vhost_dev_start) {
hdev->vhost_ops->vhost_dev_start(hdev, false);
}
if (vrings) {
vhost_dev_set_vring_enable(hdev, false);
}
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_stop(hdev,
vdev,

@@ -28,7 +28,17 @@ struct VHostUserGPIO {
VhostUserState vhost_user;
VirtQueue *command_vq;
VirtQueue *interrupt_vq;
/**
* There are at least two steps of initialization of the
* vhost-user device. The first is a "connect" step and
* second is a "start" step. Make a separation between
* those initialization phases by using two fields.
*
* @connected: see vu_gpio_connect()/vu_gpio_disconnect()
* @started_vu: see vu_gpio_start()/vu_gpio_stop()
*/
bool connected;
bool started_vu;
/*< public >*/
};

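A condensed sketch of the lifecycle these two flags describe (illustrative only; the real transitions happen in vu_gpio_connect()/vu_gpio_disconnect() and vu_gpio_start()/vu_gpio_stop() shown earlier, and the stand-in struct below is not QEMU code):

#include <stdbool.h>

/* Stand-in state; the real fields live in struct VHostUserGPIO above. */
struct gpio_state_sketch {
    bool connected;   /* backend chardev link is up (connect/disconnect) */
    bool started_vu;  /* vhost_dev_start() has been run (start/stop) */
};

static void sketch_stop(struct gpio_state_sketch *s)
{
    /* stop may run more than once (vm_state_notify, object finalisation) */
    if (!s->started_vu) {
        return;
    }
    s->started_vu = false;
    /* ...then vhost_dev_stop(&gpio->vhost_dev, vdev, false) in the real code */
}
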
@@ -68,4 +68,22 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp);
*/
void vhost_user_cleanup(VhostUserState *user);
/**
* vhost_user_async_close() - cleanup vhost-user post connection drop
* @d: DeviceState for the associated device (passed to callback)
* @chardev: the CharBackend associated with the connection
* @vhost: the common vhost device
* @cb: the user callback function to complete the clean-up
*
* This function is used to handle the shutdown of a vhost-user
* connection to a backend. We handle this centrally to make sure we
* do all the steps and handle potential races due to VM shutdowns.
* Once the connection is disabled we schedule a bottom half to run
* the clean-up callback outside of the chardev event handler.
*/
typedef void (*vu_async_close_fn)(DeviceState *cb);
void vhost_user_async_close(DeviceState *d,
CharBackend *chardev, struct vhost_dev *vhost,
vu_async_close_fn cb);
#endif

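As a usage sketch, this is how a device wires the helper into its chardev event handler, mirroring the vhost-user-blk and vhost-user-gpio changes above. Everything named my_*/MyVhostUserDev/TYPE_MY_VHOST_USER_DEV is a hypothetical stand-in; only vhost_user_async_close() itself comes from this series:

#include "qemu/osdep.h"
#include "qom/object.h"
#include "chardev/char-fe.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"

#define TYPE_MY_VHOST_USER_DEV "my-vhost-user-dev"
OBJECT_DECLARE_SIMPLE_TYPE(MyVhostUserDev, MY_VHOST_USER_DEV)

struct MyVhostUserDev {
    VirtIODevice parent_obj;
    CharBackend chardev;
    struct vhost_dev vhost_dev;
};

/* Runs from the bottom half scheduled by vhost_user_async_close() */
static void my_dev_disconnect(DeviceState *dev)
{
    /* stop the device, vhost_dev_cleanup(), re-arm the chardev handlers */
}

static void my_dev_event(void *opaque, QEMUChrEvent event)
{
    DeviceState *dev = opaque;
    MyVhostUserDev *s = MY_VHOST_USER_DEV(dev);

    if (event == CHR_EVENT_CLOSED) {
        /* defer the close to a BH to avoid a circular close */
        vhost_user_async_close(dev, &s->chardev, &s->vhost_dev,
                               my_dev_disconnect);
    }
}
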
@@ -184,24 +184,26 @@ static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
* vhost_dev_start() - start the vhost device
* @hdev: common vhost_dev structure
* @vdev: the VirtIODevice structure
* @vrings: true to have vrings enabled in this call
*
* Starts the vhost device. From this point VirtIO feature negotiation
* can start and the device can start processing VirtIO transactions.
*
* Return: 0 on success, < 0 on error.
*/
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
/**
* vhost_dev_stop() - stop the vhost device
* @hdev: common vhost_dev structure
* @vdev: the VirtIODevice structure
* @vrings: true to have vrings disabled in this call
*
* Stop the vhost device. After the device is stopped the notifiers
* can be disabled (@vhost_dev_disable_notifiers) and the device can
* be torn down (@vhost_dev_cleanup).
*/
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
/**
* DOC: vhost device configuration handling

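To illustrate the updated API, a minimal self-contained sketch (not code from this series): vhost-user devices such as vhost-user-blk, vhost-user-fs and vhost-vsock now pass vrings=true so that vhost_dev_start()/vhost_dev_stop() enable and disable the rings for them, while vhost-net, the cryptodev vhost backend and vhost-user-gpio keep their previous behaviour and pass false. The my_* helpers and forward declarations below are stand-ins for the real prototypes in the vhost.h hunk above:

#include <stdbool.h>

/* Stand-in declarations so the sketch is self-contained. */
struct vhost_dev;
typedef struct VirtIODevice VirtIODevice;

int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);

/* Hypothetical helpers showing the call pattern used by the series. */
static int my_vhost_user_device_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    /* also have the vrings enabled as part of starting the device */
    return vhost_dev_start(hdev, vdev, true);
}

static void my_vhost_user_device_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    /* ...and disabled again on the stop path */
    vhost_dev_stop(hdev, vdev, true);
}
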
@@ -116,6 +116,13 @@ struct VirtIODevice
bool broken; /* device in invalid state, needs reset */
bool use_disabled_flag; /* allow use of 'disable' flag when needed */
bool disabled; /* device in temporarily disabled state */
/**
* @use_started: true if the @started flag should be used to check the
* current state of the VirtIO device. Otherwise status bits
* should be checked for a current status of the device.
* @use_started is only set via QMP and defaults to true for all
* modern machines (since 4.1).
*/
bool use_started;
bool started;
bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */
@@ -391,6 +398,16 @@ static inline bool virtio_is_big_endian(VirtIODevice *vdev)
return false;
}
/**
* virtio_device_started() - check if device started
* @vdev - the VirtIO device
* @status - the devices status bits
*
* Check if the device is started. For most modern machines this is
* tracked via the @vdev->started field (to support migration),
* otherwise we check for the final negotiated status bit that
* indicates everything is ready.
*/
static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
{
if (vdev->use_started) {
@@ -411,15 +428,11 @@ static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
*/
static inline bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status)
{
if (vdev->use_started) {
return vdev->started;
}
if (!vdev->vm_running) {
return false;
}
return status & VIRTIO_CONFIG_S_DRIVER_OK;
return virtio_device_started(vdev, status);
}
static inline void virtio_set_started(VirtIODevice *vdev, bool started)

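Put differently, the effective check after this change is sketched below (an illustrative restatement, not code from the patch): the VM run state is now consulted first for both the use_started and the legacy status-bit paths, whereas before, a device with use_started set reported "should start" from vdev->started alone, even while the VM was stopped:

#include "qemu/osdep.h"
#include "hw/virtio/virtio.h"

/* Sketch: equivalent to the new virtio_device_should_start() above. */
static inline bool my_should_start(VirtIODevice *vdev, uint8_t status)
{
    /* VM state takes precedence... */
    if (!vdev->vm_running) {
        return false;
    }
    /* ...then the started flag or the DRIVER_OK status bit decides */
    return virtio_device_started(vdev, status);
}
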
@@ -154,7 +154,8 @@ static void virtio_gpio_register_nodes(void)
QOSGraphEdgeOptions edge_opts = { };
/* vhost-user-gpio-device */
edge_opts.extra_device_opts = "id=gpio0,chardev=chr-vhost-user-test";
edge_opts.extra_device_opts = "id=gpio0,chardev=chr-vhost-user-test "
"-global virtio-mmio.force-legacy=false";
qos_node_create_driver("vhost-user-gpio-device",
virtio_gpio_device_create);
qos_node_consumes("vhost-user-gpio-device", "virtio-bus", &edge_opts);