@@ -68,4 +68,22 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp);
*/
void vhost_user_cleanup(VhostUserState *user);
+/**
+ * vhost_user_async_close() - clean up vhost-user after a connection drop
+ * @d: DeviceState for the associated device (passed to callback)
+ * @chardev: the CharBackend associated with the connection
+ * @vhost: the common vhost device
+ * @cb: the user callback function to complete the clean-up
+ *
+ * This function is used to handle the shutdown of a vhost-user
+ * connection to a backend. We handle this centrally to make sure we
+ * do all the steps and handle potential races due to VM shutdowns.
+ * Once the connection is disabled we schedule a bottom half which
+ * runs the @cb callback to complete the clean-up.
+ */
+typedef void (*vu_async_close_fn)(DeviceState *d);
+
+void vhost_user_async_close(DeviceState *d,
+ CharBackend *chardev, struct vhost_dev *vhost,
+ vu_async_close_fn cb);
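+
+/*
+ * Illustrative usage sketch: a vhost-user device defers its
+ * disconnect handling from its chardev event callback. "s" and
+ * "my_dev_disconnect" are placeholder names for the device's own
+ * state and its vu_async_close_fn.
+ *
+ *   case CHR_EVENT_CLOSED:
+ *       vhost_user_async_close(dev, &s->chardev, &s->vhost_dev,
+ *                              my_dev_disconnect);
+ *       break;
+ */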
+
#endif
@@ -369,17 +369,10 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
vhost_user_blk_stop(vdev);
vhost_dev_cleanup(&s->dev);
-}
-static void vhost_user_blk_chr_closed_bh(void *opaque)
-{
- DeviceState *dev = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserBlk *s = VHOST_USER_BLK(vdev);
-
- vhost_user_blk_disconnect(dev);
+ /* Re-instate the event handler for new connections */
qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
- NULL, opaque, NULL, true);
+ NULL, dev, NULL, true);
}
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
@@ -398,33 +391,9 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
- if (!runstate_check(RUN_STATE_SHUTDOWN)) {
- /*
- * A close event may happen during a read/write, but vhost
- * code assumes the vhost_dev remains setup, so delay the
- * stop & clear.
- */
- AioContext *ctx = qemu_get_current_aio_context();
-
- qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
- NULL, NULL, false);
- aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);
-
- /*
- * Move vhost device to the stopped state. The vhost-user device
- * will be clean up and disconnected in BH. This can be useful in
- * the vhost migration code. If disconnect was caught there is an
- * option for the general vhost code to get the dev state without
- * knowing its type (in this case vhost-user).
- *
- * FIXME: this is sketchy to be reaching into vhost_dev
- * now because we are forcing something that implies we
- * have executed vhost_dev_stop() but that won't happen
- * until vhost_user_blk_stop() gets called from the bh.
- * Really this state check should be tracked locally.
- */
- s->dev.started = false;
- }
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &s->chardev, &s->dev,
+ vhost_user_blk_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
@@ -233,6 +233,8 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp)
return 0;
}
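+/*
+ * Forward declaration: vu_gpio_disconnect() below re-registers
+ * vu_gpio_event() as the chardev event handler, and vu_gpio_event()
+ * is defined further down.
+ */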
+static void vu_gpio_event(void *opaque, QEMUChrEvent event);
+
static void vu_gpio_disconnect(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -245,6 +247,11 @@ static void vu_gpio_disconnect(DeviceState *dev)
vu_gpio_stop(vdev);
vhost_dev_cleanup(&gpio->vhost_dev);
+
+ /* Re-instate the event handler for new connections */
+ qemu_chr_fe_set_handlers(&gpio->chardev,
+ NULL, NULL, vu_gpio_event,
+ NULL, dev, NULL, true);
}
static void vu_gpio_event(void *opaque, QEMUChrEvent event)
@@ -262,7 +269,9 @@ static void vu_gpio_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
- vu_gpio_disconnect(dev);
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev,
+ vu_gpio_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
@@ -21,6 +21,7 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
+#include "sysemu/runstate.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
@@ -2670,6 +2671,76 @@ void vhost_user_cleanup(VhostUserState *user)
user->chr = NULL;
}
+
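+/*
+ * Book-keeping handed from the chardev CLOSED event to the deferred
+ * clean-up bottom half.
+ */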
+typedef struct {
+ vu_async_close_fn cb;
+ DeviceState *dev;
+ CharBackend *cd;
+ struct vhost_dev *vhost;
+} VhostAsyncCallback;
+
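+/*
+ * Bottom half scheduled by vhost_user_async_close(): run the
+ * device's clean-up callback now that we are outside the chardev
+ * event handler, unless the vhost_dev has already been cleaned up
+ * by some other path.
+ */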
+static void vhost_user_async_close_bh(void *opaque)
+{
+ VhostAsyncCallback *data = opaque;
+ struct vhost_dev *vhost = data->vhost;
+
+ /*
+ * If the vhost_dev has been cleared in the meantime there is
+ * nothing left to do as some other path has completed the
+ * cleanup.
+ */
+ if (vhost->vdev) {
+ data->cb(data->dev);
+ }
+
+ g_free(data);
+}
+
+/*
+ * We only schedule the clean-up work if the machine is not shutting
+ * down. If it is, we want to keep all the in-flight data as is for
+ * migration purposes.
+ */
+void vhost_user_async_close(DeviceState *d,
+ CharBackend *chardev, struct vhost_dev *vhost,
+ vu_async_close_fn cb)
+{
+ if (!runstate_check(RUN_STATE_SHUTDOWN)) {
+ /*
+ * A close event may happen during a read/write, but the vhost
+ * code assumes the vhost_dev remains set up, so delay the
+ * stop & clear.
+ */
+ AioContext *ctx = qemu_get_current_aio_context();
+ VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1);
+
+ /* Save data for the callback */
+ data->cb = cb;
+ data->dev = d;
+ data->cd = chardev;
+ data->vhost = vhost;
+
+ /* Disable any further notifications on the chardev */
+ qemu_chr_fe_set_handlers(chardev,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ false);
+
+ aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data);
+
+ /*
+ * Move the vhost device to the stopped state. The vhost-user
+ * device will be cleaned up and disconnected in the BH. This can
+ * be useful in the vhost migration code: if a disconnect was
+ * caught there, the generic vhost code can query the device state
+ * without knowing its type (in this case vhost-user).
+ *
+ * Note that if the vhost device is fully cleared by the time we
+ * execute the bottom half, we won't continue with the cleanup.
+ */
+ vhost->started = false;
+ }
+}
+
static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
{
if (!virtio_has_feature(dev->protocol_features,