@@ -693,6 +693,15 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
+		/* No workers should run here by design. However, races have
+		 * previously occurred where drivers have been unable to flush
+		 * all work properly prior to clean-up. Without a successful
+		 * flush the guest will malfunction, but avoiding host memory
+		 * corruption in those cases does seem preferable.
+		 */
+		WARN_ON(mutex_is_locked(&dev->vqs[i]->mutex));
+
+		mutex_lock(&dev->vqs[i]->mutex);
 		if (dev->vqs[i]->error_ctx)
 			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 		if (dev->vqs[i]->kick)
@@ -700,6 +709,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 		if (dev->vqs[i]->call_ctx.ctx)
 			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
 		vhost_vq_reset(dev, dev->vqs[i]);
+		mutex_unlock(&dev->vqs[i]->mutex);
 	}
 	vhost_dev_free_iovecs(dev);
 	if (dev->log_ctx)
vhost_vsock_handle_tx_kick() already holds the mutex during its call
to vhost_get_vq_desc(). All we have to do here is take the same lock
during virtqueue clean-up and we mitigate the reported issues.

Also WARN() as a precautionary measure. The purpose of this is to
capture possible future race conditions which may pop up over time.

Link: https://syzkaller.appspot.com/bug?extid=279432d30d825e63ba00
Cc: <stable@vger.kernel.org>
Reported-by: syzbot+adc3cb32385586bec859@syzkaller.appspotmail.com
Signed-off-by: Lee Jones <lee.jones@linaro.org>
---
 drivers/vhost/vhost.c | 10 ++++++++++
 1 file changed, 10 insertions(+)
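For readers outside the kernel tree, here is a minimal userspace sketch
(pthreads; the names fake_vq, handler and cleanup are invented for
illustration, not taken from the patch) of the pattern applied above:
the clean-up path takes the very mutex the request handler holds, so
the reset cannot interleave with an in-flight handler, and a trylock
stands in for the precautionary WARN_ON(mutex_is_locked()).

	/* Illustrative analogue only; not kernel code. */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct fake_vq {
		pthread_mutex_t mutex;	/* plays the role of vq->mutex */
		int *state;		/* resource torn down at clean-up */
	};

	/* Worker path: like vhost_vsock_handle_tx_kick(), it holds the
	 * queue mutex while touching queue state. */
	static void *handler(void *arg)
	{
		struct fake_vq *vq = arg;

		pthread_mutex_lock(&vq->mutex);
		if (vq->state)		/* safe: clean-up cannot race us */
			(*vq->state)++;
		pthread_mutex_unlock(&vq->mutex);
		return NULL;
	}

	/* Clean-up path: mirrors the patched vhost_dev_cleanup(). Warn
	 * if a handler is still inside its critical section (it should
	 * have been flushed by now), then take the lock so the teardown
	 * cannot interleave with an in-flight handler. */
	static void cleanup(struct fake_vq *vq)
	{
		if (pthread_mutex_trylock(&vq->mutex) != 0)
			fprintf(stderr, "warning: queue still in use\n");
		else
			pthread_mutex_unlock(&vq->mutex);

		pthread_mutex_lock(&vq->mutex);
		free(vq->state);	/* the "vhost_vq_reset()" step */
		vq->state = NULL;
		pthread_mutex_unlock(&vq->mutex);
	}

	int main(void)
	{
		struct fake_vq vq = { .mutex = PTHREAD_MUTEX_INITIALIZER,
				      .state = calloc(1, sizeof(int)) };
		pthread_t t;

		pthread_create(&t, NULL, handler, &vq);
		cleanup(&vq);	/* serialized against handler by vq.mutex */
		pthread_join(&t, NULL);
		return 0;
	}

As in the patch, the trylock check is only a best-effort diagnostic:
it may miss a handler that is just about to take the lock, which is
why the clean-up still takes the mutex unconditionally afterwards.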