@@ -1334,8 +1334,10 @@ static int vhost_net_open(struct inode *inode, struct file *f)
VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
NULL);
- vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
- vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
+ vqs[VHOST_NET_VQ_TX]);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
+ vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
n->page_frag.page = NULL;
@@ -187,13 +187,15 @@ EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- __poll_t mask, struct vhost_dev *dev)
+ __poll_t mask, struct vhost_dev *dev,
+ struct vhost_virtqueue *vq)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
poll->wqh = NULL;
+ poll->vq = vq;
vhost_work_init(&poll->work, fn);
}
@@ -298,9 +300,15 @@ bool vhost_has_work(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_has_work);
+void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
+{
+ vhost_work_queue_on(work, vq->worker);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+
void vhost_poll_queue(struct vhost_poll *poll)
{
- vhost_work_queue(poll->dev, &poll->work);
+ vhost_vq_work_queue(poll->vq, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
@@ -359,6 +367,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
+ vq->worker = NULL;
vhost_vring_call_reset(&vq->call_ctx);
__vhost_vq_meta_reset(vq);
}
@@ -527,7 +536,7 @@ void vhost_dev_init(struct vhost_dev *dev,
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
vhost_poll_init(&vq->poll, vq->handle_kick,
- EPOLLIN, dev);
+ EPOLLIN, dev, vq);
}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
@@ -662,6 +671,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
static int vhost_worker_try_create_def(struct vhost_dev *dev)
{
struct vhost_worker *worker;
+ int i;
if (!dev->use_worker || dev->workers)
return 0;
@@ -674,6 +684,9 @@ static int vhost_worker_try_create_def(struct vhost_dev *dev)
if (!worker)
goto free_workers;
+ for (i = 0; i < dev->nvqs; i++)
+ dev->vqs[i]->worker = worker;
+
dev->workers[worker->id] = worker;
dev->num_workers++;
return 0;
@@ -41,14 +41,17 @@ struct vhost_poll {
struct vhost_work work;
__poll_t mask;
struct vhost_dev *dev;
+ struct vhost_virtqueue *vq;
};
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
+void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- __poll_t mask, struct vhost_dev *dev);
+ __poll_t mask, struct vhost_dev *dev,
+ struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
@@ -76,6 +79,7 @@ struct vhost_vring_call {
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
+ struct vhost_worker *worker;
/* The actual ring of buffers. */
struct mutex mutex;
This allows vhost_polls to use the worker the vq the poll is associated with. This also exports a helper vhost_vq_work_queue which is used here internally, and will be used in the vhost-scsi patches. Signed-off-by: Mike Christie <michael.christie@oracle.com> --- drivers/vhost/net.c | 6 ++++-- drivers/vhost/vhost.c | 19 ++++++++++++++++--- drivers/vhost/vhost.h | 6 +++++- 3 files changed, 25 insertions(+), 6 deletions(-)