--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -97,26 +97,20 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
 		return;
 
 	irq = ops->get_vq_irq(vdpa, qid);
-	spin_lock(&vq->call_ctx.ctx_lock);
 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
-	if (!vq->call_ctx.ctx || irq < 0) {
-		spin_unlock(&vq->call_ctx.ctx_lock);
+	if (!vq->call_ctx.ctx || irq < 0)
 		return;
-	}
 
 	vq->call_ctx.producer.token = vq->call_ctx.ctx;
 	vq->call_ctx.producer.irq = irq;
 	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
-	spin_unlock(&vq->call_ctx.ctx_lock);
 }
 
 static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
 {
 	struct vhost_virtqueue *vq = &v->vqs[qid];
 
-	spin_lock(&vq->call_ctx.ctx_lock);
 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
-	spin_unlock(&vq->call_ctx.ctx_lock);
 }
 
 static void vhost_vdpa_reset(struct vhost_vdpa *v)
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -302,7 +302,6 @@ static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
 {
 	call_ctx->ctx = NULL;
 	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
-	spin_lock_init(&call_ctx->ctx_lock);
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -1637,9 +1636,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 			break;
 		}
 
-		spin_lock(&vq->call_ctx.ctx_lock);
 		swap(ctx, vq->call_ctx.ctx);
-		spin_unlock(&vq->call_ctx.ctx_lock);
 		break;
 	case VHOST_SET_VRING_ERR:
 		if (copy_from_user(&f, argp, sizeof f)) {
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -64,7 +64,6 @@ enum vhost_uaddr_type {
 struct vhost_vring_call {
 	struct eventfd_ctx *ctx;
 	struct irq_bypass_producer producer;
-	spinlock_t ctx_lock;
 };
 
 /* The virtqueue structure describes a queue attached to a device. */
This commit removes the unnecessary spin_lock in vhost_vring_call and the related operations: the irq offloading fields are only manipulated in the vhost_vdpa ioctl code path, which is already serialized by the dev mutex and the vq mutex.

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
 drivers/vhost/vdpa.c  | 8 +-------
 drivers/vhost/vhost.c | 3 ---
 drivers/vhost/vhost.h | 1 -
 3 files changed, 1 insertion(+), 11 deletions(-)
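
For readers less familiar with the vhost locking model, the standalone userspace C sketch below illustrates the serialization argument in isolation. It uses pthread mutexes and hypothetical device/virtqueue types as stand-ins for the kernel's struct vhost_dev and struct vhost_virtqueue; none of the names are kernel API, this is only a minimal model of the pattern the patch relies on: every writer of the call context already runs with both the device and virtqueue mutexes held, so an inner per-field spinlock adds no protection.

#include <pthread.h>
#include <stdio.h>

struct vring_call {
	int *ctx;               /* stand-in for struct eventfd_ctx *ctx */
	int producer_token;     /* stand-in for the irq bypass producer token */
};

struct virtqueue {
	pthread_mutex_t mutex;  /* plays the role of vq->mutex */
	struct vring_call call_ctx;
};

struct device {
	pthread_mutex_t mutex;  /* plays the role of dev->mutex */
	struct virtqueue vq;
};

/*
 * Mirrors the shape of the ioctl path: the device mutex is taken by the
 * top-level ioctl handler, the vq mutex inside the vring ioctl, and only
 * then is call_ctx touched.  With both mutexes held, no second writer can
 * reach this point, so a nested spinlock around the update is redundant.
 */
static void set_vring_call(struct device *d, int *new_ctx)
{
	pthread_mutex_lock(&d->mutex);     /* dev->mutex */
	pthread_mutex_lock(&d->vq.mutex);  /* vq->mutex */

	d->vq.call_ctx.ctx = new_ctx;      /* the swap(ctx, ...) step */
	d->vq.call_ctx.producer_token = new_ctx ? *new_ctx : -1;

	pthread_mutex_unlock(&d->vq.mutex);
	pthread_mutex_unlock(&d->mutex);
}

int main(void)
{
	struct device d = {
		.mutex = PTHREAD_MUTEX_INITIALIZER,
		.vq = { .mutex = PTHREAD_MUTEX_INITIALIZER },
	};
	int eventfd_like = 42;

	set_vring_call(&d, &eventfd_like);
	printf("producer token: %d\n", d.vq.call_ctx.producer_token);
	return 0;
}

In the real code the same nesting holds: the vhost_vdpa ioctl handler takes the dev mutex before vhost_vring_ioctl() takes the vq mutex, which is what makes the swap(ctx, vq->call_ctx.ctx) in VHOST_SET_VRING_CALL safe without ctx_lock.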