@@ -254,7 +254,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, nbd->index);
mutex_unlock(&nbd_index_mutex);
-
+ destroy_workqueue(nbd->recv_workq);
kfree(nbd);
}
@@ -1260,10 +1260,6 @@ static void nbd_config_put(struct nbd_device *nbd)
kfree(nbd->config);
nbd->config = NULL;
- if (nbd->recv_workq)
- destroy_workqueue(nbd->recv_workq);
- nbd->recv_workq = NULL;
-
nbd->tag_set.timeout = 0;
nbd->disk->queue->limits.discard_granularity = 0;
nbd->disk->queue->limits.discard_alignment = 0;
@@ -1292,14 +1288,6 @@ static int nbd_start_device(struct nbd_device *nbd)
return -EINVAL;
}
- nbd->recv_workq = alloc_workqueue("knbd%d-recv",
- WQ_MEM_RECLAIM | WQ_HIGHPRI |
- WQ_UNBOUND, 0, nbd->index);
- if (!nbd->recv_workq) {
- dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
- return -ENOMEM;
- }
-
blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
nbd->pid = task_pid_nr(current);
@@ -1725,6 +1713,15 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
}
nbd->disk = disk;
+ nbd->recv_workq = alloc_workqueue("nbd%d-recv",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI |
+ WQ_UNBOUND, 0, nbd->index);
+ if (!nbd->recv_workq) {
+ dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
+ err = -ENOMEM;
+ goto out_err_disk;
+ }
+
/*
* Tell the block layer that we are not a rotational device
*/
@@ -1755,7 +1752,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
disk->first_minor = index << part_shift;
if (disk->first_minor < index || disk->first_minor > MINORMASK) {
err = -EINVAL;
- goto out_err_disk;
+ goto out_free_work;
}
disk->minors = 1 << part_shift;
@@ -1764,7 +1761,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
sprintf(disk->disk_name, "nbd%d", index);
err = add_disk(disk);
if (err)
- goto out_err_disk;
+ goto out_free_work;
/*
* Now publish the device.
@@ -1773,6 +1770,8 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd_total_devices++;
return nbd;
+out_free_work:
+ destroy_workqueue(nbd->recv_workq);
out_err_disk:
blk_cleanup_disk(disk);
out_free_idr:
@@ -2028,13 +2027,10 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
nbd_disconnect(nbd);
sock_shutdown(nbd);
/*
- * Make sure recv thread has finished, so it does not drop the last
- * config ref and try to destroy the workqueue from inside the work
- * queue. And this also ensure that we can safely call nbd_clear_que()
+ * Make sure recv thread has finished, so we can safely call nbd_clear_que()
* to cancel the inflight I/Os.
*/
- if (nbd->recv_workq)
- flush_workqueue(nbd->recv_workq);
+ flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
mutex_unlock(&nbd->config_lock);