@@ -353,7 +353,7 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 		return NULL;
 
 	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
-	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
+	if (data_dir == DD_READ || !rq->q->limits.use_zone_write_lock)
 		return rq;
 
 	/*
@@ -398,7 +398,7 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 	if (!rq)
 		return NULL;
 
-	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
+	if (data_dir == DD_READ || !rq->q->limits.use_zone_write_lock)
 		return rq;
 
 	/*
@@ -526,8 +526,9 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 	}
 
 	/*
-	 * For a zoned block device, if we only have writes queued and none of
-	 * them can be dispatched, rq will be NULL.
+	 * For a zoned block device that requires write serialization, if we
+	 * only have writes queued and none of them can be dispatched, rq will
+	 * be NULL.
 	 */
 	if (!rq)
 		return NULL;
@@ -934,7 +935,7 @@ static void dd_finish_request(struct request *rq)
 
 	atomic_inc(&per_prio->stats.completed);
 
-	if (blk_queue_is_zoned(q)) {
+	if (rq->q->limits.use_zone_write_lock) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&dd->zone_lock, flags);
Measurements have shown that limiting the queue depth to one per zone for
zoned writes has a significant negative performance impact on zoned UFS
devices. Hence this patch, which disables zone locking by the mq-deadline
scheduler if the storage controller preserves the command order. This patch
is based on the following assumptions:
- It happens infrequently that zoned write requests are reordered by the
  block layer.
- The I/O priority of all write requests is the same per zone.
- Either no I/O scheduler is used or an I/O scheduler is used that
  serializes write requests per zone.

Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/mq-deadline.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
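For context, a minimal driver-side sketch of how the use_zone_write_lock
limit tested in the hunks above might be configured. The helper name and
the preserves_write_order argument are illustrative and not part of this
patch; the sketch assumes an earlier patch in this series adds the
use_zone_write_lock member to struct queue_limits:

/*
 * Illustrative only: a driver whose controller preserves the submission
 * order of zoned write commands can opt out of zone write locking by
 * clearing the queue limit that mq-deadline now tests. The helper name
 * and the 'preserves_write_order' argument are hypothetical.
 */
static void example_set_zone_write_lock(struct request_queue *q,
					bool preserves_write_order)
{
	/*
	 * With use_zone_write_lock set to false, deadline_fifo_request(),
	 * deadline_next_request() and dd_finish_request() skip the
	 * per-zone write locking paths shown above.
	 */
	q->limits.use_zone_write_lock = !preserves_write_order;
}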