--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -56,6 +56,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
+ lim->use_zone_write_lock = true;
lim->zoned = BLK_ZONED_NONE;
lim->zone_write_granularity = 0;
lim->dma_alignment = 511;
@@ -685,6 +686,11 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_secure_erase_sectors);
t->zone_write_granularity = max(t->zone_write_granularity,
b->zone_write_granularity);
+ /*
+ * Whether or not the zone write lock should be used depends on the
+ * bottom driver only.
+ */
+ t->use_zone_write_lock = b->use_zone_write_lock;
t->zoned = max(t->zoned, b->zoned);
return ret;
}
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -316,6 +316,7 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
+ bool use_zone_write_lock;
enum blk_zoned_model zoned;
/*
Writes in sequential write required zones must happen at the write pointer.
Even if the submitter of the write commands (e.g. a filesystem) submits
writes for sequential write required zones in order, the block layer or the
storage controller may reorder these write commands. The zone locking
mechanism in the mq-deadline I/O scheduler serializes write commands for
sequential zones. Some but not all storage controllers require this
serialization. Introduce a new request queue limit member variable to allow
block drivers to indicate that they preserve the order of write commands
and thus do not require serialization of writes per zone.

Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-settings.c   | 6 ++++++
 include/linux/blkdev.h | 1 +
 2 files changed, 7 insertions(+)
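
Illustrative note (not part of the patch): a block driver whose controller
preserves the order of write commands could clear the new limit when it
configures its zoned queue limits. The driver name and helper below are
hypothetical and only sketch the intended use under that assumption;
blk_set_default_limits() keeps the flag set to true for drivers that still
rely on zone write locking.

#include <linux/blkdev.h>

/*
 * Hypothetical driver hook: the controller guarantees that write commands
 * reach the medium in submission order, so per-zone write serialization in
 * the I/O scheduler is not needed.
 */
static void exampledrv_set_zoned_limits(struct request_queue *q)
{
	q->limits.zoned = BLK_ZONED_HM;		/* host-managed zoned device */
	q->limits.use_zone_write_lock = false;	/* hardware preserves write order */
}

Stacked drivers need no change of their own: blk_stack_limits() copies the
value from the bottom device, as the blk-settings.c hunk above shows.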