@@ -475,6 +475,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	if (!(data->rq_flags & RQF_ELV))
 		blk_mq_tag_busy(data->hctx);
 
+	if (data->flags & BLK_MQ_REQ_RESERVED)
+		data->rq_flags |= RQF_RESV;
+
 	/*
 	 * Try batched alloc if we want more than 1 tag.
 	 */
@@ -589,6 +592,9 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	else
 		data.rq_flags |= RQF_ELV;
 
+	if (flags & BLK_MQ_REQ_RESERVED)
+		data.rq_flags |= RQF_RESV;
+
 	ret = -EWOULDBLOCK;
 	tag = blk_mq_get_tag(&data);
 	if (tag == BLK_MQ_NO_TAG)
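
Both allocation paths only mark a request with RQF_RESV when the caller passed BLK_MQ_REQ_RESERVED. As a minimal sketch (not part of the patch; the foo_* name is hypothetical), a driver-side caller that would exercise the new branch by allocating from the reserved tag pool could look like this:

#include <linux/blk-mq.h>

/*
 * Hypothetical driver helper: allocate a passthrough request from the
 * reserved tag pool.  With the hunks above applied,
 * __blk_mq_alloc_requests() sees BLK_MQ_REQ_RESERVED in data->flags and
 * sets RQF_RESV on the returned request.
 */
static struct request *foo_alloc_reserved_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return rq;

	/* rq->rq_flags now carries RQF_RESV for the lifetime of the request */
	return rq;
}
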
@@ -57,6 +57,7 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
 /* queue has elevator attached */
 #define RQF_ELV			((__force req_flags_t)(1 << 22))
+#define RQF_RESV			((__force req_flags_t)(1 << 23))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \
@@ -825,6 +826,11 @@ static inline bool blk_mq_need_time_stamp(struct request *rq)
 	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
 }
 
+static inline bool blk_mq_is_reserved_rq(struct request *rq)
+{
+	return rq->rq_flags & RQF_RESV;
+}
+
 /*
  * Batched completions only work when there is no I/O error and no special
  * ->end_io handler.
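
With the helper in place, a driver can test whether a request came from the reserved pool directly from the struct request instead of open-coding a tag-range comparison. A small sketch (hypothetical driver code, not from the patch), e.g. in a cleanup path:

#include <linux/blk-mq.h>

/*
 * Hypothetical cleanup path: requests flagged RQF_RESV are reserved
 * (internal) commands and are left to the driver's own handling; normal
 * requests are failed.  blk_mq_is_reserved_rq() replaces open-coded
 * tag-range checks in driver code.
 */
static void foo_fail_rq(struct request *rq)
{
	if (blk_mq_is_reserved_rq(rq)) {
		/* reserved command, handled elsewhere by the driver */
		return;
	}

	blk_mq_end_request(rq, BLK_STS_IOERR);
}
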