@@ -523,6 +523,20 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
return ERR_PTR(-ENOMEM);
}
+#ifdef CONFIG_BLK_SUB_PAGE_SEGMENTS
+/* Number of DMA segments required to transfer @bytes data. */
+unsigned int blk_segments(const struct queue_limits *limits, unsigned int bytes)
+{
+ const unsigned int mss = limits->max_segment_size;
+
+ if (bytes <= mss)
+ return 1;
+ if (is_power_of_2(mss))
+ return round_up(bytes, mss) >> ilog2(mss);
+ return (bytes + mss - 1) / mss;
+}
+#endif
+
/*
* Append a bio to a passthrough request. Only works if the bio can be merged
* into the request based on the driver constraints.
@@ -534,7 +548,7 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
unsigned int nr_segs = 0;
bio_for_each_bvec(bv, bio, iter)
- nr_segs++;
+ nr_segs += blk_segments(&rq->q->limits, bv.bv_len);
if (!rq->bio) {
blk_rq_bio_prep(rq, bio, nr_segs);
@@ -76,6 +76,17 @@ struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);
+#ifdef CONFIG_BLK_SUB_PAGE_SEGMENTS
+unsigned int blk_segments(const struct queue_limits *limits,
+ unsigned int bytes);
+#else
+static inline unsigned int blk_segments(const struct queue_limits *limits,
+ unsigned int bytes)
+{
+ return 1;
+}
+#endif
+
static inline bool biovec_phys_mergeable(struct request_queue *q,
struct bio_vec *vec1, struct bio_vec *vec2)
{
If the segment size is smaller than the page size there may be multiple
segments per bvec even if a bvec only contains a single page. Hence this
patch.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Keith Busch <kbusch@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-map.c | 16 +++++++++++++++-
 block/blk.h     | 11 +++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)