@@ -157,16 +157,14 @@ static inline unsigned get_max_io_size(s
 	return sectors & (lbs - 1);
 }
 
-static unsigned get_max_segment_size(const struct request_queue *q,
-				     unsigned offset)
+static inline unsigned get_max_segment_size(const struct request_queue *q,
+					    struct page *start_page,
+					    unsigned long offset)
 {
 	unsigned long mask = queue_segment_boundary(q);
 
-	/* default segment boundary mask means no boundary limit */
-	if (mask == BLK_SEG_BOUNDARY_MASK)
-		return queue_max_segment_size(q);
-
-	return min_t(unsigned long, mask - (mask & offset) + 1,
+	offset = mask & (page_to_phys(start_page) + offset);
+	return min_t(unsigned long, mask - offset + 1,
 			queue_max_segment_size(q));
 }
 
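The hunk above is the core of the fix: the old clamp masked only the offset inside the bvec, which says nothing about where the data actually sits relative to the queue's segment boundary unless the page happens to be boundary aligned, so a multi-page bvec could be left crossing the mask. The new code folds the page's physical address into the calculation before clamping. A minimal userspace sketch of the before/after arithmetic follows; the mask value, the helpers and main() are illustrative stand-ins rather than kernel code, and the min() against queue_max_segment_size() is left out for brevity:

#include <stdio.h>

#define SEG_BOUNDARY_MASK	0xffffUL	/* hypothetical: segments must not cross 64K */

/* pre-patch clamp: only the in-bvec offset is considered */
static unsigned long max_seg_old(unsigned long offset)
{
	return SEG_BOUNDARY_MASK - (SEG_BOUNDARY_MASK & offset) + 1;
}

/* post-patch clamp: the page's physical address is folded in first */
static unsigned long max_seg_new(unsigned long page_phys, unsigned long offset)
{
	offset = SEG_BOUNDARY_MASK & (page_phys + offset);
	return SEG_BOUNDARY_MASK - offset + 1;
}

int main(void)
{
	unsigned long page_phys = 0x1f000;	/* page not aligned to the 64K boundary */
	unsigned long offset = 0;		/* bv_offset + total_len */

	/* old clamp allows 0x10000 bytes and would cross the boundary at 0x20000 */
	printf("old: %#lx\n", max_seg_old(offset));
	/* new clamp allows only 0x1000 bytes, stopping exactly at the boundary */
	printf("new: %#lx\n", max_seg_new(page_phys, offset));
	return 0;
}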
@@ -201,7 +199,8 @@ static bool bvec_split_segs(const struct
 	unsigned seg_size = 0;
 
 	while (len && *nsegs < max_segs) {
-		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+		seg_size = get_max_segment_size(q, bv->bv_page,
+						bv->bv_offset + total_len);
 		seg_size = min(seg_size, len);
 
 		(*nsegs)++;
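The caller keeps the start page fixed and only grows the offset, so each loop iteration re-clamps against the boundary at the current physical position, and a physically contiguous multi-page bvec gets chopped into segments that each respect the mask. A self-contained sketch of that splitting loop is below; the names, the 64K mask and the segment-size cap are illustrative, and the max_segs/max_sectors limits of the real bvec_split_segs() are omitted:

#include <stdio.h>

#define SEG_BOUNDARY_MASK	0xffffUL	/* hypothetical: segments must not cross 64K */
#define MAX_SEGMENT_SIZE	0x10000UL	/* hypothetical 64K per-segment cap */

/* same arithmetic as the patched get_max_segment_size() */
static unsigned long max_seg_len(unsigned long page_phys, unsigned long offset)
{
	unsigned long boundary_off = SEG_BOUNDARY_MASK & (page_phys + offset);
	unsigned long len = SEG_BOUNDARY_MASK - boundary_off + 1;

	return len < MAX_SEGMENT_SIZE ? len : MAX_SEGMENT_SIZE;
}

/* split one physically contiguous bvec into boundary-respecting segments */
static unsigned split_segs(unsigned long page_phys, unsigned long bv_offset,
			   unsigned long bv_len)
{
	unsigned nsegs = 0;
	unsigned long total = 0, len = bv_len;

	while (len) {
		unsigned long seg = max_seg_len(page_phys, bv_offset + total);

		if (seg > len)
			seg = len;
		total += seg;
		len -= seg;
		nsegs++;
	}
	return nsegs;
}

int main(void)
{
	/* 128K bvec whose first page sits 4K below a 64K boundary: 4K + 64K + 60K */
	printf("%u segments\n", split_segs(0x1f000, 0, 0x20000));
	return 0;
}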
@@ -404,7 +403,8 @@ static unsigned blk_bvec_map_sg(struct r
 
 	while (nbytes > 0) {
 		unsigned offset = bvec->bv_offset + total;
-		unsigned len = min(get_max_segment_size(q, offset), nbytes);
+		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
+					offset), nbytes);
 		struct page *page = bvec->bv_page;
 
 		/*