@@ -1665,6 +1665,7 @@ enum ImgConvertBlockStatus {
typedef struct ImgConvertState {
BlockBackend **src;
int64_t *src_sectors;
+ int *src_alignment;
int src_num;
int64_t total_sectors;
int64_t allocated_sectors;
@@ -1731,6 +1732,7 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
if (s->sector_next_status <= sector_num) {
uint64_t offset = (sector_num - src_cur_offset) * BDRV_SECTOR_SIZE;
int64_t count;
+ int tail;
do {
count = n * BDRV_SECTOR_SIZE;
@@ -1769,6 +1771,16 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
n = DIV_ROUND_UP(count, BDRV_SECTOR_SIZE);
+ /*
+ * Keep s->sector_next_status aligned to the source request alignment
+ * and/or cluster size, so that subsequent reads do not have to issue
+ * extra unaligned read cycles.
+ */
+ tail = (sector_num - src_cur_offset + n) % s->src_alignment[src_cur];
+ if (n > tail) {
+ n -= tail;
+ }
+
if (ret & BDRV_BLOCK_ZERO) {
s->status = post_backing_zero ? BLK_BACKING_FILE : BLK_ZERO;
} else if (ret & BDRV_BLOCK_DATA) {
@@ -2407,8 +2419,10 @@ static int img_convert(int argc, char **argv)
s.src = g_new0(BlockBackend *, s.src_num);
s.src_sectors = g_new(int64_t, s.src_num);
+ s.src_alignment = g_new(int, s.src_num);
for (bs_i = 0; bs_i < s.src_num; bs_i++) {
+ BlockDriverState *src_bs;
s.src[bs_i] = img_open(image_opts, argv[optind + bs_i],
fmt, src_flags, src_writethrough, s.quiet,
force_share);
@@ -2423,6 +2437,13 @@ static int img_convert(int argc, char **argv)
ret = -1;
goto out;
}
+ src_bs = blk_bs(s.src[bs_i]);
+ s.src_alignment[bs_i] = DIV_ROUND_UP(src_bs->bl.request_alignment,
+ BDRV_SECTOR_SIZE);
+ if (!bdrv_get_info(src_bs, &bdi)) {
+ s.src_alignment[bs_i] = MAX(s.src_alignment[bs_i],
+ bdi.cluster_size / BDRV_SECTOR_SIZE);
+ }
s.total_sectors += s.src_sectors[bs_i];
}
@@ -2704,6 +2725,7 @@ out:
g_free(s.src);
}
g_free(s.src_sectors);
+ g_free(s.src_alignment);
fail_getopt:
g_free(options);
In case of large contiguous areas that share the same allocation status, it can happen that the value of s->sector_next_status becomes unaligned to the cluster size or even the request alignment of the source. Avoid this by aligning the s->sector_next_status position down to cluster boundaries. Signed-off-by: Peter Lieven <pl@kamp.de> --- qemu-img.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+)