diff --git a/include/block/block_int.h b/include/block/block_int.h
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -1039,7 +1039,8 @@ extern unsigned int bdrv_drain_all_count;
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
-bool coroutine_fn bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align);
+bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
+ uint64_t align);
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
int get_tmp_filename(char *filename, int size);
diff --git a/block/file-posix.c b/block/file-posix.c
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -2957,7 +2957,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
req->bytes = end - req->offset;
req->overlap_bytes = req->bytes;
- bdrv_mark_request_serialising(req, bs->bl.request_alignment);
+ bdrv_make_request_serialising(req, bs->bl.request_alignment);
}
#endif
diff --git a/block/io.c b/block/io.c
--- a/block/io.c
+++ b/block/io.c
@@ -778,15 +778,14 @@ bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
return waited;
}
-bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
+/* Called with req->bs->reqs_lock held */
+static void tracked_request_set_serialising(BdrvTrackedRequest *req,
+ uint64_t align)
{
- BlockDriverState *bs = req->bs;
int64_t overlap_offset = req->offset & ~(align - 1);
     uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                              - overlap_offset;
- bool waited;
- qemu_co_mutex_lock(&bs->reqs_lock);
if (!req->serialising) {
atomic_inc(&req->bs->serialising_in_flight);
req->serialising = true;
@@ -794,9 +793,6 @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
- waited = bdrv_wait_serialising_requests_locked(req);
- qemu_co_mutex_unlock(&bs->reqs_lock);
- return waited;
}
/**
@@ -882,6 +878,21 @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self
return waited;
}
+bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
+ uint64_t align)
+{
+ bool waited;
+
+ qemu_co_mutex_lock(&req->bs->reqs_lock);
+
+ tracked_request_set_serialising(req, align);
+ waited = bdrv_wait_serialising_requests_locked(req);
+
+ qemu_co_mutex_unlock(&req->bs->reqs_lock);
+
+ return waited;
+}
+
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
size_t size)
{
@@ -1492,7 +1503,7 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
* with each other for the same cluster. For example, in copy-on-read
* it ensures that the CoR read and write operations are atomic and
* guest writes cannot interleave between them. */
- bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
+ bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
} else {
bdrv_wait_serialising_requests(req);
}
@@ -1903,7 +1914,7 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
assert(!(flags & ~BDRV_REQ_MASK));
if (flags & BDRV_REQ_SERIALISING) {
- bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
+ bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
} else {
bdrv_wait_serialising_requests(req);
}
@@ -2069,7 +2080,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
padding = bdrv_init_padding(bs, offset, bytes, &pad);
if (padding) {
- bdrv_mark_request_serialising(req, align);
+ bdrv_make_request_serialising(req, align);
bdrv_padding_rmw_read(child, req, &pad, true);
@@ -2183,7 +2194,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
}
if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
- bdrv_mark_request_serialising(&req, align);
+ bdrv_make_request_serialising(&req, align);
bdrv_padding_rmw_read(child, &req, &pad, false);
}
@@ -3347,7 +3358,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
* new area, we need to make sure that no write requests are made to it
* concurrently or they might be overwritten by preallocation. */
if (new_bytes) {
- bdrv_mark_request_serialising(&req, 1);
+ bdrv_make_request_serialising(&req, 1);
}
if (bs->read_only) {
error_setg(errp, "Image is read-only");
We'll need a separate function which will only "mark" a request as
serialising with the specified alignment, but not wait for conflicting
requests. So it will be like the old bdrv_mark_request_serialising(),
before bdrv_wait_serialising_requests_locked() was merged into it.

To reduce the possible mess, let's do the following: the public function
that does both marking and waiting will be called
bdrv_make_request_serialising(), and the private function which will only
"mark" will be called tracked_request_set_serialising().

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 include/block/block_int.h |  3 ++-
 block/file-posix.c        |  2 +-
 block/io.c                | 35 +++++++++++++++++++++++------------
 3 files changed, 26 insertions(+), 14 deletions(-)
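
For illustration only (not part of the patch), here is a minimal sketch of
how the two entry points are intended to be used after this split. The
caller names below are hypothetical, and the types come from QEMU's block
layer internals, so the snippet is only meaningful inside block/io.c:

/* Sketch, not part of the patch: a typical caller that wants both
 * marking and waiting uses the public coroutine_fn wrapper, which
 * takes and releases req->bs->reqs_lock internally. */
static int coroutine_fn example_serialising_write(BdrvTrackedRequest *req,
                                                  uint64_t align)
{
    bool waited = bdrv_make_request_serialising(req, align);
    return waited ? 1 : 0;
}

/* Sketch, not part of the patch: a future caller in block/io.c that
 * already holds req->bs->reqs_lock and must not wait yet only widens
 * the serialising overlap region. */
static void example_set_serialising_locked(BdrvTrackedRequest *req,
                                           uint64_t align)
{
    tracked_request_set_serialising(req, align); /* mark only, no wait */
}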