From: Vladimir Sementsov-Ogievskiy
Subject: [RFC 02/24] block/block-copy: use block_status
Date: Fri, 15 Nov 2019 17:14:22 +0300
Use bdrv_block_status_above to choose an effective chunk size and to
handle zeroes efficiently.

This replaces checking whether a region is merely allocated or not, and
drops the old code path for that check. Assistance from the backup job
is dropped too, as caching block-status information is more difficult
than just caching is-allocated information in our dirty bitmap, and the
backup job is not a good place for this caching anyway.
Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
---
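Note (not part of the patch itself): the essence of the change is that the
copy loop now consults bdrv_block_status_above once per chunk and branches
on the result. Below is a minimal illustrative sketch of that decision
flow; it assumes QEMU's internal block-layer headers, and the helper name
copy_status_sketch is hypothetical:

    /* Illustrative only: query block status for one chunk and act on it.
     * Mirrors the decision flow of the patch below in simplified form. */
    static int coroutine_fn copy_status_sketch(BlockCopyState *s,
                                               int64_t offset, int64_t bytes)
    {
        int64_t num;
        int ret = bdrv_block_status_above(s->source->bs, NULL /* whole chain */,
                                          offset, bytes, &num, NULL, NULL);
        if (ret < 0) {
            /* Status unknown: conservatively treat one cluster as data. */
            num = s->cluster_size;
            ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
        }

        if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
            return 0;  /* nothing above the backing file: skip the chunk */
        }
        if (ret & BDRV_BLOCK_ZERO) {
            /* Known zeroes: writing zeroes beats read+write. Compressed
             * writes make no sense for zeroes, so mask that flag out. */
            return bdrv_co_pwrite_zeroes(s->target, offset, num,
                                         s->write_flags &
                                             ~BDRV_REQ_WRITE_COMPRESSED);
        }
        /* ... otherwise fall through to copy_range / read+write ... */
        return 0;
    }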
block/block-copy.c | 67 +++++++++++++++++++++++++++++++++++++---------
block/trace-events | 1 +
2 files changed, 55 insertions(+), 13 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index 8602e2cae7..74295d93d5 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -152,7 +152,7 @@ void block_copy_set_callbacks(
*/
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
int64_t start, int64_t end,
- bool *error_is_read)
+ bool zeroes, bool *error_is_read)
{
int ret;
int nbytes = MIN(end, s->len) - start;
@@ -162,6 +162,18 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
assert(QEMU_IS_ALIGNED(end, s->cluster_size));
assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
+ if (zeroes) {
+ ret = bdrv_co_pwrite_zeroes(s->target, start, nbytes, s->write_flags &
+ ~BDRV_REQ_WRITE_COMPRESSED);
+ if (ret < 0) {
+ trace_block_copy_write_zeroes_fail(s, start, ret);
+ if (error_is_read) {
+ *error_is_read = false;
+ }
+ }
+ return ret;
+ }
+
if (s->use_copy_range) {
ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
0, s->write_flags);
@@ -225,6 +237,34 @@ out:
return ret;
}
+static int block_copy_block_status(BlockCopyState *s, int64_t offset,
+ int64_t bytes, int64_t *pnum)
+{
+ int64_t num;
+ BlockDriverState *base;
+ int ret;
+
+ if (s->skip_unallocated && s->source->bs->backing) {
+ base = s->source->bs->backing->bs;
+ } else {
+ base = NULL;
+ }
+
+ ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
+ NULL, NULL);
+ if (ret < 0 || num < s->cluster_size) {
+ num = s->cluster_size;
+ ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
+ } else if (offset + num == s->len) {
+ num = QEMU_ALIGN_UP(num, s->cluster_size);
+ } else {
+ num = QEMU_ALIGN_DOWN(num, s->cluster_size);
+ }
+
+ *pnum = num;
+ return ret;
+}
+
/*
* Check if the cluster starting at offset is allocated or not.
* return via pnum the number of contiguous clusters sharing this allocation.
@@ -301,7 +341,6 @@ int coroutine_fn block_copy(BlockCopyState *s,
{
int ret = 0;
int64_t end = bytes + start; /* bytes */
- int64_t status_bytes;
BlockCopyInFlightReq req;
/*
@@ -318,7 +357,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
block_copy_inflight_req_begin(s, &req, start, end);
while (start < end) {
- int64_t next_zero, chunk_end;
+ int64_t next_zero, chunk_end, status_bytes;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
trace_block_copy_skip(s, start);
@@ -336,23 +375,25 @@ int coroutine_fn block_copy(BlockCopyState *s,
chunk_end = next_zero;
}
- if (s->skip_unallocated) {
- ret = block_copy_reset_unallocated(s, start, &status_bytes);
- if (ret == 0) {
- trace_block_copy_skip_range(s, start, status_bytes);
- start += status_bytes;
- continue;
- }
- /* Clamp to known allocated region */
- chunk_end = MIN(chunk_end, start + status_bytes);
+ ret = block_copy_block_status(s, start, chunk_end - start,
+ &status_bytes);
+ if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, start, status_bytes);
+ s->progress_reset_callback(s->progress_opaque);
+ trace_block_copy_skip_range(s, start, status_bytes);
+ start += status_bytes;
+ continue;
}
+ chunk_end = MIN(chunk_end, start + status_bytes);
+
trace_block_copy_process(s, start);
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
co_get_from_shres(s->mem, chunk_end - start);
- ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
+ ret = block_copy_do_copy(s, start, chunk_end, ret & BDRV_BLOCK_ZERO,
+ error_is_read);
co_put_to_shres(s->mem, chunk_end - start);
if (ret < 0) {
bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
diff --git a/block/trace-events b/block/trace-events
index 6ba86decca..346537a1d2 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -48,6 +48,7 @@ block_copy_process(void *bcs, int64_t start) "bcs %p start %"PRId64
block_copy_copy_range_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
block_copy_read_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
block_copy_write_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
+block_copy_write_zeroes_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
# ../blockdev.c
qmp_block_job_cancel(void *job) "job %p"
--
2.21.0
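
One more note, on the rounding in block_copy_block_status: status extents
are clamped to cluster granularity so the dirty bitmap and the copy loop
always operate on whole clusters; the only exception is an extent touching
end-of-image, which is rounded up because the final cluster may be short.
A small self-contained illustration of that arithmetic (the macro bodies
match QEMU's QEMU_ALIGN_DOWN/QEMU_ALIGN_UP; the sizes are made-up
examples):

    #include <inttypes.h>
    #include <stdio.h>

    /* Same arithmetic as QEMU's QEMU_ALIGN_DOWN/QEMU_ALIGN_UP macros. */
    #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
    #define ALIGN_UP(n, m)   ALIGN_DOWN((n) + (m) - 1, (m))

    int main(void)
    {
        int64_t cluster = 65536;          /* example cluster size */
        int64_t len = 5 * cluster + 123;  /* image end, not aligned */

        /* Mid-image: round down, so the partially covered tail cluster
         * is re-queried on the next loop iteration. */
        int64_t num = 3 * cluster + 1000;
        printf("mid-image: %"PRId64" -> %"PRId64"\n",
               num, ALIGN_DOWN(num, cluster));

        /* Extent reaching end-of-image: round up instead, since the
         * dirty bitmap is cluster-granular and no data follows. */
        int64_t tail = len - 4 * cluster; /* offset + num == len */
        printf("at EOF:    %"PRId64" -> %"PRId64"\n",
               tail, ALIGN_UP(tail, cluster));
        return 0;
    }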