From: Vladimir Sementsov-Ogievskiy
Subject: [Qemu-devel] [PATCH 4/8] block/backup: improve unallocated clusters skipping
Date: Wed, 7 Aug 2019 11:07:46 +0300

Limit block_status querying to the request bounds on the write-notifier path
to avoid extra seeking.

Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
---
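
[Editorial note, not part of the patch: to make the intent of the new "bytes"
parameter concrete, here is a minimal, self-contained sketch of a bounded
allocation scan. is_allocated_stub(), the geometry constants and
cluster_allocated() are hypothetical stand-ins for bdrv_is_allocated() and
backup_is_cluster_allocated(); they only illustrate how clamping the query to
the request keeps the scan from walking past it.]

/* Hypothetical sketch: the stub and constants are made-up stand-ins. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CLUSTER_SIZE  65536LL
#define DEVICE_LEN    (64 * CLUSTER_SIZE)
#define MIN(a, b)     ((a) < (b) ? (a) : (b))

/* Pretend the first 8 clusters are allocated and the rest are not. */
static int is_allocated_stub(int64_t offset, int64_t bytes, int64_t *pnum)
{
    int64_t boundary = 8 * CLUSTER_SIZE;

    if (offset < boundary) {
        *pnum = MIN(bytes, boundary - offset);
        return 1;
    }
    *pnum = bytes;
    return 0;
}

/* Roughly what the patched helper does: the scan is clamped to the caller's
 * request instead of running to the end of the device. */
static int cluster_allocated(int64_t offset, int64_t bytes, int64_t *pnum)
{
    int64_t count, total = 0;
    int first = -1;

    assert(offset % CLUSTER_SIZE == 0);
    bytes = MIN(DEVICE_LEN - offset, bytes);   /* clamp to device end */

    while (bytes > 0) {
        int ret = is_allocated_stub(offset, bytes, &count);

        if (first < 0) {
            first = ret;
        } else if (ret != first) {
            break;                             /* allocation status changed */
        }
        total += count;
        offset += count;
        bytes -= count;
    }

    *pnum = total;
    return first;
}

int main(void)
{
    int64_t pnum;
    /* Ask about two clusters starting at cluster 7: the answer stops at the
     * request bound, not at the end of the device. */
    int ret = cluster_allocated(7 * CLUSTER_SIZE, 2 * CLUSTER_SIZE, &pnum);

    printf("allocated=%d pnum=%" PRId64 "\n", ret, pnum);
    return 0;
}
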
 block/backup.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index 11e27c844d..a4d37d2d62 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -180,14 +180,14 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
  * return via pnum the number of contiguous clusters sharing this allocation.
  */
 static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
-                                       int64_t *pnum)
+                                       int64_t bytes, int64_t *pnum)
 {
     BlockDriverState *bs = blk_bs(s->common.blk);
     int64_t count, total_count = 0;
-    int64_t bytes = s->len - offset;
     int ret;
 
     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
+    bytes = MIN(s->len - offset, bytes);
 
     while (true) {
         ret = bdrv_is_allocated(bs, offset, bytes, &count);
@@ -224,12 +224,13 @@ static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
  *         1 otherwise, and -ret on error.
  */
 static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
-                                               int64_t offset, int64_t *count)
+                                               int64_t offset, int64_t bytes,
+                                               int64_t *pnum)
 {
     int ret;
-    int64_t clusters, bytes, estimate;
+    int64_t clusters, estimate;
 
-    ret = backup_is_cluster_allocated(s, offset, &clusters);
+    ret = backup_is_cluster_allocated(s, offset, bytes, &clusters);
     if (ret < 0) {
         return ret;
     }
@@ -242,7 +243,7 @@ static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
         job_progress_set_remaining(&s->common.job, estimate);
     }
 
-    *count = bytes;
+    *pnum = bytes;
     return ret;
 }
 
@@ -255,7 +256,6 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     int ret = 0;
     int64_t start, end; /* bytes */
     void *bounce_buffer = NULL;
-    int64_t skip_bytes;
 
     qemu_co_rwlock_rdlock(&job->flush_rwlock);
 
@@ -267,6 +267,18 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     wait_for_overlapping_requests(job, start, end);
     cow_request_begin(&cow_request, job, start, end);
 
+    if (job->initializing_bitmap) {
+        int64_t off, chunk;
+
+        for (off = offset; off < end; off += chunk) {
+            ret = backup_bitmap_reset_unallocated(job, off, end - off, &chunk);
+            if (ret < 0) {
+                chunk = job->cluster_size;
+            }
+        }
+    }
+    ret = 0;
+
     while (start < end) {
         int64_t dirty_end;
 
@@ -276,15 +288,6 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
             continue; /* already copied */
         }
 
-        if (job->initializing_bitmap) {
-            ret = backup_bitmap_reset_unallocated(job, start, &skip_bytes);
-            if (ret == 0) {
-                trace_backup_do_cow_skip_range(job, start, skip_bytes);
-                start += skip_bytes;
-                continue;
-            }
-        }
-
         dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
                                                 end - start);
         if (dirty_end < 0) {
@@ -546,7 +549,8 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
                 goto out;
             }
 
-            ret = backup_bitmap_reset_unallocated(s, offset, &count);
+            ret = backup_bitmap_reset_unallocated(s, offset, s->len - offset,
+                                                  &count);
             if (ret < 0) {
                 goto out;
             }
-- 
2.18.0
