
From: Vladimir Sementsov-Ogievskiy
Subject: [Qemu-devel] [PATCH v2 6/7] block/backup: teach backup_cow_with_bounce_buffer to copy more at once
Date: Fri, 9 Aug 2019 18:32:06 +0300

backup_cow_with_offload can transfer more than one cluster. Let
backup_cow_with_bounce_buffer behave similarly: copying the whole
region at once reduces the number of IO requests, since there is no
need to copy cluster by cluster.

Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
---
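[Editor's note: for readability, here is a rough sketch of what the
function body looks like with this patch applied, reconstructed from
the hunks below; tracing and error_is_read bookkeeping are elided.]

    static int coroutine_fn
    backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                  int64_t start, int64_t end,
                                  bool is_write_notifier,
                                  bool *error_is_read)
    {
        BlockBackend *blk = job->common.blk;
        int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
        /* One buffer now covers the whole [start, end) region,
         * not a single cluster. */
        int nbytes = MIN(end - start, job->len - start);
        void *bounce_buffer = blk_try_blockalign(blk, nbytes);
        int ret;

        if (!bounce_buffer) {
            return -ENOMEM;
        }

        bdrv_reset_dirty_bitmap(job->copy_bitmap, start, end - start);

        ret = blk_co_pread(blk, start, nbytes, bounce_buffer, read_flags);
        if (ret < 0) {
            goto fail;
        }

        ret = blk_co_pwrite(job->target, start, nbytes, bounce_buffer,
                            job->write_flags);
        if (ret < 0) {
            goto fail;
        }

        qemu_vfree(bounce_buffer);
        return nbytes;

    fail:
        bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
        qemu_vfree(bounce_buffer);
        return ret;
    }
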
 block/backup.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index d482d93458..155e21d0a3 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -104,22 +104,25 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end,
                                                       bool is_write_notifier,
-                                                      bool *error_is_read,
-                                                      void **bounce_buffer)
+                                                      bool *error_is_read)
 {
     int ret;
     BlockBackend *blk = job->common.blk;
     int nbytes;
     int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
+    void *bounce_buffer;
 
     assert(QEMU_IS_ALIGNED(start, job->cluster_size));
-    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
-    nbytes = MIN(job->cluster_size, job->len - start);
-    if (!*bounce_buffer) {
-        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
+
+    nbytes = MIN(end - start, job->len - start);
+    bounce_buffer = blk_try_blockalign(blk, nbytes);
+    if (!bounce_buffer) {
+        return -ENOMEM;
     }
 
-    ret = blk_co_pread(blk, start, nbytes, *bounce_buffer, read_flags);
+    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, end - start);
+
+    ret = blk_co_pread(blk, start, nbytes, bounce_buffer, read_flags);
     if (ret < 0) {
         trace_backup_do_cow_read_fail(job, start, ret);
         if (error_is_read) {
@@ -128,7 +131,7 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
         goto fail;
     }
 
-    ret = blk_co_pwrite(job->target, start, nbytes, *bounce_buffer,
+    ret = blk_co_pwrite(job->target, start, nbytes, bounce_buffer,
                         job->write_flags);
     if (ret < 0) {
         trace_backup_do_cow_write_fail(job, start, ret);
@@ -138,9 +141,12 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
         goto fail;
     }
 
+    qemu_vfree(bounce_buffer);
     return nbytes;
+
 fail:
     bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
+    qemu_vfree(bounce_buffer);
     return ret;
 
 }
@@ -254,7 +260,6 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     CowRequest cow_request;
     int ret = 0;
     int64_t start, end; /* bytes */
-    void *bounce_buffer = NULL;
     int64_t skip_bytes;
 
     qemu_co_rwlock_rdlock(&job->flush_rwlock);
@@ -303,7 +308,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
         if (!job->use_copy_range) {
             ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
                                                 is_write_notifier,
-                                                error_is_read, &bounce_buffer);
+                                                error_is_read);
         }
         if (ret < 0) {
             break;
@@ -318,10 +323,6 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
         ret = 0;
     }
 
-    if (bounce_buffer) {
-        qemu_vfree(bounce_buffer);
-    }
-
     cow_request_end(&cow_request);
 
     trace_backup_do_cow_return(job, offset, bytes, ret);
-- 
2.18.0