[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH COLO-Frame v6 13/31] COLO RAM: Flush cached RAM into
From: |
zhanghailiang |
Subject: |
[Qemu-devel] [PATCH COLO-Frame v6 13/31] COLO RAM: Flush cached RAM into SVM's memory |
Date: |
Thu, 18 Jun 2015 16:58:37 +0800 |
During the time of VM's running, PVM/SVM may dirty some pages, we will transfer
PVM's dirty pages to SVM and store them into SVM's RAM cache at next checkpoint
time. So, the content of SVM's RAM cache will always be the same as PVM's memory
after checkpoint.
Instead of flushing all content of SVM's RAM cache into SVM's MEMORY,
we do this in a more efficient way:
Only flush pages that were dirtied by PVM or SVM since the last checkpoint.
In this way, we ensure SVM's memory is the same as PVM's.
Besides, we must ensure that the RAM cache is flushed before loading the device state.
Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Lai Jiangshan <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
Signed-off-by: Yang Hongyang <address@hidden>
Signed-off-by: Gonglei <address@hidden>
---
include/migration/migration-colo.h | 1 +
migration/colo.c | 2 -
migration/ram.c | 92 ++++++++++++++++++++++++++++++++++++++
3 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/include/migration/migration-colo.h
b/include/migration/migration-colo.h
index 2110182..c03c391 100644
--- a/include/migration/migration-colo.h
+++ b/include/migration/migration-colo.h
@@ -37,5 +37,6 @@ void *colo_process_incoming_checkpoints(void *opaque);
bool loadvm_in_colo_state(void);
/* ram cache */
int create_and_init_ram_cache(void);
+void colo_flush_ram_cache(void);
void release_ram_cache(void);
#endif
diff --git a/migration/colo.c b/migration/colo.c
index c82feb5..07f677a 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -414,8 +414,6 @@ void *colo_process_incoming_checkpoints(void *opaque)
}
qemu_mutex_unlock_iothread();
- /* TODO: flush vm state */
-
ret = colo_ctl_put(ctl, COLO_CHECKPOINT_LOADED);
if (ret < 0) {
goto out;
diff --git a/migration/ram.c b/migration/ram.c
index 8c9edf0..e677162 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1482,6 +1482,7 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
int flags = 0, ret = 0;
static uint64_t seq_iter;
int len = 0;
+ bool need_flush = false;
seq_iter++;
@@ -1548,6 +1549,8 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
ret = -EINVAL;
break;
}
+
+ need_flush = true;
ch = qemu_get_byte(f);
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
break;
@@ -1558,6 +1561,8 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
ret = -EINVAL;
break;
}
+
+ need_flush = true;
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
break;
case RAM_SAVE_FLAG_COMPRESS_PAGE:
@@ -1590,6 +1595,7 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
ret = -EINVAL;
break;
}
+ need_flush = true;
break;
case RAM_SAVE_FLAG_EOS:
/* normal exit */
@@ -1609,6 +1615,11 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
}
rcu_read_unlock();
+
+ if (!ret && ram_cache_enable && need_flush) {
+ DPRINTF("Flush ram_cache\n");
+ colo_flush_ram_cache();
+ }
DPRINTF("Completed load of VM with exit code %d seq iteration "
"%" PRIu64 "\n", ret, seq_iter);
return ret;
@@ -1694,6 +1705,87 @@ static void
*memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block)
return block->host_cache + (addr - block->offset);
}
+/* fix me: should this helper function be merged with
+ * migration_bitmap_find_and_reset_dirty ?
+ */
+/*
+ * Find the first page at or after @start within @mr whose bit is set in
+ * the global DIRTY_MEMORY_MIGRATION bitmap, clear that bit, and return
+ * the page's byte offset within the region.  When no dirty page remains,
+ * the returned offset is >= the region's page-aligned size, which the
+ * caller uses as the termination condition.
+ */
+static inline
+ram_addr_t host_bitmap_find_and_reset_dirty(MemoryRegion *mr,
+ ram_addr_t start)
+{
+ /* Bit index of this region's first page in the global dirty bitmap. */
+ unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
+ /* Bit index to start searching from (region base + @start's page). */
+ unsigned long nr = base + (start >> TARGET_PAGE_BITS);
+ uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
+ /* One past this region's last bit index in the global bitmap. */
+ unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
+
+ unsigned long next;
+
+ next = find_next_bit(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION],
+ size, nr);
+ if (next < size) {
+ /* Consume the bit so each dirty page is reported only once. */
+ clear_bit(next, ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ }
+ /* Convert the global bit index back to a region-relative offset. */
+ return (next - base) << TARGET_PAGE_BITS;
+}
+
+/*
+* Flush content of RAM cache into SVM's memory.
+* Only flush the pages that were dirtied by PVM, SVM, or both.
+*
+* Walks every RAMBlock with two cursors: 'ca' tracks pages dirtied by
+* the checkpoint (PVM, migration bitmap) and 'ha' tracks pages dirtied
+* by the host (SVM, host bitmap).  The cursors are advanced in lockstep
+* so each dirty page is copied from the RAM cache exactly once, even
+* when both bitmaps flag the same page.
+*/
+void colo_flush_ram_cache(void)
+{
+ RAMBlock *block = NULL;
+ void *dst_host;
+ void *src_host;
+ /* ca/ha: byte offsets of the current checkpoint/host dirty page. */
+ ram_addr_t ca = 0, ha = 0;
+ /* got_ca/got_ha: a freshly found dirty page is pending a copy. */
+ bool got_ca = 0, got_ha = 0;
+ /* NOTE(review): host_dirty/both_dirty are accumulated but never read;
+ * presumably intended for statistics — confirm or drop. */
+ int64_t host_dirty = 0, both_dirty = 0;
+
+ /* Pull the latest SVM dirty state into the global bitmaps first. */
+ address_space_sync_dirty_bitmap(&address_space_memory);
+ rcu_read_lock();
+ block = QLIST_FIRST_RCU(&ram_list.blocks);
+ while (true) {
+ /* Advance the checkpoint cursor only while it trails the host one,
+ * keeping the two cursors merged in ascending page order. */
+ if (ca < block->used_length && ca <= ha) {
+ ca = migration_bitmap_find_and_reset_dirty(block->mr, ca);
+ if (ca < block->used_length) {
+ got_ca = 1;
+ }
+ }
+ if (ha < block->used_length && ha <= ca) {
+ ha = host_bitmap_find_and_reset_dirty(block->mr, ha);
+ /* ha == ca means both bitmaps flagged the same page; the copy
+ * is then done once via got_ca below. */
+ if (ha < block->used_length && ha != ca) {
+ got_ha = 1;
+ }
+ host_dirty += (ha < block->used_length ? 1 : 0);
+ both_dirty += (ha < block->used_length && ha == ca ? 1 : 0);
+ }
+ /* Both cursors ran off the end of this block: move to the next. */
+ if (ca >= block->used_length && ha >= block->used_length) {
+ ca = 0;
+ ha = 0;
+ block = QLIST_NEXT_RCU(block, next);
+ if (!block) {
+ break;
+ }
+ } else {
+ /* Copy each pending dirty page from the RAM cache into the
+ * SVM's actual memory. */
+ if (got_ha) {
+ got_ha = 0;
+ dst_host = memory_region_get_ram_ptr(block->mr) + ha;
+ src_host = memory_region_get_ram_cache_ptr(block->mr, block)
+ + ha;
+ memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+ }
+ if (got_ca) {
+ got_ca = 0;
+ dst_host = memory_region_get_ram_ptr(block->mr) + ca;
+ src_host = memory_region_get_ram_cache_ptr(block->mr, block)
+ + ca;
+ memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+ }
+ }
+ }
+ rcu_read_unlock();
+ /* Every migration-dirty page must have been consumed by the sweep. */
+ assert(migration_dirty_pages == 0);
+}
+
static SaveVMHandlers savevm_ram_handlers = {
.save_live_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
--
1.7.12.4
- [Qemu-devel] [PATCH COLO-Frame v6 00/31] COarse-grain LOck-stepping(COLO) Virtual Machines for Non-stop Service, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 01/31] configure: Add parameter for configure to enable/disable COLO support, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 03/31] COLO: migrate colo related info to slave, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 04/31] migration: Integrate COLO checkpoint process into migration, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 10/31] COLO RAM: Load PVM's dirty page into SVM's RAM cache temporarily, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 06/31] COLO: Implement colo checkpoint protocol, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 05/31] migration: Integrate COLO checkpoint process into loadvm, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 09/31] COLO: Save VM state to slave when do checkpoint, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 12/31] arch_init: Start to trace dirty pages of SVM, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 13/31] COLO RAM: Flush cached RAM into SVM's memory,
zhanghailiang <=
- [Qemu-devel] [PATCH COLO-Frame v6 07/31] COLO: Add a new RunState RUN_STATE_COLO, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 08/31] QEMUSizedBuffer: Introduce two help functions for qsb, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 11/31] COLO VMstate: Load VM state into qsb before restore it, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 15/31] COLO failover: Implement COLO primary/secondary vm failover work, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 02/31] migration: Introduce capability 'colo' to migration, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 14/31] COLO failover: Introduce a new command to trigger a failover, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 17/31] COLO failover: Don't do failover during loading VM's state, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 21/31] COLO NIC: Implement colo nic device interface configure(), zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 24/31] COLO: Handle nfnetlink message from proxy module, zhanghailiang, 2015/06/18
- [Qemu-devel] [PATCH COLO-Frame v6 19/31] COLO NIC: Init/remove colo nic devices when add/cleanup tap devices, zhanghailiang, 2015/06/18