[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH RFC 18/26] migration: Add load_finish handler and associated functions
From: |
Maciej S. Szmigiero |
Subject: |
[PATCH RFC 18/26] migration: Add load_finish handler and associated functions |
Date: |
Tue, 16 Apr 2024 16:42:57 +0200 |
From: "Maciej S. Szmigiero" <maciej.szmigiero@oracle.com>
load_finish SaveVMHandler allows migration code to poll whether
a device-specific asynchronous device state loading operation had finished.
In order to avoid calling this handler needlessly the device is supposed
to notify the migration code of its possible readiness via a call to
qemu_loadvm_load_finish_ready_broadcast() while holding
qemu_loadvm_load_finish_ready_lock.
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
---
include/migration/register.h | 21 +++++++++++++++
migration/migration.c | 6 +++++
migration/migration.h | 3 +++
migration/savevm.c | 52 ++++++++++++++++++++++++++++++++++++
migration/savevm.h | 4 +++
5 files changed, 86 insertions(+)
diff --git a/include/migration/register.h b/include/migration/register.h
index 7d29b7e0b559..f15881fc87cd 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -272,6 +272,27 @@ typedef struct SaveVMHandlers {
int (*load_state_buffer)(void *opaque, char *data, size_t data_size,
Error **errp);
+ /**
+ * @load_finish
+ *
+ * Poll whether all asynchronous device state loading had finished.
+ * Not called on the load failure path.
+ *
+ * Called while holding the qemu_loadvm_load_finish_ready_lock.
+ *
+ * If this method signals "not ready" then it might not be called
+ * again until qemu_loadvm_load_finish_ready_broadcast() is invoked
+ * while holding qemu_loadvm_load_finish_ready_lock.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ * @is_finished: whether the loading had finished (output parameter)
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns zero to indicate success and negative for error
+ * It's not an error that the loading still hasn't finished.
+ */
+ int (*load_finish)(void *opaque, bool *is_finished, Error **errp);
+
/**
* @load_setup
*
diff --git a/migration/migration.c b/migration/migration.c
index 8fe8be71a0e3..e4f82695a338 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -234,6 +234,9 @@ void migration_object_init(void)
qemu_cond_init(&current_incoming->page_request_cond);
current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
+ g_mutex_init(&current_incoming->load_finish_ready_mutex);
+ g_cond_init(&current_incoming->load_finish_ready_cond);
+
migration_object_check(current_migration, &error_fatal);
blk_mig_init();
@@ -387,6 +390,9 @@ void migration_incoming_state_destroy(void)
mis->postcopy_qemufile_dst = NULL;
}
+ g_mutex_clear(&mis->load_finish_ready_mutex);
+ g_cond_clear(&mis->load_finish_ready_cond);
+
yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}
diff --git a/migration/migration.h b/migration/migration.h
index a6114405917f..92014ef4cfcc 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -227,6 +227,9 @@ struct MigrationIncomingState {
* is needed as this field is updated serially.
*/
unsigned int switchover_ack_pending_num;
+
+ GCond load_finish_ready_cond;
+ GMutex load_finish_ready_mutex;
};
MigrationIncomingState *migration_incoming_get_current(void);
diff --git a/migration/savevm.c b/migration/savevm.c
index 2e4d63faca06..30521ad3f340 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2994,6 +2994,37 @@ int qemu_loadvm_state(QEMUFile *f)
return ret;
}
+ qemu_loadvm_load_finish_ready_lock();
+ while (!ret) { /* Don't call load_finish() handlers on the load failure path */
+ bool all_ready = true;
+ SaveStateEntry *se = NULL;
+
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ bool this_ready;
+
+ if (!se->ops || !se->ops->load_finish) {
+ continue;
+ }
+
+ ret = se->ops->load_finish(se->opaque, &this_ready, &local_err);
+ if (ret) {
+ error_report_err(local_err);
+
+ qemu_loadvm_load_finish_ready_unlock();
+ return -EINVAL;
+ } else if (!this_ready) {
+ all_ready = false;
+ }
+ }
+
+ if (all_ready) {
+ break;
+ }
+
+ g_cond_wait(&mis->load_finish_ready_cond,
+ &mis->load_finish_ready_mutex);
+ }
+ qemu_loadvm_load_finish_ready_unlock();
+
if (ret == 0) {
ret = qemu_file_get_error(f);
}
@@ -3098,6 +3129,27 @@ int qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id,
return 0;
}
+void qemu_loadvm_load_finish_ready_lock(void)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+
+ g_mutex_lock(&mis->load_finish_ready_mutex);
+}
+
+void qemu_loadvm_load_finish_ready_unlock(void)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+
+ g_mutex_unlock(&mis->load_finish_ready_mutex);
+}
+
+void qemu_loadvm_load_finish_ready_broadcast(void)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+
+ g_cond_broadcast(&mis->load_finish_ready_cond);
+}
+
bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
bool has_devices, strList *devices, Error **errp)
{
diff --git a/migration/savevm.h b/migration/savevm.h
index c879ba8c970e..85e8b882bd37 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -73,4 +73,8 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
int qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id,
char *buf, size_t len, Error **errp);
+void qemu_loadvm_load_finish_ready_lock(void);
+void qemu_loadvm_load_finish_ready_unlock(void);
+void qemu_loadvm_load_finish_ready_broadcast(void);
+
#endif
- [PATCH RFC 07/26] migration/postcopy: pass PostcopyPChannelConnectData when connecting sending preempt socket, (continued)
- [PATCH RFC 07/26] migration/postcopy: pass PostcopyPChannelConnectData when connecting sending preempt socket, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 09/26] migration: Add send/receive header for postcopy preempt channel, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 08/26] migration: Allow passing migration header in migration channel creation, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 10/26] migration: Add send/receive header for multifd channel, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 11/26] migration/options: Mapped-ram is not channel header compatible, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 12/26] migration: Enable x-channel-header pseudo-capability, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 13/26] vfio/migration: Add save_{iterate, complete_precopy}_started trace events, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 14/26] migration/ram: Add load start trace event, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 15/26] migration/multifd: Zero p->flags before starting filling a packet, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 16/26] migration: Add save_live_complete_precopy_async{, wait} handlers, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 18/26] migration: Add load_finish handler and associated functions,
Maciej S. Szmigiero <=
- [PATCH RFC 17/26] migration: Add qemu_loadvm_load_state_buffer() and its handler, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 22/26] migration/multifd: Convert multifd_send_pages::next_channel to atomic, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 21/26] migration/multifd: Device state transfer support - receive side, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 25/26] vfio/migration: Multifd device state transfer support - receive side, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 23/26] migration/multifd: Device state transfer support - send side, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 19/26] migration: Add x-multifd-channels-device-state parameter, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 20/26] migration: Add MULTIFD_DEVICE_STATE migration channel type, Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 24/26] migration/multifd: Add migration_has_device_state_support(), Maciej S. Szmigiero, 2024/04/16
- [PATCH RFC 26/26] vfio/migration: Multifd device state transfer support - send side, Maciej S. Szmigiero, 2024/04/16