[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v4 20/23] multifd: Support for zero pages transmission
From: Dr. David Alan Gilbert
Subject: Re: [PATCH v4 20/23] multifd: Support for zero pages transmission
Date: Tue, 18 Jan 2022 19:49:54 +0000
User-agent: Mutt/2.1.5 (2021-12-30)
* Juan Quintela (quintela@redhat.com) wrote:
> This patch adds counters and similar. Logic will be added on the
> following patch.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> ---
> migration/multifd.h | 13 ++++++++++++-
> migration/multifd.c | 22 +++++++++++++++++++---
> migration/trace-events | 2 +-
> 3 files changed, 32 insertions(+), 5 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index 4dda900a0b..4c6d29c954 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -49,7 +49,10 @@ typedef struct {
> /* size of the next packet that contains pages */
> uint32_t next_packet_size;
> uint64_t packet_num;
> - uint64_t unused[4]; /* Reserved for future use */
> + /* zero pages */
> + uint32_t zero_pages;
> + uint32_t unused32[1]; /* Reserved for future use */
> + uint64_t unused64[3]; /* Reserved for future use */
> char ramblock[256];
> uint64_t offset[];
> } __attribute__((packed)) MultiFDPacket_t;
> @@ -117,6 +120,10 @@ typedef struct {
> ram_addr_t *normal;
> /* num of non zero pages */
> uint32_t normal_num;
> + /* Pages that are zero */
> + ram_addr_t *zero;
> + /* num of zero pages */
> + uint32_t zero_num;
> /* used for compression methods */
> void *data;
> } MultiFDSendParams;
> @@ -162,6 +169,10 @@ typedef struct {
> ram_addr_t *normal;
> /* num of non zero pages */
> uint32_t normal_num;
> + /* Pages that are zero */
> + ram_addr_t *zero;
> + /* num of zero pages */
> + uint32_t zero_num;
> /* used for de-compression methods */
> void *data;
> } MultiFDRecvParams;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 76b57a7177..cfa9f75d13 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -265,6 +265,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
> packet->normal_pages = cpu_to_be32(p->normal_num);
> packet->next_packet_size = cpu_to_be32(p->next_packet_size);
> packet->packet_num = cpu_to_be64(p->packet_num);
> + packet->zero_pages = cpu_to_be32(p->zero_num);
>
> if (p->pages->block) {
> strncpy(packet->ramblock, p->pages->block->idstr, 256);
> @@ -327,7 +328,15 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams
> *p, Error **errp)
> p->next_packet_size = be32_to_cpu(packet->next_packet_size);
> p->packet_num = be64_to_cpu(packet->packet_num);
>
> - if (p->normal_num == 0) {
> + p->zero_num = be32_to_cpu(packet->zero_pages);
> + if (p->zero_num > packet->pages_alloc - p->normal_num) {
> + error_setg(errp, "multifd: received packet "
> + "with %u zero pages and expected maximum pages are %u",
> + p->zero_num, packet->pages_alloc - p->normal_num) ;
> + return -1;
> + }
> +
> + if (p->normal_num == 0 && p->zero_num == 0) {
> return 0;
> }
>
> @@ -553,6 +562,8 @@ void multifd_save_cleanup(void)
> p->iov = NULL;
> g_free(p->normal);
> p->normal = NULL;
> + g_free(p->zero);
> + p->zero = NULL;
> multifd_send_state->ops->send_cleanup(p, &local_err);
> if (local_err) {
> migrate_set_error(migrate_get_current(), local_err);
> @@ -641,6 +652,7 @@ static void *multifd_send_thread(void *opaque)
> uint32_t flags = p->flags;
> p->iovs_num = 1;
> p->normal_num = 0;
> + p->zero_num = 0;
>
> for (int i = 0; i < p->pages->num; i++) {
> p->normal[p->normal_num] = p->pages->offset[i];
> @@ -662,8 +674,8 @@ static void *multifd_send_thread(void *opaque)
> p->pages->block = NULL;
> qemu_mutex_unlock(&p->mutex);
>
> - trace_multifd_send(p->id, packet_num, p->normal_num, flags,
> - p->next_packet_size);
> + trace_multifd_send(p->id, packet_num, p->normal_num, p->zero_num,
> + flags, p->next_packet_size);
>
> p->iov[0].iov_len = p->packet_len;
> p->iov[0].iov_base = p->packet;
> @@ -913,6 +925,7 @@ int multifd_save_setup(Error **errp)
> /* We need one extra place for the packet header */
> p->iov = g_new0(struct iovec, page_count + 1);
> p->normal = g_new0(ram_addr_t, page_count);
> + p->zero = g_new0(ram_addr_t, page_count);
> socket_send_channel_create(multifd_new_send_channel_async, p);
> }
>
> @@ -1014,6 +1027,8 @@ int multifd_load_cleanup(Error **errp)
> p->iov = NULL;
> g_free(p->normal);
> p->normal = NULL;
> + g_free(p->zero);
> + p->zero = NULL;
> multifd_recv_state->ops->recv_cleanup(p);
> }
> qemu_sem_destroy(&multifd_recv_state->sem_sync);
> @@ -1153,6 +1168,7 @@ int multifd_load_setup(Error **errp)
> p->name = g_strdup_printf("multifdrecv_%d", i);
> p->iov = g_new0(struct iovec, page_count);
> p->normal = g_new0(ram_addr_t, page_count);
> + p->zero = g_new0(ram_addr_t, page_count);
> }
>
> for (i = 0; i < thread_count; i++) {
> diff --git a/migration/trace-events b/migration/trace-events
> index 171a83a55d..b7e8f54395 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -124,7 +124,7 @@ multifd_recv_sync_main_wait(uint8_t id) "channel %u"
> multifd_recv_terminate_threads(bool error) "error %d"
> multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages)
> "channel %u packets %" PRIu64 " pages %" PRIu64
> multifd_recv_thread_start(uint8_t id) "%u"
> -multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t
> flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal
> pages %u flags 0x%x next packet size %u"
> +multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t
> zero, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %"
> PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u"
> multifd_send_error(uint8_t id) "channel %u"
> multifd_send_sync_main(long packet_num) "packet num %ld"
> multifd_send_sync_main_signal(uint8_t id) "channel %u"
> --
> 2.34.1
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
- Re: [PATCH v4 15/23] multifd: Use normal pages array on the recv side, (continued)
- [PATCH v4 18/23] migration: Make ram_save_target_page() a pointer, Juan Quintela, 2022/01/11
- [PATCH v4 17/23] multifd: Rename pages_used to normal_pages, Juan Quintela, 2022/01/11
- [PATCH v4 13/23] multifd: Unfold "used" variable by its value, Juan Quintela, 2022/01/11
- [PATCH v4 23/23] migration: Export ram_release_page(), Juan Quintela, 2022/01/11
- [PATCH v4 21/23] multifd: Zero pages transmission, Juan Quintela, 2022/01/11
- [PATCH v4 20/23] multifd: Support for zero pages transmission, Juan Quintela, 2022/01/11
- Re: [PATCH v4 20/23] multifd: Support for zero pages transmission,
Dr. David Alan Gilbert <=