[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v4 08/23] multifd: Move iov from pages to params
From: Dr. David Alan Gilbert
Subject: Re: [PATCH v4 08/23] multifd: Move iov from pages to params
Date: Tue, 18 Jan 2022 17:56:57 +0000
User-agent: Mutt/2.1.5 (2021-12-30)
* Juan Quintela (quintela@redhat.com) wrote:
> This will allow us to reduce the number of system calls on the next patch.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
> migration/multifd.h | 8 ++++++--
> migration/multifd.c | 34 ++++++++++++++++++++++++----------
> 2 files changed, 30 insertions(+), 12 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index e57adc783b..c3f18af364 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -62,8 +62,6 @@ typedef struct {
> uint64_t packet_num;
> /* offset of each page */
> ram_addr_t *offset;
> - /* pointer to each page */
> - struct iovec *iov;
> RAMBlock *block;
> } MultiFDPages_t;
>
> @@ -110,6 +108,10 @@ typedef struct {
> uint64_t num_pages;
> /* syncs main thread and channels */
> QemuSemaphore sem_sync;
> + /* buffers to send */
> + struct iovec *iov;
> + /* number of iovs used */
> + uint32_t iovs_num;
> /* used for compression methods */
> void *data;
> } MultiFDSendParams;
> @@ -149,6 +151,8 @@ typedef struct {
> uint64_t num_pages;
> /* syncs main thread and channels */
> QemuSemaphore sem_sync;
> + /* buffers to recv */
> + struct iovec *iov;
Why is there the asymmetry between send and recv, where the send
has the iovs_num and the recv doesn't?
Dave
> /* used for de-compression methods */
> void *data;
> } MultiFDRecvParams;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 4d62850258..f75bd3c188 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -86,7 +86,16 @@ static void nocomp_send_cleanup(MultiFDSendParams *p,
> Error **errp)
> */
> static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
> {
> - p->next_packet_size = p->pages->num * qemu_target_page_size();
> + MultiFDPages_t *pages = p->pages;
> + size_t page_size = qemu_target_page_size();
> +
> + for (int i = 0; i < p->pages->num; i++) {
> + p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
> + p->iov[p->iovs_num].iov_len = page_size;
> + p->iovs_num++;
> + }
> +
> + p->next_packet_size = p->pages->num * page_size;
> p->flags |= MULTIFD_FLAG_NOCOMP;
> return 0;
> }
> @@ -104,7 +113,7 @@ static int nocomp_send_prepare(MultiFDSendParams *p,
> Error **errp)
> */
> static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error
> **errp)
> {
> - return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
> + return qio_channel_writev_all(p->c, p->iov, p->iovs_num, errp);
> }
>
> /**
> @@ -146,13 +155,18 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
> static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
> {
> uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
> + size_t page_size = qemu_target_page_size();
>
> if (flags != MULTIFD_FLAG_NOCOMP) {
> error_setg(errp, "multifd %u: flags received %x flags expected %x",
> p->id, flags, MULTIFD_FLAG_NOCOMP);
> return -1;
> }
> - return qio_channel_readv_all(p->c, p->pages->iov, p->pages->num, errp);
> + for (int i = 0; i < p->pages->num; i++) {
> + p->iov[i].iov_base = p->pages->block->host + p->pages->offset[i];
> + p->iov[i].iov_len = page_size;
> + }
> + return qio_channel_readv_all(p->c, p->iov, p->pages->num, errp);
> }
>
> static MultiFDMethods multifd_nocomp_ops = {
> @@ -242,7 +256,6 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
> MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
>
> pages->allocated = size;
> - pages->iov = g_new0(struct iovec, size);
> pages->offset = g_new0(ram_addr_t, size);
>
> return pages;
> @@ -254,8 +267,6 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
> pages->allocated = 0;
> pages->packet_num = 0;
> pages->block = NULL;
> - g_free(pages->iov);
> - pages->iov = NULL;
> g_free(pages->offset);
> pages->offset = NULL;
> g_free(pages);
> @@ -365,8 +376,6 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams
> *p, Error **errp)
> return -1;
> }
> p->pages->offset[i] = offset;
> - p->pages->iov[i].iov_base = block->host + offset;
> - p->pages->iov[i].iov_len = page_size;
> }
>
> return 0;
> @@ -470,8 +479,6 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block,
> ram_addr_t offset)
>
> if (pages->block == block) {
> pages->offset[pages->num] = offset;
> - pages->iov[pages->num].iov_base = block->host + offset;
> - pages->iov[pages->num].iov_len = qemu_target_page_size();
> pages->num++;
>
> if (pages->num < pages->allocated) {
> @@ -567,6 +574,8 @@ void multifd_save_cleanup(void)
> p->packet_len = 0;
> g_free(p->packet);
> p->packet = NULL;
> + g_free(p->iov);
> + p->iov = NULL;
> multifd_send_state->ops->send_cleanup(p, &local_err);
> if (local_err) {
> migrate_set_error(migrate_get_current(), local_err);
> @@ -654,6 +663,7 @@ static void *multifd_send_thread(void *opaque)
> uint32_t used = p->pages->num;
> uint64_t packet_num = p->packet_num;
> uint32_t flags = p->flags;
> + p->iovs_num = 0;
>
> if (used) {
> ret = multifd_send_state->ops->send_prepare(p, &local_err);
> @@ -922,6 +932,7 @@ int multifd_save_setup(Error **errp)
> p->packet->version = cpu_to_be32(MULTIFD_VERSION);
> p->name = g_strdup_printf("multifdsend_%d", i);
> p->tls_hostname = g_strdup(s->hostname);
> + p->iov = g_new0(struct iovec, page_count);
> socket_send_channel_create(multifd_new_send_channel_async, p);
> }
>
> @@ -1021,6 +1032,8 @@ int multifd_load_cleanup(Error **errp)
> p->packet_len = 0;
> g_free(p->packet);
> p->packet = NULL;
> + g_free(p->iov);
> + p->iov = NULL;
> multifd_recv_state->ops->recv_cleanup(p);
> }
> qemu_sem_destroy(&multifd_recv_state->sem_sync);
> @@ -1161,6 +1174,7 @@ int multifd_load_setup(Error **errp)
> + sizeof(uint64_t) * page_count;
> p->packet = g_malloc0(p->packet_len);
> p->name = g_strdup_printf("multifdrecv_%d", i);
> + p->iov = g_new0(struct iovec, page_count);
> }
>
> for (i = 0; i < thread_count; i++) {
> --
> 2.34.1
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
- [PATCH v4 04/23] migration: Remove masking for compression, (continued)
- [PATCH v4 04/23] migration: Remove masking for compression, Juan Quintela, 2022/01/11
- [PATCH v4 02/23] migration: We only need last_stage in two places, Juan Quintela, 2022/01/11
- [PATCH v4 09/23] multifd: Make zlib use iov's, Juan Quintela, 2022/01/11
- [PATCH v4 05/23] migration: simplify do_compress_ram_page, Juan Quintela, 2022/01/11
- [PATCH v4 10/23] multifd: Make zstd use iov's, Juan Quintela, 2022/01/11
- [PATCH v4 07/23] multifd: Use proper maximum compression values, Juan Quintela, 2022/01/11
- [PATCH v4 08/23] multifd: Move iov from pages to params, Juan Quintela, 2022/01/11
- Re: [PATCH v4 08/23] multifd: Move iov from pages to params,
Dr. David Alan Gilbert <=
- [PATCH v4 06/23] migration: Move ram_release_pages() call to save_zero_page_to_file(), Juan Quintela, 2022/01/11
- [PATCH v4 11/23] multifd: Remove send_write() method, Juan Quintela, 2022/01/11
- [PATCH v4 12/23] multifd: Use a single writev on the send side, Juan Quintela, 2022/01/11
- [PATCH v4 19/23] multifd: Add property to enable/disable zero_page, Juan Quintela, 2022/01/11
- [PATCH v4 22/23] migration: Use multifd before we check for the zero page, Juan Quintela, 2022/01/11