Re: [PATCH 14/18] migration: Use "i" as the for loop index in ram-compress.c


From: Lukas Straub
Subject: Re: [PATCH 14/18] migration: Use "i" as the for loop index in ram-compress.c
Date: Thu, 22 Jun 2023 13:47:31 +0200

On Tue, 13 Jun 2023 16:57:53 +0200
Juan Quintela <quintela@redhat.com> wrote:

> It is the index name used everywhere else in C.  While at it, make sure
> the index cannot be used outside of the loop by declaring the variable
> inside the for statement itself.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>

Reviewed-by: Lukas Straub <lukasstraub2@web.de>
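
For reference, the conversion relies on the C99-style declaration of the
index inside the for statement, which limits its scope to the loop body,
so the compiler rejects any stale use afterwards.  A minimal standalone
sketch of the idiom (not from the patch itself; thread_count here just
stands in for migrate_compress_threads()):

    #include <stdio.h>

    int main(void)
    {
        int thread_count = 4;   /* stand-in for migrate_compress_threads() */

        for (int i = 0; i < thread_count; i++) {
            printf("compress thread %d\n", i);  /* "i" visible only here */
        }
        /* any reference to "i" here is a compile-time error */
        return 0;
    }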

> ---
>  migration/ram-compress.c | 57 ++++++++++++++++++----------------------
>  1 file changed, 25 insertions(+), 32 deletions(-)
> 
> diff --git a/migration/ram-compress.c b/migration/ram-compress.c
> index cf3a6c238d..a23531606e 100644
> --- a/migration/ram-compress.c
> +++ b/migration/ram-compress.c
> @@ -243,27 +243,25 @@ static inline void compress_reset_result(CompressParam *param)
>  
>  void flush_compressed_data(int (send_queued_data(CompressParam *)))
>  {
> -    int idx, thread_count;
> -
> -    thread_count = migrate_compress_threads();
> +    int thread_count = migrate_compress_threads();
>  
>      qemu_mutex_lock(&comp_done_lock);
> -    for (idx = 0; idx < thread_count; idx++) {
> -        while (!comp_param[idx].done) {
> +    for (int i = 0; i < thread_count; i++) {
> +        while (!comp_param[i].done) {
>              qemu_cond_wait(&comp_done_cond, &comp_done_lock);
>          }
>      }
>      qemu_mutex_unlock(&comp_done_lock);
>  
> -    for (idx = 0; idx < thread_count; idx++) {
> -        qemu_mutex_lock(&comp_param[idx].mutex);
> -        if (!comp_param[idx].quit) {
> -            CompressParam *param = &comp_param[idx];
> +    for (int i = 0; i < thread_count; i++) {
> +        qemu_mutex_lock(&comp_param[i].mutex);
> +        if (!comp_param[i].quit) {
> +            CompressParam *param = &comp_param[i];
>              send_queued_data(param);
>              assert(qemu_file_buffer_empty(param->file));
>              compress_reset_result(param);
>          }
> -        qemu_mutex_unlock(&comp_param[idx].mutex);
> +        qemu_mutex_unlock(&comp_param[i].mutex);
>      }
>  }
>  
> @@ -281,16 +279,15 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block,
>  bool compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset,
>                                       int (send_queued_data(CompressParam *)))
>  {
> -    int idx, thread_count;
>      bool wait = migrate_compress_wait_thread();
>  
> -    thread_count = migrate_compress_threads();
> +    int thread_count = migrate_compress_threads();
>      qemu_mutex_lock(&comp_done_lock);
>  
>      while (true) {
> -        for (idx = 0; idx < thread_count; idx++) {
> -            if (comp_param[idx].done) {
> -                CompressParam *param = &comp_param[idx];
> +        for (int i = 0; i < thread_count; i++) {
> +            if (comp_param[i].done) {
> +                CompressParam *param = &comp_param[i];
>                  qemu_mutex_lock(&param->mutex);
>                  param->done = false;
>                  send_queued_data(param);
> @@ -384,16 +381,14 @@ static void *do_data_decompress(void *opaque)
>  
>  int wait_for_decompress_done(void)
>  {
> -    int idx, thread_count;
> -
>      if (!migrate_compress()) {
>          return 0;
>      }
>  
> -    thread_count = migrate_decompress_threads();
> +    int thread_count = migrate_decompress_threads();
>      qemu_mutex_lock(&decomp_done_lock);
> -    for (idx = 0; idx < thread_count; idx++) {
> -        while (!decomp_param[idx].done) {
> +    for (int i = 0; i < thread_count; i++) {
> +        while (!decomp_param[i].done) {
>              qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
>          }
>      }
> @@ -484,20 +479,18 @@ exit:
>  
>  void decompress_data_with_multi_threads(QEMUFile *f, void *host, int len)
>  {
> -    int idx, thread_count;
> -
> -    thread_count = migrate_decompress_threads();
> +    int thread_count = migrate_decompress_threads();
>      QEMU_LOCK_GUARD(&decomp_done_lock);
>      while (true) {
> -        for (idx = 0; idx < thread_count; idx++) {
> -            if (decomp_param[idx].done) {
> -                decomp_param[idx].done = false;
> -                qemu_mutex_lock(&decomp_param[idx].mutex);
> -                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
> -                decomp_param[idx].des = host;
> -                decomp_param[idx].len = len;
> -                qemu_cond_signal(&decomp_param[idx].cond);
> -                qemu_mutex_unlock(&decomp_param[idx].mutex);
> +        for (int i = 0; i < thread_count; i++) {
> +            if (decomp_param[i].done) {
> +                decomp_param[i].done = false;
> +                qemu_mutex_lock(&decomp_param[i].mutex);
> +                qemu_get_buffer(f, decomp_param[i].compbuf, len);
> +                decomp_param[i].des = host;
> +                decomp_param[i].len = len;
> +                qemu_cond_signal(&decomp_param[i].cond);
> +                qemu_mutex_unlock(&decomp_param[i].mutex);
>                  return;
>              }
>          }
