qemu-devel

From: Li Qiang
Subject: Re: [PATCH v7 06/12] migration/dirtyrate: Record hash results for each sampled page
Date: Mon, 14 Sep 2020 19:25:26 +0800

Zheng Chuan <zhengchuan@huawei.com> wrote on Sunday, September 13, 2020 at 10:59 AM:
>
>
>
> On 2020/9/10 21:51, Li Qiang wrote:
> > Chuan Zheng <zhengchuan@huawei.com> wrote on Wednesday, September 9, 2020 at 10:14 PM:
> >>
> >> Record hash results for each sampled page. crc32 is used to calculate
> >> the hash of each sampled region of TARGET_PAGE_SIZE length.
> >>
> >> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> >> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> >> Reviewed-by: David Edmondson <david.edmondson@oracle.com>
> >> ---
> >>  migration/dirtyrate.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++++
> >>  1 file changed, 125 insertions(+)
> >>
> >> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> >> index d56cd93..bc87269 100644
> >> --- a/migration/dirtyrate.c
> >> +++ b/migration/dirtyrate.c
> >> @@ -10,6 +10,7 @@
> >>   * See the COPYING file in the top-level directory.
> >>   */
> >>
> >> +#include <zlib.h>
> >>  #include "qemu/osdep.h"
> >>  #include "qapi/error.h"
> >>  #include "cpu.h"
> >> @@ -68,6 +69,130 @@ static void update_dirtyrate(uint64_t msec)
> >>      DirtyStat.dirty_rate = dirtyrate;
> >>  }
> >>
> >> +/*
> >> + * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
> >> + * in ramblock, which starts from ramblock base address.
> >> + */
> >> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
> >> +                                      uint64_t vfn)
> >> +{
> >> +    uint32_t crc;
> >> +
> >> +    crc = crc32(0, (info->ramblock_addr +
> >> +                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
> >> +
> >> +    return crc;
> >> +}
> >> +
> >> +static int save_ramblock_hash(struct RamblockDirtyInfo *info)
> >> +{
> >> +    unsigned int sample_pages_count;
> >> +    int i;
> >> +    GRand *rand;
> >> +
> >> +    sample_pages_count = info->sample_pages_count;
> >> +
> >> +    /* ramblock size less than one page, return success to skip this ramblock */
> >> +    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
> >> +        return 0;
> >> +    }
> >> +
> >> +    info->hash_result = g_try_malloc0_n(sample_pages_count,
> >> +                                        sizeof(uint32_t));
> >> +    if (!info->hash_result) {
> >> +        return -1;
> >> +    }
> >> +
> >> +    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
> >> +                                            sizeof(uint64_t));
> >> +    if (!info->sample_page_vfn) {
> >> +        g_free(info->hash_result);
> >> +        return -1;
> >> +    }
> >> +
> >> +    rand  = g_rand_new();
> >> +    for (i = 0; i < sample_pages_count; i++) {
> >> +        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
> >> +                                                    info->ramblock_pages - 1);
> >> +        info->hash_result[i] = get_ramblock_vfn_hash(info,
> >> +                                                     info->sample_page_vfn[i]);
> >> +    }
> >> +    g_rand_free(rand);
> >> +
> >> +    return 0;
> >> +}
> >> +
> >> +static void get_ramblock_dirty_info(RAMBlock *block,
> >> +                                    struct RamblockDirtyInfo *info,
> >> +                                    struct DirtyRateConfig *config)
> >> +{
> >> +    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
> >> +
> >> +    /* Right shift 30 bits to calc ramblock size in GB */
> >> +    info->sample_pages_count = (qemu_ram_get_used_length(block) *
> >> +                                sample_pages_per_gigabytes) >> 30;
> >> +    /* Right shift TARGET_PAGE_BITS to calc page count */
> >> +    info->ramblock_pages = qemu_ram_get_used_length(block) >>
> >> +                           TARGET_PAGE_BITS;
> >> +    info->ramblock_addr = qemu_ram_get_host_addr(block);
> >> +    strcpy(info->idstr, qemu_ram_get_idstr(block));
> >> +}
> >> +
> >> +static struct RamblockDirtyInfo *
> >> +alloc_ramblock_dirty_info(int *block_index,
> >> +                          struct RamblockDirtyInfo *block_dinfo)
> >> +{
> >> +    struct RamblockDirtyInfo *info = NULL;
> >> +    int index = *block_index;
> >> +
> >> +    if (!block_dinfo) {
> >> +        index = 0;
> >> +        block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
> >> +    } else {
> >> +        index++;
> >> +        block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
> >> +                                    sizeof(struct RamblockDirtyInfo));
> >> +    }
> >> +    if (!block_dinfo) {
> >> +        return NULL;
> >
> > What happens in this case? The 'index' has already been increased, but
> > the allocation failed.
> >
> >> +    }
> >> +
> >> +    info = &block_dinfo[index];
> >> +    *block_index = index;
> >> +    memset(info, 0, sizeof(struct RamblockDirtyInfo));
> >> +
> >> +    return block_dinfo;
> >> +}
> >> +
> >> +static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
> >> +                                     struct DirtyRateConfig config,
> >> +                                     int *block_index)
> >> +{
> >> +    struct RamblockDirtyInfo *info = NULL;
> >> +    struct RamblockDirtyInfo *dinfo = NULL;
> >> +    RAMBlock *block = NULL;
> >> +    int index = 0;
> >> +
> >> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
> >> +        dinfo = alloc_ramblock_dirty_info(&index, dinfo);
> >
> > Here you call 'alloc_ramblock_dirty_info' for every migratable block.
> > This also complicates 'alloc_ramblock_dirty_info' itself:
> > 1. you need to differentiate the first element from the others.
> > 2. you need to use two out parameters, which can be confusing.
> >
> > Could we allocate this array in one go? This may require iterating
> > the RAM block list twice, but I think it would make the code simpler
> > and cleaner.
> >
> > Thanks,
> > Li Qiang
> >
> Hi, Qiang.
> Thank you for your review.
> I am not sure if I fully understand what you mean :)
> You mean we first get the total count with a first iteration
> over the RAM block list, and then allocate the array in one go?

Hi Chuan,

Yes, this is what I mean.
I have just seen your new patches and will review them as soon as possible.
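
Roughly, what I have in mind is something like the sketch below. This is only
an illustration, not tested code: count_migratable_ramblocks() is a made-up
helper name, I renamed block_index to block_count, and I reuse the helpers
from your patch unchanged.

static int count_migratable_ramblocks(void)
{
    RAMBlock *block;
    int count = 0;

    /* First pass: only count the migratable ramblocks. */
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        count++;
    }

    return count;
}

static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                     struct DirtyRateConfig config,
                                     int *block_count)
{
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int nblocks = count_migratable_ramblocks();
    int index = 0;

    if (nblocks <= 0) {
        return -1;
    }

    /* Second pass: allocate the whole array once, then fill it in. */
    dinfo = g_try_new0(struct RamblockDirtyInfo, nblocks);
    if (!dinfo) {
        return -1;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        get_ramblock_dirty_info(block, &dinfo[index], &config);
        if (save_ramblock_hash(&dinfo[index]) < 0) {
            /* Hand back what we have so the caller can free it. */
            *block_dinfo = dinfo;
            *block_count = index + 1;
            return -1;
        }
        index++;
    }

    *block_dinfo = dinfo;
    *block_count = nblocks;

    return 0;
}

With a single allocation like this, alloc_ramblock_dirty_info() and its in/out
index handling (including the failure case I mentioned above) are no longer
needed.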

Thanks,
Li Qiang

>
> >> +        if (dinfo == NULL) {
> >> +            return -1;
> >> +        }
> >> +        info = &dinfo[index];
> >> +        get_ramblock_dirty_info(block, info, &config);
> >> +        if (save_ramblock_hash(info) < 0) {
> >> +            *block_dinfo = dinfo;
> >> +            *block_index = index;
> >
> > As with the first comment, the 'index' here does not seem right.
> >
> >
> > Thanks,
> > Li Qiang
> >> +            return -1;
> >> +        }
> >> +    }
> >> +
> >> +    *block_dinfo = dinfo;
> >> +    *block_index = index;
> >> +
> >> +    return 0;
> >> +}
> >> +
> >>  static void calculate_dirtyrate(struct DirtyRateConfig config)
> >>  {
> >>      /* todo */
> >> --
> >> 1.8.3.1
> >>
> >>
> >
> >


