| Message ID | 1600137887-58739-7-git-send-email-zhengchuan@huawei.com |
|---|---|
| State | New |
| Series | *** A Method for evaluating dirty page rate *** |
Chuan Zheng <zhengchuan@huawei.com> wrote on Tue, Sep 15, 2020 at 10:34 AM:
>
> Record hash results for each sampled page, crc32 is taken to calculate
> hash results for each sampled length in TARGET_PAGE_SIZE.
>
> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> Reviewed-by: David Edmondson <david.edmondson@oracle.com>

Reviewed-by: Li Qiang <liq3ea@gmail.com>

> ---
>  migration/dirtyrate.c | 109 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 109 insertions(+)
>
> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> index 7366bf3..5e6eedf 100644
> --- a/migration/dirtyrate.c
> +++ b/migration/dirtyrate.c
> @@ -10,6 +10,7 @@
>   * See the COPYING file in the top-level directory.
>   */
>
> +#include <zlib.h>
>  #include "qemu/osdep.h"
>  #include "qapi/error.h"
>  #include "cpu.h"
> @@ -68,6 +69,114 @@ static void update_dirtyrate(uint64_t msec)
>      DirtyStat.dirty_rate = dirtyrate;
>  }
>
> +/*
> + * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
> + * in ramblock, which starts from ramblock base address.
> + */
> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
> +                                      uint64_t vfn)
> +{
> +    uint32_t crc;
> +
> +    crc = crc32(0, (info->ramblock_addr +
> +                    vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
> +
> +    return crc;
> +}
> +
> +static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
> +{
> +    unsigned int sample_pages_count;
> +    int i;
> +    GRand *rand;
> +
> +    sample_pages_count = info->sample_pages_count;
> +
> +    /* ramblock size less than one page, return success to skip this ramblock */
> +    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
> +        return true;
> +    }
> +
> +    info->hash_result = g_try_malloc0_n(sample_pages_count,
> +                                        sizeof(uint32_t));
> +    if (!info->hash_result) {
> +        return false;
> +    }
> +
> +    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
> +                                            sizeof(uint64_t));
> +    if (!info->sample_page_vfn) {
> +        g_free(info->hash_result);
> +        return false;
> +    }
> +
> +    rand = g_rand_new();
> +    for (i = 0; i < sample_pages_count; i++) {
> +        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
> +                                                    info->ramblock_pages - 1);
> +        info->hash_result[i] = get_ramblock_vfn_hash(info,
> +                                                     info->sample_page_vfn[i]);
> +    }
> +    g_rand_free(rand);
> +
> +    return true;
> +}
> +
> +static void get_ramblock_dirty_info(RAMBlock *block,
> +                                    struct RamblockDirtyInfo *info,
> +                                    struct DirtyRateConfig *config)
> +{
> +    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
> +
> +    /* Right shift 30 bits to calc ramblock size in GB */
> +    info->sample_pages_count = (qemu_ram_get_used_length(block) *
> +                                sample_pages_per_gigabytes) >> 30;
> +    /* Right shift TARGET_PAGE_BITS to calc page count */
> +    info->ramblock_pages = qemu_ram_get_used_length(block) >>
> +                           TARGET_PAGE_BITS;
> +    info->ramblock_addr = qemu_ram_get_host_addr(block);
> +    strcpy(info->idstr, qemu_ram_get_idstr(block));
> +}
> +
> +static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
> +                                      struct DirtyRateConfig config,
> +                                      int *block_count)
> +{
> +    struct RamblockDirtyInfo *info = NULL;
> +    struct RamblockDirtyInfo *dinfo = NULL;
> +    RAMBlock *block = NULL;
> +    int total_count = 0;
> +    int index = 0;
> +    bool ret = false;
> +
> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
> +        total_count++;
> +    }
> +
> +    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
> +    if (dinfo == NULL) {
> +        goto out;
> +    }
> +
> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
> +        if (index >= total_count) {
> +            break;
> +        }
> +        info = &dinfo[index];
> +        get_ramblock_dirty_info(block, info, &config);
> +        if (!save_ramblock_hash(info)) {
> +            goto out;
> +        }
> +        index++;
> +    }
> +    ret = true;
> +
> +out:
> +    *block_count = index;
> +    *block_dinfo = dinfo;
> +    return ret;
> +}
> +
>  static void calculate_dirtyrate(struct DirtyRateConfig config)
>  {
>      /* todo */
> --
> 1.8.3.1
>
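A worked example of get_ramblock_dirty_info()'s arithmetic (the sample rate used here is hypothetical, not the series' default): with sample_pages_per_gigabytes = 256, a 4 GiB RAMBlock yields (4 << 30) * 256 >> 30 = 1024 sampled pages, while any block smaller than 4 MiB rounds down to zero samples and is skipped by the early return in save_ramblock_hash().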
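For readers skimming the series, here is a minimal standalone sketch of the sampling idea (illustrative only, not part of the patch): hash a random set of page-sized chunks with zlib's crc32(), let the workload run, then re-hash the same chunks and count mismatches. PAGE_SIZE, REGION_PAGES, SAMPLE_COUNT and the demo buffer are hypothetical stand-ins for TARGET_PAGE_SIZE, a RAMBlock's page count and the per-block sample count; it depends only on glib and zlib.

/*
 * demo.c - standalone sketch of crc32-based page sampling.
 * Build (assumed): gcc demo.c $(pkg-config --cflags --libs glib-2.0) -lz
 */
#include <zlib.h>
#include <glib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE     4096   /* stand-in for TARGET_PAGE_SIZE */
#define REGION_PAGES  1024   /* stand-in for a RAMBlock's page count */
#define SAMPLE_COUNT  16     /* stand-in for sample_pages_count */

int main(void)
{
    static uint8_t region[REGION_PAGES * PAGE_SIZE];
    uint64_t vfn[SAMPLE_COUNT];
    uint32_t hash[SAMPLE_COUNT];
    GRand *rand = g_rand_new();
    int dirty = 0;

    /* Phase 1: pick random page frames and record a crc32 for each. */
    for (int i = 0; i < SAMPLE_COUNT; i++) {
        vfn[i] = g_rand_int_range(rand, 0, REGION_PAGES);
        hash[i] = crc32(0, region + vfn[i] * PAGE_SIZE, PAGE_SIZE);
    }

    /* Simulate the guest dirtying a couple of the sampled pages. */
    memset(region + vfn[0] * PAGE_SIZE, 0xab, PAGE_SIZE);
    memset(region + vfn[3] * PAGE_SIZE, 0xcd, PAGE_SIZE);

    /* Phase 2: re-hash the same frames; a mismatch marks a dirty page. */
    for (int i = 0; i < SAMPLE_COUNT; i++) {
        uint32_t now = crc32(0, region + vfn[i] * PAGE_SIZE, PAGE_SIZE);
        if (now != hash[i]) {
            dirty++;
        }
    }

    printf("%d of %d sampled pages dirtied\n", dirty, SAMPLE_COUNT);
    g_rand_free(rand);
    return 0;
}

The fraction of mismatching samples, scaled by the block's total page count and the measurement window, is roughly what later patches in this series turn into a dirty page rate.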