[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v2 10/20] parallels: Add dirty bitmaps saving
From: |
Mike Maslenkin |
Subject: |
Re: [PATCH v2 10/20] parallels: Add dirty bitmaps saving |
Date: |
Sat, 21 Oct 2023 13:40:14 +0300 |
On Thu, Oct 19, 2023 at 4:05 PM Alexander Ivanov
<alexander.ivanov@virtuozzo.com> wrote:
>
> Now dirty bitmaps can be loaded but there is no way to save them. Add code
> for dirty bitmap storage.
>
> Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
> ---
> block/parallels-ext.c | 167 ++++++++++++++++++++++++++++++++++++++++++
> block/parallels.c | 16 +++-
> block/parallels.h | 5 ++
> 3 files changed, 186 insertions(+), 2 deletions(-)
>
> diff --git a/block/parallels-ext.c b/block/parallels-ext.c
> index 8a109f005a..0a632a2331 100644
> --- a/block/parallels-ext.c
> +++ b/block/parallels-ext.c
> @@ -24,6 +24,7 @@
> */
>
> #include "qemu/osdep.h"
> +#include "qemu/error-report.h"
> #include "qapi/error.h"
> #include "block/block-io.h"
> #include "block/block_int.h"
> @@ -301,3 +302,169 @@ out:
>
> return ret;
> }
> +
> +static void parallels_save_bitmap(BlockDriverState *bs, BdrvDirtyBitmap
> *bitmap,
> + uint8_t **buf, int *buf_size)
> +{
> + BDRVParallelsState *s = bs->opaque;
> + ParallelsFeatureHeader *fh;
> + ParallelsDirtyBitmapFeature *bh;
> + uint64_t *l1_table, l1_size, granularity, limit;
> + int64_t bm_size, ser_size, offset, buf_used;
> + int64_t alloc_size = 1;
> + const char *name;
> + uint8_t *bm_buf;
> + QemuUUID uuid;
> + int ret = 0;
> +
> + if (!bdrv_dirty_bitmap_get_persistence(bitmap) ||
> + bdrv_dirty_bitmap_inconsistent(bitmap)) {
> + return;
> + }
> +
> + bm_size = bdrv_dirty_bitmap_size(bitmap);
> + granularity = bdrv_dirty_bitmap_granularity(bitmap);
> + limit = bdrv_dirty_bitmap_serialization_coverage(s->cluster_size,
> bitmap);
> + ser_size = bdrv_dirty_bitmap_serialization_size(bitmap, 0, bm_size);
> + l1_size = DIV_ROUND_UP(ser_size, s->cluster_size);
> +
> + buf_used = l1_size * 8 + sizeof(*fh) + sizeof(*bh);
> + /* Check if there is enough space for the final section */
> + if (*buf_size - buf_used < sizeof(*fh)) {
> + return;
> + }
> +
> + name = bdrv_dirty_bitmap_name(bitmap);
> + ret = qemu_uuid_parse(name, &uuid);
> + if (ret < 0) {
> + error_report("Can't save dirty bitmap: ID parsing error: '%s'",
> name);
> + return;
> + }
> +
> + fh = (ParallelsFeatureHeader *)*buf;
> + bh = (ParallelsDirtyBitmapFeature *)(*buf + sizeof(*fh));
> + l1_table = (uint64_t *)((uint8_t *)bh + sizeof(*bh));
> +
> + fh->magic = cpu_to_le64(PARALLELS_DIRTY_BITMAP_FEATURE_MAGIC);
> + fh->data_size = cpu_to_le32(l1_size * 8 + sizeof(*bh));
> +
> + bh->l1_size = cpu_to_le32(l1_size);
> + bh->size = cpu_to_le64(bm_size >> BDRV_SECTOR_BITS);
> + bh->granularity = cpu_to_le32(granularity >> BDRV_SECTOR_BITS);
> + memcpy(bh->id, &uuid, sizeof(uuid));
> +
> + bm_buf = qemu_blockalign(bs, s->cluster_size);
> +
> + offset = 0;
> + while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset, bm_size))
> >= 0) {
> + uint64_t idx = offset / limit;
> + int64_t cluster_off, end, write_size;
> +
> + offset = QEMU_ALIGN_DOWN(offset, limit);
> + end = MIN(bm_size, offset + limit);
> + write_size = bdrv_dirty_bitmap_serialization_size(bitmap, offset,
> + end - offset);
> + assert(write_size <= s->cluster_size);
> +
> + bdrv_dirty_bitmap_serialize_part(bitmap, bm_buf, offset, end -
> offset);
> + if (write_size < s->cluster_size) {
> + memset(bm_buf + write_size, 0, s->cluster_size - write_size);
> + }
> +
> + cluster_off = parallels_allocate_host_clusters(bs, &alloc_size);
> + if (cluster_off <= 0) {
> + goto end;
> + }
> +
> + ret = bdrv_pwrite(bs->file, cluster_off, s->cluster_size, bm_buf, 0);
> + if (ret < 0) {
> + memset(&fh->magic, 0, sizeof(fh->magic));
> + parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
> + cluster_off, 1);
> + goto end;
> + }
> +
> + l1_table[idx] = cpu_to_le64(cluster_off >> BDRV_SECTOR_BITS);
> + offset = end;
> + }
> +
> + *buf_size -= buf_used;
> + *buf += buf_used;
> +
> +end:
> + qemu_vfree(bm_buf);
> +}
> +
> +void parallels_store_persistent_dirty_bitmaps(BlockDriverState *bs,
> + Error **errp)
> +{
> + BDRVParallelsState *s = bs->opaque;
> + BdrvDirtyBitmap *bitmap;
> + ParallelsFormatExtensionHeader *eh;
> + int remaining = s->cluster_size;
> + uint8_t *buf, *pos;
> + int64_t header_off, alloc_size = 1;
> + g_autofree uint8_t *hash = NULL;
> + size_t hash_len = 0;
> + int ret;
> +
> + s->header->ext_off = 0;
> +
> + if (!bdrv_has_named_bitmaps(bs)) {
> + return;
> + }
> +
> + buf = qemu_blockalign0(bs, s->cluster_size);
> +
> + eh = (ParallelsFormatExtensionHeader *)buf;
> + pos = buf + sizeof(*eh);
> +
> + eh->magic = cpu_to_le64(PARALLELS_FORMAT_EXTENSION_MAGIC);
> +
> + FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
> + parallels_save_bitmap(bs, bitmap, &pos, &remaining);
> + }
> +
> + header_off = parallels_allocate_host_clusters(bs, &alloc_size);
> + if (header_off < 0) {
> + error_report("Can't save dirty bitmap: cluster allocation error");
> + ret = header_off;
> + goto end;
> + }
> +
> + ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALG_MD5,
> + (const char *)(buf + sizeof(*eh)),
> + s->cluster_size - sizeof(*eh),
> + &hash, &hash_len, errp);
> + if (ret < 0 || hash_len != sizeof(eh->check_sum)) {
> + error_report("Can't save dirty bitmap: hash error");
> + ret = -EINVAL;
> + goto end;
> + }
> + memcpy(eh->check_sum, hash, hash_len);
> +
> + ret = bdrv_pwrite(bs->file, header_off, s->cluster_size, buf, 0);
> + if (ret < 0) {
> + error_report("Can't save dirty bitmap: IO error");
> + parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
> + header_off, 1);
> + goto end;
> + }
> +
> + s->header->ext_off = cpu_to_le64(header_off / BDRV_SECTOR_SIZE);
> +end:
> + qemu_vfree(buf);
> +}
> +
> +bool coroutine_fn parallels_co_can_store_new_dirty_bitmap(BlockDriverState
> *bs,
> + const char *name,
> + uint32_t
> granularity,
> + Error **errp)
> +{
> + if (bdrv_find_dirty_bitmap(bs, name)) {
> + error_setg(errp, "Bitmap already exists: %s", name);
> + return false;
> + }
> +
> + return true;
> +}
> diff --git a/block/parallels.c b/block/parallels.c
> index 2ee2b42038..bb1e765ec8 100644
> --- a/block/parallels.c
> +++ b/block/parallels.c
> @@ -1470,14 +1470,25 @@ fail:
> static int parallels_inactivate(BlockDriverState *bs)
> {
> BDRVParallelsState *s = bs->opaque;
> + Error *err = NULL;
> int ret;
>
> + parallels_store_persistent_dirty_bitmaps(bs, &err);
> + if (err != NULL) {
> + error_reportf_err(err, "Lost persistent bitmaps during "
> + "inactivation of node '%s': ",
> + bdrv_get_device_or_node_name(bs));
> + }
> +
> s->header->inuse = 0;
> parallels_update_header(bs);
>
Recently bdrv_get_device_or_node_name() calls were guarded by
bdrv_graph_rd{,un}lock_main_loop.
There are no annotations for parallels_close() or
parallels_inactivate(), so maybe it should be done in the same way?
- Re: [PATCH v2 01/20] parallels: Set s->used_bmap to NULL in parallels_free_used_bitmap(), (continued)
- [PATCH v2 04/20] parallels: Move host clusters allocation to a separate function, Alexander Ivanov, 2023/10/19
- [PATCH v2 06/20] parallels: Recreate used bitmap in parallels_check_leak(), Alexander Ivanov, 2023/10/19
- [PATCH v2 05/20] parallels: Set data_end value in parallels_check_leak(), Alexander Ivanov, 2023/10/19
- [PATCH v2 03/20] parallels: Add mark_unused() helper, Alexander Ivanov, 2023/10/19
- [PATCH v2 08/20] parallels: Create used bitmap even if checks needed, Alexander Ivanov, 2023/10/19
- [PATCH v2 07/20] parallels: Add a note about used bitmap in parallels_check_duplicate(), Alexander Ivanov, 2023/10/19
- [PATCH v2 09/20] parallels: Make mark_used() and mark_unused() global functions, Alexander Ivanov, 2023/10/19
- [PATCH v2 10/20] parallels: Add dirty bitmaps saving, Alexander Ivanov, 2023/10/19
- Re: [PATCH v2 10/20] parallels: Add dirty bitmaps saving,
Mike Maslenkin <=
- [PATCH v2 12/20] parallels: Handle L1 entries equal to one, Alexander Ivanov, 2023/10/19
- [PATCH v2 13/20] parallels: Make a loaded dirty bitmap persistent, Alexander Ivanov, 2023/10/19
- [PATCH v2 14/20] parallels: Reverse a conditional in parallels_check_leak() to reduce indents, Alexander Ivanov, 2023/10/19
- [PATCH v2 11/20] parallels: Let image extensions work in RW mode, Alexander Ivanov, 2023/10/19
- [PATCH v2 16/20] parallels: Check unused clusters in parallels_check_leak(), Alexander Ivanov, 2023/10/19
- [PATCH v2 17/20] parallels: Remove unnecessary data_end field, Alexander Ivanov, 2023/10/19
- [PATCH v2 18/20] tests: Add parallels images support to test 165, Alexander Ivanov, 2023/10/19
- [PATCH v2 19/20] tests: Turned on 256, 299, 304 and block-status-cache for parallels format, Alexander Ivanov, 2023/10/19
- [PATCH v2 15/20] parallels: Truncate images on the last used cluster, Alexander Ivanov, 2023/10/19