[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 12/34] qcow2: Add l2_entry_size()
From: Max Reitz
Subject: [PULL 12/34] qcow2: Add l2_entry_size()
Date: Tue, 25 Aug 2020 10:32:49 +0200
From: Alberto Garcia <berto@igalia.com>
qcow2 images with subclusters have 128-bit L2 entries. The first 64
bits contain the same information as traditional images and the last
64 bits form a bitmap with the status of each individual subcluster.
Because of that we cannot assume that L2 entries are sizeof(uint64_t)
anymore. This function returns the proper value for the image.
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <d34d578bd0380e739e2dde3e8dd6187d3d249fa9.1594396418.git.berto@igalia.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
block/qcow2.h | 9 +++++++++
block/qcow2-cluster.c | 12 ++++++------
block/qcow2-refcount.c | 14 ++++++++------
block/qcow2.c | 8 ++++----
4 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/block/qcow2.h b/block/qcow2.h
index 4fe31adfd3..46b351229a 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -80,6 +80,10 @@
#define QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER 32
+/* Size of normal and extended L2 entries */
+#define L2E_SIZE_NORMAL (sizeof(uint64_t))
+#define L2E_SIZE_EXTENDED (sizeof(uint64_t) * 2)
+
#define MIN_CLUSTER_BITS 9
#define MAX_CLUSTER_BITS 21
@@ -521,6 +525,11 @@ static inline bool has_subclusters(BDRVQcow2State *s)
return false;
}
+static inline size_t l2_entry_size(BDRVQcow2State *s)
+{
+ return has_subclusters(s) ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
+}
+
static inline uint64_t get_l2_entry(BDRVQcow2State *s, uint64_t *l2_slice,
int idx)
{
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 5bd1e1feb8..0b762502f6 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -208,7 +208,7 @@ static int l2_load(BlockDriverState *bs, uint64_t offset,
uint64_t l2_offset, uint64_t **l2_slice)
{
BDRVQcow2State *s = bs->opaque;
- int start_of_slice = sizeof(uint64_t) *
+ int start_of_slice = l2_entry_size(s) *
(offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));
return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
@@ -281,7 +281,7 @@ static int l2_allocate(BlockDriverState *bs, int l1_index)
/* allocate a new l2 entry */
- l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
+ l2_offset = qcow2_alloc_clusters(bs, s->l2_size * l2_entry_size(s));
if (l2_offset < 0) {
ret = l2_offset;
goto fail;
@@ -305,7 +305,7 @@ static int l2_allocate(BlockDriverState *bs, int l1_index)
/* allocate a new entry in the l2 cache */
- slice_size2 = s->l2_slice_size * sizeof(uint64_t);
+ slice_size2 = s->l2_slice_size * l2_entry_size(s);
n_slices = s->cluster_size / slice_size2;
trace_qcow2_l2_allocate_get_empty(bs, l1_index);
@@ -369,7 +369,7 @@ fail:
}
s->l1_table[l1_index] = old_l2_offset;
if (l2_offset > 0) {
- qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
+ qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
QCOW2_DISCARD_ALWAYS);
}
return ret;
@@ -717,7 +717,7 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
/* Then decrease the refcount of the old table */
if (l2_offset) {
- qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
+ qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
QCOW2_DISCARD_OTHER);
}
@@ -1921,7 +1921,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
int ret;
int i, j;
- slice_size2 = s->l2_slice_size * sizeof(uint64_t);
+ slice_size2 = s->l2_slice_size * l2_entry_size(s);
n_slices = s->cluster_size / slice_size2;
if (!is_active_l1) {
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 04546838e8..770c5dbc83 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -1254,7 +1254,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
l2_slice = NULL;
l1_table = NULL;
l1_size2 = l1_size * sizeof(uint64_t);
- slice_size2 = s->l2_slice_size * sizeof(uint64_t);
+ slice_size2 = s->l2_slice_size * l2_entry_size(s);
n_slices = s->cluster_size / slice_size2;
s->cache_discards = true;
@@ -1605,7 +1605,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
int i, l2_size, nb_csectors, ret;
/* Read L2 table from disk */
- l2_size = s->l2_size * sizeof(uint64_t);
+ l2_size = s->l2_size * l2_entry_size(s);
l2_table = g_malloc(l2_size);
ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
@@ -1680,15 +1680,16 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
offset);
if (fix & BDRV_FIX_ERRORS) {
+ int idx = i * (l2_entry_size(s) / sizeof(uint64_t));
uint64_t l2e_offset =
- l2_offset + (uint64_t)i * sizeof(uint64_t);
+ l2_offset + (uint64_t)i * l2_entry_size(s);
int ign = active ? QCOW2_OL_ACTIVE_L2 :
QCOW2_OL_INACTIVE_L2;
l2_entry = QCOW_OFLAG_ZERO;
set_l2_entry(s, l2_table, i, l2_entry);
ret = qcow2_pre_write_overlap_check(bs, ign,
- l2e_offset, sizeof(uint64_t), false);
+ l2e_offset, l2_entry_size(s), false);
if (ret < 0) {
fprintf(stderr, "ERROR: Overlap check failed\n");
res->check_errors++;
@@ -1698,7 +1699,8 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
}
ret = bdrv_pwrite_sync(bs->file, l2e_offset,
- &l2_table[i], sizeof(uint64_t));
+ &l2_table[idx],
+ l2_entry_size(s));
if (ret < 0) {
fprintf(stderr, "ERROR: Failed to overwrite L2 "
"table entry: %s\n", strerror(-ret));
@@ -1905,7 +1907,7 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
}
ret = bdrv_pread(bs->file, l2_offset, l2_table,
- s->l2_size * sizeof(uint64_t));
+ s->l2_size * l2_entry_size(s));
if (ret < 0) {
fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
strerror(-ret));
diff --git a/block/qcow2.c b/block/qcow2.c
index fb4584d3ee..edbf9fbd0a 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -883,7 +883,7 @@ static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
/* An L2 table is always one cluster in size so the max cache size
* should be a multiple of the cluster size. */
- uint64_t max_l2_cache = ROUND_UP(max_l2_entries * sizeof(uint64_t),
+ uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s),
s->cluster_size);
combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
@@ -1042,7 +1042,7 @@ static int qcow2_update_options_prepare(BlockDriverState *bs,
}
}
- r->l2_slice_size = l2_cache_entry_size / sizeof(uint64_t);
+ r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s);
r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
l2_cache_entry_size);
r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
@@ -1489,7 +1489,7 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
bs->encrypted = true;
}
- s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
+ s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s));
s->l2_size = 1 << s->l2_bits;
/* 2^(s->refcount_order - 3) is the refcount width in bytes */
s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
@@ -4238,7 +4238,7 @@ static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset,
* preallocation. All that matters is that we will not have to allocate
* new refcount structures for them.) */
nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters,
- s->cluster_size / sizeof(uint64_t));
+ s->cluster_size / l2_entry_size(s));
/* The cluster range may not be aligned to L2 boundaries, so add one L2
* table for a potential head/tail */
nb_new_l2_tables++;
--
2.26.2
- [PULL 01/34] qcow2: Make Qcow2AioTask store the full host offset, (continued)
- [PULL 01/34] qcow2: Make Qcow2AioTask store the full host offset, Max Reitz, 2020/08/25
- [PULL 02/34] qcow2: Convert qcow2_get_cluster_offset() into qcow2_get_host_offset(), Max Reitz, 2020/08/25
- [PULL 03/34] qcow2: Add calculate_l2_meta(), Max Reitz, 2020/08/25
- [PULL 04/34] qcow2: Split cluster_needs_cow() out of count_cow_clusters(), Max Reitz, 2020/08/25
- [PULL 05/34] qcow2: Process QCOW2_CLUSTER_ZERO_ALLOC clusters in handle_copied(), Max Reitz, 2020/08/25
- [PULL 06/34] qcow2: Add get_l2_entry() and set_l2_entry(), Max Reitz, 2020/08/25
- [PULL 07/34] qcow2: Document the Extended L2 Entries feature, Max Reitz, 2020/08/25
- [PULL 09/34] qcow2: Add subcluster-related fields to BDRVQcow2State, Max Reitz, 2020/08/25
- [PULL 10/34] qcow2: Add offset_to_sc_index(), Max Reitz, 2020/08/25
- [PULL 11/34] qcow2: Add offset_into_subcluster() and size_to_subclusters(), Max Reitz, 2020/08/25
- [PULL 12/34] qcow2: Add l2_entry_size(),
Max Reitz <=
- [PULL 13/34] qcow2: Update get/set_l2_entry() and add get/set_l2_bitmap(), Max Reitz, 2020/08/25
- [PULL 08/34] qcow2: Add dummy has_subclusters() function, Max Reitz, 2020/08/25
- [PULL 15/34] qcow2: Add qcow2_get_subcluster_range_type(), Max Reitz, 2020/08/25
- [PULL 14/34] qcow2: Add QCow2SubclusterType and qcow2_get_subcluster_type(), Max Reitz, 2020/08/25
- [PULL 16/34] qcow2: Add qcow2_cluster_is_allocated(), Max Reitz, 2020/08/25
- [PULL 18/34] qcow2: Replace QCOW2_CLUSTER_* with QCOW2_SUBCLUSTER_*, Max Reitz, 2020/08/25
- [PULL 19/34] qcow2: Handle QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC, Max Reitz, 2020/08/25
- [PULL 22/34] qcow2: Add subcluster support to zero_in_l2_slice(), Max Reitz, 2020/08/25
- [PULL 21/34] qcow2: Add subcluster support to qcow2_get_host_offset(), Max Reitz, 2020/08/25
- [PULL 23/34] qcow2: Add subcluster support to discard_in_l2_slice(), Max Reitz, 2020/08/25