mirror of https://github.com/qemu/qemu.git
qcow2: Use byte granularity in qcow2_alloc_cluster_offset()
This gets rid of the nb_clusters and keep_clusters variables and the
associated complicated calculations. Just advance the number of bytes that
have been processed and everything is fine.

This patch advances the variables even after the last operation, even though
they aren't used any more afterwards, to make things look more uniform. A
later patch will turn the whole thing into a loop, and then it actually
starts making sense.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
parent 411d62b04b
commit 710c2496d8
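To make the shape of the change easier to follow, here is a minimal, self-contained C sketch of the byte-granularity accounting the patch introduces. It is not the QEMU code: handle_step(), alloc_sketch() and the fixed 64 KiB cluster size are made-up stand-ins for handle_copied()/handle_alloc() and the real image parameters; only the start/remaining/cluster_offset arithmetic mirrors the diff below.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_BITS 9

/* Stand-in for handle_copied()/handle_alloc(): pretend one call can cover at
 * most the rest of a fixed 64 KiB cluster and report that size in *cur_bytes. */
static void handle_step(uint64_t start, uint64_t *cur_bytes)
{
    const uint64_t cluster_size = 64 * 1024;
    uint64_t in_cluster = cluster_size - (start % cluster_size);

    if (*cur_bytes > in_cluster) {
        *cur_bytes = in_cluster;
    }
}

/* Shape of the rewritten qcow2_alloc_cluster_offset(): only byte counters,
 * no nb_clusters/keep_clusters bookkeeping. */
static int alloc_sketch(uint64_t offset, int n_start, int n_end, int *num)
{
    uint64_t start = offset + ((uint64_t)n_start << BDRV_SECTOR_BITS);
    uint64_t remaining = (uint64_t)(n_end - n_start) << BDRV_SECTOR_BITS;
    uint64_t cluster_offset = 0;
    uint64_t cur_bytes = remaining;

    /* Step 2 analogue: reuse what is already allocated (COPIED clusters). */
    handle_step(start, &cur_bytes);
    start += cur_bytes;
    remaining -= cur_bytes;
    cluster_offset += cur_bytes;
    cur_bytes = remaining;

    /* Step 3 analogue: allocate whatever is still missing. */
    if (remaining != 0) {
        handle_step(start, &cur_bytes);
        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;
    }

    /* As the commit message says, the counters are advanced even after the
     * last step although nothing reads them any more at this point. */
    (void)start;
    (void)cluster_offset;

    /* The sector count reported to the caller falls out of 'remaining'. */
    *num = (n_end - n_start) - (int)(remaining >> BDRV_SECTOR_BITS);
    assert(*num > 0);
    return 0;
}

int main(void)
{
    int num;

    /* 8 KiB request (sectors 126..141) that starts 1 KiB before a 64 KiB
     * cluster boundary, so it takes both steps to cover it. */
    alloc_sketch(0, 126, 142, &num);
    printf("sectors handled: %d\n", num);   /* prints 16 */
    return 0;
}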
block/qcow2-cluster.c
@@ -1127,27 +1127,23 @@ int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
     int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
 {
     BDRVQcowState *s = bs->opaque;
-    int l2_index, ret, sectors;
-    unsigned int nb_clusters, keep_clusters;
+    uint64_t start, remaining;
     uint64_t cluster_offset;
     uint64_t cur_bytes;
+    int ret;
 
     trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                       n_start, n_end);
 
+    assert(n_start * BDRV_SECTOR_SIZE == offset_into_cluster(s, offset));
+    offset = start_of_cluster(s, offset);
+
 again:
+    start = offset + (n_start << BDRV_SECTOR_BITS);
+    remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
     cluster_offset = 0;
     *host_offset = 0;
 
-    /*
-     * Calculate the number of clusters to look for. We stop at L2 table
-     * boundaries to keep things simple.
-     */
-    l2_index = offset_to_l2_index(s, offset);
-    nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
-                      s->l2_size - l2_index);
-    n_end = MIN(n_end, nb_clusters * s->cluster_sectors);
-
     /*
      * Now start gathering as many contiguous clusters as possible:
      *
@@ -1165,8 +1161,8 @@ again:
      * cluster_offset to write to the same cluster and set up the right
      * synchronisation between the in-flight request and the new one.
      */
-    cur_bytes = (n_end - n_start) * BDRV_SECTOR_SIZE;
-    ret = handle_dependencies(bs, offset, &cur_bytes);
+    cur_bytes = remaining;
+    ret = handle_dependencies(bs, start, &cur_bytes);
     if (ret == -EAGAIN) {
         goto again;
     } else if (ret < 0) {
@@ -1177,33 +1173,28 @@ again:
          * correctly during the next loop iteration. */
     }
 
-    nb_clusters = size_to_clusters(s, offset + cur_bytes)
-                - (offset >> s->cluster_bits);
-
     /*
      * 2. Count contiguous COPIED clusters.
      */
-    uint64_t tmp_bytes = cur_bytes;
-    ret = handle_copied(bs, offset, &cluster_offset, &tmp_bytes, m);
+    ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
     if (ret < 0) {
         return ret;
     } else if (ret) {
-        keep_clusters =
-            size_to_clusters(s, tmp_bytes + offset_into_cluster(s, offset));
-        nb_clusters -= keep_clusters;
-
         if (!*host_offset) {
             *host_offset = start_of_cluster(s, cluster_offset);
         }
+
+        start += cur_bytes;
+        remaining -= cur_bytes;
+        cluster_offset += cur_bytes;
+
+        cur_bytes = remaining;
     } else if (cur_bytes == 0) {
-        keep_clusters = 0;
         goto done;
-    } else {
-        keep_clusters = 0;
     }
 
     /* If there is something left to allocate, do that now */
-    if (nb_clusters == 0) {
+    if (remaining == 0) {
         goto done;
     }
 
@@ -1211,43 +1202,24 @@ again:
      * 3. If the request still hasn't completed, allocate new clusters,
      *    considering any cluster_offset of steps 1c or 2.
      */
-    int alloc_n_start;
-    int alloc_n_end;
-
-    if (keep_clusters != 0) {
-        offset = start_of_cluster(s, offset
-                                     + keep_clusters * s->cluster_size);
-        cluster_offset = start_of_cluster(s, cluster_offset
-                                             + keep_clusters * s->cluster_size);
-
-        alloc_n_start = 0;
-        alloc_n_end = n_end - keep_clusters * s->cluster_sectors;
-    } else {
-        alloc_n_start = n_start;
-        alloc_n_end = n_end;
-    }
-
-    cur_bytes = MIN(cur_bytes, ((alloc_n_end - alloc_n_start) << BDRV_SECTOR_BITS));
-
-    ret = handle_alloc(bs, offset, &cluster_offset, &cur_bytes, m);
+    ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
     if (ret < 0) {
         return ret;
-    }
-
-    if (!*host_offset) {
-        *host_offset = start_of_cluster(s, cluster_offset);
+    } else if (ret) {
+        if (!*host_offset) {
+            *host_offset = start_of_cluster(s, cluster_offset);
+        }
+
+        start += cur_bytes;
+        remaining -= cur_bytes;
+        cluster_offset += cur_bytes;
     }
-    nb_clusters = size_to_clusters(s, cur_bytes + offset_into_cluster(s, offset));
 
     /* Some cleanup work */
 done:
-    sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
-    if (sectors > n_end) {
-        sectors = n_end;
-    }
-
-    assert(sectors > n_start);
-    *num = sectors - n_start;
+    *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
+    assert(*num > 0);
+    assert(*host_offset != 0);
 
     return 0;
 }
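As a quick sanity check of the new computation at done: (the sizes below are illustrative, not taken from the commit): for n_start = 0 and n_end = 16, the request spans 16 sectors, so remaining starts at 16 << 9 = 8192 bytes. If handle_copied() covers 4096 bytes and handle_alloc() another 2048, remaining ends up at 2048 and *num = (16 - 0) - (2048 >> 9) = 16 - 4 = 12 sectors; only if both steps together consume all 8192 bytes does *num report the full 16.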