From 9bda600b0833821685e23068e50be5134dc0ab05 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Tue, 17 Mar 2020 12:55:34 -0500 Subject: [PATCH 01/11] build: Silence clang warning on older glib autoptr usage glib's G_DEFINE_AUTOPTR_CLEANUP_FUNC() macro defines several static inline functions, often with some of them unused, but prior to 2.57.2 did not mark the functions as such. As a result, clang (but not gcc) fails to build with older glib unless -Wno-unused-function is enabled. Reported-by: Peter Maydell Signed-off-by: Eric Blake Reviewed-by: John Snow Message-id: 20200317175534.196295-1-eblake@redhat.com Signed-off-by: John Snow --- configure | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/configure b/configure index 06fcd070fb..479336bf6e 100755 --- a/configure +++ b/configure @@ -3851,6 +3851,26 @@ if ! compile_prog "$glib_cflags -Werror" "$glib_libs" ; then fi fi +# Silence clang warnings triggered by glib < 2.57.2 +cat > $TMPC << EOF +#include +typedef struct Foo { + int i; +} Foo; +static void foo_free(Foo *f) +{ + g_free(f); +} +G_DEFINE_AUTOPTR_CLEANUP_FUNC(Foo, foo_free); +int main(void) { return 0; } +EOF +if ! compile_prog "$glib_cflags -Werror" "$glib_libs" ; then + if cc_has_warning_flag "-Wno-unused-function"; then + glib_cflags="$glib_cflags -Wno-unused-function" + CFLAGS="$CFLAGS -Wno-unused-function" + fi +fi + ######################################### # zlib check From 6a150995d486013221361cddb5ae6189afb0cef5 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:32 +0300 Subject: [PATCH 02/11] hbitmap: assert that we don't create bitmap larger than INT64_MAX We have APIs which returns signed int64_t, to be able to return error. Therefore we can't handle bitmaps with absolute size larger than (INT64_MAX+1). Still, keep maximum to be INT64_MAX which is a bit safer. Note, that bitmaps are used to represent disk images, which can't exceed INT64_MAX anyway. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz Reviewed-by: Eric Blake Reviewed-by: John Snow Message-id: 20200205112041.6003-2-vsementsov@virtuozzo.com Signed-off-by: John Snow --- util/hbitmap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/util/hbitmap.c b/util/hbitmap.c index 242c6e519c..7f9b3e0cd7 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -716,6 +716,7 @@ HBitmap *hbitmap_alloc(uint64_t size, int granularity) HBitmap *hb = g_new0(struct HBitmap, 1); unsigned i; + assert(size <= INT64_MAX); hb->orig_size = size; assert(granularity >= 0 && granularity < 64); @@ -746,6 +747,7 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size) uint64_t num_elements = size; uint64_t old; + assert(size <= INT64_MAX); hb->orig_size = size; /* Size comes in as logical elements, adjust for granularity. */ From be24c7140cf9212de2a00b855939d049d7cb7554 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:33 +0300 Subject: [PATCH 03/11] hbitmap: move hbitmap_iter_next_word to hbitmap.c The function is definitely internal (it's not used by third party and it has complicated interface). Move it to .c file. 
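For reference, with the word-level helper now private to util/hbitmap.c, callers outside the file walk a bitmap through the bit-level iterator that stays in the header. A minimal sketch of that usage (illustrative only, not part of this patch; assumes the usual QEMU includes):

    #include "qemu/osdep.h"
    #include "qemu/hbitmap.h"

    /* Visit every dirty bit of @hb via the public iterator API. */
    static void walk_dirty_bits(const HBitmap *hb)
    {
        HBitmapIter hbi;
        int64_t bit;

        hbitmap_iter_init(&hbi, hb, 0);
        while ((bit = hbitmap_iter_next(&hbi)) >= 0) {
            /* ... process the dirty bit at offset @bit ... */
        }
    }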
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz Reviewed-by: John Snow Message-id: 20200205112041.6003-3-vsementsov@virtuozzo.com Signed-off-by: John Snow --- include/qemu/hbitmap.h | 30 ------------------------------ util/hbitmap.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 30 deletions(-) diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index 1bf944ca3d..ab227b117f 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -362,34 +362,4 @@ void hbitmap_free_meta(HBitmap *hb); */ int64_t hbitmap_iter_next(HBitmapIter *hbi); -/** - * hbitmap_iter_next_word: - * @hbi: HBitmapIter to operate on. - * @p_cur: Location where to store the next non-zero word. - * - * Return the index of the next nonzero word that is set in @hbi's - * associated HBitmap, and set *p_cur to the content of that word - * (bits before the index that was passed to hbitmap_iter_init are - * trimmed on the first call). Return -1, and set *p_cur to zero, - * if all remaining words are zero. - */ -static inline size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur) -{ - unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1]; - - if (cur == 0) { - cur = hbitmap_iter_skip_words(hbi); - if (cur == 0) { - *p_cur = 0; - return -1; - } - } - - /* The next call will resume work from the next word. */ - hbi->cur[HBITMAP_LEVELS - 1] = 0; - *p_cur = cur; - return hbi->pos; -} - - #endif diff --git a/util/hbitmap.c b/util/hbitmap.c index 7f9b3e0cd7..a368dc5ef7 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -298,6 +298,35 @@ uint64_t hbitmap_count(const HBitmap *hb) return hb->count << hb->granularity; } +/** + * hbitmap_iter_next_word: + * @hbi: HBitmapIter to operate on. + * @p_cur: Location where to store the next non-zero word. + * + * Return the index of the next nonzero word that is set in @hbi's + * associated HBitmap, and set *p_cur to the content of that word + * (bits before the index that was passed to hbitmap_iter_init are + * trimmed on the first call). Return -1, and set *p_cur to zero, + * if all remaining words are zero. + */ +static size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur) +{ + unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1]; + + if (cur == 0) { + cur = hbitmap_iter_skip_words(hbi); + if (cur == 0) { + *p_cur = 0; + return -1; + } + } + + /* The next call will resume work from the next word. */ + hbi->cur[HBITMAP_LEVELS - 1] = 0; + *p_cur = cur; + return hbi->pos; +} + /* Count the number of set bits between start and end, not accounting for * the granularity. Also an example of how to use hbitmap_iter_next_word. */ From 30b8346cc3e1e9d73d55bb442e2b4a670dccd444 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:34 +0300 Subject: [PATCH 04/11] hbitmap: unpublish hbitmap_iter_skip_words Function is internal and even commented as internal. Drop its definition from .h file. 
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz Reviewed-by: John Snow Message-id: 20200205112041.6003-4-vsementsov@virtuozzo.com Signed-off-by: John Snow --- include/qemu/hbitmap.h | 7 ------- util/hbitmap.c | 2 +- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index ab227b117f..15837a0e2d 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -297,13 +297,6 @@ void hbitmap_free(HBitmap *hb); */ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first); -/* hbitmap_iter_skip_words: - * @hbi: HBitmapIter to operate on. - * - * Internal function used by hbitmap_iter_next and hbitmap_iter_next_word. - */ -unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi); - /* hbitmap_next_zero: * * Find next not dirty bit within selected range. If not found, return -1. diff --git a/util/hbitmap.c b/util/hbitmap.c index a368dc5ef7..26145d4b9e 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -104,7 +104,7 @@ struct HBitmap { /* Advance hbi to the next nonzero word and return it. hbi->pos * is updated. Returns zero if we reach the end of the bitmap. */ -unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi) +static unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi) { size_t pos = hbi->pos; const HBitmap *hb = hbi->hb; From 0c88f1970c769289fa49361bc3f00b5fba9d5d0e Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:35 +0300 Subject: [PATCH 05/11] hbitmap: drop meta bitmaps as they are unused Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz Reviewed-by: John Snow Message-id: 20200205112041.6003-5-vsementsov@virtuozzo.com Signed-off-by: John Snow --- include/qemu/hbitmap.h | 21 -------- tests/test-hbitmap.c | 115 ----------------------------------------- util/hbitmap.c | 16 ------ 3 files changed, 152 deletions(-) diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index 15837a0e2d..df922d8517 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -325,27 +325,6 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count); bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start, uint64_t *count); -/* hbitmap_create_meta: - * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap. - * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to - * free it. - * - * Currently, we only guarantee that if a bit in the hbitmap is changed it - * will be reflected in the meta bitmap, but we do not yet guarantee the - * opposite. - * - * @hb: The HBitmap to operate on. - * @chunk_size: How many bits in @hb does one bit in the meta track. - */ -HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size); - -/* hbitmap_free_meta: - * Free the meta bitmap of @hb. - * - * @hb: The HBitmap whose meta bitmap should be freed. - */ -void hbitmap_free_meta(HBitmap *hb); - /** * hbitmap_iter_next: * @hbi: HBitmapIter to operate on. 
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c index e1f867085f..aeaa0b3f22 100644 --- a/tests/test-hbitmap.c +++ b/tests/test-hbitmap.c @@ -22,7 +22,6 @@ typedef struct TestHBitmapData { HBitmap *hb; - HBitmap *meta; unsigned long *bits; size_t size; size_t old_size; @@ -94,14 +93,6 @@ static void hbitmap_test_init(TestHBitmapData *data, } } -static void hbitmap_test_init_meta(TestHBitmapData *data, - uint64_t size, int granularity, - int meta_chunk) -{ - hbitmap_test_init(data, size, granularity); - data->meta = hbitmap_create_meta(data->hb, meta_chunk); -} - static inline size_t hbitmap_test_array_size(size_t bits) { size_t n = DIV_ROUND_UP(bits, BITS_PER_LONG); @@ -144,9 +135,6 @@ static void hbitmap_test_teardown(TestHBitmapData *data, const void *unused) { if (data->hb) { - if (data->meta) { - hbitmap_free_meta(data->hb); - } hbitmap_free(data->hb); data->hb = NULL; } @@ -648,96 +636,6 @@ static void test_hbitmap_truncate_shrink_large(TestHBitmapData *data, hbitmap_test_truncate(data, size, -diff, 0); } -static void hbitmap_check_meta(TestHBitmapData *data, - int64_t start, int count) -{ - int64_t i; - - for (i = 0; i < data->size; i++) { - if (i >= start && i < start + count) { - g_assert(hbitmap_get(data->meta, i)); - } else { - g_assert(!hbitmap_get(data->meta, i)); - } - } -} - -static void hbitmap_test_meta(TestHBitmapData *data, - int64_t start, int count, - int64_t check_start, int check_count) -{ - hbitmap_reset_all(data->hb); - hbitmap_reset_all(data->meta); - - /* Test "unset" -> "unset" will not update meta. */ - hbitmap_reset(data->hb, start, count); - hbitmap_check_meta(data, 0, 0); - - /* Test "unset" -> "set" will update meta */ - hbitmap_set(data->hb, start, count); - hbitmap_check_meta(data, check_start, check_count); - - /* Test "set" -> "set" will not update meta */ - hbitmap_reset_all(data->meta); - hbitmap_set(data->hb, start, count); - hbitmap_check_meta(data, 0, 0); - - /* Test "set" -> "unset" will update meta */ - hbitmap_reset_all(data->meta); - hbitmap_reset(data->hb, start, count); - hbitmap_check_meta(data, check_start, check_count); -} - -static void hbitmap_test_meta_do(TestHBitmapData *data, int chunk_size) -{ - uint64_t size = chunk_size * 100; - hbitmap_test_init_meta(data, size, 0, chunk_size); - - hbitmap_test_meta(data, 0, 1, 0, chunk_size); - hbitmap_test_meta(data, 0, chunk_size, 0, chunk_size); - hbitmap_test_meta(data, chunk_size - 1, 1, 0, chunk_size); - hbitmap_test_meta(data, chunk_size - 1, 2, 0, chunk_size * 2); - hbitmap_test_meta(data, chunk_size - 1, chunk_size + 1, 0, chunk_size * 2); - hbitmap_test_meta(data, chunk_size - 1, chunk_size + 2, 0, chunk_size * 3); - hbitmap_test_meta(data, 7 * chunk_size - 1, chunk_size + 2, - 6 * chunk_size, chunk_size * 3); - hbitmap_test_meta(data, size - 1, 1, size - chunk_size, chunk_size); - hbitmap_test_meta(data, 0, size, 0, size); -} - -static void test_hbitmap_meta_byte(TestHBitmapData *data, const void *unused) -{ - hbitmap_test_meta_do(data, BITS_PER_BYTE); -} - -static void test_hbitmap_meta_word(TestHBitmapData *data, const void *unused) -{ - hbitmap_test_meta_do(data, BITS_PER_LONG); -} - -static void test_hbitmap_meta_sector(TestHBitmapData *data, const void *unused) -{ - hbitmap_test_meta_do(data, BDRV_SECTOR_SIZE * BITS_PER_BYTE); -} - -/** - * Create an HBitmap and test set/unset. 
- */ -static void test_hbitmap_meta_one(TestHBitmapData *data, const void *unused) -{ - int i; - int64_t offsets[] = { - 0, 1, L1 - 1, L1, L1 + 1, L2 - 1, L2, L2 + 1, L3 - 1, L3, L3 + 1 - }; - - hbitmap_test_init_meta(data, L3 * 2, 0, 1); - for (i = 0; i < ARRAY_SIZE(offsets); i++) { - hbitmap_test_meta(data, offsets[i], 1, offsets[i], 1); - hbitmap_test_meta(data, offsets[i], L1, offsets[i], L1); - hbitmap_test_meta(data, offsets[i], L2, offsets[i], L2); - } -} - static void test_hbitmap_serialize_align(TestHBitmapData *data, const void *unused) { @@ -750,13 +648,6 @@ static void test_hbitmap_serialize_align(TestHBitmapData *data, g_assert_cmpint(r, ==, 64 << 3); } -static void test_hbitmap_meta_zero(TestHBitmapData *data, const void *unused) -{ - hbitmap_test_init_meta(data, 0, 0, 1); - - hbitmap_check_meta(data, 0, 0); -} - static void hbitmap_test_serialize_range(TestHBitmapData *data, uint8_t *buf, size_t buf_size, uint64_t pos, uint64_t count) @@ -1165,12 +1056,6 @@ int main(int argc, char **argv) hbitmap_test_add("/hbitmap/truncate/shrink/large", test_hbitmap_truncate_shrink_large); - hbitmap_test_add("/hbitmap/meta/zero", test_hbitmap_meta_zero); - hbitmap_test_add("/hbitmap/meta/one", test_hbitmap_meta_one); - hbitmap_test_add("/hbitmap/meta/byte", test_hbitmap_meta_byte); - hbitmap_test_add("/hbitmap/meta/word", test_hbitmap_meta_word); - hbitmap_test_add("/hbitmap/meta/sector", test_hbitmap_meta_sector); - hbitmap_test_add("/hbitmap/serialize/align", test_hbitmap_serialize_align); hbitmap_test_add("/hbitmap/serialize/basic", diff --git a/util/hbitmap.c b/util/hbitmap.c index 26145d4b9e..b6d4b99a06 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -905,22 +905,6 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result) return true; } -HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size) -{ - assert(!(chunk_size & (chunk_size - 1))); - assert(!hb->meta); - hb->meta = hbitmap_alloc(hb->size << hb->granularity, - hb->granularity + ctz32(chunk_size)); - return hb->meta; -} - -void hbitmap_free_meta(HBitmap *hb) -{ - assert(hb->meta); - hbitmap_free(hb->meta); - hb->meta = NULL; -} - char *hbitmap_sha256(const HBitmap *bitmap, Error **errp) { size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long); From 642700fda029ed6b4051db7eab8f704131217643 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:36 +0300 Subject: [PATCH 06/11] block/dirty-bitmap: switch _next_dirty_area and _next_zero to int64_t We are going to introduce bdrv_dirty_bitmap_next_dirty so that same variable may be used to store its return value and to be its parameter, so it would int64_t. Similarly, we are going to refactor hbitmap_next_dirty_area to use hbitmap_next_dirty together with hbitmap_next_zero, therefore we want hbitmap_next_zero parameter type to be int64_t too. So, for convenience update all parameters of *_next_zero and *_next_dirty_area to be int64_t. 
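For illustration, the calling pattern the signed parameters enable (a rough sketch, not code from this series; locking and granularity details omitted):

    #include "qemu/osdep.h"
    #include "block/dirty-bitmap.h"

    /*
     * With int64_t parameters, one variable can carry both the search
     * position and the -1 "not found" return value.
     */
    static void visit_clean_bits(BdrvDirtyBitmap *bitmap)
    {
        int64_t offset = 0;

        while ((offset = bdrv_dirty_bitmap_next_zero(bitmap, offset,
                                                     INT64_MAX)) >= 0) {
            /* ... handle the clean bit at @offset ... */
            offset++;
        }
    }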
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz Reviewed-by: John Snow Message-id: 20200205112041.6003-6-vsementsov@virtuozzo.com Signed-off-by: John Snow --- block/dirty-bitmap.c | 6 +++--- include/block/dirty-bitmap.h | 6 +++--- include/qemu/hbitmap.h | 7 +++---- nbd/server.c | 2 +- tests/test-hbitmap.c | 36 ++++++++++++++++++------------------ util/hbitmap.c | 13 ++++++++----- 6 files changed, 36 insertions(+), 34 deletions(-) diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c index 7039e82520..af9f5411a6 100644 --- a/block/dirty-bitmap.c +++ b/block/dirty-bitmap.c @@ -860,14 +860,14 @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp) return hbitmap_sha256(bitmap->bitmap, errp); } -int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset, - uint64_t bytes) +int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, + int64_t bytes) { return hbitmap_next_zero(bitmap->bitmap, offset, bytes); } bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, - uint64_t *offset, uint64_t *bytes) + int64_t *offset, int64_t *bytes) { return hbitmap_next_dirty_area(bitmap->bitmap, offset, bytes); } diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h index e2b20ecab9..27c72cc56a 100644 --- a/include/block/dirty-bitmap.h +++ b/include/block/dirty-bitmap.h @@ -105,10 +105,10 @@ for (bitmap = bdrv_dirty_bitmap_first(bs); bitmap; \ bitmap = bdrv_dirty_bitmap_next(bitmap)) char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp); -int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset, - uint64_t bytes); +int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, + int64_t bytes); bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, - uint64_t *offset, uint64_t *bytes); + int64_t *offset, int64_t *bytes); BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap, Error **errp); diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index df922d8517..b6e85f3d5d 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -304,10 +304,10 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first); * @hb: The HBitmap to operate on * @start: The bit to start from. * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole - * bitmap is looked through. You can use UINT64_MAX as @count to search up to + * bitmap is looked through. You can use INT64_MAX as @count to search up to * the bitmap end. */ -int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count); +int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count); /* hbitmap_next_dirty_area: * @hb: The HBitmap to operate on @@ -322,8 +322,7 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count); * @offset and @bytes appropriately. Otherwise returns false and leaves @offset * and @bytes unchanged. 
*/ -bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start, - uint64_t *count); +bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count); /** * hbitmap_iter_next: diff --git a/nbd/server.c b/nbd/server.c index 11a31094ff..3106aaf3b4 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -2055,7 +2055,7 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset, bool next_dirty = !dirty; if (dirty) { - end = bdrv_dirty_bitmap_next_zero(bitmap, begin, UINT64_MAX); + end = bdrv_dirty_bitmap_next_zero(bitmap, begin, INT64_MAX); } else { bdrv_set_dirty_iter(it, begin); end = bdrv_dirty_iter_next(it); diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c index aeaa0b3f22..9d210dc18c 100644 --- a/tests/test-hbitmap.c +++ b/tests/test-hbitmap.c @@ -817,8 +817,8 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data, } static void test_hbitmap_next_zero_check_range(TestHBitmapData *data, - uint64_t start, - uint64_t count) + int64_t start, + int64_t count) { int64_t ret1 = hbitmap_next_zero(data->hb, start, count); int64_t ret2 = start; @@ -837,7 +837,7 @@ static void test_hbitmap_next_zero_check_range(TestHBitmapData *data, static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start) { - test_hbitmap_next_zero_check_range(data, start, UINT64_MAX); + test_hbitmap_next_zero_check_range(data, start, INT64_MAX); } static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity) @@ -905,11 +905,11 @@ static void test_hbitmap_next_zero_after_truncate(TestHBitmapData *data, } static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data, - uint64_t offset, - uint64_t count) + int64_t offset, + int64_t count) { - uint64_t off1, off2; - uint64_t len1 = 0, len2; + int64_t off1, off2; + int64_t len1 = 0, len2; bool ret1, ret2; int64_t end; @@ -945,24 +945,24 @@ static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data, int granularity) { hbitmap_test_init(data, L3, granularity); - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); test_hbitmap_next_dirty_area_check(data, 0, 1); test_hbitmap_next_dirty_area_check(data, L3 - 1, 1); hbitmap_set(data->hb, L2, 1); test_hbitmap_next_dirty_area_check(data, 0, 1); test_hbitmap_next_dirty_area_check(data, 0, L2); - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); - test_hbitmap_next_dirty_area_check(data, L2 - 1, UINT64_MAX); + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); + test_hbitmap_next_dirty_area_check(data, L2 - 1, INT64_MAX); test_hbitmap_next_dirty_area_check(data, L2 - 1, 1); test_hbitmap_next_dirty_area_check(data, L2 - 1, 2); test_hbitmap_next_dirty_area_check(data, L2 - 1, 3); - test_hbitmap_next_dirty_area_check(data, L2, UINT64_MAX); + test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX); test_hbitmap_next_dirty_area_check(data, L2, 1); test_hbitmap_next_dirty_area_check(data, L2 + 1, 1); hbitmap_set(data->hb, L2 + 5, L1); - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); test_hbitmap_next_dirty_area_check(data, L2 - 2, 8); test_hbitmap_next_dirty_area_check(data, L2 + 1, 5); test_hbitmap_next_dirty_area_check(data, L2 + 1, 3); @@ -974,16 +974,16 @@ static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data, test_hbitmap_next_dirty_area_check(data, L2 + 1, 0); hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2); - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); - 
test_hbitmap_next_dirty_area_check(data, L2, UINT64_MAX); - test_hbitmap_next_dirty_area_check(data, L2 + 1, UINT64_MAX); - test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1 - 1, UINT64_MAX); + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); + test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX); + test_hbitmap_next_dirty_area_check(data, L2 + 1, INT64_MAX); + test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1 - 1, INT64_MAX); test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1, 5); test_hbitmap_next_dirty_area_check(data, L2 * 2 - L1, L1 + 1); test_hbitmap_next_dirty_area_check(data, L2 * 2, L2); hbitmap_set(data->hb, 0, L3); - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); } static void test_hbitmap_next_dirty_area_0(TestHBitmapData *data, @@ -1010,7 +1010,7 @@ static void test_hbitmap_next_dirty_area_after_truncate(TestHBitmapData *data, hbitmap_test_init(data, L1, 0); hbitmap_test_truncate_impl(data, L1 * 2); hbitmap_set(data->hb, L1 + 1, 1); - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); } int main(int argc, char **argv) diff --git a/util/hbitmap.c b/util/hbitmap.c index b6d4b99a06..df22f06be6 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -193,7 +193,7 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first) } } -int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count) +int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count) { size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL; unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1]; @@ -202,6 +202,8 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count) uint64_t end_bit, sz; int64_t res; + assert(start >= 0 && count >= 0); + if (start >= hb->orig_size || count == 0) { return -1; } @@ -244,14 +246,15 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count) return res; } -bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start, - uint64_t *count) +bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count) { HBitmapIter hbi; int64_t firt_dirty_off, area_end; uint32_t granularity = 1UL << hb->granularity; uint64_t end; + assert(*start >= 0 && *count >= 0); + if (*start >= hb->orig_size || *count == 0) { return false; } @@ -834,8 +837,8 @@ bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b) */ static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src) { - uint64_t offset = 0; - uint64_t count = src->orig_size; + int64_t offset = 0; + int64_t count = src->orig_size; while (hbitmap_next_dirty_area(src, &offset, &count)) { hbitmap_set(dst, offset, count); From 9399c54b7557a20bc78aaecf2d51983cfafbbf41 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:37 +0300 Subject: [PATCH 07/11] block/dirty-bitmap: add _next_dirty API We have bdrv_dirty_bitmap_next_zero, let's add corresponding bdrv_dirty_bitmap_next_dirty, which is more comfortable to use than bitmap iterators in some cases. For test modify test_hbitmap_next_zero_check_range to check both next_zero and next_dirty and add some new checks. 
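To show why the new primitive is convenient, here is a sketch of enumerating contiguous dirty runs by pairing next_dirty with next_zero, which is roughly how hbitmap_next_dirty_area() is rebuilt below (illustrative only; bitmap locking omitted):

    #include "qemu/osdep.h"
    #include "block/dirty-bitmap.h"

    /* Report every contiguous dirty run of @bitmap as [start, end). */
    static void for_each_dirty_run(BdrvDirtyBitmap *bitmap)
    {
        int64_t size = bdrv_dirty_bitmap_size(bitmap);
        int64_t start = 0, end;

        while ((start = bdrv_dirty_bitmap_next_dirty(bitmap, start,
                                                     INT64_MAX)) >= 0) {
            end = bdrv_dirty_bitmap_next_zero(bitmap, start, INT64_MAX);
            if (end < 0) {
                /* dirty all the way to the end of the bitmap */
                end = size;
            }
            /* ... bits in [start, end) are dirty ... */
            start = end;
        }
    }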
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz Reviewed-by: John Snow Message-id: 20200205112041.6003-7-vsementsov@virtuozzo.com Signed-off-by: John Snow --- block/dirty-bitmap.c | 6 ++ include/block/dirty-bitmap.h | 2 + include/qemu/hbitmap.h | 13 ++++ tests/test-hbitmap.c | 130 ++++++++++++++++++++--------------- util/hbitmap.c | 60 ++++++++-------- 5 files changed, 126 insertions(+), 85 deletions(-) diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c index af9f5411a6..1b14c8eb26 100644 --- a/block/dirty-bitmap.c +++ b/block/dirty-bitmap.c @@ -860,6 +860,12 @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp) return hbitmap_sha256(bitmap->bitmap, errp); } +int64_t bdrv_dirty_bitmap_next_dirty(BdrvDirtyBitmap *bitmap, int64_t offset, + int64_t bytes) +{ + return hbitmap_next_dirty(bitmap->bitmap, offset, bytes); +} + int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, int64_t bytes) { diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h index 27c72cc56a..b1f0de12db 100644 --- a/include/block/dirty-bitmap.h +++ b/include/block/dirty-bitmap.h @@ -105,6 +105,8 @@ for (bitmap = bdrv_dirty_bitmap_first(bs); bitmap; \ bitmap = bdrv_dirty_bitmap_next(bitmap)) char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp); +int64_t bdrv_dirty_bitmap_next_dirty(BdrvDirtyBitmap *bitmap, int64_t offset, + int64_t bytes); int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, int64_t bytes); bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index b6e85f3d5d..6e9ae51ed3 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -297,6 +297,19 @@ void hbitmap_free(HBitmap *hb); */ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first); +/* + * hbitmap_next_dirty: + * + * Find next dirty bit within selected range. If not found, return -1. + * + * @hb: The HBitmap to operate on + * @start: The bit to start from. + * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole + * bitmap is looked through. You can use INT64_MAX as @count to search up to + * the bitmap end. + */ +int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count); + /* hbitmap_next_zero: * * Find next not dirty bit within selected range. If not found, return -1. diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c index 9d210dc18c..8905b8a351 100644 --- a/tests/test-hbitmap.c +++ b/tests/test-hbitmap.c @@ -816,92 +816,108 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data, hbitmap_iter_next(&hbi); } -static void test_hbitmap_next_zero_check_range(TestHBitmapData *data, - int64_t start, - int64_t count) +static void test_hbitmap_next_x_check_range(TestHBitmapData *data, + int64_t start, + int64_t count) { - int64_t ret1 = hbitmap_next_zero(data->hb, start, count); - int64_t ret2 = start; + int64_t next_zero = hbitmap_next_zero(data->hb, start, count); + int64_t next_dirty = hbitmap_next_dirty(data->hb, start, count); + int64_t next; int64_t end = start >= data->size || data->size - start < count ? 
data->size : start + count; + bool first_bit = hbitmap_get(data->hb, start); - for ( ; ret2 < end && hbitmap_get(data->hb, ret2); ret2++) { + for (next = start; + next < end && hbitmap_get(data->hb, next) == first_bit; + next++) + { ; } - if (ret2 == end) { - ret2 = -1; + + if (next == end) { + next = -1; } - g_assert_cmpint(ret1, ==, ret2); + g_assert_cmpint(next_dirty, ==, first_bit ? start : next); + g_assert_cmpint(next_zero, ==, first_bit ? next : start); } -static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start) +static void test_hbitmap_next_x_check(TestHBitmapData *data, int64_t start) { - test_hbitmap_next_zero_check_range(data, start, INT64_MAX); + test_hbitmap_next_x_check_range(data, start, INT64_MAX); } -static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity) +static void test_hbitmap_next_x_do(TestHBitmapData *data, int granularity) { hbitmap_test_init(data, L3, granularity); - test_hbitmap_next_zero_check(data, 0); - test_hbitmap_next_zero_check(data, L3 - 1); - test_hbitmap_next_zero_check_range(data, 0, 1); - test_hbitmap_next_zero_check_range(data, L3 - 1, 1); + test_hbitmap_next_x_check(data, 0); + test_hbitmap_next_x_check(data, L3 - 1); + test_hbitmap_next_x_check_range(data, 0, 1); + test_hbitmap_next_x_check_range(data, L3 - 1, 1); hbitmap_set(data->hb, L2, 1); - test_hbitmap_next_zero_check(data, 0); - test_hbitmap_next_zero_check(data, L2 - 1); - test_hbitmap_next_zero_check(data, L2); - test_hbitmap_next_zero_check(data, L2 + 1); - test_hbitmap_next_zero_check_range(data, 0, 1); - test_hbitmap_next_zero_check_range(data, 0, L2); - test_hbitmap_next_zero_check_range(data, L2 - 1, 1); - test_hbitmap_next_zero_check_range(data, L2 - 1, 2); - test_hbitmap_next_zero_check_range(data, L2, 1); - test_hbitmap_next_zero_check_range(data, L2 + 1, 1); + test_hbitmap_next_x_check(data, 0); + test_hbitmap_next_x_check(data, L2 - 1); + test_hbitmap_next_x_check(data, L2); + test_hbitmap_next_x_check(data, L2 + 1); + test_hbitmap_next_x_check_range(data, 0, 1); + test_hbitmap_next_x_check_range(data, 0, L2); + test_hbitmap_next_x_check_range(data, L2 - 1, 1); + test_hbitmap_next_x_check_range(data, L2 - 1, 2); + test_hbitmap_next_x_check_range(data, L2, 1); + test_hbitmap_next_x_check_range(data, L2 + 1, 1); hbitmap_set(data->hb, L2 + 5, L1); - test_hbitmap_next_zero_check(data, 0); - test_hbitmap_next_zero_check(data, L2 + 1); - test_hbitmap_next_zero_check(data, L2 + 2); - test_hbitmap_next_zero_check(data, L2 + 5); - test_hbitmap_next_zero_check(data, L2 + L1 - 1); - test_hbitmap_next_zero_check(data, L2 + L1); - test_hbitmap_next_zero_check_range(data, L2, 6); - test_hbitmap_next_zero_check_range(data, L2 + 1, 3); - test_hbitmap_next_zero_check_range(data, L2 + 4, L1); - test_hbitmap_next_zero_check_range(data, L2 + 5, L1); + test_hbitmap_next_x_check(data, 0); + test_hbitmap_next_x_check(data, L2 - L1); + test_hbitmap_next_x_check(data, L2 + 1); + test_hbitmap_next_x_check(data, L2 + 2); + test_hbitmap_next_x_check(data, L2 + 5); + test_hbitmap_next_x_check(data, L2 + L1 - 1); + test_hbitmap_next_x_check(data, L2 + L1); + test_hbitmap_next_x_check(data, L2 + L1 + 1); + test_hbitmap_next_x_check_range(data, L2 - 2, L1); + test_hbitmap_next_x_check_range(data, L2, 4); + test_hbitmap_next_x_check_range(data, L2, 6); + test_hbitmap_next_x_check_range(data, L2 + 1, 3); + test_hbitmap_next_x_check_range(data, L2 + 4, L1); + test_hbitmap_next_x_check_range(data, L2 + 5, L1); + test_hbitmap_next_x_check_range(data, L2 + 5 + L1 - 1, 1); + 
test_hbitmap_next_x_check_range(data, L2 + 5 + L1, 1); + test_hbitmap_next_x_check_range(data, L2 + 5 + L1 + 1, 1); hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2); - test_hbitmap_next_zero_check(data, L2 * 2 - L1); - test_hbitmap_next_zero_check(data, L2 * 2 - 2); - test_hbitmap_next_zero_check(data, L2 * 2 - 1); - test_hbitmap_next_zero_check(data, L2 * 2); - test_hbitmap_next_zero_check(data, L3 - 1); - test_hbitmap_next_zero_check_range(data, L2 * 2 - L1, L1 + 1); - test_hbitmap_next_zero_check_range(data, L2 * 2, L2); + test_hbitmap_next_x_check(data, L2 * 2 - L1); + test_hbitmap_next_x_check(data, L2 * 2 - 2); + test_hbitmap_next_x_check(data, L2 * 2 - 1); + test_hbitmap_next_x_check(data, L2 * 2); + test_hbitmap_next_x_check(data, L2 * 2 + 1); + test_hbitmap_next_x_check(data, L2 * 2 + L1); + test_hbitmap_next_x_check(data, L3 - 1); + test_hbitmap_next_x_check_range(data, L2 * 2 - L1, L1 + 1); + test_hbitmap_next_x_check_range(data, L2 * 2, L2); hbitmap_set(data->hb, 0, L3); - test_hbitmap_next_zero_check(data, 0); + test_hbitmap_next_x_check(data, 0); } -static void test_hbitmap_next_zero_0(TestHBitmapData *data, const void *unused) +static void test_hbitmap_next_x_0(TestHBitmapData *data, const void *unused) { - test_hbitmap_next_zero_do(data, 0); + test_hbitmap_next_x_do(data, 0); } -static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused) +static void test_hbitmap_next_x_4(TestHBitmapData *data, const void *unused) { - test_hbitmap_next_zero_do(data, 4); + test_hbitmap_next_x_do(data, 4); } -static void test_hbitmap_next_zero_after_truncate(TestHBitmapData *data, - const void *unused) +static void test_hbitmap_next_x_after_truncate(TestHBitmapData *data, + const void *unused) { hbitmap_test_init(data, L1, 0); hbitmap_test_truncate_impl(data, L1 * 2); hbitmap_set(data->hb, 0, L1); - test_hbitmap_next_zero_check(data, 0); + test_hbitmap_next_x_check(data, 0); } static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data, @@ -1068,12 +1084,12 @@ int main(int argc, char **argv) hbitmap_test_add("/hbitmap/iter/iter_and_reset", test_hbitmap_iter_and_reset); - hbitmap_test_add("/hbitmap/next_zero/next_zero_0", - test_hbitmap_next_zero_0); - hbitmap_test_add("/hbitmap/next_zero/next_zero_4", - test_hbitmap_next_zero_4); - hbitmap_test_add("/hbitmap/next_zero/next_zero_after_truncate", - test_hbitmap_next_zero_after_truncate); + hbitmap_test_add("/hbitmap/next_zero/next_x_0", + test_hbitmap_next_x_0); + hbitmap_test_add("/hbitmap/next_zero/next_x_4", + test_hbitmap_next_x_4); + hbitmap_test_add("/hbitmap/next_zero/next_x_after_truncate", + test_hbitmap_next_x_after_truncate); hbitmap_test_add("/hbitmap/next_dirty_area/next_dirty_area_0", test_hbitmap_next_dirty_area_0); diff --git a/util/hbitmap.c b/util/hbitmap.c index df22f06be6..883ca48fa6 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -193,6 +193,30 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first) } } +int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count) +{ + HBitmapIter hbi; + int64_t first_dirty_off; + uint64_t end; + + assert(start >= 0 && count >= 0); + + if (start >= hb->orig_size || count == 0) { + return -1; + } + + end = count > hb->orig_size - start ? 
hb->orig_size : start + count; + + hbitmap_iter_init(&hbi, hb, start); + first_dirty_off = hbitmap_iter_next(&hbi); + + if (first_dirty_off < 0 || first_dirty_off >= end) { + return -1; + } + + return MAX(start, first_dirty_off); +} + int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count) { size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL; @@ -248,40 +272,20 @@ int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count) bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count) { - HBitmapIter hbi; - int64_t firt_dirty_off, area_end; - uint32_t granularity = 1UL << hb->granularity; - uint64_t end; + int64_t area_start, area_end; - assert(*start >= 0 && *count >= 0); - - if (*start >= hb->orig_size || *count == 0) { + area_start = hbitmap_next_dirty(hb, *start, *count); + if (area_start < 0) { return false; } - end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count; - - hbitmap_iter_init(&hbi, hb, *start); - firt_dirty_off = hbitmap_iter_next(&hbi); - - if (firt_dirty_off < 0 || firt_dirty_off >= end) { - return false; + area_end = hbitmap_next_zero(hb, area_start, *start + *count - area_start); + if (area_end < 0) { + area_end = MIN(hb->orig_size, *start + *count); } - if (firt_dirty_off + granularity >= end) { - area_end = end; - } else { - area_end = hbitmap_next_zero(hb, firt_dirty_off + granularity, - end - firt_dirty_off - granularity); - if (area_end < 0) { - area_end = end; - } - } - - if (firt_dirty_off > *start) { - *start = firt_dirty_off; - } - *count = area_end - *start; + *start = area_start; + *count = area_end - area_start; return true; } From 299ea9ff01a8452dd14a042d700d8651370f5314 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:38 +0300 Subject: [PATCH 08/11] block/dirty-bitmap: improve _next_dirty_area API Firstly, _next_dirty_area is for scenarios when we may contiguously search for next dirty area inside some limited region, so it is more comfortable to specify "end" which should not be recalculated on each iteration. Secondly, let's add a possibility to limit resulting area size, not limiting searching area. This will be used in NBD code in further commit. 
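For illustration, the resulting calling convention (a sketch, not taken from the series): @end is an absolute offset computed once, and passing INT64_MAX as max_dirty_count leaves the returned area size unlimited.

    #include "qemu/osdep.h"
    #include "block/dirty-bitmap.h"

    /* Walk all dirty areas of @bitmap inside [start, end). */
    static void dump_dirty_areas(BdrvDirtyBitmap *bitmap,
                                 int64_t start, int64_t end)
    {
        int64_t dirty_start, dirty_count;

        while (bdrv_dirty_bitmap_next_dirty_area(bitmap, start, end,
                                                 INT64_MAX,
                                                 &dirty_start, &dirty_count)) {
            /* ... [dirty_start, dirty_start + dirty_count) is dirty ... */
            start = dirty_start + dirty_count;
        }
    }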
(Note that now bdrv_dirty_bitmap_next_dirty_area is unused) Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz Reviewed-by: John Snow Message-id: 20200205112041.6003-8-vsementsov@virtuozzo.com Signed-off-by: John Snow --- block/dirty-bitmap.c | 6 +++-- include/block/dirty-bitmap.h | 3 ++- include/qemu/hbitmap.h | 23 ++++++++++-------- tests/test-hbitmap.c | 45 +++++++++++++++++++++++------------- util/hbitmap.c | 44 +++++++++++++++++++++-------------- 5 files changed, 75 insertions(+), 46 deletions(-) diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c index 1b14c8eb26..063793e316 100644 --- a/block/dirty-bitmap.c +++ b/block/dirty-bitmap.c @@ -873,9 +873,11 @@ int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, } bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, - int64_t *offset, int64_t *bytes) + int64_t start, int64_t end, int64_t max_dirty_count, + int64_t *dirty_start, int64_t *dirty_count) { - return hbitmap_next_dirty_area(bitmap->bitmap, offset, bytes); + return hbitmap_next_dirty_area(bitmap->bitmap, start, end, max_dirty_count, + dirty_start, dirty_count); } /** diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h index b1f0de12db..8a10029418 100644 --- a/include/block/dirty-bitmap.h +++ b/include/block/dirty-bitmap.h @@ -110,7 +110,8 @@ int64_t bdrv_dirty_bitmap_next_dirty(BdrvDirtyBitmap *bitmap, int64_t offset, int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, int64_t bytes); bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, - int64_t *offset, int64_t *bytes); + int64_t start, int64_t end, int64_t max_dirty_count, + int64_t *dirty_start, int64_t *dirty_count); BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap, Error **errp); diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index 6e9ae51ed3..5e71b6d6f7 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -324,18 +324,21 @@ int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count); /* hbitmap_next_dirty_area: * @hb: The HBitmap to operate on - * @start: in-out parameter. - * in: the offset to start from - * out: (if area found) start of found area - * @count: in-out parameter. - * in: length of requested region - * out: length of found area + * @start: the offset to start from + * @end: end of requested area + * @max_dirty_count: limit for out parameter dirty_count + * @dirty_start: on success: start of found area + * @dirty_count: on success: length of found area * - * If dirty area found within [@start, @start + @count), returns true and sets - * @offset and @bytes appropriately. Otherwise returns false and leaves @offset - * and @bytes unchanged. + * If dirty area found within [@start, @end), returns true and sets + * @dirty_start and @dirty_count appropriately. @dirty_count will not exceed + * @max_dirty_count. + * If dirty area was not found, returns false and leaves @dirty_start and + * @dirty_count unchanged. 
*/ -bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count); +bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end, + int64_t max_dirty_count, + int64_t *dirty_start, int64_t *dirty_count); /** * hbitmap_iter_next: diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c index 8905b8a351..b6726cf76b 100644 --- a/tests/test-hbitmap.c +++ b/tests/test-hbitmap.c @@ -920,18 +920,19 @@ static void test_hbitmap_next_x_after_truncate(TestHBitmapData *data, test_hbitmap_next_x_check(data, 0); } -static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data, - int64_t offset, - int64_t count) +static void test_hbitmap_next_dirty_area_check_limited(TestHBitmapData *data, + int64_t offset, + int64_t count, + int64_t max_dirty) { int64_t off1, off2; int64_t len1 = 0, len2; bool ret1, ret2; int64_t end; - off1 = offset; - len1 = count; - ret1 = hbitmap_next_dirty_area(data->hb, &off1, &len1); + ret1 = hbitmap_next_dirty_area(data->hb, + offset, count == INT64_MAX ? INT64_MAX : offset + count, max_dirty, + &off1, &len1); end = offset > data->size || data->size - offset < count ? data->size : offset + count; @@ -940,21 +941,25 @@ static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data, ; } - for (len2 = 1; off2 + len2 < end && hbitmap_get(data->hb, off2 + len2); - len2++) { + for (len2 = 1; (off2 + len2 < end && len2 < max_dirty && + hbitmap_get(data->hb, off2 + len2)); len2++) + { ; } ret2 = off2 < end; - if (!ret2) { - /* leave unchanged */ - off2 = offset; - len2 = count; - } - g_assert_cmpint(ret1, ==, ret2); - g_assert_cmpint(off1, ==, off2); - g_assert_cmpint(len1, ==, len2); + + if (ret2) { + g_assert_cmpint(off1, ==, off2); + g_assert_cmpint(len1, ==, len2); + } +} + +static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data, + int64_t offset, int64_t count) +{ + test_hbitmap_next_dirty_area_check_limited(data, offset, count, INT64_MAX); } static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data, @@ -964,6 +969,7 @@ static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data, test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); test_hbitmap_next_dirty_area_check(data, 0, 1); test_hbitmap_next_dirty_area_check(data, L3 - 1, 1); + test_hbitmap_next_dirty_area_check_limited(data, 0, INT64_MAX, 1); hbitmap_set(data->hb, L2, 1); test_hbitmap_next_dirty_area_check(data, 0, 1); @@ -976,6 +982,8 @@ static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data, test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX); test_hbitmap_next_dirty_area_check(data, L2, 1); test_hbitmap_next_dirty_area_check(data, L2 + 1, 1); + test_hbitmap_next_dirty_area_check_limited(data, 0, INT64_MAX, 1); + test_hbitmap_next_dirty_area_check_limited(data, L2 - 1, 2, 1); hbitmap_set(data->hb, L2 + 5, L1); test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); @@ -988,6 +996,8 @@ static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data, test_hbitmap_next_dirty_area_check(data, L2 + L1, L1); test_hbitmap_next_dirty_area_check(data, L2, 0); test_hbitmap_next_dirty_area_check(data, L2 + 1, 0); + test_hbitmap_next_dirty_area_check_limited(data, L2 + 3, INT64_MAX, 3); + test_hbitmap_next_dirty_area_check_limited(data, L2 + 3, 7, 10); hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2); test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); @@ -997,6 +1007,9 @@ static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data, test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1, 5); test_hbitmap_next_dirty_area_check(data, L2 * 2 - 
L1, L1 + 1); test_hbitmap_next_dirty_area_check(data, L2 * 2, L2); + test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, INT64_MAX, 5); + test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, 10, 5); + test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, 2, 5); hbitmap_set(data->hb, 0, L3); test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); diff --git a/util/hbitmap.c b/util/hbitmap.c index 883ca48fa6..305b894a63 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -270,22 +270,33 @@ int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count) return res; } -bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count) +bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end, + int64_t max_dirty_count, + int64_t *dirty_start, int64_t *dirty_count) { - int64_t area_start, area_end; + int64_t next_zero; - area_start = hbitmap_next_dirty(hb, *start, *count); - if (area_start < 0) { + assert(start >= 0 && end >= 0 && max_dirty_count > 0); + + end = MIN(end, hb->orig_size); + if (start >= end) { return false; } - area_end = hbitmap_next_zero(hb, area_start, *start + *count - area_start); - if (area_end < 0) { - area_end = MIN(hb->orig_size, *start + *count); + start = hbitmap_next_dirty(hb, start, end - start); + if (start < 0) { + return false; } - *start = area_start; - *count = area_end - area_start; + end = start + MIN(end - start, max_dirty_count); + + next_zero = hbitmap_next_zero(hb, start, end - start); + if (next_zero >= 0) { + end = next_zero; + } + + *dirty_start = start; + *dirty_count = end - start; return true; } @@ -841,16 +852,15 @@ bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b) */ static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src) { - int64_t offset = 0; - int64_t count = src->orig_size; + int64_t offset; + int64_t count; - while (hbitmap_next_dirty_area(src, &offset, &count)) { + for (offset = 0; + hbitmap_next_dirty_area(src, offset, src->orig_size, INT64_MAX, + &offset, &count); + offset += count) + { hbitmap_set(dst, offset, count); - offset += count; - if (offset >= src->orig_size) { - break; - } - count = src->orig_size - offset; } } From 89cbc7e30860fda2e1b86fca471ade33c9c68aa7 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:39 +0300 Subject: [PATCH 09/11] nbd/server: introduce NBDExtentArray Introduce NBDExtentArray class, to handle extents list creation in more controlled way and with fewer OUT parameters in functions. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Eric Blake Message-id: 20200205112041.6003-9-vsementsov@virtuozzo.com Signed-off-by: John Snow --- nbd/server.c | 216 +++++++++++++++++++++++++++++---------------------- 1 file changed, 121 insertions(+), 95 deletions(-) diff --git a/nbd/server.c b/nbd/server.c index 3106aaf3b4..f90bb33a75 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -1909,27 +1909,98 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client, return ret; } -/* - * Populate @extents from block status. Update @bytes to be the actual - * length encoded (which may be smaller than the original), and update - * @nb_extents to the number of extents used. - * - * Returns zero on success and -errno on bdrv_block_status_above failure. 
- */ -static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset, - uint64_t *bytes, NBDExtent *extents, - unsigned int *nb_extents) -{ - uint64_t remaining_bytes = *bytes; - NBDExtent *extent = extents, *extents_end = extents + *nb_extents; - bool first_extent = true; +typedef struct NBDExtentArray { + NBDExtent *extents; + unsigned int nb_alloc; + unsigned int count; + uint64_t total_length; + bool can_add; + bool converted_to_be; +} NBDExtentArray; - assert(*nb_extents); - while (remaining_bytes) { +static NBDExtentArray *nbd_extent_array_new(unsigned int nb_alloc) +{ + NBDExtentArray *ea = g_new0(NBDExtentArray, 1); + + ea->nb_alloc = nb_alloc; + ea->extents = g_new(NBDExtent, nb_alloc); + ea->can_add = true; + + return ea; +} + +static void nbd_extent_array_free(NBDExtentArray *ea) +{ + g_free(ea->extents); + g_free(ea); +} +G_DEFINE_AUTOPTR_CLEANUP_FUNC(NBDExtentArray, nbd_extent_array_free); + +/* Further modifications of the array after conversion are abandoned */ +static void nbd_extent_array_convert_to_be(NBDExtentArray *ea) +{ + int i; + + assert(!ea->converted_to_be); + ea->can_add = false; + ea->converted_to_be = true; + + for (i = 0; i < ea->count; i++) { + ea->extents[i].flags = cpu_to_be32(ea->extents[i].flags); + ea->extents[i].length = cpu_to_be32(ea->extents[i].length); + } +} + +/* + * Add extent to NBDExtentArray. If extent can't be added (no available space), + * return -1. + * For safety, when returning -1 for the first time, .can_add is set to false, + * further call to nbd_extent_array_add() will crash. + * (to avoid the situation, when after failing to add an extent (returned -1), + * user miss this failure and add another extent, which is successfully added + * (array is full, but new extent may be squashed into the last one), then we + * have invalid array with skipped extent) + */ +static int nbd_extent_array_add(NBDExtentArray *ea, + uint32_t length, uint32_t flags) +{ + assert(ea->can_add); + + if (!length) { + return 0; + } + + /* Extend previous extent if flags are the same */ + if (ea->count > 0 && flags == ea->extents[ea->count - 1].flags) { + uint64_t sum = (uint64_t)length + ea->extents[ea->count - 1].length; + + if (sum <= UINT32_MAX) { + ea->extents[ea->count - 1].length = sum; + ea->total_length += length; + return 0; + } + } + + if (ea->count >= ea->nb_alloc) { + ea->can_add = false; + return -1; + } + + ea->total_length += length; + ea->extents[ea->count] = (NBDExtent) {.length = length, .flags = flags}; + ea->count++; + + return 0; +} + +static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset, + uint64_t bytes, NBDExtentArray *ea) +{ + while (bytes) { uint32_t flags; int64_t num; - int ret = bdrv_block_status_above(bs, NULL, offset, remaining_bytes, - &num, NULL, NULL); + int ret = bdrv_block_status_above(bs, NULL, offset, bytes, &num, + NULL, NULL); if (ret < 0) { return ret; @@ -1938,60 +2009,37 @@ static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset, flags = (ret & BDRV_BLOCK_ALLOCATED ? 0 : NBD_STATE_HOLE) | (ret & BDRV_BLOCK_ZERO ? 
NBD_STATE_ZERO : 0); - if (first_extent) { - extent->flags = flags; - extent->length = num; - first_extent = false; - } else if (flags == extent->flags) { - /* extend current extent */ - extent->length += num; - } else { - if (extent + 1 == extents_end) { - break; - } - - /* start new extent */ - extent++; - extent->flags = flags; - extent->length = num; + if (nbd_extent_array_add(ea, num, flags) < 0) { + return 0; } + offset += num; - remaining_bytes -= num; + bytes -= num; } - extents_end = extent + 1; - - for (extent = extents; extent < extents_end; extent++) { - extent->flags = cpu_to_be32(extent->flags); - extent->length = cpu_to_be32(extent->length); - } - - *bytes -= remaining_bytes; - *nb_extents = extents_end - extents; - return 0; } -/* nbd_co_send_extents +/* + * nbd_co_send_extents * - * @length is only for tracing purposes (and may be smaller or larger - * than the client's original request). @last controls whether - * NBD_REPLY_FLAG_DONE is sent. @extents should already be in - * big-endian format. + * @ea is converted to BE by the function + * @last controls whether NBD_REPLY_FLAG_DONE is sent. */ static int nbd_co_send_extents(NBDClient *client, uint64_t handle, - NBDExtent *extents, unsigned int nb_extents, - uint64_t length, bool last, - uint32_t context_id, Error **errp) + NBDExtentArray *ea, + bool last, uint32_t context_id, Error **errp) { NBDStructuredMeta chunk; - struct iovec iov[] = { {.iov_base = &chunk, .iov_len = sizeof(chunk)}, - {.iov_base = extents, .iov_len = nb_extents * sizeof(extents[0])} + {.iov_base = ea->extents, .iov_len = ea->count * sizeof(ea->extents[0])} }; - trace_nbd_co_send_extents(handle, nb_extents, context_id, length, last); + nbd_extent_array_convert_to_be(ea); + + trace_nbd_co_send_extents(handle, ea->count, context_id, ea->total_length, + last); set_be_chunk(&chunk.h, last ? NBD_REPLY_FLAG_DONE : 0, NBD_REPLY_TYPE_BLOCK_STATUS, handle, sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len); @@ -2009,39 +2057,27 @@ static int nbd_co_send_block_status(NBDClient *client, uint64_t handle, { int ret; unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS; - NBDExtent *extents = g_new(NBDExtent, nb_extents); - uint64_t final_length = length; + g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents); - ret = blockstatus_to_extents(bs, offset, &final_length, extents, - &nb_extents); + ret = blockstatus_to_extents(bs, offset, length, ea); if (ret < 0) { - g_free(extents); return nbd_co_send_structured_error( client, handle, -ret, "can't get block status", errp); } - ret = nbd_co_send_extents(client, handle, extents, nb_extents, - final_length, last, context_id, errp); - - g_free(extents); - - return ret; + return nbd_co_send_extents(client, handle, ea, last, context_id, errp); } /* - * Populate @extents from a dirty bitmap. Unless @dont_fragment, the - * final extent may exceed the original @length. Store in @length the - * byte length encoded (which may be smaller or larger than the - * original), and return the number of extents used. + * Populate @ea from a dirty bitmap. Unless @dont_fragment, the + * final extent may exceed the original @length. 
*/ -static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset, - uint64_t *length, NBDExtent *extents, - unsigned int nb_extents, - bool dont_fragment) +static void bitmap_to_extents(BdrvDirtyBitmap *bitmap, + uint64_t offset, uint64_t length, + NBDExtentArray *ea, bool dont_fragment) { uint64_t begin = offset, end = offset; - uint64_t overall_end = offset + *length; - unsigned int i = 0; + uint64_t overall_end = offset + length; BdrvDirtyBitmapIter *it; bool dirty; @@ -2050,8 +2086,7 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset, it = bdrv_dirty_iter_new(bitmap); dirty = bdrv_dirty_bitmap_get_locked(bitmap, offset); - assert(begin < overall_end && nb_extents); - while (begin < overall_end && i < nb_extents) { + while (begin < overall_end) { bool next_dirty = !dirty; if (dirty) { @@ -2071,9 +2106,10 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset, end = overall_end; } - extents[i].length = cpu_to_be32(end - begin); - extents[i].flags = cpu_to_be32(dirty ? NBD_STATE_DIRTY : 0); - i++; + if (nbd_extent_array_add(ea, end - begin, + dirty ? NBD_STATE_DIRTY : 0) < 0) { + break; + } begin = end; dirty = next_dirty; } @@ -2083,8 +2119,6 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset, bdrv_dirty_bitmap_unlock(bitmap); assert(offset < end); - *length = end - offset; - return i; } static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle, @@ -2092,20 +2126,12 @@ static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle, uint32_t length, bool dont_fragment, bool last, uint32_t context_id, Error **errp) { - int ret; unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS; - NBDExtent *extents = g_new(NBDExtent, nb_extents); - uint64_t final_length = length; + g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents); - nb_extents = bitmap_to_extents(bitmap, offset, &final_length, extents, - nb_extents, dont_fragment); + bitmap_to_extents(bitmap, offset, length, ea, dont_fragment); - ret = nbd_co_send_extents(client, handle, extents, nb_extents, - final_length, last, context_id, errp); - - g_free(extents); - - return ret; + return nbd_co_send_extents(client, handle, ea, last, context_id, errp); } /* nbd_co_receive_request From dacbb6eb8acbd40d97c1c01cde5ccc55e56e7785 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:40 +0300 Subject: [PATCH 10/11] nbd/server: use bdrv_dirty_bitmap_next_dirty_area Use bdrv_dirty_bitmap_next_dirty_area for bitmap_to_extents. Since bdrv_dirty_bitmap_next_dirty_area is very accurate in its interface, we'll never exceed requested region with last chunk. So, we don't need dont_fragment, and bitmap_to_extents() interface becomes clean enough to not require any comment. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Eric Blake Message-id: 20200205112041.6003-10-vsementsov@virtuozzo.com Signed-off-by: John Snow --- nbd/server.c | 59 +++++++++++++++++----------------------------------- 1 file changed, 19 insertions(+), 40 deletions(-) diff --git a/nbd/server.c b/nbd/server.c index f90bb33a75..02b1ed0801 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -2068,57 +2068,36 @@ static int nbd_co_send_block_status(NBDClient *client, uint64_t handle, return nbd_co_send_extents(client, handle, ea, last, context_id, errp); } -/* - * Populate @ea from a dirty bitmap. Unless @dont_fragment, the - * final extent may exceed the original @length. 
- */ +/* Populate @ea from a dirty bitmap. */ static void bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset, uint64_t length, - NBDExtentArray *ea, bool dont_fragment) + NBDExtentArray *es) { - uint64_t begin = offset, end = offset; - uint64_t overall_end = offset + length; - BdrvDirtyBitmapIter *it; - bool dirty; + int64_t start, dirty_start, dirty_count; + int64_t end = offset + length; + bool full = false; bdrv_dirty_bitmap_lock(bitmap); - it = bdrv_dirty_iter_new(bitmap); - dirty = bdrv_dirty_bitmap_get_locked(bitmap, offset); - - while (begin < overall_end) { - bool next_dirty = !dirty; - - if (dirty) { - end = bdrv_dirty_bitmap_next_zero(bitmap, begin, INT64_MAX); - } else { - bdrv_set_dirty_iter(it, begin); - end = bdrv_dirty_iter_next(it); - } - if (end == -1 || end - begin > UINT32_MAX) { - /* Cap to an aligned value < 4G beyond begin. */ - end = MIN(bdrv_dirty_bitmap_size(bitmap), - begin + UINT32_MAX + 1 - - bdrv_dirty_bitmap_granularity(bitmap)); - next_dirty = dirty; - } - if (dont_fragment && end > overall_end) { - end = overall_end; - } - - if (nbd_extent_array_add(ea, end - begin, - dirty ? NBD_STATE_DIRTY : 0) < 0) { + for (start = offset; + bdrv_dirty_bitmap_next_dirty_area(bitmap, start, end, INT32_MAX, + &dirty_start, &dirty_count); + start = dirty_start + dirty_count) + { + if ((nbd_extent_array_add(es, dirty_start - start, 0) < 0) || + (nbd_extent_array_add(es, dirty_count, NBD_STATE_DIRTY) < 0)) + { + full = true; break; } - begin = end; - dirty = next_dirty; } - bdrv_dirty_iter_free(it); + if (!full) { + /* last non dirty extent */ + nbd_extent_array_add(es, end - start, 0); + } bdrv_dirty_bitmap_unlock(bitmap); - - assert(offset < end); } static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle, @@ -2129,7 +2108,7 @@ static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle, unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS; g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents); - bitmap_to_extents(bitmap, offset, length, ea, dont_fragment); + bitmap_to_extents(bitmap, offset, length, ea); return nbd_co_send_extents(client, handle, ea, last, context_id, errp); } From 2d00cbd8e222a4adc08f415c399e84590ee8ff9a Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 5 Feb 2020 14:20:41 +0300 Subject: [PATCH 11/11] block/qcow2-bitmap: use bdrv_dirty_bitmap_next_dirty store_bitmap_data() loop does bdrv_set_dirty_iter() on each iteration, which means that we actually don't need iterator itself and we can use simpler bitmap API. 
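The essential shape of the new loop, distilled from the diff below (a sketch only; store_cluster is a hypothetical stand-in for the qcow2-specific body, which must return the first offset after the range it has written, or a negative errno):

    #include "qemu/osdep.h"
    #include "block/dirty-bitmap.h"

    /* Iterate dirty clusters without a BdrvDirtyBitmapIter. */
    static int store_all_dirty_clusters(BdrvDirtyBitmap *bitmap,
                                        int64_t (*store_cluster)(BdrvDirtyBitmap *,
                                                                 int64_t offset))
    {
        int64_t offset = 0;

        while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset,
                                                      INT64_MAX)) >= 0) {
            offset = store_cluster(bitmap, offset);
            if (offset < 0) {
                return offset;   /* propagate the callback's error */
            }
        }

        return 0;
    }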
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz Reviewed-by: John Snow Message-id: 20200205112041.6003-11-vsementsov@virtuozzo.com Signed-off-by: John Snow --- block/qcow2-bitmap.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c index 8cccc2c9f3..cb06954b4a 100644 --- a/block/qcow2-bitmap.c +++ b/block/qcow2-bitmap.c @@ -1288,7 +1288,6 @@ static uint64_t *store_bitmap_data(BlockDriverState *bs, uint64_t bm_size = bdrv_dirty_bitmap_size(bitmap); const char *bm_name = bdrv_dirty_bitmap_name(bitmap); uint8_t *buf = NULL; - BdrvDirtyBitmapIter *dbi; uint64_t *tb; uint64_t tb_size = size_to_clusters(s, @@ -1307,12 +1306,14 @@ static uint64_t *store_bitmap_data(BlockDriverState *bs, return NULL; } - dbi = bdrv_dirty_iter_new(bitmap); buf = g_malloc(s->cluster_size); limit = bytes_covered_by_bitmap_cluster(s, bitmap); assert(DIV_ROUND_UP(bm_size, limit) == tb_size); - while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) { + offset = 0; + while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset, INT64_MAX)) + >= 0) + { uint64_t cluster = offset / limit; uint64_t end, write_size; int64_t off; @@ -1355,23 +1356,17 @@ static uint64_t *store_bitmap_data(BlockDriverState *bs, goto fail; } - if (end >= bm_size) { - break; - } - - bdrv_set_dirty_iter(dbi, end); + offset = end; } *bitmap_table_size = tb_size; g_free(buf); - bdrv_dirty_iter_free(dbi); return tb; fail: clear_bitmap_table(bs, tb, tb_size); g_free(buf); - bdrv_dirty_iter_free(dbi); g_free(tb); return NULL;