From 6d49d3a859b0f19226dbb0df5e7f50267b42f45c Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Fri, 21 Feb 2020 11:25:19 +0000 Subject: [PATCH 01/19] luks: extract qcrypto_block_calculate_payload_offset() The qcow2 .bdrv_measure() code calculates the crypto payload offset. This logic really belongs in crypto/block.c where it can be reused by other image formats. The "luks" block driver will need this same logic in order to implement .bdrv_measure(), so extract the qcrypto_block_calculate_payload_offset() function now. Signed-off-by: Stefan Hajnoczi Reviewed-by: Max Reitz Message-Id: <20200221112522.1497712-2-stefanha@redhat.com> Signed-off-by: Max Reitz --- block/qcow2.c | 74 +++++++++++------------------------------- crypto/block.c | 36 ++++++++++++++++++++ include/crypto/block.h | 22 +++++++++++++ 3 files changed, 77 insertions(+), 55 deletions(-) diff --git a/block/qcow2.c b/block/qcow2.c index 3640e8c07d..f667349e50 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -4608,60 +4608,6 @@ static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) return ret; } -static ssize_t qcow2_measure_crypto_hdr_init_func(QCryptoBlock *block, - size_t headerlen, void *opaque, Error **errp) -{ - size_t *headerlenp = opaque; - - /* Stash away the payload size */ - *headerlenp = headerlen; - return 0; -} - -static ssize_t qcow2_measure_crypto_hdr_write_func(QCryptoBlock *block, - size_t offset, const uint8_t *buf, size_t buflen, - void *opaque, Error **errp) -{ - /* Discard the bytes, we're not actually writing to an image */ - return buflen; -} - -/* Determine the number of bytes for the LUKS payload */ -static bool qcow2_measure_luks_headerlen(QemuOpts *opts, size_t *len, - Error **errp) -{ - QDict *opts_qdict; - QDict *cryptoopts_qdict; - QCryptoBlockCreateOptions *cryptoopts; - QCryptoBlock *crypto; - - /* Extract "encrypt." 
options into a qdict */ - opts_qdict = qemu_opts_to_qdict(opts, NULL); - qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt."); - qobject_unref(opts_qdict); - - /* Build QCryptoBlockCreateOptions object from qdict */ - qdict_put_str(cryptoopts_qdict, "format", "luks"); - cryptoopts = block_crypto_create_opts_init(cryptoopts_qdict, errp); - qobject_unref(cryptoopts_qdict); - if (!cryptoopts) { - return false; - } - - /* Fake LUKS creation in order to determine the payload size */ - crypto = qcrypto_block_create(cryptoopts, "encrypt.", - qcow2_measure_crypto_hdr_init_func, - qcow2_measure_crypto_hdr_write_func, - len, errp); - qapi_free_QCryptoBlockCreateOptions(cryptoopts); - if (!crypto) { - return false; - } - - qcrypto_block_free(crypto); - return true; -} - static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, Error **errp) { @@ -4712,9 +4658,27 @@ static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, g_free(optstr); if (has_luks) { + g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL; + QDict *opts_qdict; + QDict *cryptoopts; size_t headerlen; - if (!qcow2_measure_luks_headerlen(opts, &headerlen, &local_err)) { + opts_qdict = qemu_opts_to_qdict(opts, NULL); + qdict_extract_subqdict(opts_qdict, &cryptoopts, "encrypt."); + qobject_unref(opts_qdict); + + qdict_put_str(cryptoopts, "format", "luks"); + + create_opts = block_crypto_create_opts_init(cryptoopts, errp); + qobject_unref(cryptoopts); + if (!create_opts) { + goto err; + } + + if (!qcrypto_block_calculate_payload_offset(create_opts, + "encrypt.", + &headerlen, + &local_err)) { goto err; } diff --git a/crypto/block.c b/crypto/block.c index 325752871c..6f42b32f1e 100644 --- a/crypto/block.c +++ b/crypto/block.c @@ -115,6 +115,42 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options, } +static ssize_t qcrypto_block_headerlen_hdr_init_func(QCryptoBlock *block, + size_t headerlen, void *opaque, Error **errp) +{ + size_t *headerlenp = opaque; + + /* Stash away the payload size */ + *headerlenp = headerlen; + return 0; +} + + +static ssize_t qcrypto_block_headerlen_hdr_write_func(QCryptoBlock *block, + size_t offset, const uint8_t *buf, size_t buflen, + void *opaque, Error **errp) +{ + /* Discard the bytes, we're not actually writing to an image */ + return buflen; +} + + +bool +qcrypto_block_calculate_payload_offset(QCryptoBlockCreateOptions *create_opts, + const char *optprefix, + size_t *len, + Error **errp) +{ + /* Fake LUKS creation in order to determine the payload size */ + g_autoptr(QCryptoBlock) crypto = + qcrypto_block_create(create_opts, optprefix, + qcrypto_block_headerlen_hdr_init_func, + qcrypto_block_headerlen_hdr_write_func, + len, errp); + return crypto != NULL; +} + + QCryptoBlockInfo *qcrypto_block_get_info(QCryptoBlock *block, Error **errp) { diff --git a/include/crypto/block.h b/include/crypto/block.h index d49d2c2da9..c77ccaf9c0 100644 --- a/include/crypto/block.h +++ b/include/crypto/block.h @@ -145,6 +145,26 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options, Error **errp); +/** + * qcrypto_block_calculate_payload_offset: + * @create_opts: the encryption options + * @optprefix: name prefix for options + * @len: output for number of header bytes before payload + * @errp: pointer to a NULL-initialized error object + * + * Calculate the number of header bytes before the payload in an encrypted + * storage volume. The header is an area before the payload that is reserved + * for encryption metadata. 
+ * + * Returns: true on success, false on error + */ +bool +qcrypto_block_calculate_payload_offset(QCryptoBlockCreateOptions *create_opts, + const char *optprefix, + size_t *len, + Error **errp); + + /** * qcrypto_block_get_info: * @block: the block encryption object @@ -269,5 +289,7 @@ uint64_t qcrypto_block_get_sector_size(QCryptoBlock *block); void qcrypto_block_free(QCryptoBlock *block); G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlock, qcrypto_block_free) +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlockCreateOptions, + qapi_free_QCryptoBlockCreateOptions) #endif /* QCRYPTO_BLOCK_H */ From a9da6e49d8281e65d172ed843807a613651d4ef3 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Fri, 21 Feb 2020 11:25:20 +0000 Subject: [PATCH 02/19] luks: implement .bdrv_measure() Add qemu-img measure support in the "luks" block driver. Signed-off-by: Stefan Hajnoczi Reviewed-by: Max Reitz Message-Id: <20200221112522.1497712-3-stefanha@redhat.com> Signed-off-by: Max Reitz --- block/crypto.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/block/crypto.c b/block/crypto.c index 24823835c1..23e9c74d6f 100644 --- a/block/crypto.c +++ b/block/crypto.c @@ -484,6 +484,67 @@ static int64_t block_crypto_getlength(BlockDriverState *bs) } +static BlockMeasureInfo *block_crypto_measure(QemuOpts *opts, + BlockDriverState *in_bs, + Error **errp) +{ + g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL; + Error *local_err = NULL; + BlockMeasureInfo *info; + uint64_t size; + size_t luks_payload_size; + QDict *cryptoopts; + + /* + * Preallocation mode doesn't affect size requirements but we must consume + * the option. + */ + g_free(qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC)); + + size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); + + if (in_bs) { + int64_t ssize = bdrv_getlength(in_bs); + + if (ssize < 0) { + error_setg_errno(&local_err, -ssize, + "Unable to get image virtual_size"); + goto err; + } + + size = ssize; + } + + cryptoopts = qemu_opts_to_qdict_filtered(opts, NULL, + &block_crypto_create_opts_luks, true); + qdict_put_str(cryptoopts, "format", "luks"); + create_opts = block_crypto_create_opts_init(cryptoopts, &local_err); + qobject_unref(cryptoopts); + if (!create_opts) { + goto err; + } + + if (!qcrypto_block_calculate_payload_offset(create_opts, NULL, + &luks_payload_size, + &local_err)) { + goto err; + } + + /* + * Unallocated blocks are still encrypted so allocation status makes no + * difference to the file size. + */ + info = g_new(BlockMeasureInfo, 1); + info->fully_allocated = luks_payload_size + size; + info->required = luks_payload_size + size; + return info; + +err: + error_propagate(errp, local_err); + return NULL; +} + + static int block_crypto_probe_luks(const uint8_t *buf, int buf_size, const char *filename) { @@ -670,6 +731,7 @@ static BlockDriver bdrv_crypto_luks = { .bdrv_co_preadv = block_crypto_co_preadv, .bdrv_co_pwritev = block_crypto_co_pwritev, .bdrv_getlength = block_crypto_getlength, + .bdrv_measure = block_crypto_measure, .bdrv_get_info = block_crypto_get_info_luks, .bdrv_get_specific_info = block_crypto_get_specific_info_luks, From c3673dcf08c7baf900bf572f6d96d8a7bd565c9d Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Fri, 21 Feb 2020 11:25:21 +0000 Subject: [PATCH 03/19] qemu-img: allow qemu-img measure --object without a filename In most qemu-img sub-commands the --object option only makes sense when there is a filename. 
qemu-img measure is an exception because objects may be referenced from the image creation options instead of an existing image file. Allow --object without a filename. Signed-off-by: Stefan Hajnoczi Reviewed-by: Max Reitz Message-Id: <20200221112522.1497712-4-stefanha@redhat.com> Signed-off-by: Max Reitz --- qemu-img.c | 6 ++---- tests/qemu-iotests/178 | 2 +- tests/qemu-iotests/178.out.qcow2 | 8 ++++---- tests/qemu-iotests/178.out.raw | 8 ++++---- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/qemu-img.c b/qemu-img.c index 804630a368..4bc40df6d2 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -4932,10 +4932,8 @@ static int img_measure(int argc, char **argv) filename = argv[optind]; } - if (!filename && - (object_opts || image_opts || fmt || snapshot_name || sn_opts)) { - error_report("--object, --image-opts, -f, and -l " - "require a filename argument."); + if (!filename && (image_opts || fmt || snapshot_name || sn_opts)) { + error_report("--image-opts, -f, and -l require a filename argument."); goto out; } if (filename && img_size != UINT64_MAX) { diff --git a/tests/qemu-iotests/178 b/tests/qemu-iotests/178 index 51a70fe669..7cf0e27154 100755 --- a/tests/qemu-iotests/178 +++ b/tests/qemu-iotests/178 @@ -50,7 +50,7 @@ _make_test_img 1G $QEMU_IMG measure # missing arguments $QEMU_IMG measure --size 2G "$TEST_IMG" # only one allowed $QEMU_IMG measure "$TEST_IMG" a # only one filename allowed -$QEMU_IMG measure --object secret,id=sec0,data=MTIzNDU2,format=base64 # missing filename +$QEMU_IMG measure --object secret,id=sec0,data=MTIzNDU2,format=base64 # size or filename needed $QEMU_IMG measure --image-opts # missing filename $QEMU_IMG measure -f qcow2 # missing filename $QEMU_IMG measure -l snap1 # missing filename diff --git a/tests/qemu-iotests/178.out.qcow2 b/tests/qemu-iotests/178.out.qcow2 index 9e7d8c44df..f59bf4b2fb 100644 --- a/tests/qemu-iotests/178.out.qcow2 +++ b/tests/qemu-iotests/178.out.qcow2 @@ -5,10 +5,10 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824 qemu-img: Either --size N or one filename must be specified. qemu-img: --size N cannot be used together with a filename. qemu-img: At most one filename argument is allowed. -qemu-img: --object, --image-opts, -f, and -l require a filename argument. -qemu-img: --object, --image-opts, -f, and -l require a filename argument. -qemu-img: --object, --image-opts, -f, and -l require a filename argument. -qemu-img: --object, --image-opts, -f, and -l require a filename argument. +qemu-img: Either --size N or one filename must be specified. +qemu-img: --image-opts, -f, and -l require a filename argument. +qemu-img: --image-opts, -f, and -l require a filename argument. +qemu-img: --image-opts, -f, and -l require a filename argument. qemu-img: Invalid option list: , qemu-img: Invalid parameter 'snapshot.foo' qemu-img: Failed in parsing snapshot param 'snapshot.foo' diff --git a/tests/qemu-iotests/178.out.raw b/tests/qemu-iotests/178.out.raw index 6478365905..404ca908d8 100644 --- a/tests/qemu-iotests/178.out.raw +++ b/tests/qemu-iotests/178.out.raw @@ -5,10 +5,10 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824 qemu-img: Either --size N or one filename must be specified. qemu-img: --size N cannot be used together with a filename. qemu-img: At most one filename argument is allowed. -qemu-img: --object, --image-opts, -f, and -l require a filename argument. -qemu-img: --object, --image-opts, -f, and -l require a filename argument. -qemu-img: --object, --image-opts, -f, and -l require a filename argument. 
-qemu-img: --object, --image-opts, -f, and -l require a filename argument. +qemu-img: Either --size N or one filename must be specified. +qemu-img: --image-opts, -f, and -l require a filename argument. +qemu-img: --image-opts, -f, and -l require a filename argument. +qemu-img: --image-opts, -f, and -l require a filename argument. qemu-img: Invalid option list: , qemu-img: Invalid parameter 'snapshot.foo' qemu-img: Failed in parsing snapshot param 'snapshot.foo' From c13de3b32fdeffb3e53494fd7d5cecb67cb65799 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Fri, 21 Feb 2020 11:25:22 +0000 Subject: [PATCH 04/19] iotests: add 288 luks qemu-img measure test This test exercises the block/crypto.c "luks" block driver .bdrv_measure() code. Signed-off-by: Stefan Hajnoczi Reviewed-by: Max Reitz Message-Id: <20200221112522.1497712-5-stefanha@redhat.com> [mreitz: Renamed test from 282 to 288] Signed-off-by: Max Reitz --- tests/qemu-iotests/288 | 93 ++++++++++++++++++++++++++++++++++++++ tests/qemu-iotests/288.out | 30 ++++++++++++ tests/qemu-iotests/group | 1 + 3 files changed, 124 insertions(+) create mode 100755 tests/qemu-iotests/288 create mode 100644 tests/qemu-iotests/288.out diff --git a/tests/qemu-iotests/288 b/tests/qemu-iotests/288 new file mode 100755 index 0000000000..6c62065aef --- /dev/null +++ b/tests/qemu-iotests/288 @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +# +# qemu-img measure tests for LUKS images +# +# Copyright (C) 2020 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +# creator +owner=stefanha@redhat.com + +seq=`basename $0` +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img + rm -f "$TEST_IMG.converted" +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. ./common.filter +. 
./common.pattern + +_supported_fmt luks +_supported_proto file +_supported_os Linux + +SECRET=secret,id=sec0,data=passphrase + +echo "== measure 1G image file ==" +echo + +$QEMU_IMG measure --object "$SECRET" \ + -O "$IMGFMT" \ + -o key-secret=sec0,iter-time=10 \ + --size 1G + +echo +echo "== create 1G image file (size should be no greater than measured) ==" +echo + +_make_test_img 1G +stat -c "image file size in bytes: %s" "$TEST_IMG_FILE" + +echo +echo "== modified 1G image file (size should be no greater than measured) ==" +echo + +$QEMU_IO --object "$SECRET" --image-opts "$TEST_IMG" -c "write -P 0x51 0x10000 0x400" | _filter_qemu_io | _filter_testdir +stat -c "image file size in bytes: %s" "$TEST_IMG_FILE" + +echo +echo "== measure preallocation=falloc 1G image file ==" +echo + +$QEMU_IMG measure --object "$SECRET" \ + -O "$IMGFMT" \ + -o key-secret=sec0,iter-time=10,preallocation=falloc \ + --size 1G + +echo +echo "== measure with input image file ==" +echo + +IMGFMT=raw IMGKEYSECRET= IMGOPTS= _make_test_img 1G | _filter_imgfmt +QEMU_IO_OPTIONS= IMGOPTSSYNTAX= $QEMU_IO -f raw -c "write -P 0x51 0x10000 0x400" "$TEST_IMG_FILE" | _filter_qemu_io | _filter_testdir +$QEMU_IMG measure --object "$SECRET" \ + -O "$IMGFMT" \ + -o key-secret=sec0,iter-time=10 \ + -f raw \ + "$TEST_IMG_FILE" + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/288.out b/tests/qemu-iotests/288.out new file mode 100644 index 0000000000..4bc593dc48 --- /dev/null +++ b/tests/qemu-iotests/288.out @@ -0,0 +1,30 @@ +QA output created by 288 +== measure 1G image file == + +required size: 1075810304 +fully allocated size: 1075810304 + +== create 1G image file (size should be no greater than measured) == + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824 +image file size in bytes: 1075810304 + +== modified 1G image file (size should be no greater than measured) == + +wrote 1024/1024 bytes at offset 65536 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +image file size in bytes: 1075810304 + +== measure preallocation=falloc 1G image file == + +required size: 1075810304 +fully allocated size: 1075810304 + +== measure with input image file == + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824 +wrote 1024/1024 bytes at offset 65536 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +required size: 1075810304 +fully allocated size: 1075810304 +*** done diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index 0317667695..559edc139a 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -293,3 +293,4 @@ 283 auto quick 284 rw 286 rw quick +288 quick From 7788a319399f17476ff1dd43164c869e320820a2 Mon Sep 17 00:00:00 2001 From: David Edmondson Date: Mon, 24 Feb 2020 10:13:09 +0000 Subject: [PATCH 05/19] block/curl: HTTP header fields allow whitespace around values RFC 7230 section 3.2 indicates that whitespace is permitted between the field name and field value and after the field value. 
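
To make the rule concrete: under RFC 7230 all of the following (hypothetical) response header lines carry the same meaning, while the old exact-prefix comparison against "Accept-Ranges: bytes" only recognized the first form:

  Accept-Ranges: bytes
  Accept-Ranges:bytes
  Accept-Ranges:   bytes

The reworked callback therefore splits the line at the colon, skips any whitespace, matches the value, and then accepts only trailing whitespace before the end of the header.
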
Signed-off-by: David Edmondson Message-Id: <20200224101310.101169-2-david.edmondson@oracle.com> Reviewed-by: Max Reitz Signed-off-by: Max Reitz --- block/curl.c | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/block/curl.c b/block/curl.c index f86299378e..f9ffb7f4e2 100644 --- a/block/curl.c +++ b/block/curl.c @@ -214,11 +214,34 @@ static size_t curl_header_cb(void *ptr, size_t size, size_t nmemb, void *opaque) { BDRVCURLState *s = opaque; size_t realsize = size * nmemb; - const char *accept_line = "Accept-Ranges: bytes"; + const char *header = (char *)ptr; + const char *end = header + realsize; + const char *accept_ranges = "Accept-Ranges:"; + const char *bytes = "bytes"; - if (realsize >= strlen(accept_line) - && strncmp((char *)ptr, accept_line, strlen(accept_line)) == 0) { - s->accept_range = true; + if (realsize >= strlen(accept_ranges) + && strncmp(header, accept_ranges, strlen(accept_ranges)) == 0) { + + char *p = strchr(header, ':') + 1; + + /* Skip whitespace between the header name and value. */ + while (p < end && *p && g_ascii_isspace(*p)) { + p++; + } + + if (end - p >= strlen(bytes) + && strncmp(p, bytes, strlen(bytes)) == 0) { + + /* Check that there is nothing but whitespace after the value. */ + p += strlen(bytes); + while (p < end && *p && g_ascii_isspace(*p)) { + p++; + } + + if (p == end || !*p) { + s->accept_range = true; + } + } } return realsize; From 69032253c33ae1774233c63cedf36d32242a85fc Mon Sep 17 00:00:00 2001 From: David Edmondson Date: Mon, 24 Feb 2020 10:13:10 +0000 Subject: [PATCH 06/19] block/curl: HTTP header field names are case insensitive RFC 7230 section 3.2 indicates that HTTP header field names are case insensitive. Signed-off-by: David Edmondson Message-Id: <20200224101310.101169-3-david.edmondson@oracle.com> Reviewed-by: Max Reitz Signed-off-by: Max Reitz --- block/curl.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/block/curl.c b/block/curl.c index f9ffb7f4e2..6e325901dc 100644 --- a/block/curl.c +++ b/block/curl.c @@ -216,11 +216,12 @@ static size_t curl_header_cb(void *ptr, size_t size, size_t nmemb, void *opaque) size_t realsize = size * nmemb; const char *header = (char *)ptr; const char *end = header + realsize; - const char *accept_ranges = "Accept-Ranges:"; + const char *accept_ranges = "accept-ranges:"; const char *bytes = "bytes"; if (realsize >= strlen(accept_ranges) - && strncmp(header, accept_ranges, strlen(accept_ranges)) == 0) { + && g_ascii_strncasecmp(header, accept_ranges, + strlen(accept_ranges)) == 0) { char *p = strchr(header, ':') + 1; From 69135eb30b9c3fca583737a96df015174dc8e6dd Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 26 Feb 2020 06:54:24 -0600 Subject: [PATCH 07/19] iotests: Fix nonportable use of od --endian Tests 261 and 272 fail on RHEL 7 with coreutils 8.22, since od --endian was not added until coreutils 8.23. Fix this by manually constructing the final value one byte at a time. 
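
As a worked example with assumed byte values: if the two bytes at the requested offset are 0xfe and 0xff, "od -An -v -tu1" prints them as "254 255"; the little-endian loop then computes 254 | (255 << 8) = 65534 and the big-endian loop computes (254 << 8) | 255 = 65279, matching the example results quoted in the peek_file_le/peek_file_be comments.
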
Fixes: fc8ba423 Reported-by: Andrey Shinkevich Signed-off-by: Eric Blake Reviewed-by: Max Reitz Message-Id: <20200226125424.481840-1-eblake@redhat.com> Signed-off-by: Max Reitz --- tests/qemu-iotests/common.rc | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc index 8a6366c09d..4c246c0450 100644 --- a/tests/qemu-iotests/common.rc +++ b/tests/qemu-iotests/common.rc @@ -56,18 +56,30 @@ poke_file() # peek_file_le 'test.img' 512 2 => 65534 peek_file_le() { - # Wrap in echo $() to strip spaces - echo $(od -j"$2" -N"$3" --endian=little -An -vtu"$3" "$1") + local val=0 shift=0 byte + + # coreutils' od --endian is not portable, so manually assemble bytes. + for byte in $(od -j"$2" -N"$3" -An -v -tu1 "$1"); do + val=$(( val | (byte << shift) )) + shift=$((shift + 8)) + done + printf %llu $val } # peek_file_be 'test.img' 512 2 => 65279 peek_file_be() { - # Wrap in echo $() to strip spaces - echo $(od -j"$2" -N"$3" --endian=big -An -vtu"$3" "$1") + local val=0 byte + + # coreutils' od --endian is not portable, so manually assemble bytes. + for byte in $(od -j"$2" -N"$3" -An -v -tu1 "$1"); do + val=$(( (val << 8) | byte )) + done + printf %llu $val } -# peek_file_raw 'test.img' 512 2 => '\xff\xfe' +# peek_file_raw 'test.img' 512 2 => '\xff\xfe'. Do not use if the raw data +# is likely to contain \0 or trailing \n. peek_file_raw() { dd if="$1" bs=1 skip="$2" count="$3" status=none From 4aebf0f0da4a57204f568deed14661cb37b4ac30 Mon Sep 17 00:00:00 2001 From: Pan Nengyuan Date: Thu, 27 Feb 2020 09:29:49 +0800 Subject: [PATCH 08/19] block/qcow2: do free crypto_opts in qcow2_close() 'crypto_opts' forgot to free in qcow2_close(), this patch fix the bellow leak stack: Direct leak of 24 byte(s) in 1 object(s) allocated from: #0 0x7f0edd81f970 in __interceptor_calloc (/lib64/libasan.so.5+0xef970) #1 0x7f0edc6d149d in g_malloc0 (/lib64/libglib-2.0.so.0+0x5249d) #2 0x55d7eaede63d in qobject_input_start_struct /mnt/sdb/qemu-new/qemu_test/qemu/qapi/qobject-input-visitor.c:295 #3 0x55d7eaed78b8 in visit_start_struct /mnt/sdb/qemu-new/qemu_test/qemu/qapi/qapi-visit-core.c:49 #4 0x55d7eaf5140b in visit_type_QCryptoBlockOpenOptions qapi/qapi-visit-crypto.c:290 #5 0x55d7eae43af3 in block_crypto_open_opts_init /mnt/sdb/qemu-new/qemu_test/qemu/block/crypto.c:163 #6 0x55d7eacd2924 in qcow2_update_options_prepare /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1148 #7 0x55d7eacd33f7 in qcow2_update_options /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1232 #8 0x55d7eacd9680 in qcow2_do_open /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1512 #9 0x55d7eacdc55e in qcow2_open_entry /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1792 #10 0x55d7eacdc8fe in qcow2_open /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1819 #11 0x55d7eac3742d in bdrv_open_driver /mnt/sdb/qemu-new/qemu_test/qemu/block.c:1317 #12 0x55d7eac3e990 in bdrv_open_common /mnt/sdb/qemu-new/qemu_test/qemu/block.c:1575 #13 0x55d7eac4442c in bdrv_open_inherit /mnt/sdb/qemu-new/qemu_test/qemu/block.c:3126 #14 0x55d7eac45c3f in bdrv_open /mnt/sdb/qemu-new/qemu_test/qemu/block.c:3219 #15 0x55d7ead8e8a4 in blk_new_open /mnt/sdb/qemu-new/qemu_test/qemu/block/block-backend.c:397 #16 0x55d7eacde74c in qcow2_co_create /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:3534 #17 0x55d7eacdfa6d in qcow2_co_create_opts /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:3668 #18 0x55d7eac1c678 in bdrv_create_co_entry /mnt/sdb/qemu-new/qemu_test/qemu/block.c:485 #19 0x55d7eb0024d2 in 
coroutine_trampoline /mnt/sdb/qemu-new/qemu_test/qemu/util/coroutine-ucontext.c:115

Reported-by: Euler Robot
Signed-off-by: Pan Nengyuan
Reviewed-by: Max Reitz
Message-Id: <20200227012950.12256-2-pannengyuan@huawei.com>
Signed-off-by: Max Reitz
---
 block/qcow2.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/block/qcow2.c b/block/qcow2.c
index f667349e50..d44b45633d 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -2610,6 +2610,7 @@ static void qcow2_close(BlockDriverState *bs)
 
     qcrypto_block_free(s->crypto);
     s->crypto = NULL;
+    qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
 
     g_free(s->unknown_header_fields);
     cleanup_unknown_header_ext(bs);

From fc124ea1dbfac2a1d5b851596831c9da746b7b66 Mon Sep 17 00:00:00 2001
From: Pan Nengyuan
Date: Thu, 27 Feb 2020 09:29:50 +0800
Subject: [PATCH 09/19] qemu-img: free memory before re-assign

collect_image_check() is called twice in img_check(), so the
filename/format strings are allocated a second time without freeing the
originals. It is not a big deal since the process exits anyway, but it
is cleaner and it removes the warning spotted by ASan.

Reported-by: Euler Robot
Signed-off-by: Pan Nengyuan
Message-Id: <20200227012950.12256-3-pannengyuan@huawei.com>
Signed-off-by: Max Reitz
---
 qemu-img.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/qemu-img.c b/qemu-img.c
index 4bc40df6d2..7b7087dd60 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -817,6 +817,8 @@ static int img_check(int argc, char **argv)
                        check->corruptions_fixed);
         }
 
+        qapi_free_ImageCheck(check);
+        check = g_new0(ImageCheck, 1);
         ret = collect_image_check(bs, check, filename, fmt, 0);
 
         check->leaks_fixed = leaks_fixed;

From e7266570f2cf7b3ca2a156c677ee0a59d563458b Mon Sep 17 00:00:00 2001
From: Vladimir Sementsov-Ogievskiy
Date: Mon, 2 Mar 2020 18:09:30 +0300
Subject: [PATCH 10/19] block/qcow2-threads: fix qcow2_decompress
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On the success path we return whatever inflate() returned instead of 0.
That happens to work for Z_STREAM_END, which is positive, but it is
definitely broken for Z_BUF_ERROR.

While here, switch to an errno return code, to be closer to the
qcow2_compress() API (and the usual expectations). Invert the if
condition so that it reads positively, and drop the dead initialization
of ret.
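
For context, the zlib return codes involved (values as defined by zlib.h, quoted here as background rather than taken from this patch) are Z_STREAM_END == 1 and Z_BUF_ERROR == -5; returning the raw inflate() result therefore still looked like success for Z_STREAM_END, but looked like a negative error code for Z_BUF_ERROR even when the output buffer had been filled completely.
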
Cc: qemu-stable@nongnu.org # v4.0 Fixes: 341926ab83e2b Signed-off-by: Vladimir Sementsov-Ogievskiy Message-Id: <20200302150930.16218-1-vsementsov@virtuozzo.com> Reviewed-by: Alberto Garcia Reviewed-by: Ján Tomko Signed-off-by: Max Reitz --- block/qcow2-threads.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c index 77bb578cdf..a68126f291 100644 --- a/block/qcow2-threads.c +++ b/block/qcow2-threads.c @@ -128,12 +128,12 @@ static ssize_t qcow2_compress(void *dest, size_t dest_size, * @src - source buffer, @src_size bytes * * Returns: 0 on success - * -1 on fail + * -EIO on fail */ static ssize_t qcow2_decompress(void *dest, size_t dest_size, const void *src, size_t src_size) { - int ret = 0; + int ret; z_stream strm; memset(&strm, 0, sizeof(strm)); @@ -144,17 +144,19 @@ static ssize_t qcow2_decompress(void *dest, size_t dest_size, ret = inflateInit2(&strm, -12); if (ret != Z_OK) { - return -1; + return -EIO; } ret = inflate(&strm, Z_FINISH); - if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || strm.avail_out != 0) { + if ((ret == Z_STREAM_END || ret == Z_BUF_ERROR) && strm.avail_out == 0) { /* * We approve Z_BUF_ERROR because we need @dest buffer to be filled, but * @src buffer may be processed partly (because in qcow2 we know size of * compressed data with precision of one sector) */ - ret = -1; + ret = 0; + } else { + ret = -EIO; } inflateEnd(&strm); From 01fe1ca945345d3dc420d70c69488143dc0451b1 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:29:56 +0300 Subject: [PATCH 11/19] job: refactor progress to separate object We need it in separate to pass to the block-copy object in the next commit. Cc: qemu-stable@nongnu.org Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Reviewed-by: Max Reitz Message-Id: <20200311103004.7649-2-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- blockjob.c | 16 +++++----- include/qemu/job.h | 11 ++----- include/qemu/progress_meter.h | 58 +++++++++++++++++++++++++++++++++++ job-qmp.c | 4 +-- job.c | 6 ++-- qemu-img.c | 6 ++-- 6 files changed, 76 insertions(+), 25 deletions(-) create mode 100644 include/qemu/progress_meter.h diff --git a/blockjob.c b/blockjob.c index 5d63b1e89d..fc850312c1 100644 --- a/blockjob.c +++ b/blockjob.c @@ -299,8 +299,8 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) info->device = g_strdup(job->job.id); info->busy = atomic_read(&job->job.busy); info->paused = job->job.pause_count > 0; - info->offset = job->job.progress_current; - info->len = job->job.progress_total; + info->offset = job->job.progress.current; + info->len = job->job.progress.total; info->speed = job->speed; info->io_status = job->iostatus; info->ready = job_is_ready(&job->job), @@ -330,8 +330,8 @@ static void block_job_event_cancelled(Notifier *n, void *opaque) qapi_event_send_block_job_cancelled(job_type(&job->job), job->job.id, - job->job.progress_total, - job->job.progress_current, + job->job.progress.total, + job->job.progress.current, job->speed); } @@ -350,8 +350,8 @@ static void block_job_event_completed(Notifier *n, void *opaque) qapi_event_send_block_job_completed(job_type(&job->job), job->job.id, - job->job.progress_total, - job->job.progress_current, + job->job.progress.total, + job->job.progress.current, job->speed, !!msg, msg); @@ -379,8 +379,8 @@ static void block_job_event_ready(Notifier *n, void *opaque) qapi_event_send_block_job_ready(job_type(&job->job), job->job.id, - job->job.progress_total, - 
job->job.progress_current, + job->job.progress.total, + job->job.progress.current, job->speed); } diff --git a/include/qemu/job.h b/include/qemu/job.h index bd59cd8944..32aabb1c60 100644 --- a/include/qemu/job.h +++ b/include/qemu/job.h @@ -28,6 +28,7 @@ #include "qapi/qapi-types-job.h" #include "qemu/queue.h" +#include "qemu/progress_meter.h" #include "qemu/coroutine.h" #include "block/aio.h" @@ -117,15 +118,7 @@ typedef struct Job { /** True if this job should automatically dismiss itself */ bool auto_dismiss; - /** - * Current progress. The unit is arbitrary as long as the ratio between - * progress_current and progress_total represents the estimated percentage - * of work already done. - */ - int64_t progress_current; - - /** Estimated progress_current value at the completion of the job */ - int64_t progress_total; + ProgressMeter progress; /** * Return code from @run and/or @prepare callback(s). diff --git a/include/qemu/progress_meter.h b/include/qemu/progress_meter.h new file mode 100644 index 0000000000..9a23ff071c --- /dev/null +++ b/include/qemu/progress_meter.h @@ -0,0 +1,58 @@ +/* + * Helper functionality for some process progress tracking. + * + * Copyright (c) 2011 IBM Corp. + * Copyright (c) 2012, 2018 Red Hat, Inc. + * Copyright (c) 2020 Virtuozzo International GmbH + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef QEMU_PROGRESS_METER_H +#define QEMU_PROGRESS_METER_H + +typedef struct ProgressMeter { + /** + * Current progress. The unit is arbitrary as long as the ratio between + * current and total represents the estimated percentage + * of work already done. 
+ */ + uint64_t current; + + /** Estimated current value at the completion of the process */ + uint64_t total; +} ProgressMeter; + +static inline void progress_work_done(ProgressMeter *pm, uint64_t done) +{ + pm->current += done; +} + +static inline void progress_set_remaining(ProgressMeter *pm, uint64_t remaining) +{ + pm->total = pm->current + remaining; +} + +static inline void progress_increase_remaining(ProgressMeter *pm, + uint64_t delta) +{ + pm->total += delta; +} + +#endif /* QEMU_PROGRESS_METER_H */ diff --git a/job-qmp.c b/job-qmp.c index fbfed25a00..fecc939ebd 100644 --- a/job-qmp.c +++ b/job-qmp.c @@ -143,8 +143,8 @@ static JobInfo *job_query_single(Job *job, Error **errp) .id = g_strdup(job->id), .type = job_type(job), .status = job->status, - .current_progress = job->progress_current, - .total_progress = job->progress_total, + .current_progress = job->progress.current, + .total_progress = job->progress.total, .has_error = !!job->err, .error = job->err ? \ g_strdup(error_get_pretty(job->err)) : NULL, diff --git a/job.c b/job.c index 04409b40aa..134a07b92e 100644 --- a/job.c +++ b/job.c @@ -369,17 +369,17 @@ void job_unref(Job *job) void job_progress_update(Job *job, uint64_t done) { - job->progress_current += done; + progress_work_done(&job->progress, done); } void job_progress_set_remaining(Job *job, uint64_t remaining) { - job->progress_total = job->progress_current + remaining; + progress_set_remaining(&job->progress, remaining); } void job_progress_increase_remaining(Job *job, uint64_t delta) { - job->progress_total += delta; + progress_increase_remaining(&job->progress, delta); } void job_event_cancelled(Job *job) diff --git a/qemu-img.c b/qemu-img.c index 7b7087dd60..afddf33f08 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -884,9 +884,9 @@ static void run_block_job(BlockJob *job, Error **errp) do { float progress = 0.0f; aio_poll(aio_context, true); - if (job->job.progress_total) { - progress = (float)job->job.progress_current / - job->job.progress_total * 100.f; + if (job->job.progress.total) { + progress = (float)job->job.progress.current / + job->job.progress.total * 100.f; } qemu_progress_print(progress, 0); } while (!job_is_ready(&job->job) && !job_is_completed(&job->job)); From d0ebeca14a585f352938062ef8ddde47fe4d39f9 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:29:57 +0300 Subject: [PATCH 12/19] block/block-copy: fix progress calculation Assume we have two regions, A and B, and region B is in-flight now, region A is not yet touched, but it is unallocated and should be skipped. Correspondingly, as progress we have total = A + B current = 0 If we reset unallocated region A and call progress_reset_callback, it will calculate 0 bytes dirty in the bitmap and call job_progress_set_remaining, which will set total = current + 0 = 0 + 0 = 0 So, B bytes are actually removed from total accounting. When job finishes we'll have total = 0 current = B , which doesn't sound good. This is because we didn't considered in-flight bytes, actually when calculating remaining, we should have set (in_flight + dirty_bytes) as remaining, not only dirty_bytes. To fix it, let's refactor progress calculation, moving it to block-copy itself instead of fixing callback. And, of course, track in_flight bytes count. We still have to keep one callback, to maintain backup job bytes_read calculation, but it will go on soon, when we turn the whole backup process into one block_copy call. 
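
A worked example with made-up sizes: let region A be 1 GiB of not-yet-copied data that turns out to be unallocated and region B be 64 MiB already in flight, so total = 1088 MiB and current = 0. The old reset callback recomputed the remaining work from the dirty bitmap alone, which at that point contains neither A (just cleared) nor B (cleared when it went in flight), so it set total = current + 0 = 0 and B's 64 MiB vanished from the accounting. With in-flight bytes tracked, the same reset yields total = current + (0 + 64 MiB) = 64 MiB, and current reaches 64 MiB = total once B completes.
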
Cc: qemu-stable@nongnu.org Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Message-Id: <20200311103004.7649-3-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- block/backup.c | 13 ++----------- block/block-copy.c | 16 ++++++++++++---- include/block/block-copy.h | 15 +++++---------- 3 files changed, 19 insertions(+), 25 deletions(-) diff --git a/block/backup.c b/block/backup.c index 1383e219f5..8694e0394b 100644 --- a/block/backup.c +++ b/block/backup.c @@ -57,15 +57,6 @@ static void backup_progress_bytes_callback(int64_t bytes, void *opaque) BackupBlockJob *s = opaque; s->bytes_read += bytes; - job_progress_update(&s->common.job, bytes); -} - -static void backup_progress_reset_callback(void *opaque) -{ - BackupBlockJob *s = opaque; - uint64_t estimate = bdrv_get_dirty_count(s->bcs->copy_bitmap); - - job_progress_set_remaining(&s->common.job, estimate); } static int coroutine_fn backup_do_cow(BackupBlockJob *job, @@ -464,8 +455,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, job->cluster_size = cluster_size; job->len = len; - block_copy_set_callbacks(bcs, backup_progress_bytes_callback, - backup_progress_reset_callback, job); + block_copy_set_progress_callback(bcs, backup_progress_bytes_callback, job); + block_copy_set_progress_meter(bcs, &job->common.job.progress); /* Required permissions are already taken by backup-top target */ block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL, diff --git a/block/block-copy.c b/block/block-copy.c index 79798a1567..e2d7b3b887 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -127,17 +127,20 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, return s; } -void block_copy_set_callbacks( +void block_copy_set_progress_callback( BlockCopyState *s, ProgressBytesCallbackFunc progress_bytes_callback, - ProgressResetCallbackFunc progress_reset_callback, void *progress_opaque) { s->progress_bytes_callback = progress_bytes_callback; - s->progress_reset_callback = progress_reset_callback; s->progress_opaque = progress_opaque; } +void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm) +{ + s->progress = pm; +} + /* * block_copy_do_copy * @@ -269,7 +272,9 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s, if (!ret) { bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes); - s->progress_reset_callback(s->progress_opaque); + progress_set_remaining(s->progress, + bdrv_get_dirty_count(s->copy_bitmap) + + s->in_flight_bytes); } *count = bytes; @@ -331,15 +336,18 @@ int coroutine_fn block_copy(BlockCopyState *s, trace_block_copy_process(s, start); bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start); + s->in_flight_bytes += chunk_end - start; co_get_from_shres(s->mem, chunk_end - start); ret = block_copy_do_copy(s, start, chunk_end, error_is_read); co_put_to_shres(s->mem, chunk_end - start); + s->in_flight_bytes -= chunk_end - start; if (ret < 0) { bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start); break; } + progress_work_done(s->progress, chunk_end - start); s->progress_bytes_callback(chunk_end - start, s->progress_opaque); start = chunk_end; ret = 0; diff --git a/include/block/block-copy.h b/include/block/block-copy.h index 0a161724d7..9def00068c 100644 --- a/include/block/block-copy.h +++ b/include/block/block-copy.h @@ -26,7 +26,6 @@ typedef struct BlockCopyInFlightReq { } BlockCopyInFlightReq; typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque); -typedef void 
(*ProgressResetCallbackFunc)(void *opaque); typedef struct BlockCopyState { /* * BdrvChild objects are not owned or managed by block-copy. They are @@ -36,6 +35,7 @@ typedef struct BlockCopyState { BdrvChild *source; BdrvChild *target; BdrvDirtyBitmap *copy_bitmap; + int64_t in_flight_bytes; int64_t cluster_size; bool use_copy_range; int64_t copy_size; @@ -60,15 +60,9 @@ typedef struct BlockCopyState { */ bool skip_unallocated; + ProgressMeter *progress; /* progress_bytes_callback: called when some copying progress is done. */ ProgressBytesCallbackFunc progress_bytes_callback; - - /* - * progress_reset_callback: called when some bytes reset from copy_bitmap - * (see @skip_unallocated above). The callee is assumed to recalculate how - * many bytes remain based on the dirty bit count of copy_bitmap. - */ - ProgressResetCallbackFunc progress_reset_callback; void *progress_opaque; SharedResource *mem; @@ -79,12 +73,13 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, BdrvRequestFlags write_flags, Error **errp); -void block_copy_set_callbacks( +void block_copy_set_progress_callback( BlockCopyState *s, ProgressBytesCallbackFunc progress_bytes_callback, - ProgressResetCallbackFunc progress_reset_callback, void *progress_opaque); +void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm); + void block_copy_state_free(BlockCopyState *s); int64_t block_copy_reset_unallocated(BlockCopyState *s, From 9d31bc53facf53d6f817c2472d4d0485d7bc5d8e Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:29:58 +0300 Subject: [PATCH 13/19] block/block-copy: specialcase first copy_range request In block_copy_do_copy we fallback to read+write if copy_range failed. In this case copy_size is larger than defined for buffered IO, and there is corresponding commit. Still, backup copies data cluster by cluster, and most of requests are limited to one cluster anyway, so the only source of this one bad-limited request is copy-before-write operation. Further patch will move backup to use block_copy directly, than for cases where copy_range is not supported, first request will be oversized in each backup. It's not good, let's change it now. Fix is simple: just limit first copy_range request like buffer-based request. If it succeed, set larger copy_range limit. 
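
Expressed with the constants in block/block-copy.c: the initial copy_size becomes MAX(cluster_size, BLOCK_COPY_MAX_BUFFER), i.e. 1 MiB whenever the cluster size is smaller than that, and only after the first successful copy_range call is it raised to MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE), max_transfer aligned down to the cluster size). A configuration where copy_range never succeeds therefore never issues the oversized buffered request described above.
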
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Reviewed-by: Max Reitz Message-Id: <20200311103004.7649-4-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- block/block-copy.c | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/block/block-copy.c b/block/block-copy.c index e2d7b3b887..ddd61c1652 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -70,16 +70,19 @@ void block_copy_state_free(BlockCopyState *s) g_free(s); } +static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target) +{ + return MIN_NON_ZERO(INT_MAX, + MIN_NON_ZERO(source->bs->bl.max_transfer, + target->bs->bl.max_transfer)); +} + BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, int64_t cluster_size, BdrvRequestFlags write_flags, Error **errp) { BlockCopyState *s; BdrvDirtyBitmap *copy_bitmap; - uint32_t max_transfer = - MIN_NON_ZERO(INT_MAX, - MIN_NON_ZERO(source->bs->bl.max_transfer, - target->bs->bl.max_transfer)); copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL, errp); @@ -99,7 +102,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, .mem = shres_create(BLOCK_COPY_MAX_MEM), }; - if (max_transfer < cluster_size) { + if (block_copy_max_transfer(source, target) < cluster_size) { /* * copy_range does not respect max_transfer. We don't want to bother * with requests smaller than block-copy cluster size, so fallback to @@ -114,12 +117,11 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, s->copy_size = cluster_size; } else { /* - * copy_range does not respect max_transfer (it's a TODO), so we factor - * that in here. + * We enable copy-range, but keep small copy_size, until first + * successful copy_range (look at block_copy_do_copy). */ s->use_copy_range = true; - s->copy_size = MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE), - QEMU_ALIGN_DOWN(max_transfer, cluster_size)); + s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER); } QLIST_INIT(&s->inflight_reqs); @@ -172,6 +174,22 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s, s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER); /* Fallback to read+write with allocated buffer */ } else { + if (s->use_copy_range) { + /* + * Successful copy-range. Now increase copy_size. copy_range + * does not respect max_transfer (it's a TODO), so we factor + * that in here. + * + * Note: we double-check s->use_copy_range for the case when + * parallel block-copy request unsets it during previous + * bdrv_co_copy_range call. + */ + s->copy_size = + MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE), + QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source, + s->target), + s->cluster_size)); + } goto out; } } @@ -179,7 +197,10 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s, /* * In case of failed copy_range request above, we may proceed with buffered * request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will - * be properly limited, so don't care too much. + * be properly limited, so don't care too much. Moreover the most likely + * case (copy_range is unsupported for the configuration, so the very first + * copy_range request fails) is handled by setting large copy_size only + * after first successful copy_range. 
*/ bounce_buffer = qemu_blockalign(s->source->bs, nbytes); From 2d57511a88e71485f745ad3dc0afe03b87a8ad5e Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:29:59 +0300 Subject: [PATCH 14/19] block/block-copy: use block_status Use bdrv_block_status_above to chose effective chunk size and to handle zeroes effectively. This substitutes checking for just being allocated or not, and drops old code path for it. Assistance by backup job is dropped too, as caching block-status information is more difficult than just caching is-allocated information in our dirty bitmap, and backup job is not good place for this caching anyway. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Reviewed-by: Max Reitz Message-Id: <20200311103004.7649-5-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- block/block-copy.c | 73 +++++++++++++++++++++++++++++++++++++--------- block/trace-events | 1 + 2 files changed, 61 insertions(+), 13 deletions(-) diff --git a/block/block-copy.c b/block/block-copy.c index ddd61c1652..b075dba206 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -155,7 +155,7 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm) */ static int coroutine_fn block_copy_do_copy(BlockCopyState *s, int64_t start, int64_t end, - bool *error_is_read) + bool zeroes, bool *error_is_read) { int ret; int nbytes = MIN(end, s->len) - start; @@ -165,6 +165,18 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s, assert(QEMU_IS_ALIGNED(end, s->cluster_size)); assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size)); + if (zeroes) { + ret = bdrv_co_pwrite_zeroes(s->target, start, nbytes, s->write_flags & + ~BDRV_REQ_WRITE_COMPRESSED); + if (ret < 0) { + trace_block_copy_write_zeroes_fail(s, start, ret); + if (error_is_read) { + *error_is_read = false; + } + } + return ret; + } + if (s->use_copy_range) { ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes, 0, s->write_flags); @@ -230,6 +242,38 @@ out: return ret; } +static int block_copy_block_status(BlockCopyState *s, int64_t offset, + int64_t bytes, int64_t *pnum) +{ + int64_t num; + BlockDriverState *base; + int ret; + + if (s->skip_unallocated && s->source->bs->backing) { + base = s->source->bs->backing->bs; + } else { + base = NULL; + } + + ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num, + NULL, NULL); + if (ret < 0 || num < s->cluster_size) { + /* + * On error or if failed to obtain large enough chunk just fallback to + * copy one cluster. + */ + num = s->cluster_size; + ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA; + } else if (offset + num == s->len) { + num = QEMU_ALIGN_UP(num, s->cluster_size); + } else { + num = QEMU_ALIGN_DOWN(num, s->cluster_size); + } + + *pnum = num; + return ret; +} + /* * Check if the cluster starting at offset is allocated or not. * return via pnum the number of contiguous clusters sharing this allocation. 
@@ -308,7 +352,6 @@ int coroutine_fn block_copy(BlockCopyState *s, { int ret = 0; int64_t end = bytes + start; /* bytes */ - int64_t status_bytes; BlockCopyInFlightReq req; /* @@ -325,7 +368,7 @@ int coroutine_fn block_copy(BlockCopyState *s, block_copy_inflight_req_begin(s, &req, start, end); while (start < end) { - int64_t next_zero, chunk_end; + int64_t next_zero, chunk_end, status_bytes; if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) { trace_block_copy_skip(s, start); @@ -343,24 +386,28 @@ int coroutine_fn block_copy(BlockCopyState *s, chunk_end = next_zero; } - if (s->skip_unallocated) { - ret = block_copy_reset_unallocated(s, start, &status_bytes); - if (ret == 0) { - trace_block_copy_skip_range(s, start, status_bytes); - start += status_bytes; - continue; - } - /* Clamp to known allocated region */ - chunk_end = MIN(chunk_end, start + status_bytes); + ret = block_copy_block_status(s, start, chunk_end - start, + &status_bytes); + if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) { + bdrv_reset_dirty_bitmap(s->copy_bitmap, start, status_bytes); + progress_set_remaining(s->progress, + bdrv_get_dirty_count(s->copy_bitmap) + + s->in_flight_bytes); + trace_block_copy_skip_range(s, start, status_bytes); + start += status_bytes; + continue; } + chunk_end = MIN(chunk_end, start + status_bytes); + trace_block_copy_process(s, start); bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start); s->in_flight_bytes += chunk_end - start; co_get_from_shres(s->mem, chunk_end - start); - ret = block_copy_do_copy(s, start, chunk_end, error_is_read); + ret = block_copy_do_copy(s, start, chunk_end, ret & BDRV_BLOCK_ZERO, + error_is_read); co_put_to_shres(s->mem, chunk_end - start); s->in_flight_bytes -= chunk_end - start; if (ret < 0) { diff --git a/block/trace-events b/block/trace-events index 1a7329b736..29dff8881c 100644 --- a/block/trace-events +++ b/block/trace-events @@ -48,6 +48,7 @@ block_copy_process(void *bcs, int64_t start) "bcs %p start %"PRId64 block_copy_copy_range_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d" block_copy_read_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d" block_copy_write_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d" +block_copy_write_zeroes_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d" # ../blockdev.c qmp_block_job_cancel(void *job) "job %p" From 17187cb646913356bbd434bebdcddf43f92ce31a Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:30:00 +0300 Subject: [PATCH 15/19] block/block-copy: factor out find_conflicting_inflight_req Split find_conflicting_inflight_req to be used separately. 
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Reviewed-by: Max Reitz Message-Id: <20200311103004.7649-6-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- block/block-copy.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/block/block-copy.c b/block/block-copy.c index b075dba206..251d415a2c 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -24,23 +24,30 @@ #define BLOCK_COPY_MAX_BUFFER (1 * MiB) #define BLOCK_COPY_MAX_MEM (128 * MiB) +static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s, + int64_t start, + int64_t end) +{ + BlockCopyInFlightReq *req; + + QLIST_FOREACH(req, &s->inflight_reqs, list) { + if (end > req->start_byte && start < req->end_byte) { + return req; + } + } + + return NULL; +} + static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s, int64_t start, int64_t end) { BlockCopyInFlightReq *req; - bool waited; - do { - waited = false; - QLIST_FOREACH(req, &s->inflight_reqs, list) { - if (end > req->start_byte && start < req->end_byte) { - qemu_co_queue_wait(&req->wait_queue, NULL); - waited = true; - break; - } - } - } while (waited); + while ((req = find_conflicting_inflight_req(s, start, end))) { + qemu_co_queue_wait(&req->wait_queue, NULL); + } } static void block_copy_inflight_req_begin(BlockCopyState *s, From dafaf13593de240724a210e72da66f9d162735c3 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:30:01 +0300 Subject: [PATCH 16/19] block/block-copy: refactor interfaces to use bytes instead of end We have a lot of "chunk_end - start" invocations, let's switch to bytes/cur_bytes scheme instead. While being here, improve check on block_copy_do_copy parameters to not overflow when calculating nbytes and use int64_t for bytes in block_copy for consistency. 
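
The new assertion in block_copy_do_copy() also makes the overflow concern explicit: the function now asserts start >= 0, bytes > 0 and INT64_MAX - start >= bytes, ruling out wrap-around in the start + bytes used for nbytes = MIN(start + bytes, s->len) - start, and additionally asserts nbytes < INT_MAX.
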
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Reviewed-by: Max Reitz Message-Id: <20200311103004.7649-7-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- block/block-copy.c | 78 ++++++++++++++++++++------------------ include/block/block-copy.h | 6 +-- 2 files changed, 44 insertions(+), 40 deletions(-) diff --git a/block/block-copy.c b/block/block-copy.c index 251d415a2c..4c947e548b 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -26,12 +26,12 @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s, int64_t start, - int64_t end) + int64_t bytes) { BlockCopyInFlightReq *req; QLIST_FOREACH(req, &s->inflight_reqs, list) { - if (end > req->start_byte && start < req->end_byte) { + if (start + bytes > req->start && start < req->start + req->bytes) { return req; } } @@ -41,21 +41,21 @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s, static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s, int64_t start, - int64_t end) + int64_t bytes) { BlockCopyInFlightReq *req; - while ((req = find_conflicting_inflight_req(s, start, end))) { + while ((req = find_conflicting_inflight_req(s, start, bytes))) { qemu_co_queue_wait(&req->wait_queue, NULL); } } static void block_copy_inflight_req_begin(BlockCopyState *s, BlockCopyInFlightReq *req, - int64_t start, int64_t end) + int64_t start, int64_t bytes) { - req->start_byte = start; - req->end_byte = end; + req->start = start; + req->bytes = bytes; qemu_co_queue_init(&req->wait_queue); QLIST_INSERT_HEAD(&s->inflight_reqs, req, list); } @@ -153,24 +153,28 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm) /* * block_copy_do_copy * - * Do copy of cluser-aligned chunk. @end is allowed to exceed s->len only to - * cover last cluster when s->len is not aligned to clusters. + * Do copy of cluster-aligned chunk. Requested region is allowed to exceed + * s->len only to cover last cluster when s->len is not aligned to clusters. * * No sync here: nor bitmap neighter intersecting requests handling, only copy. * * Returns 0 on success. 
*/ static int coroutine_fn block_copy_do_copy(BlockCopyState *s, - int64_t start, int64_t end, + int64_t start, int64_t bytes, bool zeroes, bool *error_is_read) { int ret; - int nbytes = MIN(end, s->len) - start; + int64_t nbytes = MIN(start + bytes, s->len) - start; void *bounce_buffer = NULL; + assert(start >= 0 && bytes > 0 && INT64_MAX - start >= bytes); assert(QEMU_IS_ALIGNED(start, s->cluster_size)); - assert(QEMU_IS_ALIGNED(end, s->cluster_size)); - assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size)); + assert(QEMU_IS_ALIGNED(bytes, s->cluster_size)); + assert(start < s->len); + assert(start + bytes <= s->len || + start + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size)); + assert(nbytes < INT_MAX); if (zeroes) { ret = bdrv_co_pwrite_zeroes(s->target, start, nbytes, s->write_flags & @@ -354,11 +358,10 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s, } int coroutine_fn block_copy(BlockCopyState *s, - int64_t start, uint64_t bytes, + int64_t start, int64_t bytes, bool *error_is_read) { int ret = 0; - int64_t end = bytes + start; /* bytes */ BlockCopyInFlightReq req; /* @@ -369,32 +372,32 @@ int coroutine_fn block_copy(BlockCopyState *s, bdrv_get_aio_context(s->target->bs)); assert(QEMU_IS_ALIGNED(start, s->cluster_size)); - assert(QEMU_IS_ALIGNED(end, s->cluster_size)); + assert(QEMU_IS_ALIGNED(bytes, s->cluster_size)); block_copy_wait_inflight_reqs(s, start, bytes); - block_copy_inflight_req_begin(s, &req, start, end); + block_copy_inflight_req_begin(s, &req, start, bytes); - while (start < end) { - int64_t next_zero, chunk_end, status_bytes; + while (bytes) { + int64_t next_zero, cur_bytes, status_bytes; if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) { trace_block_copy_skip(s, start); start += s->cluster_size; + bytes -= s->cluster_size; continue; /* already copied */ } - chunk_end = MIN(end, start + s->copy_size); + cur_bytes = MIN(bytes, s->copy_size); next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start, - chunk_end - start); + cur_bytes); if (next_zero >= 0) { assert(next_zero > start); /* start is dirty */ - assert(next_zero < chunk_end); /* no need to do MIN() */ - chunk_end = next_zero; + assert(next_zero < start + cur_bytes); /* no need to do MIN() */ + cur_bytes = next_zero - start; } - ret = block_copy_block_status(s, start, chunk_end - start, - &status_bytes); + ret = block_copy_block_status(s, start, cur_bytes, &status_bytes); if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) { bdrv_reset_dirty_bitmap(s->copy_bitmap, start, status_bytes); progress_set_remaining(s->progress, @@ -402,30 +405,31 @@ int coroutine_fn block_copy(BlockCopyState *s, s->in_flight_bytes); trace_block_copy_skip_range(s, start, status_bytes); start += status_bytes; + bytes -= status_bytes; continue; } - chunk_end = MIN(chunk_end, start + status_bytes); + cur_bytes = MIN(cur_bytes, status_bytes); trace_block_copy_process(s, start); - bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start); - s->in_flight_bytes += chunk_end - start; + bdrv_reset_dirty_bitmap(s->copy_bitmap, start, cur_bytes); + s->in_flight_bytes += cur_bytes; - co_get_from_shres(s->mem, chunk_end - start); - ret = block_copy_do_copy(s, start, chunk_end, ret & BDRV_BLOCK_ZERO, + co_get_from_shres(s->mem, cur_bytes); + ret = block_copy_do_copy(s, start, cur_bytes, ret & BDRV_BLOCK_ZERO, error_is_read); - co_put_to_shres(s->mem, chunk_end - start); - s->in_flight_bytes -= chunk_end - start; + co_put_to_shres(s->mem, cur_bytes); + s->in_flight_bytes -= cur_bytes; if (ret < 0) { - 
bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start); + bdrv_set_dirty_bitmap(s->copy_bitmap, start, cur_bytes); break; } - progress_work_done(s->progress, chunk_end - start); - s->progress_bytes_callback(chunk_end - start, s->progress_opaque); - start = chunk_end; - ret = 0; + progress_work_done(s->progress, cur_bytes); + s->progress_bytes_callback(cur_bytes, s->progress_opaque); + start += cur_bytes; + bytes -= cur_bytes; } block_copy_inflight_req_end(&req); diff --git a/include/block/block-copy.h b/include/block/block-copy.h index 9def00068c..7fd36e528b 100644 --- a/include/block/block-copy.h +++ b/include/block/block-copy.h @@ -19,8 +19,8 @@ #include "qemu/co-shared-resource.h" typedef struct BlockCopyInFlightReq { - int64_t start_byte; - int64_t end_byte; + int64_t start; + int64_t bytes; QLIST_ENTRY(BlockCopyInFlightReq) list; CoQueue wait_queue; /* coroutines blocked on this request */ } BlockCopyInFlightReq; @@ -85,7 +85,7 @@ void block_copy_state_free(BlockCopyState *s); int64_t block_copy_reset_unallocated(BlockCopyState *s, int64_t offset, int64_t *count); -int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes, +int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes, bool *error_is_read); #endif /* BLOCK_COPY_H */ From 8719091f9d591595ea6876d25de9c0956951c513 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:30:02 +0300 Subject: [PATCH 17/19] block/block-copy: rename start to offset in interfaces offset/bytes pair is more usual naming in block layer, let's use it. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Reviewed-by: Max Reitz Message-Id: <20200311103004.7649-8-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- block/block-copy.c | 82 +++++++++++++++++++------------------- include/block/block-copy.h | 4 +- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/block/block-copy.c b/block/block-copy.c index 4c947e548b..8b91fa0b06 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -25,13 +25,13 @@ #define BLOCK_COPY_MAX_MEM (128 * MiB) static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s, - int64_t start, + int64_t offset, int64_t bytes) { BlockCopyInFlightReq *req; QLIST_FOREACH(req, &s->inflight_reqs, list) { - if (start + bytes > req->start && start < req->start + req->bytes) { + if (offset + bytes > req->offset && offset < req->offset + req->bytes) { return req; } } @@ -40,21 +40,21 @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s, } static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s, - int64_t start, + int64_t offset, int64_t bytes) { BlockCopyInFlightReq *req; - while ((req = find_conflicting_inflight_req(s, start, bytes))) { + while ((req = find_conflicting_inflight_req(s, offset, bytes))) { qemu_co_queue_wait(&req->wait_queue, NULL); } } static void block_copy_inflight_req_begin(BlockCopyState *s, BlockCopyInFlightReq *req, - int64_t start, int64_t bytes) + int64_t offset, int64_t bytes) { - req->start = start; + req->offset = offset; req->bytes = bytes; qemu_co_queue_init(&req->wait_queue); QLIST_INSERT_HEAD(&s->inflight_reqs, req, list); @@ -161,26 +161,26 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm) * Returns 0 on success. 
*/ static int coroutine_fn block_copy_do_copy(BlockCopyState *s, - int64_t start, int64_t bytes, + int64_t offset, int64_t bytes, bool zeroes, bool *error_is_read) { int ret; - int64_t nbytes = MIN(start + bytes, s->len) - start; + int64_t nbytes = MIN(offset + bytes, s->len) - offset; void *bounce_buffer = NULL; - assert(start >= 0 && bytes > 0 && INT64_MAX - start >= bytes); - assert(QEMU_IS_ALIGNED(start, s->cluster_size)); + assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes); + assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); assert(QEMU_IS_ALIGNED(bytes, s->cluster_size)); - assert(start < s->len); - assert(start + bytes <= s->len || - start + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size)); + assert(offset < s->len); + assert(offset + bytes <= s->len || + offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size)); assert(nbytes < INT_MAX); if (zeroes) { - ret = bdrv_co_pwrite_zeroes(s->target, start, nbytes, s->write_flags & + ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags & ~BDRV_REQ_WRITE_COMPRESSED); if (ret < 0) { - trace_block_copy_write_zeroes_fail(s, start, ret); + trace_block_copy_write_zeroes_fail(s, offset, ret); if (error_is_read) { *error_is_read = false; } @@ -189,10 +189,10 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s, } if (s->use_copy_range) { - ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes, + ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes, 0, s->write_flags); if (ret < 0) { - trace_block_copy_copy_range_fail(s, start, ret); + trace_block_copy_copy_range_fail(s, offset, ret); s->use_copy_range = false; s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER); /* Fallback to read+write with allocated buffer */ @@ -228,19 +228,19 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s, bounce_buffer = qemu_blockalign(s->source->bs, nbytes); - ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0); + ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0); if (ret < 0) { - trace_block_copy_read_fail(s, start, ret); + trace_block_copy_read_fail(s, offset, ret); if (error_is_read) { *error_is_read = true; } goto out; } - ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer, + ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer, s->write_flags); if (ret < 0) { - trace_block_copy_write_fail(s, start, ret); + trace_block_copy_write_fail(s, offset, ret); if (error_is_read) { *error_is_read = false; } @@ -358,7 +358,7 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s, } int coroutine_fn block_copy(BlockCopyState *s, - int64_t start, int64_t bytes, + int64_t offset, int64_t bytes, bool *error_is_read) { int ret = 0; @@ -371,64 +371,64 @@ int coroutine_fn block_copy(BlockCopyState *s, assert(bdrv_get_aio_context(s->source->bs) == bdrv_get_aio_context(s->target->bs)); - assert(QEMU_IS_ALIGNED(start, s->cluster_size)); + assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); assert(QEMU_IS_ALIGNED(bytes, s->cluster_size)); - block_copy_wait_inflight_reqs(s, start, bytes); - block_copy_inflight_req_begin(s, &req, start, bytes); + block_copy_wait_inflight_reqs(s, offset, bytes); + block_copy_inflight_req_begin(s, &req, offset, bytes); while (bytes) { int64_t next_zero, cur_bytes, status_bytes; - if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) { - trace_block_copy_skip(s, start); - start += s->cluster_size; + if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) { + trace_block_copy_skip(s, offset); + offset += s->cluster_size; bytes -= 
s->cluster_size; continue; /* already copied */ } cur_bytes = MIN(bytes, s->copy_size); - next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start, + next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset, cur_bytes); if (next_zero >= 0) { - assert(next_zero > start); /* start is dirty */ - assert(next_zero < start + cur_bytes); /* no need to do MIN() */ - cur_bytes = next_zero - start; + assert(next_zero > offset); /* offset is dirty */ + assert(next_zero < offset + cur_bytes); /* no need to do MIN() */ + cur_bytes = next_zero - offset; } - ret = block_copy_block_status(s, start, cur_bytes, &status_bytes); + ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes); if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) { - bdrv_reset_dirty_bitmap(s->copy_bitmap, start, status_bytes); + bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, status_bytes); progress_set_remaining(s->progress, bdrv_get_dirty_count(s->copy_bitmap) + s->in_flight_bytes); - trace_block_copy_skip_range(s, start, status_bytes); - start += status_bytes; + trace_block_copy_skip_range(s, offset, status_bytes); + offset += status_bytes; bytes -= status_bytes; continue; } cur_bytes = MIN(cur_bytes, status_bytes); - trace_block_copy_process(s, start); + trace_block_copy_process(s, offset); - bdrv_reset_dirty_bitmap(s->copy_bitmap, start, cur_bytes); + bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, cur_bytes); s->in_flight_bytes += cur_bytes; co_get_from_shres(s->mem, cur_bytes); - ret = block_copy_do_copy(s, start, cur_bytes, ret & BDRV_BLOCK_ZERO, + ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO, error_is_read); co_put_to_shres(s->mem, cur_bytes); s->in_flight_bytes -= cur_bytes; if (ret < 0) { - bdrv_set_dirty_bitmap(s->copy_bitmap, start, cur_bytes); + bdrv_set_dirty_bitmap(s->copy_bitmap, offset, cur_bytes); break; } progress_work_done(s->progress, cur_bytes); s->progress_bytes_callback(cur_bytes, s->progress_opaque); - start += cur_bytes; + offset += cur_bytes; bytes -= cur_bytes; } diff --git a/include/block/block-copy.h b/include/block/block-copy.h index 7fd36e528b..b76efb736f 100644 --- a/include/block/block-copy.h +++ b/include/block/block-copy.h @@ -19,7 +19,7 @@ #include "qemu/co-shared-resource.h" typedef struct BlockCopyInFlightReq { - int64_t start; + int64_t offset; int64_t bytes; QLIST_ENTRY(BlockCopyInFlightReq) list; CoQueue wait_queue; /* coroutines blocked on this request */ @@ -85,7 +85,7 @@ void block_copy_state_free(BlockCopyState *s); int64_t block_copy_reset_unallocated(BlockCopyState *s, int64_t offset, int64_t *count); -int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes, +int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes, bool *error_is_read); #endif /* BLOCK_COPY_H */ From 5332e5d21060c48511a73bc5dccd15960f7fa395 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:30:03 +0300 Subject: [PATCH 18/19] block/block-copy: reduce intersecting request lock Currently, block_copy operation lock the whole requested region. But there is no reason to lock clusters, which are already copied, it will disturb other parallel block_copy requests for no reason. Let's instead do the following: Lock only sub-region, which we are going to operate on. Then, after copying all dirty sub-regions, we should wait for intersecting requests block-copy, if they failed, we should retry these new dirty clusters. 
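In outline, the resulting top-level loop looks like this; a simplified sketch of the code added by this patch, with the helper bodies elided (the real implementation is in the diff below):

    /* Simplified sketch of the new block_copy() retry loop; helper bodies
     * are elided, see the diff below for the actual implementation. */
    int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                                bool *error_is_read)
    {
        int ret;

        do {
            /* Copy what is currently dirty; only the sub-region actually
             * being processed is locked by an in-flight request. */
            ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read);

            if (ret == 0) {
                /* No dirty clusters left: wait for one intersecting
                 * in-flight request, which may fail and produce new
                 * dirty bits in our range. */
                ret = block_copy_wait_one(s, offset, bytes);
            }
        } while (ret > 0); /* retry after progress or after waiting */

        return ret;
    }
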
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Message-Id: <20200311103004.7649-9-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- block/block-copy.c | 129 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 105 insertions(+), 24 deletions(-) diff --git a/block/block-copy.c b/block/block-copy.c index 8b91fa0b06..44a64a94c8 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -39,29 +39,72 @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s, return NULL; } -static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s, - int64_t offset, - int64_t bytes) +/* + * If there are no intersecting requests return false. Otherwise, wait for the + * first found intersecting request to finish and return true. + */ +static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset, + int64_t bytes) { - BlockCopyInFlightReq *req; + BlockCopyInFlightReq *req = find_conflicting_inflight_req(s, offset, bytes); - while ((req = find_conflicting_inflight_req(s, offset, bytes))) { - qemu_co_queue_wait(&req->wait_queue, NULL); + if (!req) { + return false; } + + qemu_co_queue_wait(&req->wait_queue, NULL); + + return true; } +/* Called only on full-dirty region */ static void block_copy_inflight_req_begin(BlockCopyState *s, BlockCopyInFlightReq *req, int64_t offset, int64_t bytes) { + assert(!find_conflicting_inflight_req(s, offset, bytes)); + + bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes); + s->in_flight_bytes += bytes; + req->offset = offset; req->bytes = bytes; qemu_co_queue_init(&req->wait_queue); QLIST_INSERT_HEAD(&s->inflight_reqs, req, list); } -static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req) +/* + * block_copy_inflight_req_shrink + * + * Drop the tail of the request to be handled later. Set dirty bits back and + * wake up all requests waiting for us (may be some of them are not intersecting + * with shrunk request) + */ +static void coroutine_fn block_copy_inflight_req_shrink(BlockCopyState *s, + BlockCopyInFlightReq *req, int64_t new_bytes) { + if (new_bytes == req->bytes) { + return; + } + + assert(new_bytes > 0 && new_bytes < req->bytes); + + s->in_flight_bytes -= req->bytes - new_bytes; + bdrv_set_dirty_bitmap(s->copy_bitmap, + req->offset + new_bytes, req->bytes - new_bytes); + + req->bytes = new_bytes; + qemu_co_queue_restart_all(&req->wait_queue); +} + +static void coroutine_fn block_copy_inflight_req_end(BlockCopyState *s, + BlockCopyInFlightReq *req, + int ret) +{ + s->in_flight_bytes -= req->bytes; + if (ret < 0) { + bdrv_set_dirty_bitmap(s->copy_bitmap, req->offset, req->bytes); + } QLIST_REMOVE(req, list); qemu_co_queue_restart_all(&req->wait_queue); } @@ -357,12 +400,19 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s, return ret; } -int coroutine_fn block_copy(BlockCopyState *s, - int64_t offset, int64_t bytes, - bool *error_is_read) +/* + * block_copy_dirty_clusters + * + * Copy dirty clusters in @offset/@bytes range. + * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty + * clusters found and -errno on failure. 
+ */ +static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s, + int64_t offset, int64_t bytes, + bool *error_is_read) { int ret = 0; - BlockCopyInFlightReq req; + bool found_dirty = false; /* * block_copy() user is responsible for keeping source and target in same @@ -374,10 +424,8 @@ int coroutine_fn block_copy(BlockCopyState *s, assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); assert(QEMU_IS_ALIGNED(bytes, s->cluster_size)); - block_copy_wait_inflight_reqs(s, offset, bytes); - block_copy_inflight_req_begin(s, &req, offset, bytes); - while (bytes) { + BlockCopyInFlightReq req; int64_t next_zero, cur_bytes, status_bytes; if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) { @@ -387,6 +435,8 @@ int coroutine_fn block_copy(BlockCopyState *s, continue; /* already copied */ } + found_dirty = true; + cur_bytes = MIN(bytes, s->copy_size); next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset, @@ -396,10 +446,14 @@ int coroutine_fn block_copy(BlockCopyState *s, assert(next_zero < offset + cur_bytes); /* no need to do MIN() */ cur_bytes = next_zero - offset; } + block_copy_inflight_req_begin(s, &req, offset, cur_bytes); ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes); + assert(ret >= 0); /* never fail */ + cur_bytes = MIN(cur_bytes, status_bytes); + block_copy_inflight_req_shrink(s, &req, cur_bytes); if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) { - bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, status_bytes); + block_copy_inflight_req_end(s, &req, 0); progress_set_remaining(s->progress, bdrv_get_dirty_count(s->copy_bitmap) + s->in_flight_bytes); @@ -409,21 +463,15 @@ int coroutine_fn block_copy(BlockCopyState *s, continue; } - cur_bytes = MIN(cur_bytes, status_bytes); - trace_block_copy_process(s, offset); - bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, cur_bytes); - s->in_flight_bytes += cur_bytes; - co_get_from_shres(s->mem, cur_bytes); ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO, error_is_read); co_put_to_shres(s->mem, cur_bytes); - s->in_flight_bytes -= cur_bytes; + block_copy_inflight_req_end(s, &req, ret); if (ret < 0) { - bdrv_set_dirty_bitmap(s->copy_bitmap, offset, cur_bytes); - break; + return ret; } progress_work_done(s->progress, cur_bytes); @@ -432,7 +480,40 @@ int coroutine_fn block_copy(BlockCopyState *s, bytes -= cur_bytes; } - block_copy_inflight_req_end(&req); + return found_dirty; +} + +/* + * block_copy + * + * Copy requested region, accordingly to dirty bitmap. + * Collaborate with parallel block_copy requests: if they succeed it will help + * us. If they fail, we will retry not-copied regions. So, if we return error, + * it means that some I/O operation failed in context of _this_ block_copy call, + * not some parallel operation. + */ +int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes, + bool *error_is_read) +{ + int ret; + + do { + ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read); + + if (ret == 0) { + ret = block_copy_wait_one(s, offset, bytes); + } + + /* + * We retry in two cases: + * 1. Some progress done + * Something was copied, which means that there were yield points + * and some new dirty bits may have appeared (due to failed parallel + * block-copy requests). + * 2. We have waited for some intersecting block-copy request + * It may have failed and produced new dirty bits. 
+ */ + } while (ret > 0); return ret; } From 397f4e9d83e9c0000905f0a988ba1aeda162571c Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Wed, 11 Mar 2020 13:30:04 +0300 Subject: [PATCH 19/19] block/block-copy: hide structure definitions Hide structure definitions and add explicit API instead, to keep an eye on the scope of the shared fields. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Andrey Shinkevich Reviewed-by: Max Reitz Message-Id: <20200311103004.7649-10-vsementsov@virtuozzo.com> Signed-off-by: Max Reitz --- block/backup-top.c | 6 ++-- block/backup.c | 25 ++++++++-------- block/block-copy.c | 59 ++++++++++++++++++++++++++++++++++++++ include/block/block-copy.h | 52 +++------------------------------ 4 files changed, 80 insertions(+), 62 deletions(-) diff --git a/block/backup-top.c b/block/backup-top.c index 1bfb360bd3..3b50c06e2c 100644 --- a/block/backup-top.c +++ b/block/backup-top.c @@ -38,6 +38,7 @@ typedef struct BDRVBackupTopState { BlockCopyState *bcs; BdrvChild *target; bool active; + int64_t cluster_size; } BDRVBackupTopState; static coroutine_fn int backup_top_co_preadv( @@ -57,8 +58,8 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset, return 0; } - off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size); - end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size); + off = QEMU_ALIGN_DOWN(offset, s->cluster_size); + end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size); return block_copy(s->bcs, off, end - off, NULL); } @@ -238,6 +239,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source, goto fail; } + state->cluster_size = cluster_size; state->bcs = block_copy_state_new(top->backing, state->target, cluster_size, write_flags, &local_err); if (local_err) { diff --git a/block/backup.c b/block/backup.c index 8694e0394b..7430ca5883 100644 --- a/block/backup.c +++ b/block/backup.c @@ -102,7 +102,7 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret) if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) { /* If we failed and synced, merge in the bits we didn't copy: */ - bdrv_dirty_bitmap_merge_internal(bm, job->bcs->copy_bitmap, + bdrv_dirty_bitmap_merge_internal(bm, block_copy_dirty_bitmap(job->bcs), NULL, true); } } @@ -145,7 +145,8 @@ void backup_do_checkpoint(BlockJob *job, Error **errp) return; } - bdrv_set_dirty_bitmap(backup_job->bcs->copy_bitmap, 0, backup_job->len); + bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(backup_job->bcs), 0, + backup_job->len); } static BlockErrorAction backup_error_action(BackupBlockJob *job, @@ -190,7 +191,7 @@ static int coroutine_fn backup_loop(BackupBlockJob *job) BdrvDirtyBitmapIter *bdbi; int ret = 0; - bdbi = bdrv_dirty_iter_new(job->bcs->copy_bitmap); + bdbi = bdrv_dirty_iter_new(block_copy_dirty_bitmap(job->bcs)); while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) { do { if (yield_and_check(job)) { @@ -210,14 +211,14 @@ static int coroutine_fn backup_loop(BackupBlockJob *job) return ret; } -static void backup_init_copy_bitmap(BackupBlockJob *job) +static void backup_init_bcs_bitmap(BackupBlockJob *job) { bool ret; uint64_t estimate; + BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs); if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) { - ret = bdrv_dirty_bitmap_merge_internal(job->bcs->copy_bitmap, - job->sync_bitmap, + ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap, NULL, true); assert(ret); } else { @@ -226,12 +227,12 @@ static void backup_init_copy_bitmap(BackupBlockJob *job) * We can't hog the coroutine 
to initialize this thoroughly. * Set a flag and resume work when we are able to yield safely. */ - job->bcs->skip_unallocated = true; + block_copy_set_skip_unallocated(job->bcs, true); } - bdrv_set_dirty_bitmap(job->bcs->copy_bitmap, 0, job->len); + bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len); } - estimate = bdrv_get_dirty_count(job->bcs->copy_bitmap); + estimate = bdrv_get_dirty_count(bcs_bitmap); job_progress_set_remaining(&job->common.job, estimate); } @@ -240,7 +241,7 @@ static int coroutine_fn backup_run(Job *job, Error **errp) BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); int ret = 0; - backup_init_copy_bitmap(s); + backup_init_bcs_bitmap(s); if (s->sync_mode == MIRROR_SYNC_MODE_TOP) { int64_t offset = 0; @@ -259,12 +260,12 @@ static int coroutine_fn backup_run(Job *job, Error **errp) offset += count; } - s->bcs->skip_unallocated = false; + block_copy_set_skip_unallocated(s->bcs, false); } if (s->sync_mode == MIRROR_SYNC_MODE_NONE) { /* - * All bits are set in copy_bitmap to allow any cluster to be copied. + * All bits are set in bcs bitmap to allow any cluster to be copied. * This does not actually require them to be copied. */ while (!job_is_cancelled(job)) { diff --git a/block/block-copy.c b/block/block-copy.c index 44a64a94c8..05227e18bf 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -24,6 +24,55 @@ #define BLOCK_COPY_MAX_BUFFER (1 * MiB) #define BLOCK_COPY_MAX_MEM (128 * MiB) +typedef struct BlockCopyInFlightReq { + int64_t offset; + int64_t bytes; + QLIST_ENTRY(BlockCopyInFlightReq) list; + CoQueue wait_queue; /* coroutines blocked on this request */ +} BlockCopyInFlightReq; + +typedef struct BlockCopyState { + /* + * BdrvChild objects are not owned or managed by block-copy. They are + * provided by block-copy user and user is responsible for appropriate + * permissions on these children. + */ + BdrvChild *source; + BdrvChild *target; + BdrvDirtyBitmap *copy_bitmap; + int64_t in_flight_bytes; + int64_t cluster_size; + bool use_copy_range; + int64_t copy_size; + uint64_t len; + QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs; + + BdrvRequestFlags write_flags; + + /* + * skip_unallocated: + * + * Used by sync=top jobs, which first scan the source node for unallocated + * areas and clear them in the copy_bitmap. During this process, the bitmap + * is thus not fully initialized: It may still have bits set for areas that + * are unallocated and should actually not be copied. + * + * This is indicated by skip_unallocated. + * + * In this case, block_copy() will query the source’s allocation status, + * skip unallocated regions, clear them in the copy_bitmap, and invoke + * block_copy_reset_unallocated() every time it does. + */ + bool skip_unallocated; + + ProgressMeter *progress; + /* progress_bytes_callback: called when some copying progress is done. 
*/ + ProgressBytesCallbackFunc progress_bytes_callback; + void *progress_opaque; + + SharedResource *mem; +} BlockCopyState; + static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s, int64_t offset, int64_t bytes) @@ -517,3 +566,13 @@ int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes, return ret; } + +BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s) +{ + return s->copy_bitmap; +} + +void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip) +{ + s->skip_unallocated = skip; +} diff --git a/include/block/block-copy.h b/include/block/block-copy.h index b76efb736f..aac85e1488 100644 --- a/include/block/block-copy.h +++ b/include/block/block-copy.h @@ -18,55 +18,8 @@ #include "block/block.h" #include "qemu/co-shared-resource.h" -typedef struct BlockCopyInFlightReq { - int64_t offset; - int64_t bytes; - QLIST_ENTRY(BlockCopyInFlightReq) list; - CoQueue wait_queue; /* coroutines blocked on this request */ -} BlockCopyInFlightReq; - typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque); -typedef struct BlockCopyState { - /* - * BdrvChild objects are not owned or managed by block-copy. They are - * provided by block-copy user and user is responsible for appropriate - * permissions on these children. - */ - BdrvChild *source; - BdrvChild *target; - BdrvDirtyBitmap *copy_bitmap; - int64_t in_flight_bytes; - int64_t cluster_size; - bool use_copy_range; - int64_t copy_size; - uint64_t len; - QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs; - - BdrvRequestFlags write_flags; - - /* - * skip_unallocated: - * - * Used by sync=top jobs, which first scan the source node for unallocated - * areas and clear them in the copy_bitmap. During this process, the bitmap - * is thus not fully initialized: It may still have bits set for areas that - * are unallocated and should actually not be copied. - * - * This is indicated by skip_unallocated. - * - * In this case, block_copy() will query the source’s allocation status, - * skip unallocated regions, clear them in the copy_bitmap, and invoke - * block_copy_reset_unallocated() every time it does. - */ - bool skip_unallocated; - - ProgressMeter *progress; - /* progress_bytes_callback: called when some copying progress is done. */ - ProgressBytesCallbackFunc progress_bytes_callback; - void *progress_opaque; - - SharedResource *mem; -} BlockCopyState; +typedef struct BlockCopyState BlockCopyState; BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, int64_t cluster_size, @@ -88,4 +41,7 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s, int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes, bool *error_is_read); +BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s); +void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip); + #endif /* BLOCK_COPY_H */
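Usage note for the final API: with BlockCopyState now opaque, callers interact with block-copy only through the exported functions. Below is an illustrative, non-authoritative sketch of such a caller; the example_* function names and the include list are invented for the example, while the block-copy and dirty-bitmap calls follow the prototypes used in this series.

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/block-copy.h"
    #include "block/dirty-bitmap.h"

    /*
     * Hypothetical caller: copy a byte range through the opaque block-copy
     * API. The range is widened to cluster boundaries first, mirroring what
     * backup-top does in its copy-before-write handler. Passing NULL for
     * error_is_read is allowed when the caller does not care.
     */
    static int coroutine_fn example_copy_aligned_range(BlockCopyState *bcs,
                                                       int64_t cluster_size,
                                                       int64_t offset,
                                                       int64_t bytes)
    {
        int64_t start = QEMU_ALIGN_DOWN(offset, cluster_size);
        int64_t end = QEMU_ALIGN_UP(offset + bytes, cluster_size);

        return block_copy(bcs, start, end - start, NULL);
    }

    /*
     * Hypothetical setup: mark the whole device dirty through the accessor
     * (the copy_bitmap field is no longer visible to callers) and enable
     * skip_unallocated for sync=top-style jobs.
     */
    static void example_init_bcs_bitmap(BlockCopyState *bcs, int64_t len,
                                        bool sync_top)
    {
        bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(bcs), 0, len);
        block_copy_set_skip_unallocated(bcs, sync_top);
    }
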