diff --git a/debian/patches/pve/0038-block-io-accept-NULL-qiov-in-bdrv_pad_request.patch b/debian/patches/pve/0038-block-io-accept-NULL-qiov-in-bdrv_pad_request.patch
index 851851f..bb9b72c 100644
--- a/debian/patches/pve/0038-block-io-accept-NULL-qiov-in-bdrv_pad_request.patch
+++ b/debian/patches/pve/0038-block-io-accept-NULL-qiov-in-bdrv_pad_request.patch
@@ -8,26 +8,57 @@ results (only copy-on-read matters). In this case they will pass NULL as
 the target QEMUIOVector, which will however trip bdrv_pad_request, since
 it wants to extend its passed vector.
 
-Simply check for NULL and do nothing, there's no reason to pad the
-target if it will be discarded anyway.
+If there is no qiov, no operation can be done with it, but the bytes
+and offset still need to be updated, so the subsequent aligned read
+will actually be aligned and not run into an assertion failure.
 
 Signed-off-by: Thomas Lamprecht
+[FE: do update bytes and offset in any case]
+Signed-off-by: Fiona Ebner
 ---
- block/io.c | 4 ++++
- 1 file changed, 4 insertions(+)
+ block/io.c | 29 ++++++++++++++++-------------
+ 1 file changed, 16 insertions(+), 13 deletions(-)
 
 diff --git a/block/io.c b/block/io.c
-index 83d1b1dfdc..24a3c84c93 100644
+index 83d1b1dfdc..e927881e40 100644
 --- a/block/io.c
 +++ b/block/io.c
-@@ -1710,6 +1710,10 @@ static int bdrv_pad_request(BlockDriverState *bs,
-     int sliced_niov;
-     size_t sliced_head, sliced_tail;
+@@ -1723,22 +1723,25 @@ static int bdrv_pad_request(BlockDriverState *bs,
+         return 0;
+     }
  
-+    if (!qiov) {
-+        return 0;
-+    }
+-    sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
+-                                  &sliced_head, &sliced_tail,
+-                                  &sliced_niov);
+-
+-    /* Guaranteed by bdrv_check_request32() */
+-    assert(*bytes <= SIZE_MAX);
+-    ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
+-                                  sliced_head, *bytes);
+-    if (ret < 0) {
+-        bdrv_padding_finalize(pad);
+-        return ret;
++    if (qiov && *qiov) {
++        sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
++                                      &sliced_head, &sliced_tail,
++                                      &sliced_niov);
 +
-     /* Should have been checked by the caller already */
-     ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
-     if (ret < 0) {
++        /* Guaranteed by bdrv_check_request32() */
++        assert(*bytes <= SIZE_MAX);
++        ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
++                                      sliced_head, *bytes);
++        if (ret < 0) {
++            bdrv_padding_finalize(pad);
++            return ret;
++        }
++        *qiov = &pad->local_qiov;
++        *qiov_offset = 0;
+     }
++
+     *bytes += pad->head + pad->tail;
+     *offset -= pad->head;
+-    *qiov = &pad->local_qiov;
+-    *qiov_offset = 0;
+     if (padded) {
+         *padded = true;
+     }