Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2019-10-10' into staging

Block patches:
- Parallelized request handling for qcow2
- Backup job refactoring to use a filter node instead of before-write
  notifiers
- Add discard accounting information to file-posix nodes
- Allow trivial reopening of nbd nodes
- Some iotest fixes

# gpg: Signature made Thu 10 Oct 2019 12:40:34 BST
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2019-10-10: (36 commits)
  iotests/162: Fix for newer Linux 5.3+
  tests: fix I/O test for hosts defaulting to LUKSv2
  nbd: add empty .bdrv_reopen_prepare
  block/backup: use backup-top instead of write notifiers
  block: introduce backup-top filter driver
  block/block-copy: split block_copy_set_callbacks function
  block/backup: move write_flags calculation inside backup_job_create
  block/backup: move in-flight requests handling from backup to block-copy
  iotests: Use stat -c %b in 125
  iotests: Disable 125 on broken XFS versions
  iotests: Fix 125 for growth_mode = metadata
  qapi: query-blockstat: add driver specific file-posix stats
  file-posix: account discard operations
  scsi: account unmap operations
  scsi: move unmap error checking to the complete callback
  scsi: store unmap offset and nb_sectors in request struct
  ide: account UNMAP (TRIM) operations
  block: add empty account cookie type
  qapi: add unmap to BlockDeviceStats
  qapi: group BlockDeviceStats fields
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2019-10-14 12:26:37 +01:00
commit 088d67096d
40 changed files with 2108 additions and 1249 deletions

block.c

@@ -5155,6 +5155,15 @@ ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
return NULL;
}
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
if (!drv || !drv->bdrv_get_specific_stats) {
return NULL;
}
return drv->bdrv_get_specific_stats(bs);
}
void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event)
{
if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
@@ -5164,14 +5173,35 @@ void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event)
bs->drv->bdrv_debug_event(bs, event);
}
int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
const char *tag)
static BlockDriverState *bdrv_find_debug_node(BlockDriverState *bs)
{
while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
bs = bs->file ? bs->file->bs : NULL;
if (bs->file) {
bs = bs->file->bs;
continue;
}
if (bs->drv->is_filter && bs->backing) {
bs = bs->backing->bs;
continue;
}
break;
}
if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
assert(bs->drv->bdrv_debug_remove_breakpoint);
return bs;
}
return NULL;
}
int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
const char *tag)
{
bs = bdrv_find_debug_node(bs);
if (bs) {
return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
}
@@ -5180,11 +5210,8 @@ int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
bs = bs->file ? bs->file->bs : NULL;
}
if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
bs = bdrv_find_debug_node(bs);
if (bs) {
return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
}

block/Makefile.objs

@@ -37,9 +37,13 @@ block-obj-y += write-threshold.o
block-obj-y += backup.o
block-obj-$(CONFIG_REPLICATION) += replication.o
block-obj-y += throttle.o copy-on-read.o
block-obj-y += block-copy.o
block-obj-y += crypto.o
block-obj-y += aio_task.o
block-obj-y += backup-top.o
common-obj-y += stream.o
nfs.o-libs := $(LIBNFS_LIBS)

block/accounting.c

@@ -195,6 +195,10 @@ static void block_account_one_io(BlockAcctStats *stats, BlockAcctCookie *cookie,
assert(cookie->type < BLOCK_MAX_IOTYPE);
if (cookie->type == BLOCK_ACCT_NONE) {
return;
}
qemu_mutex_lock(&stats->lock);
if (failed) {
@@ -217,6 +221,8 @@ static void block_account_one_io(BlockAcctStats *stats, BlockAcctCookie *cookie,
}
qemu_mutex_unlock(&stats->lock);
cookie->type = BLOCK_ACCT_NONE;
}
void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie)

block/aio_task.c Normal file

@@ -0,0 +1,124 @@
/*
* Aio tasks loops
*
* Copyright (c) 2019 Virtuozzo International GmbH.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/aio_task.h"
struct AioTaskPool {
Coroutine *main_co;
int status;
int max_busy_tasks;
int busy_tasks;
bool waiting;
};
static void coroutine_fn aio_task_co(void *opaque)
{
AioTask *task = opaque;
AioTaskPool *pool = task->pool;
assert(pool->busy_tasks < pool->max_busy_tasks);
pool->busy_tasks++;
task->ret = task->func(task);
pool->busy_tasks--;
if (task->ret < 0 && pool->status == 0) {
pool->status = task->ret;
}
g_free(task);
if (pool->waiting) {
pool->waiting = false;
aio_co_wake(pool->main_co);
}
}
void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool)
{
assert(pool->busy_tasks > 0);
assert(qemu_coroutine_self() == pool->main_co);
pool->waiting = true;
qemu_coroutine_yield();
assert(!pool->waiting);
assert(pool->busy_tasks < pool->max_busy_tasks);
}
void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool)
{
if (pool->busy_tasks < pool->max_busy_tasks) {
return;
}
aio_task_pool_wait_one(pool);
}
void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool)
{
while (pool->busy_tasks > 0) {
aio_task_pool_wait_one(pool);
}
}
void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task)
{
aio_task_pool_wait_slot(pool);
task->pool = pool;
qemu_coroutine_enter(qemu_coroutine_create(aio_task_co, task));
}
AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks)
{
AioTaskPool *pool = g_new0(AioTaskPool, 1);
pool->main_co = qemu_coroutine_self();
pool->max_busy_tasks = max_busy_tasks;
return pool;
}
void aio_task_pool_free(AioTaskPool *pool)
{
g_free(pool);
}
int aio_task_pool_status(AioTaskPool *pool)
{
if (!pool) {
return 0; /* Sugar for lazy allocation of aio pool */
}
return pool->status;
}
bool aio_task_pool_empty(AioTaskPool *pool)
{
return pool->busy_tasks == 0;
}
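
For reference, the pool is meant to be driven from a single coroutine: each task embeds an AioTask, tasks are started one by one (the caller yields while all slots are busy), and the pool remembers the first failure. A minimal usage sketch under those assumptions; MyTask, my_task_func and my_run_all are hypothetical caller-side names, not part of this series:

/* Hypothetical task type; AioTask is embedded so that the pool's
 * g_free(task) in aio_task_co() releases the whole allocation. */
typedef struct MyTask {
    AioTask task;
    int64_t offset;      /* example payload */
} MyTask;

static int coroutine_fn my_task_func(AioTask *task)
{
    MyTask *t = container_of(task, MyTask, task);

    /* A real task would do one unit of I/O at t->offset here. */
    return t->offset >= 0 ? 0 : -EINVAL;
}

static int coroutine_fn my_run_all(int64_t nb_chunks)
{
    int64_t i;
    int ret;
    AioTaskPool *pool = aio_task_pool_new(4 /* max parallel tasks */);

    for (i = 0; i < nb_chunks && aio_task_pool_status(pool) == 0; i++) {
        MyTask *t = g_new(MyTask, 1);

        *t = (MyTask) {
            .task.func = my_task_func,
            .offset = i,
        };
        /* Yields while all slots are busy; the pool frees the task when done. */
        aio_task_pool_start_task(pool, &t->task);
    }

    aio_task_pool_wait_all(pool);        /* drain the remaining tasks */
    ret = aio_task_pool_status(pool);    /* 0, or the first error seen */
    aio_task_pool_free(pool);
    return ret;
}

The qcow2 changes below follow exactly this pattern in their read and write paths, with Qcow2AioTask as the embedding struct.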

block/backup-top.c Normal file

@@ -0,0 +1,276 @@
/*
* backup-top filter driver
*
* The driver performs Copy-Before-Write (CBW) operation: it is injected above
* some node, and before each write it copies _old_ data to the target node.
*
* Copyright (c) 2018-2019 Virtuozzo International GmbH.
*
* Author:
* Sementsov-Ogievskiy Vladimir <vsementsov@virtuozzo.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "block/block_int.h"
#include "block/qdict.h"
#include "block/block-copy.h"
#include "block/backup-top.h"
typedef struct BDRVBackupTopState {
BlockCopyState *bcs;
BdrvChild *target;
bool active;
} BDRVBackupTopState;
static coroutine_fn int backup_top_co_preadv(
BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
uint64_t bytes)
{
BDRVBackupTopState *s = bs->opaque;
uint64_t end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size);
uint64_t off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size);
return block_copy(s->bcs, off, end - off, NULL);
}
static int coroutine_fn backup_top_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
{
int ret = backup_top_cbw(bs, offset, bytes);
if (ret < 0) {
return ret;
}
return bdrv_co_pdiscard(bs->backing, offset, bytes);
}
static int coroutine_fn backup_top_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
{
int ret = backup_top_cbw(bs, offset, bytes);
if (ret < 0) {
return ret;
}
return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}
static coroutine_fn int backup_top_co_pwritev(BlockDriverState *bs,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
if (!(flags & BDRV_REQ_WRITE_UNCHANGED)) {
int ret = backup_top_cbw(bs, offset, bytes);
if (ret < 0) {
return ret;
}
}
return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}
static int coroutine_fn backup_top_co_flush(BlockDriverState *bs)
{
if (!bs->backing) {
return 0;
}
return bdrv_co_flush(bs->backing->bs);
}
static void backup_top_refresh_filename(BlockDriverState *bs)
{
if (bs->backing == NULL) {
/*
* we can be here after failed bdrv_attach_child in
* bdrv_set_backing_hd
*/
return;
}
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
bs->backing->bs->filename);
}
static void backup_top_child_perm(BlockDriverState *bs, BdrvChild *c,
const BdrvChildRole *role,
BlockReopenQueue *reopen_queue,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
BDRVBackupTopState *s = bs->opaque;
if (!s->active) {
/*
* The filter node may be in process of bdrv_append(), which firstly do
* bdrv_set_backing_hd() and then bdrv_replace_node(). This means that
* we can't unshare BLK_PERM_WRITE during bdrv_append() operation. So,
* let's require nothing during bdrv_append() and refresh permissions
* after it (see bdrv_backup_top_append()).
*/
*nperm = 0;
*nshared = BLK_PERM_ALL;
return;
}
if (role == &child_file) {
/*
* Target child
*
* Share write to target (child_file), to not interfere
* with guest writes to its disk which may be in target backing chain.
*/
*nshared = BLK_PERM_ALL;
*nperm = BLK_PERM_WRITE;
} else {
/* Source child */
bdrv_filter_default_perms(bs, c, role, reopen_queue, perm, shared,
nperm, nshared);
if (perm & BLK_PERM_WRITE) {
*nperm = *nperm | BLK_PERM_CONSISTENT_READ;
}
*nshared &= ~BLK_PERM_WRITE;
}
}
BlockDriver bdrv_backup_top_filter = {
.format_name = "backup-top",
.instance_size = sizeof(BDRVBackupTopState),
.bdrv_co_preadv = backup_top_co_preadv,
.bdrv_co_pwritev = backup_top_co_pwritev,
.bdrv_co_pwrite_zeroes = backup_top_co_pwrite_zeroes,
.bdrv_co_pdiscard = backup_top_co_pdiscard,
.bdrv_co_flush = backup_top_co_flush,
.bdrv_co_block_status = bdrv_co_block_status_from_backing,
.bdrv_refresh_filename = backup_top_refresh_filename,
.bdrv_child_perm = backup_top_child_perm,
.is_filter = true,
};
BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
BlockDriverState *target,
const char *filter_node_name,
uint64_t cluster_size,
BdrvRequestFlags write_flags,
BlockCopyState **bcs,
Error **errp)
{
Error *local_err = NULL;
BDRVBackupTopState *state;
BlockDriverState *top = bdrv_new_open_driver(&bdrv_backup_top_filter,
filter_node_name,
BDRV_O_RDWR, errp);
if (!top) {
return NULL;
}
top->total_sectors = source->total_sectors;
top->opaque = state = g_new0(BDRVBackupTopState, 1);
bdrv_ref(target);
state->target = bdrv_attach_child(top, target, "target", &child_file, errp);
if (!state->target) {
bdrv_unref(target);
bdrv_unref(top);
return NULL;
}
bdrv_drained_begin(source);
bdrv_ref(top);
bdrv_append(top, source, &local_err);
if (local_err) {
error_prepend(&local_err, "Cannot append backup-top filter: ");
goto append_failed;
}
/*
* bdrv_append() finished successfully, now we can require permissions
* we want.
*/
state->active = true;
bdrv_child_refresh_perms(top, top->backing, &local_err);
if (local_err) {
error_prepend(&local_err,
"Cannot set permissions for backup-top filter: ");
goto failed_after_append;
}
state->bcs = block_copy_state_new(top->backing, state->target,
cluster_size, write_flags, &local_err);
if (local_err) {
error_prepend(&local_err, "Cannot create block-copy-state: ");
goto failed_after_append;
}
*bcs = state->bcs;
bdrv_drained_end(source);
return top;
failed_after_append:
state->active = false;
bdrv_backup_top_drop(top);
append_failed:
bdrv_drained_end(source);
bdrv_unref_child(top, state->target);
bdrv_unref(top);
error_propagate(errp, local_err);
return NULL;
}
void bdrv_backup_top_drop(BlockDriverState *bs)
{
BDRVBackupTopState *s = bs->opaque;
AioContext *aio_context = bdrv_get_aio_context(bs);
block_copy_state_free(s->bcs);
aio_context_acquire(aio_context);
bdrv_drained_begin(bs);
s->active = false;
bdrv_child_refresh_perms(bs, bs->backing, &error_abort);
bdrv_replace_node(bs, backing_bs(bs), &error_abort);
bdrv_set_backing_hd(bs, NULL, &error_abort);
bdrv_drained_end(bs);
bdrv_unref(bs);
aio_context_release(aio_context);
}
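
For orientation, backup_job_create() in block/backup.c (further down in this pull) is the filter's only user. Condensed to the essentials and with error handling omitted, the lifecycle looks roughly like the following sketch; cbw_example is a hypothetical wrapper, not the verbatim call site:

static BlockDriverState *cbw_example(BlockDriverState *source,
                                     BlockDriverState *target,
                                     uint64_t cluster_size, bool compress,
                                     BlockCopyState **bcs, Error **errp)
{
    /* Serialise target writes when the target is in the source's backing
     * chain (image fleecing), and compress when requested. */
    BdrvRequestFlags write_flags =
        (bdrv_chain_contains(target, source) ? BDRV_REQ_SERIALISING : 0) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    /* Inject the filter above @source; from now on every guest write first
     * copies the old data to @target through block_copy(). */
    return bdrv_backup_top_append(source, target, NULL /* auto node name */,
                                  cluster_size, write_flags, bcs, errp);
}

When the job completes, bdrv_backup_top_drop() removes the filter from the graph again and frees the BlockCopyState.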

block/backup-top.h Normal file

@@ -0,0 +1,41 @@
/*
* backup-top filter driver
*
* The driver performs Copy-Before-Write (CBW) operation: it is injected above
* some node, and before each write it copies _old_ data to the target node.
*
* Copyright (c) 2018-2019 Virtuozzo International GmbH.
*
* Author:
* Sementsov-Ogievskiy Vladimir <vsementsov@virtuozzo.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef BACKUP_TOP_H
#define BACKUP_TOP_H
#include "block/block_int.h"
#include "block/block-copy.h"
BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
BlockDriverState *target,
const char *filter_node_name,
uint64_t cluster_size,
BdrvRequestFlags write_flags,
BlockCopyState **bcs,
Error **errp);
void bdrv_backup_top_drop(BlockDriverState *bs);
#endif /* BACKUP_TOP_H */

block/backup.c

@@ -2,6 +2,7 @@
* QEMU backup
*
* Copyright (C) 2013 Proxmox Server Solutions
* Copyright (c) 2019 Virtuozzo International GmbH.
*
* Authors:
* Dietmar Maurer (dietmar@proxmox.com)
@@ -18,6 +19,7 @@
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "block/block-copy.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
@@ -26,333 +28,68 @@
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#include "block/backup-top.h"
typedef struct CowRequest {
int64_t start_byte;
int64_t end_byte;
QLIST_ENTRY(CowRequest) list;
CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
typedef struct BackupBlockJob {
BlockJob common;
BlockBackend *target;
BlockDriverState *backup_top;
BlockDriverState *source_bs;
BdrvDirtyBitmap *sync_bitmap;
BdrvDirtyBitmap *copy_bitmap;
MirrorSyncMode sync_mode;
BitmapSyncMode bitmap_mode;
BlockdevOnError on_source_error;
BlockdevOnError on_target_error;
CoRwlock flush_rwlock;
uint64_t len;
uint64_t bytes_read;
int64_t cluster_size;
NotifierWithReturn before_write;
QLIST_HEAD(, CowRequest) inflight_reqs;
bool use_copy_range;
int64_t copy_range_size;
BdrvRequestFlags write_flags;
bool initializing_bitmap;
BlockCopyState *bcs;
} BackupBlockJob;
static const BlockJobDriver backup_job_driver;
/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
int64_t start,
int64_t end)
static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
{
CowRequest *req;
bool retry;
BackupBlockJob *s = opaque;
do {
retry = false;
QLIST_FOREACH(req, &job->inflight_reqs, list) {
if (end > req->start_byte && start < req->end_byte) {
qemu_co_queue_wait(&req->wait_queue, NULL);
retry = true;
break;
}
}
} while (retry);
s->bytes_read += bytes;
job_progress_update(&s->common.job, bytes);
}
/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
int64_t start, int64_t end)
static void backup_progress_reset_callback(void *opaque)
{
req->start_byte = start;
req->end_byte = end;
qemu_co_queue_init(&req->wait_queue);
QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}
BackupBlockJob *s = opaque;
uint64_t estimate = bdrv_get_dirty_count(s->bcs->copy_bitmap);
/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
QLIST_REMOVE(req, list);
qemu_co_queue_restart_all(&req->wait_queue);
}
/* Copy range to target with a bounce buffer and return the bytes copied. If
* error occurred, return a negative error number */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
int64_t start,
int64_t end,
bool is_write_notifier,
bool *error_is_read,
void **bounce_buffer)
{
int ret;
BlockBackend *blk = job->common.blk;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(start, job->cluster_size));
bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
nbytes = MIN(job->cluster_size, job->len - start);
if (!*bounce_buffer) {
*bounce_buffer = blk_blockalign(blk, job->cluster_size);
}
ret = blk_co_pread(blk, start, nbytes, *bounce_buffer, read_flags);
if (ret < 0) {
trace_backup_do_cow_read_fail(job, start, ret);
if (error_is_read) {
*error_is_read = true;
}
goto fail;
}
ret = blk_co_pwrite(job->target, start, nbytes, *bounce_buffer,
job->write_flags);
if (ret < 0) {
trace_backup_do_cow_write_fail(job, start, ret);
if (error_is_read) {
*error_is_read = false;
}
goto fail;
}
return nbytes;
fail:
bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
return ret;
}
/* Copy range to target and return the bytes copied. If error occurred, return a
* negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
int64_t start,
int64_t end,
bool is_write_notifier)
{
int ret;
int nr_clusters;
BlockBackend *blk = job->common.blk;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
assert(QEMU_IS_ALIGNED(start, job->cluster_size));
nbytes = MIN(job->copy_range_size, end - start);
nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
job->cluster_size * nr_clusters);
ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
read_flags, job->write_flags);
if (ret < 0) {
trace_backup_do_cow_copy_range_fail(job, start, ret);
bdrv_set_dirty_bitmap(job->copy_bitmap, start,
job->cluster_size * nr_clusters);
return ret;
}
return nbytes;
}
/*
* Check if the cluster starting at offset is allocated or not.
* return via pnum the number of contiguous clusters sharing this allocation.
*/
static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
int64_t *pnum)
{
BlockDriverState *bs = blk_bs(s->common.blk);
int64_t count, total_count = 0;
int64_t bytes = s->len - offset;
int ret;
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
while (true) {
ret = bdrv_is_allocated(bs, offset, bytes, &count);
if (ret < 0) {
return ret;
}
total_count += count;
if (ret || count == 0) {
/*
* ret: partial segment(s) are considered allocated.
* otherwise: unallocated tail is treated as an entire segment.
*/
*pnum = DIV_ROUND_UP(total_count, s->cluster_size);
return ret;
}
/* Unallocated segment(s) with uncertain following segment(s) */
if (total_count >= s->cluster_size) {
*pnum = total_count / s->cluster_size;
return 0;
}
offset += count;
bytes -= count;
}
}
/**
* Reset bits in copy_bitmap starting at offset if they represent unallocated
* data in the image. May reset subsequent contiguous bits.
* @return 0 when the cluster at @offset was unallocated,
* 1 otherwise, and -ret on error.
*/
static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
int64_t offset, int64_t *count)
{
int ret;
int64_t clusters, bytes, estimate;
ret = backup_is_cluster_allocated(s, offset, &clusters);
if (ret < 0) {
return ret;
}
bytes = clusters * s->cluster_size;
if (!ret) {
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
estimate = bdrv_get_dirty_count(s->copy_bitmap);
job_progress_set_remaining(&s->common.job, estimate);
}
*count = bytes;
return ret;
job_progress_set_remaining(&s->common.job, estimate);
}
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
int64_t offset, uint64_t bytes,
bool *error_is_read,
bool is_write_notifier)
bool *error_is_read)
{
CowRequest cow_request;
int ret = 0;
int64_t start, end; /* bytes */
void *bounce_buffer = NULL;
int64_t status_bytes;
qemu_co_rwlock_rdlock(&job->flush_rwlock);
start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
trace_backup_do_cow_enter(job, start, offset, bytes);
wait_for_overlapping_requests(job, start, end);
cow_request_begin(&cow_request, job, start, end);
while (start < end) {
int64_t dirty_end;
if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
trace_backup_do_cow_skip(job, start);
start += job->cluster_size;
continue; /* already copied */
}
dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
(end - start));
if (dirty_end < 0) {
dirty_end = end;
}
if (job->initializing_bitmap) {
ret = backup_bitmap_reset_unallocated(job, start, &status_bytes);
if (ret == 0) {
trace_backup_do_cow_skip_range(job, start, status_bytes);
start += status_bytes;
continue;
}
/* Clamp to known allocated region */
dirty_end = MIN(dirty_end, start + status_bytes);
}
trace_backup_do_cow_process(job, start);
if (job->use_copy_range) {
ret = backup_cow_with_offload(job, start, dirty_end,
is_write_notifier);
if (ret < 0) {
job->use_copy_range = false;
}
}
if (!job->use_copy_range) {
ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
is_write_notifier,
error_is_read, &bounce_buffer);
}
if (ret < 0) {
break;
}
/* Publish progress, guest I/O counts as progress too. Note that the
* offset field is an opaque progress value, it is not a disk offset.
*/
start += ret;
job->bytes_read += ret;
job_progress_update(&job->common.job, ret);
ret = 0;
}
if (bounce_buffer) {
qemu_vfree(bounce_buffer);
}
cow_request_end(&cow_request);
ret = block_copy(job->bcs, start, end - start, error_is_read);
trace_backup_do_cow_return(job, offset, bytes, ret);
qemu_co_rwlock_unlock(&job->flush_rwlock);
return ret;
}
static int coroutine_fn backup_before_write_notify(
NotifierWithReturn *notifier,
void *opaque)
{
BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
BdrvTrackedRequest *req = opaque;
assert(req->bs == blk_bs(job->common.blk));
assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));
return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
BdrvDirtyBitmap *bm;
BlockDriverState *bs = blk_bs(job->common.blk);
bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS)) \
&& (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));
@@ -361,20 +98,20 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
* We succeeded, or we always intended to sync the bitmap.
* Delete this bitmap and install the child.
*/
bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
bm = bdrv_dirty_bitmap_abdicate(job->source_bs, job->sync_bitmap, NULL);
} else {
/*
* We failed, or we never intended to sync the bitmap anyway.
* Merge the successor back into the parent, keeping all data.
*/
bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
bm = bdrv_reclaim_dirty_bitmap(job->source_bs, job->sync_bitmap, NULL);
}
assert(bm);
if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
/* If we failed and synced, merge in the bits we didn't copy: */
bdrv_dirty_bitmap_merge_internal(bm, job->copy_bitmap,
bdrv_dirty_bitmap_merge_internal(bm, job->bcs->copy_bitmap,
NULL, true);
}
}
@@ -398,16 +135,8 @@ static void backup_abort(Job *job)
static void backup_clean(Job *job)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
BlockDriverState *bs = blk_bs(s->common.blk);
if (s->copy_bitmap) {
bdrv_release_dirty_bitmap(bs, s->copy_bitmap);
s->copy_bitmap = NULL;
}
assert(s->target);
blk_unref(s->target);
s->target = NULL;
bdrv_backup_top_drop(s->backup_top);
}
void backup_do_checkpoint(BlockJob *job, Error **errp)
@@ -422,7 +151,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
return;
}
bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
bdrv_set_dirty_bitmap(backup_job->bcs->copy_bitmap, 0, backup_job->len);
}
static BlockErrorAction backup_error_action(BackupBlockJob *job,
@@ -445,8 +174,10 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
return true;
}
/* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
* return. Without a yield, the VM would not reboot. */
/*
* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
* return. Without a yield, the VM would not reboot.
*/
delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
job->bytes_read = 0;
job_sleep_ns(&job->common.job, delay_ns);
@@ -465,14 +196,13 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
BdrvDirtyBitmapIter *bdbi;
int ret = 0;
bdbi = bdrv_dirty_iter_new(job->copy_bitmap);
bdbi = bdrv_dirty_iter_new(job->bcs->copy_bitmap);
while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
do {
if (yield_and_check(job)) {
goto out;
}
ret = backup_do_cow(job, offset,
job->cluster_size, &error_is_read, false);
ret = backup_do_cow(job, offset, job->cluster_size, &error_is_read);
if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
BLOCK_ERROR_ACTION_REPORT)
{
@@ -492,7 +222,7 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
uint64_t estimate;
if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
ret = bdrv_dirty_bitmap_merge_internal(job->copy_bitmap,
ret = bdrv_dirty_bitmap_merge_internal(job->bcs->copy_bitmap,
job->sync_bitmap,
NULL, true);
assert(ret);
@@ -502,29 +232,22 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
* We can't hog the coroutine to initialize this thoroughly.
* Set a flag and resume work when we are able to yield safely.
*/
job->initializing_bitmap = true;
job->bcs->skip_unallocated = true;
}
bdrv_set_dirty_bitmap(job->copy_bitmap, 0, job->len);
bdrv_set_dirty_bitmap(job->bcs->copy_bitmap, 0, job->len);
}
estimate = bdrv_get_dirty_count(job->copy_bitmap);
estimate = bdrv_get_dirty_count(job->bcs->copy_bitmap);
job_progress_set_remaining(&job->common.job, estimate);
}
static int coroutine_fn backup_run(Job *job, Error **errp)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
BlockDriverState *bs = blk_bs(s->common.blk);
int ret = 0;
QLIST_INIT(&s->inflight_reqs);
qemu_co_rwlock_init(&s->flush_rwlock);
backup_init_copy_bitmap(s);
s->before_write.notify = backup_before_write_notify;
bdrv_add_before_write_notifier(bs, &s->before_write);
if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
int64_t offset = 0;
int64_t count;
@@ -535,22 +258,26 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
goto out;
}
ret = backup_bitmap_reset_unallocated(s, offset, &count);
ret = block_copy_reset_unallocated(s->bcs, offset, &count);
if (ret < 0) {
goto out;
}
offset += count;
}
s->initializing_bitmap = false;
s->bcs->skip_unallocated = false;
}
if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
/* All bits are set in copy_bitmap to allow any cluster to be copied.
* This does not actually require them to be copied. */
/*
* All bits are set in copy_bitmap to allow any cluster to be copied.
* This does not actually require them to be copied.
*/
while (!job_is_cancelled(job)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
/*
* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests.
*/
job_yield(job);
}
} else {
@@ -558,12 +285,6 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
}
out:
notifier_with_return_remove(&s->before_write);
/* wait until pending backup_do_cow() calls have completed */
qemu_co_rwlock_wrlock(&s->flush_rwlock);
qemu_co_rwlock_unlock(&s->flush_rwlock);
return ret;
}
@@ -621,6 +342,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
BitmapSyncMode bitmap_mode,
bool compress,
const char *filter_node_name,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
int creation_flags,
@@ -629,9 +351,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
{
int64_t len;
BackupBlockJob *job = NULL;
int ret;
int64_t cluster_size;
BdrvDirtyBitmap *copy_bitmap = NULL;
BdrvRequestFlags write_flags;
BlockDriverState *backup_top = NULL;
BlockCopyState *bcs = NULL;
assert(bs);
assert(target);
@@ -696,76 +419,66 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
goto error;
}
copy_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
if (!copy_bitmap) {
/*
* If source is in backing chain of target assume that target is going to be
* used for "image fleecing", i.e. it should represent a kind of snapshot of
* source at backup-start point in time. And target is going to be read by
* somebody (for example, used as NBD export) during backup job.
*
* In this case, we need to add BDRV_REQ_SERIALISING write flag to avoid
* intersection of backup writes and third party reads from target,
* otherwise reading from target we may occasionally read already updated by
* guest data.
*
* For more information see commit f8d59dfb40bb and test
* tests/qemu-iotests/222
*/
write_flags = (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
(compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
backup_top = bdrv_backup_top_append(bs, target, filter_node_name,
cluster_size, write_flags, &bcs, errp);
if (!backup_top) {
goto error;
}
bdrv_disable_dirty_bitmap(copy_bitmap);
/* job->len is fixed, so we can't allow resize */
job = block_job_create(job_id, &backup_job_driver, txn, bs,
BLK_PERM_CONSISTENT_READ,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
job = block_job_create(job_id, &backup_job_driver, txn, backup_top,
0, BLK_PERM_ALL,
speed, creation_flags, cb, opaque, errp);
if (!job) {
goto error;
}
/* The target must match the source in size, so no resize here either */
job->target = blk_new(job->common.job.aio_context,
BLK_PERM_WRITE,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
ret = blk_insert_bs(job->target, target, errp);
if (ret < 0) {
goto error;
}
blk_set_disable_request_queuing(job->target, true);
job->backup_top = backup_top;
job->source_bs = bs;
job->on_source_error = on_source_error;
job->on_target_error = on_target_error;
job->sync_mode = sync_mode;
job->sync_bitmap = sync_bitmap;
job->bitmap_mode = bitmap_mode;
/*
* Set write flags:
* 1. Detect image-fleecing (and similar) schemes
* 2. Handle compression
*/
job->write_flags =
(bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
(compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
job->bcs = bcs;
job->cluster_size = cluster_size;
job->copy_bitmap = copy_bitmap;
copy_bitmap = NULL;
job->use_copy_range = !compress; /* compression isn't supported for it */
job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
blk_get_max_transfer(job->target));
job->copy_range_size = MAX(job->cluster_size,
QEMU_ALIGN_UP(job->copy_range_size,
job->cluster_size));
job->len = len;
/* Required permissions are already taken with target's blk_new() */
block_copy_set_callbacks(bcs, backup_progress_bytes_callback,
backup_progress_reset_callback, job);
/* Required permissions are already taken by backup-top target */
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
job->len = len;
return &job->common;
error:
if (copy_bitmap) {
assert(!job || !job->copy_bitmap);
bdrv_release_dirty_bitmap(bs, copy_bitmap);
}
if (sync_bitmap) {
bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
}
if (job) {
backup_clean(&job->common.job);
job_early_fail(&job->common.job);
} else if (backup_top) {
bdrv_backup_top_drop(backup_top);
}
return NULL;

block/block-copy.c Normal file

@@ -0,0 +1,345 @@
/*
* block_copy API
*
* Copyright (C) 2013 Proxmox Server Solutions
* Copyright (c) 2019 Virtuozzo International GmbH.
*
* Authors:
* Dietmar Maurer (dietmar@proxmox.com)
* Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
int64_t start,
int64_t end)
{
BlockCopyInFlightReq *req;
bool waited;
do {
waited = false;
QLIST_FOREACH(req, &s->inflight_reqs, list) {
if (end > req->start_byte && start < req->end_byte) {
qemu_co_queue_wait(&req->wait_queue, NULL);
waited = true;
break;
}
}
} while (waited);
}
static void block_copy_inflight_req_begin(BlockCopyState *s,
BlockCopyInFlightReq *req,
int64_t start, int64_t end)
{
req->start_byte = start;
req->end_byte = end;
qemu_co_queue_init(&req->wait_queue);
QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
}
static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
{
QLIST_REMOVE(req, list);
qemu_co_queue_restart_all(&req->wait_queue);
}
void block_copy_state_free(BlockCopyState *s)
{
if (!s) {
return;
}
bdrv_release_dirty_bitmap(s->source->bs, s->copy_bitmap);
g_free(s);
}
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
int64_t cluster_size,
BdrvRequestFlags write_flags, Error **errp)
{
BlockCopyState *s;
BdrvDirtyBitmap *copy_bitmap;
uint32_t max_transfer =
MIN_NON_ZERO(INT_MAX, MIN_NON_ZERO(source->bs->bl.max_transfer,
target->bs->bl.max_transfer));
copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
errp);
if (!copy_bitmap) {
return NULL;
}
bdrv_disable_dirty_bitmap(copy_bitmap);
s = g_new(BlockCopyState, 1);
*s = (BlockCopyState) {
.source = source,
.target = target,
.copy_bitmap = copy_bitmap,
.cluster_size = cluster_size,
.len = bdrv_dirty_bitmap_size(copy_bitmap),
.write_flags = write_flags,
};
s->copy_range_size = QEMU_ALIGN_DOWN(max_transfer, cluster_size),
/*
* Set use_copy_range, consider the following:
* 1. Compression is not supported for copy_range.
* 2. copy_range does not respect max_transfer (it's a TODO), so we factor
* that in here. If max_transfer is smaller than the job->cluster_size,
* we do not use copy_range (in that case it's zero after aligning down
* above).
*/
s->use_copy_range =
!(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;
QLIST_INIT(&s->inflight_reqs);
return s;
}
void block_copy_set_callbacks(
BlockCopyState *s,
ProgressBytesCallbackFunc progress_bytes_callback,
ProgressResetCallbackFunc progress_reset_callback,
void *progress_opaque)
{
s->progress_bytes_callback = progress_bytes_callback;
s->progress_reset_callback = progress_reset_callback;
s->progress_opaque = progress_opaque;
}
/*
* Copy range to target with a bounce buffer and return the bytes copied. If
* error occurred, return a negative error number
*/
static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
int64_t start,
int64_t end,
bool *error_is_read,
void **bounce_buffer)
{
int ret;
int nbytes;
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
nbytes = MIN(s->cluster_size, s->len - start);
if (!*bounce_buffer) {
*bounce_buffer = qemu_blockalign(s->source->bs, s->cluster_size);
}
ret = bdrv_co_pread(s->source, start, nbytes, *bounce_buffer, 0);
if (ret < 0) {
trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
if (error_is_read) {
*error_is_read = true;
}
goto fail;
}
ret = bdrv_co_pwrite(s->target, start, nbytes, *bounce_buffer,
s->write_flags);
if (ret < 0) {
trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
if (error_is_read) {
*error_is_read = false;
}
goto fail;
}
return nbytes;
fail:
bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
return ret;
}
/*
* Copy range to target and return the bytes copied. If error occurred, return a
* negative error number.
*/
static int coroutine_fn block_copy_with_offload(BlockCopyState *s,
int64_t start,
int64_t end)
{
int ret;
int nr_clusters;
int nbytes;
assert(QEMU_IS_ALIGNED(s->copy_range_size, s->cluster_size));
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
nbytes = MIN(s->copy_range_size, MIN(end, s->len) - start);
nr_clusters = DIV_ROUND_UP(nbytes, s->cluster_size);
bdrv_reset_dirty_bitmap(s->copy_bitmap, start,
s->cluster_size * nr_clusters);
ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
0, s->write_flags);
if (ret < 0) {
trace_block_copy_with_offload_fail(s, start, ret);
bdrv_set_dirty_bitmap(s->copy_bitmap, start,
s->cluster_size * nr_clusters);
return ret;
}
return nbytes;
}
/*
* Check if the cluster starting at offset is allocated or not.
* return via pnum the number of contiguous clusters sharing this allocation.
*/
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
int64_t *pnum)
{
BlockDriverState *bs = s->source->bs;
int64_t count, total_count = 0;
int64_t bytes = s->len - offset;
int ret;
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
while (true) {
ret = bdrv_is_allocated(bs, offset, bytes, &count);
if (ret < 0) {
return ret;
}
total_count += count;
if (ret || count == 0) {
/*
* ret: partial segment(s) are considered allocated.
* otherwise: unallocated tail is treated as an entire segment.
*/
*pnum = DIV_ROUND_UP(total_count, s->cluster_size);
return ret;
}
/* Unallocated segment(s) with uncertain following segment(s) */
if (total_count >= s->cluster_size) {
*pnum = total_count / s->cluster_size;
return 0;
}
offset += count;
bytes -= count;
}
}
/*
* Reset bits in copy_bitmap starting at offset if they represent unallocated
* data in the image. May reset subsequent contiguous bits.
* @return 0 when the cluster at @offset was unallocated,
* 1 otherwise, and -ret on error.
*/
int64_t block_copy_reset_unallocated(BlockCopyState *s,
int64_t offset, int64_t *count)
{
int ret;
int64_t clusters, bytes;
ret = block_copy_is_cluster_allocated(s, offset, &clusters);
if (ret < 0) {
return ret;
}
bytes = clusters * s->cluster_size;
if (!ret) {
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
s->progress_reset_callback(s->progress_opaque);
}
*count = bytes;
return ret;
}
int coroutine_fn block_copy(BlockCopyState *s,
int64_t start, uint64_t bytes,
bool *error_is_read)
{
int ret = 0;
int64_t end = bytes + start; /* bytes */
void *bounce_buffer = NULL;
int64_t status_bytes;
BlockCopyInFlightReq req;
/*
* block_copy() user is responsible for keeping source and target in same
* aio context
*/
assert(bdrv_get_aio_context(s->source->bs) ==
bdrv_get_aio_context(s->target->bs));
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
assert(QEMU_IS_ALIGNED(end, s->cluster_size));
block_copy_wait_inflight_reqs(s, start, bytes);
block_copy_inflight_req_begin(s, &req, start, end);
while (start < end) {
int64_t dirty_end;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
trace_block_copy_skip(s, start);
start += s->cluster_size;
continue; /* already copied */
}
dirty_end = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
(end - start));
if (dirty_end < 0) {
dirty_end = end;
}
if (s->skip_unallocated) {
ret = block_copy_reset_unallocated(s, start, &status_bytes);
if (ret == 0) {
trace_block_copy_skip_range(s, start, status_bytes);
start += status_bytes;
continue;
}
/* Clamp to known allocated region */
dirty_end = MIN(dirty_end, start + status_bytes);
}
trace_block_copy_process(s, start);
if (s->use_copy_range) {
ret = block_copy_with_offload(s, start, dirty_end);
if (ret < 0) {
s->use_copy_range = false;
}
}
if (!s->use_copy_range) {
ret = block_copy_with_bounce_buffer(s, start, dirty_end,
error_is_read, &bounce_buffer);
}
if (ret < 0) {
break;
}
start += ret;
s->progress_bytes_callback(ret, s->progress_opaque);
ret = 0;
}
if (bounce_buffer) {
qemu_vfree(bounce_buffer);
}
block_copy_inflight_req_end(&req);
return ret;
}
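
Taken together, a block-copy user creates the state once for a source/target pair of BdrvChild, registers progress callbacks, dirties the ranges it wants copied in copy_bitmap, and calls block_copy() from a coroutine. A minimal sketch under those assumptions; report_bytes, report_reset and copy_whole_device are hypothetical names (backup-top and block/backup.c are the real in-tree users):

static void report_bytes(int64_t bytes, void *opaque)
{
    /* e.g. feed job_progress_update() */
}

static void report_reset(void *opaque)
{
    /* e.g. recompute the progress estimate from the copy bitmap */
}

/* Must run in a coroutine; the caller keeps @source and @target in the same
 * AioContext, as block_copy() asserts. */
static int coroutine_fn copy_whole_device(BdrvChild *source, BdrvChild *target,
                                          int64_t cluster_size, Error **errp)
{
    int ret;
    int64_t len;
    BlockCopyState *bcs = block_copy_state_new(source, target, cluster_size,
                                               0 /* write_flags */, errp);

    if (!bcs) {
        return -EINVAL;
    }
    block_copy_set_callbacks(bcs, report_bytes, report_reset, NULL);

    /* The copy bitmap starts out clear; mark everything as needing a copy. */
    len = bdrv_dirty_bitmap_size(bcs->copy_bitmap);
    bdrv_set_dirty_bitmap(bcs->copy_bitmap, 0, len);

    ret = block_copy(bcs, 0, QEMU_ALIGN_UP(len, cluster_size), NULL);
    block_copy_state_free(bcs);
    return ret;
}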

block/file-posix.c

@@ -161,6 +161,11 @@ typedef struct BDRVRawState {
bool needs_alignment;
bool drop_cache;
bool check_cache_dropped;
struct {
uint64_t discard_nb_ok;
uint64_t discard_nb_failed;
uint64_t discard_bytes_ok;
} stats;
PRManager *pr_mgr;
} BDRVRawState;
@@ -2660,11 +2665,22 @@ static void coroutine_fn raw_co_invalidate_cache(BlockDriverState *bs,
#endif /* !__linux__ */
}
static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
{
if (ret) {
s->stats.discard_nb_failed++;
} else {
s->stats.discard_nb_ok++;
s->stats.discard_bytes_ok += nbytes;
}
}
static coroutine_fn int
raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev)
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData acb;
int ret;
acb = (RawPosixAIOData) {
.bs = bs,
@@ -2678,7 +2694,9 @@ raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev)
acb.aio_type |= QEMU_AIO_BLKDEV;
}
return raw_thread_pool_submit(bs, handle_aiocb_discard, &acb);
ret = raw_thread_pool_submit(bs, handle_aiocb_discard, &acb);
raw_account_discard(s, bytes, ret);
return ret;
}
static coroutine_fn int
@@ -2735,6 +2753,36 @@ static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
return 0;
}
static BlockStatsSpecificFile get_blockstats_specific_file(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
return (BlockStatsSpecificFile) {
.discard_nb_ok = s->stats.discard_nb_ok,
.discard_nb_failed = s->stats.discard_nb_failed,
.discard_bytes_ok = s->stats.discard_bytes_ok,
};
}
static BlockStatsSpecific *raw_get_specific_stats(BlockDriverState *bs)
{
BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
stats->driver = BLOCKDEV_DRIVER_FILE;
stats->u.file = get_blockstats_specific_file(bs);
return stats;
}
static BlockStatsSpecific *hdev_get_specific_stats(BlockDriverState *bs)
{
BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
stats->driver = BLOCKDEV_DRIVER_HOST_DEVICE;
stats->u.host_device = get_blockstats_specific_file(bs);
return stats;
}
static QemuOptsList raw_create_opts = {
.name = "raw-create-opts",
.head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head),
@@ -2942,6 +2990,7 @@ BlockDriver bdrv_file = {
.bdrv_get_info = raw_get_info,
.bdrv_get_allocated_file_size
= raw_get_allocated_file_size,
.bdrv_get_specific_stats = raw_get_specific_stats,
.bdrv_check_perm = raw_check_perm,
.bdrv_set_perm = raw_set_perm,
.bdrv_abort_perm_update = raw_abort_perm_update,
@@ -3301,10 +3350,12 @@ static int fd_open(BlockDriverState *bs)
static coroutine_fn int
hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
BDRVRawState *s = bs->opaque;
int ret;
ret = fd_open(bs);
if (ret < 0) {
raw_account_discard(s, bytes, ret);
return ret;
}
return raw_do_pdiscard(bs, offset, bytes, true);
@@ -3418,6 +3469,7 @@ static BlockDriver bdrv_host_device = {
.bdrv_get_info = raw_get_info,
.bdrv_get_allocated_file_size
= raw_get_allocated_file_size,
.bdrv_get_specific_stats = hdev_get_specific_stats,
.bdrv_check_perm = raw_check_perm,
.bdrv_set_perm = raw_set_perm,
.bdrv_abort_perm_update = raw_abort_perm_update,

block/nbd.c

@@ -1158,6 +1158,18 @@ static int coroutine_fn nbd_client_co_block_status(
BDRV_BLOCK_OFFSET_VALID;
}
static int nbd_client_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
{
BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;
if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
error_setg(errp, "Can't reopen read-only NBD mount as read/write");
return -EACCES;
}
return 0;
}
static void nbd_client_close(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
@@ -1798,6 +1810,7 @@ static BlockDriver bdrv_nbd = {
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
.bdrv_reopen_prepare = nbd_client_reopen_prepare,
.bdrv_co_preadv = nbd_client_co_preadv,
.bdrv_co_pwritev = nbd_client_co_pwritev,
.bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
@@ -1820,6 +1833,7 @@ static BlockDriver bdrv_nbd_tcp = {
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
.bdrv_reopen_prepare = nbd_client_reopen_prepare,
.bdrv_co_preadv = nbd_client_co_preadv,
.bdrv_co_pwritev = nbd_client_co_pwritev,
.bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
@@ -1842,6 +1856,7 @@ static BlockDriver bdrv_nbd_unix = {
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
.bdrv_reopen_prepare = nbd_client_reopen_prepare,
.bdrv_co_preadv = nbd_client_co_preadv,
.bdrv_co_pwritev = nbd_client_co_pwritev,
.bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,

block/qapi.c

@@ -440,24 +440,30 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];
ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];
ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
ds->invalid_flush_operations =
stats->invalid_ops[BLOCK_ACCT_FLUSH];
ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];
ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];
ds->has_idle_time_ns = stats->last_access_time_ns > 0;
if (ds->has_idle_time_ns) {
@@ -537,6 +543,11 @@ static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);
s->driver_specific = bdrv_get_specific_stats(bs);
if (s->driver_specific) {
s->has_driver_specific = true;
}
if (bs->file) {
s->has_parent = true;
s->parent = bdrv_query_bds_stats(bs->file->bs, blk_level);

block/qcow2.c

@@ -41,6 +41,7 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "crypto.h"
#include "block/aio_task.h"
/*
Differences with QCOW:
@@ -1972,20 +1973,184 @@ out:
return ret;
}
static coroutine_fn int
qcow2_co_preadv_encrypted(BlockDriverState *bs,
uint64_t file_cluster_offset,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
uint64_t qiov_offset)
{
int ret;
BDRVQcow2State *s = bs->opaque;
uint8_t *buf;
assert(bs->encrypted && s->crypto);
assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
/*
* For encrypted images, read everything into a temporary
* contiguous buffer on which the AES functions can work.
* Also, decryption in a separate buffer is better as it
* prevents the guest from learning information about the
* encrypted nature of the virtual disk.
*/
buf = qemu_try_blockalign(s->data_file->bs, bytes);
if (buf == NULL) {
return -ENOMEM;
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_pread(s->data_file,
file_cluster_offset + offset_into_cluster(s, offset),
bytes, buf, 0);
if (ret < 0) {
goto fail;
}
assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
if (qcow2_co_decrypt(bs,
file_cluster_offset + offset_into_cluster(s, offset),
offset, buf, bytes) < 0)
{
ret = -EIO;
goto fail;
}
qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes);
fail:
qemu_vfree(buf);
return ret;
}
typedef struct Qcow2AioTask {
AioTask task;
BlockDriverState *bs;
QCow2ClusterType cluster_type; /* only for read */
uint64_t file_cluster_offset;
uint64_t offset;
uint64_t bytes;
QEMUIOVector *qiov;
uint64_t qiov_offset;
QCowL2Meta *l2meta; /* only for write */
} Qcow2AioTask;
static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task);
static coroutine_fn int qcow2_add_task(BlockDriverState *bs,
AioTaskPool *pool,
AioTaskFunc func,
QCow2ClusterType cluster_type,
uint64_t file_cluster_offset,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset,
QCowL2Meta *l2meta)
{
Qcow2AioTask local_task;
Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task;
*task = (Qcow2AioTask) {
.task.func = func,
.bs = bs,
.cluster_type = cluster_type,
.qiov = qiov,
.file_cluster_offset = file_cluster_offset,
.offset = offset,
.bytes = bytes,
.qiov_offset = qiov_offset,
.l2meta = l2meta,
};
trace_qcow2_add_task(qemu_coroutine_self(), bs, pool,
func == qcow2_co_preadv_task_entry ? "read" : "write",
cluster_type, file_cluster_offset, offset, bytes,
qiov, qiov_offset);
if (!pool) {
return func(&task->task);
}
aio_task_pool_start_task(pool, &task->task);
return 0;
}
static coroutine_fn int qcow2_co_preadv_task(BlockDriverState *bs,
QCow2ClusterType cluster_type,
uint64_t file_cluster_offset,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset)
{
BDRVQcow2State *s = bs->opaque;
int offset_in_cluster = offset_into_cluster(s, offset);
switch (cluster_type) {
case QCOW2_CLUSTER_ZERO_PLAIN:
case QCOW2_CLUSTER_ZERO_ALLOC:
/* Both zero types are handled in qcow2_co_preadv_part */
g_assert_not_reached();
case QCOW2_CLUSTER_UNALLOCATED:
assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
return bdrv_co_preadv_part(bs->backing, offset, bytes,
qiov, qiov_offset, 0);
case QCOW2_CLUSTER_COMPRESSED:
return qcow2_co_preadv_compressed(bs, file_cluster_offset,
offset, bytes, qiov, qiov_offset);
case QCOW2_CLUSTER_NORMAL:
if ((file_cluster_offset & 511) != 0) {
return -EIO;
}
if (bs->encrypted) {
return qcow2_co_preadv_encrypted(bs, file_cluster_offset,
offset, bytes, qiov, qiov_offset);
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
return bdrv_co_preadv_part(s->data_file,
file_cluster_offset + offset_in_cluster,
bytes, qiov, qiov_offset, 0);
default:
g_assert_not_reached();
}
g_assert_not_reached();
}
static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task)
{
Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
assert(!t->l2meta);
return qcow2_co_preadv_task(t->bs, t->cluster_type, t->file_cluster_offset,
t->offset, t->bytes, t->qiov, t->qiov_offset);
}
static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset, int flags)
{
BDRVQcow2State *s = bs->opaque;
int offset_in_cluster;
int ret;
int ret = 0;
unsigned int cur_bytes; /* number of bytes in current iteration */
uint64_t cluster_offset = 0;
uint8_t *cluster_data = NULL;
while (bytes != 0) {
AioTaskPool *aio = NULL;
while (bytes != 0 && aio_task_pool_status(aio) == 0) {
/* prepare next request */
cur_bytes = MIN(bytes, INT_MAX);
if (s->crypto) {
@@ -1997,110 +2162,39 @@ static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs,
ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset);
qemu_co_mutex_unlock(&s->lock);
if (ret < 0) {
goto fail;
goto out;
}
offset_in_cluster = offset_into_cluster(s, offset);
switch (ret) {
case QCOW2_CLUSTER_UNALLOCATED:
if (bs->backing) {
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
ret = bdrv_co_preadv_part(bs->backing, offset, cur_bytes,
qiov, qiov_offset, 0);
if (ret < 0) {
goto fail;
}
} else {
/* Note: in this case, no need to wait */
qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes);
}
break;
case QCOW2_CLUSTER_ZERO_PLAIN:
case QCOW2_CLUSTER_ZERO_ALLOC:
if (ret == QCOW2_CLUSTER_ZERO_PLAIN ||
ret == QCOW2_CLUSTER_ZERO_ALLOC ||
(ret == QCOW2_CLUSTER_UNALLOCATED && !bs->backing))
{
qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes);
break;
case QCOW2_CLUSTER_COMPRESSED:
ret = qcow2_co_preadv_compressed(bs, cluster_offset,
offset, cur_bytes,
qiov, qiov_offset);
} else {
if (!aio && cur_bytes != bytes) {
aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
}
ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, ret,
cluster_offset, offset, cur_bytes,
qiov, qiov_offset, NULL);
if (ret < 0) {
goto fail;
goto out;
}
break;
case QCOW2_CLUSTER_NORMAL:
if ((cluster_offset & 511) != 0) {
ret = -EIO;
goto fail;
}
if (bs->encrypted) {
assert(s->crypto);
/*
* For encrypted images, read everything into a temporary
* contiguous buffer on which the AES functions can work.
*/
if (!cluster_data) {
cluster_data =
qemu_try_blockalign(s->data_file->bs,
QCOW_MAX_CRYPT_CLUSTERS
* s->cluster_size);
if (cluster_data == NULL) {
ret = -ENOMEM;
goto fail;
}
}
assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_pread(s->data_file,
cluster_offset + offset_in_cluster,
cur_bytes, cluster_data, 0);
if (ret < 0) {
goto fail;
}
assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
assert(QEMU_IS_ALIGNED(cur_bytes, BDRV_SECTOR_SIZE));
if (qcow2_co_decrypt(bs, cluster_offset + offset_in_cluster,
offset,
cluster_data, cur_bytes) < 0) {
ret = -EIO;
goto fail;
}
qemu_iovec_from_buf(qiov, qiov_offset, cluster_data, cur_bytes);
} else {
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_preadv_part(s->data_file,
cluster_offset + offset_in_cluster,
cur_bytes, qiov, qiov_offset, 0);
if (ret < 0) {
goto fail;
}
}
break;
default:
g_assert_not_reached();
ret = -EIO;
goto fail;
}
bytes -= cur_bytes;
offset += cur_bytes;
qiov_offset += cur_bytes;
}
ret = 0;
fail:
qemu_vfree(cluster_data);
out:
if (aio) {
aio_task_pool_wait_all(aio);
if (ret == 0) {
ret = aio_task_pool_status(aio);
}
g_free(aio);
}
return ret;
}
@@ -2225,6 +2319,99 @@ static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
return 0;
}
/*
* qcow2_co_pwritev_task
* Called with s->lock unlocked
* l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. Caller must
* not use it somehow after qcow2_co_pwritev_task() call
*/
static coroutine_fn int qcow2_co_pwritev_task(BlockDriverState *bs,
uint64_t file_cluster_offset,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov,
uint64_t qiov_offset,
QCowL2Meta *l2meta)
{
int ret;
BDRVQcow2State *s = bs->opaque;
void *crypt_buf = NULL;
int offset_in_cluster = offset_into_cluster(s, offset);
QEMUIOVector encrypted_qiov;
if (bs->encrypted) {
assert(s->crypto);
assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
crypt_buf = qemu_try_blockalign(bs->file->bs, bytes);
if (crypt_buf == NULL) {
ret = -ENOMEM;
goto out_unlocked;
}
qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes);
if (qcow2_co_encrypt(bs, file_cluster_offset + offset_in_cluster,
offset, crypt_buf, bytes) < 0)
{
ret = -EIO;
goto out_unlocked;
}
qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes);
qiov = &encrypted_qiov;
qiov_offset = 0;
}
/* Try to efficiently initialize the physical space with zeroes */
ret = handle_alloc_space(bs, l2meta);
if (ret < 0) {
goto out_unlocked;
}
/*
* If we need to do COW, check if it's possible to merge the
* writing of the guest data together with that of the COW regions.
* If it's not possible (or not necessary) then write the
* guest data now.
*/
if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) {
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(),
file_cluster_offset + offset_in_cluster);
ret = bdrv_co_pwritev_part(s->data_file,
file_cluster_offset + offset_in_cluster,
bytes, qiov, qiov_offset, 0);
if (ret < 0) {
goto out_unlocked;
}
}
qemu_co_mutex_lock(&s->lock);
ret = qcow2_handle_l2meta(bs, &l2meta, true);
goto out_locked;
out_unlocked:
qemu_co_mutex_lock(&s->lock);
out_locked:
qcow2_handle_l2meta(bs, &l2meta, false);
qemu_co_mutex_unlock(&s->lock);
qemu_vfree(crypt_buf);
return ret;
}
static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task)
{
Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
assert(!t->cluster_type);
return qcow2_co_pwritev_task(t->bs, t->file_cluster_offset,
t->offset, t->bytes, t->qiov, t->qiov_offset,
t->l2meta);
}
static coroutine_fn int qcow2_co_pwritev_part(
BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset, int flags)
@ -2234,16 +2421,12 @@ static coroutine_fn int qcow2_co_pwritev_part(
int ret;
unsigned int cur_bytes; /* number of bytes in current iteration */
uint64_t cluster_offset;
QEMUIOVector encrypted_qiov;
uint64_t bytes_done = 0;
uint8_t *cluster_data = NULL;
QCowL2Meta *l2meta = NULL;
AioTaskPool *aio = NULL;
trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);
qemu_co_mutex_lock(&s->lock);
while (bytes != 0) {
while (bytes != 0 && aio_task_pool_status(aio) == 0) {
l2meta = NULL;
@ -2256,6 +2439,8 @@ static coroutine_fn int qcow2_co_pwritev_part(
- offset_in_cluster);
}
qemu_co_mutex_lock(&s->lock);
ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
&cluster_offset, &l2meta);
if (ret < 0) {
@ -2273,73 +2458,24 @@ static coroutine_fn int qcow2_co_pwritev_part(
qemu_co_mutex_unlock(&s->lock);
if (bs->encrypted) {
assert(s->crypto);
if (!cluster_data) {
cluster_data = qemu_try_blockalign(bs->file->bs,
QCOW_MAX_CRYPT_CLUSTERS
* s->cluster_size);
if (cluster_data == NULL) {
ret = -ENOMEM;
goto out_unlocked;
}
}
assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
qemu_iovec_to_buf(qiov, qiov_offset + bytes_done,
cluster_data, cur_bytes);
if (qcow2_co_encrypt(bs, cluster_offset + offset_in_cluster, offset,
cluster_data, cur_bytes) < 0) {
ret = -EIO;
goto out_unlocked;
}
qemu_iovec_init_buf(&encrypted_qiov, cluster_data, cur_bytes);
if (!aio && cur_bytes != bytes) {
aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
}
/* Try to efficiently initialize the physical space with zeroes */
ret = handle_alloc_space(bs, l2meta);
ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0,
cluster_offset, offset, cur_bytes,
qiov, qiov_offset, l2meta);
l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */
if (ret < 0) {
goto out_unlocked;
}
/* If we need to do COW, check if it's possible to merge the
* writing of the guest data together with that of the COW regions.
* If it's not possible (or not necessary) then write the
* guest data now. */
if (!merge_cow(offset, cur_bytes,
bs->encrypted ? &encrypted_qiov : qiov,
bs->encrypted ? 0 : qiov_offset + bytes_done, l2meta))
{
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(),
cluster_offset + offset_in_cluster);
ret = bdrv_co_pwritev_part(
s->data_file, cluster_offset + offset_in_cluster, cur_bytes,
bs->encrypted ? &encrypted_qiov : qiov,
bs->encrypted ? 0 : qiov_offset + bytes_done, 0);
if (ret < 0) {
goto out_unlocked;
}
}
qemu_co_mutex_lock(&s->lock);
ret = qcow2_handle_l2meta(bs, &l2meta, true);
if (ret) {
goto out_locked;
goto fail_nometa;
}
bytes -= cur_bytes;
offset += cur_bytes;
bytes_done += cur_bytes;
qiov_offset += cur_bytes;
trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
}
ret = 0;
goto out_locked;
out_unlocked:
qemu_co_mutex_lock(&s->lock);
out_locked:
@ -2347,7 +2483,15 @@ out_locked:
qemu_co_mutex_unlock(&s->lock);
qemu_vfree(cluster_data);
fail_nometa:
if (aio) {
aio_task_pool_wait_all(aio);
if (ret == 0) {
ret = aio_task_pool_status(aio);
}
g_free(aio);
}
trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
return ret;

View File

@ -65,6 +65,9 @@
#define QCOW2_MAX_BITMAPS 65535
#define QCOW2_MAX_BITMAP_DIRECTORY_SIZE (1024 * QCOW2_MAX_BITMAPS)
/* Maximum number of parallel sub-requests per guest request */
#define QCOW2_MAX_WORKERS 8
/* indicate that the refcount of the referenced cluster is exactly one. */
#define QCOW_OFLAG_COPIED (1ULL << 63)
/* indicate that the cluster is compressed (they never have the copied flag) */

View File

@ -543,7 +543,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
s->backup_job = backup_job_create(
NULL, s->secondary_disk->bs, s->hidden_disk->bs,
0, MIRROR_SYNC_MODE_NONE, NULL, 0, false,
0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, NULL,
BLOCKDEV_ON_ERROR_REPORT,
BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
backup_job_completed, bs, NULL, &local_err);

View File

@ -40,12 +40,14 @@ mirror_yield_in_flight(void *s, int64_t offset, int in_flight) "s %p offset %" P
# backup.c
backup_do_cow_enter(void *job, int64_t start, int64_t offset, uint64_t bytes) "job %p start %" PRId64 " offset %" PRId64 " bytes %" PRIu64
backup_do_cow_return(void *job, int64_t offset, uint64_t bytes, int ret) "job %p offset %" PRId64 " bytes %" PRIu64 " ret %d"
backup_do_cow_skip(void *job, int64_t start) "job %p start %"PRId64
backup_do_cow_skip_range(void *job, int64_t start, uint64_t bytes) "job %p start %"PRId64" bytes %"PRId64
backup_do_cow_process(void *job, int64_t start) "job %p start %"PRId64
backup_do_cow_read_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
backup_do_cow_write_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
backup_do_cow_copy_range_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
# block-copy.c
block_copy_skip(void *bcs, int64_t start) "bcs %p start %"PRId64
block_copy_skip_range(void *bcs, int64_t start, uint64_t bytes) "bcs %p start %"PRId64" bytes %"PRId64
block_copy_process(void *bcs, int64_t start) "bcs %p start %"PRId64
block_copy_with_bounce_buffer_read_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
block_copy_with_bounce_buffer_write_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
block_copy_with_offload_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
# ../blockdev.c
qmp_block_job_cancel(void *job) "job %p"
@ -62,6 +64,7 @@ file_paio_submit(void *acb, void *opaque, int64_t offset, int count, int type) "
file_copy_file_range(void *bs, int src, int64_t src_off, int dst, int64_t dst_off, int64_t bytes, int flags, int64_t ret) "bs %p src_fd %d offset %"PRIu64" dst_fd %d offset %"PRIu64" bytes %"PRIu64" flags %d ret %"PRId64
# qcow2.c
qcow2_add_task(void *co, void *bs, void *pool, const char *action, int cluster_type, uint64_t file_cluster_offset, uint64_t offset, uint64_t bytes, void *qiov, size_t qiov_offset) "co %p bs %p pool %p: %s: cluster_type %d file_cluster_offset %" PRIu64 " offset %" PRIu64 " bytes %" PRIu64 " qiov %p qiov_offset %zu"
qcow2_writev_start_req(void *co, int64_t offset, int bytes) "co %p offset 0x%" PRIx64 " bytes %d"
qcow2_writev_done_req(void *co, int ret) "co %p ret %d"
qcow2_writev_start_part(void *co) "co %p"

View File

@ -3601,6 +3601,7 @@ static BlockJob *do_backup_common(BackupCommon *backup,
job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
backup->sync, bmap, backup->bitmap_mode,
backup->compress,
backup->filter_node_name,
backup->on_source_error,
backup->on_target_error,
job_flags, NULL, NULL, txn, errp);

View File

@ -442,6 +442,14 @@ static void ide_issue_trim_cb(void *opaque, int ret)
TrimAIOCB *iocb = opaque;
IDEState *s = iocb->s;
if (iocb->i >= 0) {
if (ret >= 0) {
block_acct_done(blk_get_stats(s->blk), &s->acct);
} else {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
}
}
if (ret >= 0) {
while (iocb->j < iocb->qiov->niov) {
int j = iocb->j;
@ -459,10 +467,14 @@ static void ide_issue_trim_cb(void *opaque, int ret)
}
if (!ide_sect_range_ok(s, sector, count)) {
block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
iocb->ret = -EINVAL;
goto done;
}
block_acct_start(blk_get_stats(s->blk), &s->acct,
count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);
/* Got an entry! Submit and exit. */
iocb->aiocb = blk_aio_pdiscard(s->blk,
sector << BDRV_SECTOR_BITS,

View File

@ -1608,25 +1608,28 @@ static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
SCSIDiskReq *r = data->r;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint64_t sector_num;
uint32_t nb_sectors;
assert(r->req.aiocb == NULL);
if (scsi_disk_req_check_error(r, ret, false)) {
goto done;
}
if (data->count > 0) {
sector_num = ldq_be_p(&data->inbuf[0]);
nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
if (!check_lba_range(s, sector_num, nb_sectors)) {
r->sector = ldq_be_p(&data->inbuf[0])
* (s->qdev.blocksize / BDRV_SECTOR_SIZE);
r->sector_count = (ldl_be_p(&data->inbuf[8]) & 0xffffffffULL)
* (s->qdev.blocksize / BDRV_SECTOR_SIZE);
if (!check_lba_range(s, r->sector, r->sector_count)) {
block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
BLOCK_ACCT_UNMAP);
scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
goto done;
}
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
r->sector_count * BDRV_SECTOR_SIZE,
BLOCK_ACCT_UNMAP);
r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
sector_num * s->qdev.blocksize,
nb_sectors * s->qdev.blocksize,
r->sector * BDRV_SECTOR_SIZE,
r->sector_count * BDRV_SECTOR_SIZE,
scsi_unmap_complete, data);
data->count--;
data->inbuf += 16;
@ -1650,7 +1653,13 @@ static void scsi_unmap_complete(void *opaque, int ret)
r->req.aiocb = NULL;
aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
scsi_unmap_complete_noio(data, ret);
if (scsi_disk_req_check_error(r, ret, true)) {
scsi_req_unref(&r->req);
g_free(data);
} else {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
scsi_unmap_complete_noio(data, ret);
}
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
@ -1680,6 +1689,7 @@ static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
}
if (blk_is_read_only(s->qdev.conf.blk)) {
block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
return;
}
@ -1695,10 +1705,12 @@ static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
return;
invalid_param_len:
block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
return;
invalid_field:
block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

View File

@ -33,9 +33,11 @@ typedef struct BlockAcctTimedStats BlockAcctTimedStats;
typedef struct BlockAcctStats BlockAcctStats;
enum BlockAcctType {
BLOCK_ACCT_NONE = 0,
BLOCK_ACCT_READ,
BLOCK_ACCT_WRITE,
BLOCK_ACCT_FLUSH,
BLOCK_ACCT_UNMAP,
BLOCK_MAX_IOTYPE,
};
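
A hedged sketch of the accounting pattern the IDE and SCSI hunks above follow for the new BLOCK_ACCT_UNMAP type: start a cookie before submitting the discard, complete it as done or failed in the completion callback, and record requests rejected before submission as invalid. The device struct, its fields and range_covered() are illustrative placeholders; only the block_acct_*() helpers and blk_aio_pdiscard() appear in the hunks above.

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/accounting.h"

typedef struct MyDevice {               /* hypothetical device state */
    BlockBackend *blk;
    BlockAcctCookie acct;
} MyDevice;

static bool range_covered(MyDevice *dev, int64_t offset, int64_t bytes)
{
    return offset >= 0 && bytes > 0;    /* placeholder range check */
}

static void my_discard_cb(void *opaque, int ret)
{
    MyDevice *dev = opaque;

    /* completion: account the started cookie as done or failed */
    if (ret >= 0) {
        block_acct_done(blk_get_stats(dev->blk), &dev->acct);
    } else {
        block_acct_failed(blk_get_stats(dev->blk), &dev->acct);
    }
}

static void my_submit_discard(MyDevice *dev, int64_t offset, int64_t bytes)
{
    if (!range_covered(dev, offset, bytes)) {
        /* rejected before submission: counts as an invalid unmap */
        block_acct_invalid(blk_get_stats(dev->blk), BLOCK_ACCT_UNMAP);
        return;
    }

    block_acct_start(blk_get_stats(dev->blk), &dev->acct, bytes,
                     BLOCK_ACCT_UNMAP);
    blk_aio_pdiscard(dev->blk, offset, bytes, my_discard_cb, dev);
}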

54
include/block/aio_task.h Normal file
View File

@ -0,0 +1,54 @@
/*
* AIO task loops
*
* Copyright (c) 2019 Virtuozzo International GmbH.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef BLOCK_AIO_TASK_H
#define BLOCK_AIO_TASK_H
#include "qemu/coroutine.h"
typedef struct AioTaskPool AioTaskPool;
typedef struct AioTask AioTask;
typedef int coroutine_fn (*AioTaskFunc)(AioTask *task);
struct AioTask {
AioTaskPool *pool;
AioTaskFunc func;
int ret;
};
AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks);
void aio_task_pool_free(AioTaskPool *);
/* error code of failed task or 0 if all is OK */
int aio_task_pool_status(AioTaskPool *pool);
bool aio_task_pool_empty(AioTaskPool *pool);
/* The caller provides a filled @task; task->pool is set automatically */
void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task);
void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool);
void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool);
void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool);
#endif /* BLOCK_AIO_TASK_H */
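
The qcow2 hunks earlier in this series drive this API with a lazy pattern: the pool is only created once a guest request splits into more than one sub-request, each sub-request is started as a task, and on the way out the caller waits for all tasks and folds the pool status into its own return value. A minimal sketch of that pattern, assuming the pool takes ownership of started tasks; MyTask, my_task_func() and the 64k sub-request size are illustrative and not part of this series.

#include "qemu/osdep.h"
#include "block/aio_task.h"

typedef struct MyTask {
    AioTask task;               /* embedded so container_of() works */
    int64_t offset;
    int64_t bytes;
} MyTask;

static int coroutine_fn my_task_func(AioTask *task)
{
    MyTask *t = container_of(task, MyTask, task);

    /* ... handle one sub-request covering [t->offset, t->offset + t->bytes) ... */
    return 0;
}

static int coroutine_fn my_request(int64_t offset, int64_t bytes)
{
    AioTaskPool *pool = NULL;
    int ret = 0;

    /* aio_task_pool_status(NULL) is 0, as the qcow2 loops above rely on */
    while (bytes != 0 && aio_task_pool_status(pool) == 0) {
        int64_t cur = MIN(bytes, 64 * 1024);
        MyTask *t = g_new(MyTask, 1);

        *t = (MyTask) {
            .task.func = my_task_func,
            .offset    = offset,
            .bytes     = cur,
        };

        if (!pool && cur != bytes) {
            /* more than one sub-request: create the pool lazily */
            pool = aio_task_pool_new(8 /* max busy tasks */);
        }

        if (pool) {
            aio_task_pool_wait_slot(pool);              /* throttle concurrency */
            aio_task_pool_start_task(pool, &t->task);   /* assumed to own @t */
        } else {
            ret = my_task_func(&t->task);               /* single sub-request: run inline */
            g_free(t);
            if (ret < 0) {
                break;
            }
        }

        offset += cur;
        bytes -= cur;
    }

    if (pool) {
        aio_task_pool_wait_all(pool);
        if (ret == 0) {
            ret = aio_task_pool_status(pool);
        }
        g_free(pool);
    }
    return ret;
}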

View File

@ -0,0 +1,93 @@
/*
* block_copy API
*
* Copyright (C) 2013 Proxmox Server Solutions
* Copyright (c) 2019 Virtuozzo International GmbH.
*
* Authors:
* Dietmar Maurer (dietmar@proxmox.com)
* Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef BLOCK_COPY_H
#define BLOCK_COPY_H
#include "block/block.h"
typedef struct BlockCopyInFlightReq {
int64_t start_byte;
int64_t end_byte;
QLIST_ENTRY(BlockCopyInFlightReq) list;
CoQueue wait_queue; /* coroutines blocked on this request */
} BlockCopyInFlightReq;
typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
typedef void (*ProgressResetCallbackFunc)(void *opaque);
typedef struct BlockCopyState {
/*
* BdrvChild objects are not owned or managed by block-copy. They are
* provided by the block-copy user, who is responsible for appropriate
* permissions on these children.
*/
BdrvChild *source;
BdrvChild *target;
BdrvDirtyBitmap *copy_bitmap;
int64_t cluster_size;
bool use_copy_range;
int64_t copy_range_size;
uint64_t len;
QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
BdrvRequestFlags write_flags;
/*
* skip_unallocated:
*
* Used by sync=top jobs, which first scan the source node for unallocated
* areas and clear them in the copy_bitmap. During this process, the bitmap
* is thus not fully initialized: It may still have bits set for areas that
* are unallocated and should actually not be copied.
*
* This is indicated by skip_unallocated.
*
* In this case, block_copy() will query the source's allocation status,
* skip unallocated regions, clear them in the copy_bitmap, and invoke
* block_copy_reset_unallocated() every time it does.
*/
bool skip_unallocated;
/* progress_bytes_callback: called when some copying progress has been made. */
ProgressBytesCallbackFunc progress_bytes_callback;
/*
* progress_reset_callback: called when some bytes are reset in copy_bitmap
* (see @skip_unallocated above). The callee is assumed to recalculate how
* many bytes remain based on the dirty bit count of copy_bitmap.
*/
ProgressResetCallbackFunc progress_reset_callback;
void *progress_opaque;
} BlockCopyState;
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
int64_t cluster_size,
BdrvRequestFlags write_flags,
Error **errp);
void block_copy_set_callbacks(
BlockCopyState *s,
ProgressBytesCallbackFunc progress_bytes_callback,
ProgressResetCallbackFunc progress_reset_callback,
void *progress_opaque);
void block_copy_state_free(BlockCopyState *s);
int64_t block_copy_reset_unallocated(BlockCopyState *s,
int64_t offset, int64_t *count);
int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
bool *error_is_read);
#endif /* BLOCK_COPY_H */
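
A minimal sketch of wiring this API up the way the backup job does: create the state over two BdrvChild objects the caller already holds appropriate permissions on, register progress callbacks, run block_copy() over a range, and free the state. The 64k cluster size, the zero write flags and the trivial callbacks are placeholders, and whether anything is actually copied depends on the state's copy_bitmap.

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"
#include "block/block-copy.h"

static void my_progress_bytes(int64_t bytes, void *opaque)
{
    uint64_t *done = opaque;

    *done += bytes;     /* bytes copied so far */
}

static void my_progress_reset(void *opaque)
{
    /* bits were cleared from copy_bitmap; recalculate the remaining work */
}

static int coroutine_fn my_copy_range(BdrvChild *source, BdrvChild *target,
                                      int64_t start, uint64_t bytes,
                                      Error **errp)
{
    uint64_t done = 0;
    bool error_is_read;
    int ret;
    BlockCopyState *bcs;

    bcs = block_copy_state_new(source, target,
                               64 * 1024 /* cluster size */,
                               0 /* write flags */, errp);
    if (!bcs) {
        return -EINVAL;
    }

    block_copy_set_callbacks(bcs, my_progress_bytes, my_progress_reset, &done);

    ret = block_copy(bcs, start, bytes, &error_is_read);

    block_copy_state_free(bcs);
    return ret;
}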

View File

@ -501,6 +501,7 @@ int bdrv_get_flags(BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
Error **errp);
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
void bdrv_round_to_clusters(BlockDriverState *bs,
int64_t offset, int64_t bytes,
int64_t *cluster_offset,

View File

@ -366,6 +366,7 @@ struct BlockDriver {
int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs,
Error **errp);
BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs);
int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
QEMUIOVector *qiov,
@ -1196,6 +1197,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BdrvDirtyBitmap *sync_bitmap,
BitmapSyncMode bitmap_mode,
bool compress,
const char *filter_node_name,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
int creation_flags,

View File

@ -860,6 +860,8 @@
#
# @wr_bytes: The number of bytes written by the device.
#
# @unmap_bytes: The number of bytes unmapped by the device (Since 4.2)
#
# @rd_operations: The number of read operations performed by the device.
#
# @wr_operations: The number of write operations performed by the device.
@ -867,12 +869,18 @@
# @flush_operations: The number of cache flush operations performed by the
# device (since 0.15.0)
#
# @flush_total_time_ns: Total time spend on cache flushes in nano-seconds
# @unmap_operations: The number of unmap operations performed by the device
# (Since 4.2)
#
# @rd_total_time_ns: Total time spent on reads in nanoseconds (since 0.15.0).
#
# @wr_total_time_ns: Total time spent on writes in nanoseconds (since 0.15.0).
#
# @flush_total_time_ns: Total time spent on cache flushes in nanoseconds
# (since 0.15.0).
#
# @wr_total_time_ns: Total time spend on writes in nano-seconds (since 0.15.0).
#
# @rd_total_time_ns: Total_time_spend on reads in nano-seconds (since 0.15.0).
# @unmap_total_time_ns: Total time spent on unmap operations in nanoseconds
# (Since 4.2)
#
# @wr_highest_offset: The offset after the greatest byte written to the
# device. The intended use of this information is for
@ -885,6 +893,9 @@
# @wr_merged: Number of write requests that have been merged into another
# request (Since 2.3).
#
# @unmap_merged: Number of unmap requests that have been merged into another
# request (Since 4.2)
#
# @idle_time_ns: Time since the last I/O operation, in
# nanoseconds. If the field is absent it means that
# there haven't been any operations yet (Since 2.5).
@ -898,6 +909,9 @@
# @failed_flush_operations: The number of failed flush operations
# performed by the device (Since 2.5)
#
# @failed_unmap_operations: The number of failed unmap operations performed
# by the device (Since 4.2)
#
# @invalid_rd_operations: The number of invalid read operations
# performed by the device (Since 2.5)
#
@ -907,6 +921,9 @@
# @invalid_flush_operations: The number of invalid flush operations
# performed by the device (Since 2.5)
#
# @invalid_unmap_operations: The number of invalid unmap operations performed
# by the device (Since 4.2)
#
# @account_invalid: Whether invalid operations are included in the
# last access statistics (Since 2.5)
#
@ -925,20 +942,59 @@
# Since: 0.14.0
##
{ 'struct': 'BlockDeviceStats',
'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'rd_operations': 'int',
'wr_operations': 'int', 'flush_operations': 'int',
'flush_total_time_ns': 'int', 'wr_total_time_ns': 'int',
'rd_total_time_ns': 'int', 'wr_highest_offset': 'int',
'rd_merged': 'int', 'wr_merged': 'int', '*idle_time_ns': 'int',
'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'unmap_bytes' : 'int',
'rd_operations': 'int', 'wr_operations': 'int',
'flush_operations': 'int', 'unmap_operations': 'int',
'rd_total_time_ns': 'int', 'wr_total_time_ns': 'int',
'flush_total_time_ns': 'int', 'unmap_total_time_ns': 'int',
'wr_highest_offset': 'int',
'rd_merged': 'int', 'wr_merged': 'int', 'unmap_merged': 'int',
'*idle_time_ns': 'int',
'failed_rd_operations': 'int', 'failed_wr_operations': 'int',
'failed_flush_operations': 'int', 'invalid_rd_operations': 'int',
'invalid_wr_operations': 'int', 'invalid_flush_operations': 'int',
'failed_flush_operations': 'int', 'failed_unmap_operations': 'int',
'invalid_rd_operations': 'int', 'invalid_wr_operations': 'int',
'invalid_flush_operations': 'int', 'invalid_unmap_operations': 'int',
'account_invalid': 'bool', 'account_failed': 'bool',
'timed_stats': ['BlockDeviceTimedStats'],
'*rd_latency_histogram': 'BlockLatencyHistogramInfo',
'*wr_latency_histogram': 'BlockLatencyHistogramInfo',
'*flush_latency_histogram': 'BlockLatencyHistogramInfo' } }
##
# @BlockStatsSpecificFile:
#
# File driver statistics
#
# @discard-nb-ok: The number of successful discard operations performed by
# the driver.
#
# @discard-nb-failed: The number of failed discard operations performed by
# the driver.
#
# @discard-bytes-ok: The number of bytes discarded by the driver.
#
# Since: 4.2
##
{ 'struct': 'BlockStatsSpecificFile',
'data': {
'discard-nb-ok': 'uint64',
'discard-nb-failed': 'uint64',
'discard-bytes-ok': 'uint64' } }
##
# @BlockStatsSpecific:
#
# Block driver specific statistics
#
# Since: 4.2
##
{ 'union': 'BlockStatsSpecific',
'base': { 'driver': 'BlockdevDriver' },
'discriminator': 'driver',
'data': {
'file': 'BlockStatsSpecificFile',
'host_device': 'BlockStatsSpecificFile' } }
##
# @BlockStats:
#
@ -954,6 +1010,8 @@
#
# @stats: A @BlockDeviceStats for the device.
#
# @driver-specific: Optional driver-specific stats. (Since 4.2)
#
# @parent: This describes the file block device if it has one.
# Contains recursively the statistics of the underlying
# protocol (e.g. the host file for a qcow2 image). If there is
@ -967,6 +1025,7 @@
{ 'struct': 'BlockStats',
'data': {'*device': 'str', '*qdev': 'str', '*node-name': 'str',
'stats': 'BlockDeviceStats',
'*driver-specific': 'BlockStatsSpecific',
'*parent': 'BlockStats',
'*backing': 'BlockStats'} }
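
The new driver-specific member is filled from the new BlockDriver hook added in this series (see the bdrv_get_specific_stats hunks in block.c and block_int.h). A hedged sketch of how a file-like protocol driver could implement that hook; BDRVMyState and its counters are illustrative stand-ins for wherever the driver keeps its discard counters, and the C field names assume the usual QAPI codegen (dashes become underscores).

#include "qemu/osdep.h"
#include "block/block_int.h"
#include "qapi/qapi-types-block-core.h"

typedef struct BDRVMyState {            /* hypothetical driver state */
    uint64_t discard_nb_ok;
    uint64_t discard_nb_failed;
    uint64_t discard_bytes_ok;
} BDRVMyState;

static BlockStatsSpecific *my_get_specific_stats(BlockDriverState *bs)
{
    BDRVMyState *s = bs->opaque;
    BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);

    *stats = (BlockStatsSpecific) {
        .driver = BLOCKDEV_DRIVER_FILE,
        .u.file = {
            .discard_nb_ok     = s->discard_nb_ok,
            .discard_nb_failed = s->discard_nb_failed,
            .discard_bytes_ok  = s->discard_bytes_ok,
        },
    };

    return stats;
}

static BlockDriver bdrv_file_example = {
    /* ... the driver's other callbacks ... */
    .bdrv_get_specific_stats = my_get_specific_stats,
};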
@ -1391,6 +1450,11 @@
# list without user intervention.
# Defaults to true. (Since 2.12)
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the backup job inserts into the graph
#        above the node specified by @drive. If this option is not given,
# a node name is autogenerated. (Since: 4.2)
#
# Note: @on-source-error and @on-target-error only affect background
# I/O. If an error occurs during a guest write request, the device's
# rerror/werror actions will be used.
@ -1404,7 +1468,8 @@
'*compress': 'bool',
'*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
'*auto-finalize': 'bool', '*auto-dismiss': 'bool',
'*filter-node-name': 'str' } }
##
# @DriveBackup:

View File

@ -107,7 +107,7 @@ if [ "$event" == "l2_load" ]; then
$QEMU_IO -c "read $vmstate 0 128k " "$BLKDBG_TEST_IMG" | _filter_qemu_io
fi
_check_test_img 2>&1 | grep -v "refcount=1 reference=0"
_check_test_img_ignore_leaks 2>&1 | grep -v "refcount=1 reference=0"
done
done
@ -152,7 +152,7 @@ echo
echo "Event: $event; errno: $errno; imm: $imm; once: $once; write $vmstate"
$QEMU_IO -c "write $vmstate 0 64M" "$BLKDBG_TEST_IMG" | _filter_qemu_io
_check_test_img 2>&1 | grep -v "refcount=1 reference=0"
_check_test_img_ignore_leaks 2>&1 | grep -v "refcount=1 reference=0"
done
done
@ -191,7 +191,7 @@ echo
echo "Event: $event; errno: $errno; imm: $imm; once: $once"
$QEMU_IO -c "write -b 0 64k" "$BLKDBG_TEST_IMG" | _filter_qemu_io
_check_test_img 2>&1 | grep -v "refcount=1 reference=0"
_check_test_img_ignore_leaks 2>&1 | grep -v "refcount=1 reference=0"
done
done

View File

@ -17,18 +17,14 @@ Event: l1_update; errno: 5; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l1_update; errno: 5; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l1_update; errno: 28; imm: off; once: on; write
@ -45,18 +41,14 @@ Event: l1_update; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l1_update; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_load; errno: 5; imm: off; once: on; write
@ -137,18 +129,14 @@ Event: l2_update; errno: 5; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
127 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_update; errno: 5; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
127 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_update; errno: 28; imm: off; once: on; write
@ -165,18 +153,14 @@ Event: l2_update; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
127 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_update; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
127 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_alloc_write; errno: 5; imm: off; once: on; write
@ -200,9 +184,7 @@ Event: l2_alloc_write; errno: 5; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_alloc_write; errno: 28; imm: off; once: on; write
@ -226,9 +208,7 @@ Event: l2_alloc_write; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: write_aio; errno: 5; imm: off; once: on; write
@ -480,18 +460,14 @@ Event: refblock_alloc_hookup; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
55 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_hookup; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
251 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_write; errno: 28; imm: off; once: on; write
@ -532,18 +508,14 @@ Event: refblock_alloc_write_blocks; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
10 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_write_blocks; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
23 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_write_table; errno: 28; imm: off; once: on; write
@ -560,18 +532,14 @@ Event: refblock_alloc_write_table; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
10 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_write_table; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
23 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_switch_table; errno: 28; imm: off; once: on; write
@ -588,18 +556,14 @@ Event: refblock_alloc_switch_table; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
10 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_switch_table; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
23 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
=== L1 growth tests ===
@ -658,9 +622,7 @@ Event: l1_grow_activate_table; errno: 5; imm: off; once: off
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
96 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l1_grow_activate_table; errno: 28; imm: off; once: on
@ -672,9 +634,7 @@ Event: l1_grow_activate_table; errno: 28; imm: off; once: off
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
96 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
=== Avoid cluster leaks after temporary failure ===

View File

@ -17,18 +17,14 @@ Event: l1_update; errno: 5; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l1_update; errno: 5; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l1_update; errno: 28; imm: off; once: on; write
@ -45,18 +41,14 @@ Event: l1_update; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l1_update; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_load; errno: 5; imm: off; once: on; write
@ -140,9 +132,7 @@ qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
wrote 131072/131072 bytes at offset 0
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
127 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_update; errno: 5; imm: off; once: off; write -b
@ -150,9 +140,7 @@ qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
wrote 131072/131072 bytes at offset 0
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
127 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_update; errno: 28; imm: off; once: on; write
@ -172,9 +160,7 @@ qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
wrote 131072/131072 bytes at offset 0
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
127 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_update; errno: 28; imm: off; once: off; write -b
@ -182,9 +168,7 @@ qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
wrote 131072/131072 bytes at offset 0
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
127 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_alloc_write; errno: 5; imm: off; once: on; write
@ -208,9 +192,7 @@ Event: l2_alloc_write; errno: 5; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l2_alloc_write; errno: 28; imm: off; once: on; write
@ -234,9 +216,7 @@ Event: l2_alloc_write; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
1 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: write_aio; errno: 5; imm: off; once: on; write
@ -488,18 +468,14 @@ Event: refblock_alloc_hookup; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
55 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_hookup; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
251 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_write; errno: 28; imm: off; once: on; write
@ -540,18 +516,14 @@ Event: refblock_alloc_write_blocks; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
10 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_write_blocks; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
23 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_write_table; errno: 28; imm: off; once: on; write
@ -568,18 +540,14 @@ Event: refblock_alloc_write_table; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
10 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_write_table; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
23 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_switch_table; errno: 28; imm: off; once: on; write
@ -596,18 +564,14 @@ Event: refblock_alloc_switch_table; errno: 28; imm: off; once: off; write
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
10 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc_switch_table; errno: 28; imm: off; once: off; write -b
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
23 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
=== L1 growth tests ===
@ -666,9 +630,7 @@ Event: l1_grow_activate_table; errno: 5; imm: off; once: off
qemu-io: Failed to flush the L2 table cache: Input/output error
qemu-io: Failed to flush the refcount block cache: Input/output error
write failed: Input/output error
96 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: l1_grow_activate_table; errno: 28; imm: off; once: on
@ -680,9 +642,7 @@ Event: l1_grow_activate_table; errno: 28; imm: off; once: off
qemu-io: Failed to flush the L2 table cache: No space left on device
qemu-io: Failed to flush the refcount block cache: No space left on device
write failed: No space left on device
96 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
No errors were found on the image.
=== Avoid cluster leaks after temporary failure ===

View File

@ -133,6 +133,7 @@ class BackupTest(iotests.QMPTestCase):
self.vm = iotests.VM()
self.test_img = img_create('test')
self.dest_img = img_create('dest')
self.dest_img2 = img_create('dest2')
self.ref_img = img_create('ref')
self.vm.add_drive(self.test_img)
self.vm.launch()
@ -141,6 +142,7 @@ class BackupTest(iotests.QMPTestCase):
self.vm.shutdown()
try_remove(self.test_img)
try_remove(self.dest_img)
try_remove(self.dest_img2)
try_remove(self.ref_img)
def hmp_io_writes(self, drive, patterns):
@ -253,9 +255,9 @@ class BackupTest(iotests.QMPTestCase):
res = self.vm.qmp('query-block-jobs')
self.assert_qmp(res, 'return[0]/status', 'concluded')
# Leave zombie job un-dismissed, observe a failure:
res = self.qmp_backup_and_wait(serror="Node 'drive0' is busy: block device is in use by block job: backup",
res = self.qmp_backup_and_wait(serror="Job ID 'drive0' already in use",
device='drive0', format=iotests.imgfmt,
sync='full', target=self.dest_img,
sync='full', target=self.dest_img2,
auto_dismiss=False)
self.assertEqual(res, False)
# OK, dismiss the zombie.
@ -265,7 +267,7 @@ class BackupTest(iotests.QMPTestCase):
self.assert_qmp(res, 'return', [])
# Ensure it's really gone.
self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
sync='full', target=self.dest_img,
sync='full', target=self.dest_img2,
auto_dismiss=False)
def dismissal_failure(self, dismissal_opt):

View File

@ -105,7 +105,7 @@ class TestIncrementalBackupBase(iotests.QMPTestCase):
# Create a base image with a distinctive patterning
drive0 = self.add_node('drive0')
self.img_create(drive0['file'], drive0['fmt'])
self.vm.add_drive(drive0['file'])
self.vm.add_drive(drive0['file'], opts='node-name=node0')
self.write_default_pattern(drive0['file'])
self.vm.launch()
@ -348,12 +348,14 @@ class TestIncrementalBackup(TestIncrementalBackupBase):
('0xfe', '16M', '256k'),
('0x64', '32736k', '64k')))
# Check the dirty bitmap stats
result = self.vm.qmp('query-block')
self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/name', 'bitmap0')
self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/count', 458752)
self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/granularity', 65536)
self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/status', 'active')
self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/persistent', False)
self.assertTrue(self.vm.check_bitmap_status(
'node0', bitmap0.name, {
'name': 'bitmap0',
'count': 458752,
'granularity': 65536,
'status': 'active',
'persistent': False
}))
# Prepare a cluster_size=128k backup target without a backing file.
(target, _) = bitmap0.new_target()
@ -670,9 +672,8 @@ class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
"""
drive0 = self.drives[0]
# NB: The blkdebug script here looks for a "flush, read, read" pattern.
# The flush occurs in hmp_io_writes, the first read in device_add, and
# the last read during the block job.
# NB: The blkdebug script here looks for a "flush, read" pattern.
# The flush occurs in hmp_io_writes, and the read during the block job.
result = self.vm.qmp('blockdev-add',
node_name=drive0['id'],
driver=drive0['fmt'],
@ -686,15 +687,11 @@ class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
'event': 'flush_to_disk',
'state': 1,
'new_state': 2
},{
'event': 'read_aio',
'state': 2,
'new_state': 3
}],
'inject-error': [{
'event': 'read_aio',
'errno': 5,
'state': 3,
'state': 2,
'immediately': False,
'once': True
}],
@ -708,23 +705,15 @@ class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
('0xfe', '16M', '256k'),
('0x64', '32736k', '64k')))
# For the purposes of query-block visibility of bitmaps, add a drive
# frontend after we've written data; otherwise we can't use hmp-io
result = self.vm.qmp("device_add",
id="device0",
drive=drive0['id'],
driver="virtio-blk")
self.assert_qmp(result, 'return', {})
# Bitmap Status Check
query = self.vm.qmp('query-block')
ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
if bmap.get('name') == bitmap.name][0]
self.assert_qmp(ret, 'count', 458752)
self.assert_qmp(ret, 'granularity', 65536)
self.assert_qmp(ret, 'status', 'active')
self.assert_qmp(ret, 'busy', False)
self.assert_qmp(ret, 'recording', True)
self.assertTrue(self.vm.check_bitmap_status(
drive0['id'], bitmap.name, {
'count': 458752,
'granularity': 65536,
'status': 'active',
'busy': False,
'recording': True
}))
# Start backup
parent, _ = bitmap.last_target()
@ -748,14 +737,14 @@ class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
'operation': 'read'})
# Bitmap Status Check
query = self.vm.qmp('query-block')
ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
if bmap.get('name') == bitmap.name][0]
self.assert_qmp(ret, 'count', 458752)
self.assert_qmp(ret, 'granularity', 65536)
self.assert_qmp(ret, 'status', 'frozen')
self.assert_qmp(ret, 'busy', True)
self.assert_qmp(ret, 'recording', True)
self.assertTrue(self.vm.check_bitmap_status(
drive0['id'], bitmap.name, {
'count': 458752,
'granularity': 65536,
'status': 'frozen',
'busy': True,
'recording': True
}))
# Resume and check incremental backup for consistency
res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
@ -763,14 +752,14 @@ class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
self.wait_qmp_backup(bitmap.drive['id'])
# Bitmap Status Check
query = self.vm.qmp('query-block')
ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
if bmap.get('name') == bitmap.name][0]
self.assert_qmp(ret, 'count', 0)
self.assert_qmp(ret, 'granularity', 65536)
self.assert_qmp(ret, 'status', 'active')
self.assert_qmp(ret, 'busy', False)
self.assert_qmp(ret, 'recording', True)
self.assertTrue(self.vm.check_bitmap_status(
drive0['id'], bitmap.name, {
'count': 0,
'granularity': 65536,
'status': 'active',
'busy': False,
'recording': True
}))
# Finalize / Cleanup
self.make_reference_backup(bitmap)

View File

@ -34,8 +34,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15
get_image_size_on_host()
{
$QEMU_IMG info -f "$IMGFMT" "$TEST_IMG" | grep "disk size" \
| sed -e 's/^[^0-9]*\([0-9]\+\).*$/\1/'
echo $(($(stat -c '%b * %B' "$TEST_IMG_FILE")))
}
# get standard environment and filters
@ -49,6 +48,46 @@ if [ -z "$TEST_IMG_FILE" ]; then
TEST_IMG_FILE=$TEST_IMG
fi
# Test whether we are running on a broken XFS version. There is this
# bug:
# $ rm -f foo
# $ touch foo
# $ block_size=4096 # Your FS's block size
# $ fallocate -o $((block_size / 2)) -l $block_size foo
# $ LANG=C xfs_bmap foo | grep hole
# 1: [8..15]: hole
#
# The problem is that the XFS driver rounds down the offset and
# rounds up the length to the block size, but independently. As
# such, it only allocates the first block in the example above,
# even though it should allocate the first two blocks (because our
# request is to fallocate something that touches both the first
# two blocks).
#
# This means that when you then write to the beginning of the
# second block, the disk usage of the first two blocks grows.
#
# That contradicts what fallocate() promises: that when you
# write to an area that you have fallocated, no new blocks will have
# to be allocated.
touch "$TEST_IMG_FILE"
# Assuming there is no FS with a block size greater than 64k
fallocate -o 65535 -l 2 "$TEST_IMG_FILE"
len0=$(get_image_size_on_host)
# Write to something that in theory we have just fallocated
# (Thus, the on-disk size should not increase)
poke_file "$TEST_IMG_FILE" 65536 42
len1=$(get_image_size_on_host)
if [ $len1 -gt $len0 ]; then
_notrun "the test filesystem's fallocate() is broken"
fi
rm -f "$TEST_IMG_FILE"
# Generally, we create some image with or without existing preallocation and
# then resize it. Then we write some data into the image and verify that its
# size does not change if we have used preallocation.
@ -111,7 +150,7 @@ for GROWTH_SIZE in 16 48 80; do
if [ $file_length_2 -gt $file_length_1 ]; then
echo "ERROR (grow): Image length has grown from $file_length_1 to $file_length_2"
fi
if [ $create_mode != metadata ]; then
if [ $growth_mode != metadata ]; then
# The host size should not have grown either
if [ $host_size_2 -gt $host_size_1 ]; then
echo "ERROR (grow): Host size has grown from $host_size_1 to $host_size_2"

View File

@ -10,7 +10,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/m.
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 0, "speed": 0, "type": "backup"}}

View File

@ -153,7 +153,7 @@ def cryptsetup_format(config):
(password, slot) = config.first_password()
args = ["luksFormat"]
args = ["luksFormat", "--type", "luks1"]
cipher = config.cipher + "-" + config.mode + "-" + config.ivgen
if config.ivgen_hash is not None:
cipher = cipher + ":" + config.ivgen_hash

View File

@ -2,7 +2,7 @@
# Create image
truncate TEST_DIR/luks-aes-256-xts-plain64-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain64 --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha1.img qiotest-145-aes-256-xts-plain64-sha1
# Write test pattern 0xa7
@ -122,7 +122,7 @@ unlink TEST_DIR/luks-aes-256-xts-plain64-sha1.img
# Create image
truncate TEST_DIR/luks-twofish-256-xts-plain64-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher twofish-xts-plain64 --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-twofish-256-xts-plain64-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher twofish-xts-plain64 --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-twofish-256-xts-plain64-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-256-xts-plain64-sha1.img qiotest-145-twofish-256-xts-plain64-sha1
# Write test pattern 0xa7
@ -242,7 +242,7 @@ unlink TEST_DIR/luks-twofish-256-xts-plain64-sha1.img
# Create image
truncate TEST_DIR/luks-serpent-256-xts-plain64-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher serpent-xts-plain64 --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-serpent-256-xts-plain64-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher serpent-xts-plain64 --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-serpent-256-xts-plain64-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-256-xts-plain64-sha1.img qiotest-145-serpent-256-xts-plain64-sha1
# Write test pattern 0xa7
@ -362,7 +362,7 @@ unlink TEST_DIR/luks-serpent-256-xts-plain64-sha1.img
# Create image
truncate TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher cast5-cbc-plain64 --key-size 128 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher cast5-cbc-plain64 --key-size 128 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img qiotest-145-cast5-128-cbc-plain64-sha1
# Write test pattern 0xa7
@ -483,7 +483,7 @@ Skipping cast6-256-xts-plain64-sha1 in blacklist
# Create image
truncate TEST_DIR/luks-aes-256-cbc-plain-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-cbc-plain --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-plain-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-cbc-plain --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-plain-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain-sha1.img qiotest-145-aes-256-cbc-plain-sha1
# Write test pattern 0xa7
@ -603,7 +603,7 @@ unlink TEST_DIR/luks-aes-256-cbc-plain-sha1.img
# Create image
truncate TEST_DIR/luks-aes-256-cbc-plain64-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-cbc-plain64 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-plain64-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-cbc-plain64 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-plain64-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha1.img qiotest-145-aes-256-cbc-plain64-sha1
# Write test pattern 0xa7
@ -723,7 +723,7 @@ unlink TEST_DIR/luks-aes-256-cbc-plain64-sha1.img
# Create image
truncate TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-cbc-essiv:sha256 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-cbc-essiv:sha256 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img qiotest-145-aes-256-cbc-essiv-sha256-sha1
# Write test pattern 0xa7
@ -843,7 +843,7 @@ unlink TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img
# Create image
truncate TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-essiv:sha256 --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-essiv:sha256 --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img qiotest-145-aes-256-xts-essiv-sha256-sha1
# Write test pattern 0xa7
@ -963,7 +963,7 @@ unlink TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img
# Create image
truncate TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain64 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img qiotest-145-aes-128-xts-plain64-sha256-sha1
# Write test pattern 0xa7
@ -1083,7 +1083,7 @@ unlink TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img
# Create image
truncate TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 384 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain64 --key-size 384 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img qiotest-145-aes-192-xts-plain64-sha256-sha1
# Write test pattern 0xa7
@ -1203,7 +1203,7 @@ unlink TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img
# Create image
truncate TEST_DIR/luks-twofish-128-xts-plain64-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher twofish-xts-plain64 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-twofish-128-xts-plain64-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher twofish-xts-plain64 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-twofish-128-xts-plain64-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-128-xts-plain64-sha1.img qiotest-145-twofish-128-xts-plain64-sha1
# Write test pattern 0xa7
@ -1324,7 +1324,7 @@ Skipping twofish-192-xts-plain64-sha1 in blacklist
# Create image
truncate TEST_DIR/luks-serpent-128-xts-plain64-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher serpent-xts-plain64 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-serpent-128-xts-plain64-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher serpent-xts-plain64 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-serpent-128-xts-plain64-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-128-xts-plain64-sha1.img qiotest-145-serpent-128-xts-plain64-sha1
# Write test pattern 0xa7
@ -1444,7 +1444,7 @@ unlink TEST_DIR/luks-serpent-128-xts-plain64-sha1.img
# Create image
truncate TEST_DIR/luks-serpent-192-xts-plain64-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher serpent-xts-plain64 --key-size 384 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-serpent-192-xts-plain64-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher serpent-xts-plain64 --key-size 384 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-serpent-192-xts-plain64-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-192-xts-plain64-sha1.img qiotest-145-serpent-192-xts-plain64-sha1
# Write test pattern 0xa7
@ -1566,7 +1566,7 @@ Skipping cast6-192-xts-plain64-sha1 in blacklist
# Create image
truncate TEST_DIR/luks-aes-256-xts-plain64-sha224.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash sha224 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha224.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain64 --key-size 512 --hash sha224 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha224.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha224.img qiotest-145-aes-256-xts-plain64-sha224
# Write test pattern 0xa7
@ -1686,7 +1686,7 @@ unlink TEST_DIR/luks-aes-256-xts-plain64-sha224.img
# Create image
truncate TEST_DIR/luks-aes-256-xts-plain64-sha256.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash sha256 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha256.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain64 --key-size 512 --hash sha256 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha256.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha256.img qiotest-145-aes-256-xts-plain64-sha256
# Write test pattern 0xa7
@ -1806,7 +1806,7 @@ unlink TEST_DIR/luks-aes-256-xts-plain64-sha256.img
# Create image
truncate TEST_DIR/luks-aes-256-xts-plain64-sha384.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash sha384 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha384.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain64 --key-size 512 --hash sha384 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha384.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha384.img qiotest-145-aes-256-xts-plain64-sha384
# Write test pattern 0xa7
@ -1926,7 +1926,7 @@ unlink TEST_DIR/luks-aes-256-xts-plain64-sha384.img
# Create image
truncate TEST_DIR/luks-aes-256-xts-plain64-sha512.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash sha512 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha512.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain64 --key-size 512 --hash sha512 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-sha512.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha512.img qiotest-145-aes-256-xts-plain64-sha512
# Write test pattern 0xa7
@ -2046,7 +2046,7 @@ unlink TEST_DIR/luks-aes-256-xts-plain64-sha512.img
# Create image
truncate TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash ripemd160 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain64 --key-size 512 --hash ripemd160 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img qiotest-145-aes-256-xts-plain64-ripemd160
# Write test pattern 0xa7
@ -2166,7 +2166,7 @@ unlink TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img
# Create image
truncate TEST_DIR/luks-aes-256-xts-plain-sha1-pwslot3.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain --key-size 512 --hash sha1 --key-slot 3 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain-sha1-pwslot3.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain --key-size 512 --hash sha1 --key-slot 3 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain-sha1-pwslot3.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain-sha1-pwslot3.img qiotest-145-aes-256-xts-plain-sha1-pwslot3
# Write test pattern 0xa7
@ -2226,7 +2226,7 @@ unlink TEST_DIR/luks-aes-256-xts-plain-sha1-pwslot3.img
# Create image
truncate TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-xts-plain --key-size 512 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img
# Add password slot 1
sudo cryptsetup -q -v luksAddKey TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img --key-slot 1 --key-file - --iter-time 10 TEST_DIR/passwd.txt
# Add password slot 2
@ -2360,7 +2360,7 @@ unlink TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img
# Create image
truncate TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-cbc-essiv:sha256 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-cbc-essiv:sha256 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
# Write test pattern 0xa7
@ -2480,7 +2480,7 @@ unlink TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
# Create image
truncate TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img --size 4194304MB
# Format image
sudo cryptsetup -q -v luksFormat --cipher aes-cbc-plain64:sha256 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
sudo cryptsetup -q -v luksFormat --type luks1 --cipher aes-cbc-plain64:sha256 --key-size 256 --hash sha1 --key-slot 0 --key-file - --iter-time 10 TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
# Open dev
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
# Write test pattern 0xa7

View File

@ -46,7 +46,7 @@ echo '=== NBD ==='
# NBD expects all of its arguments to be strings
# So this should not crash
$QEMU_IMG info 'json:{"driver": "nbd", "host": 42}'
$QEMU_IMG info 'json:{"driver": "nbd", "host": -1}'
# And this should not treat @port as if it had not been specified
# (We need to set up a server here, because the error message for "Connection

View File

@ -1,7 +1,7 @@
QA output created by 162
=== NBD ===
qemu-img: Could not open 'json:{"driver": "nbd", "host": 42}': Failed to connect socket: Invalid argument
qemu-img: Could not open 'json:{"driver": "nbd", "host": -1}': address resolution failed for -1:10809: Name or service not known
image: nbd://localhost:PORT
image: nbd+unix://?socket=42

View File

@ -15,6 +15,8 @@ Testing: -drive driver=null-co,read-zeroes=on,if=virtio
{
"device": "virtio0",
"stats": {
"unmap_operations": 0,
"unmap_merged": 0,
"flush_total_time_ns": 0,
"wr_highest_offset": 0,
"wr_total_time_ns": 0,
@ -24,13 +26,17 @@ Testing: -drive driver=null-co,read-zeroes=on,if=virtio
"wr_bytes": 0,
"timed_stats": [
],
"failed_unmap_operations": 0,
"failed_flush_operations": 0,
"account_invalid": true,
"rd_total_time_ns": 0,
"invalid_unmap_operations": 0,
"flush_operations": 0,
"wr_operations": 0,
"unmap_bytes": 0,
"rd_merged": 0,
"rd_bytes": 0,
"unmap_total_time_ns": 0,
"invalid_flush_operations": 0,
"account_failed": true,
"rd_operations": 0,
@ -74,6 +80,8 @@ Testing: -drive driver=null-co,if=none
{
"device": "none0",
"stats": {
"unmap_operations": 0,
"unmap_merged": 0,
"flush_total_time_ns": 0,
"wr_highest_offset": 0,
"wr_total_time_ns": 0,
@ -83,13 +91,17 @@ Testing: -drive driver=null-co,if=none
"wr_bytes": 0,
"timed_stats": [
],
"failed_unmap_operations": 0,
"failed_flush_operations": 0,
"account_invalid": true,
"rd_total_time_ns": 0,
"invalid_unmap_operations": 0,
"flush_operations": 0,
"wr_operations": 0,
"unmap_bytes": 0,
"rd_merged": 0,
"rd_bytes": 0,
"unmap_total_time_ns": 0,
"invalid_flush_operations": 0,
"account_failed": true,
"rd_operations": 0,
@ -163,6 +175,8 @@ Testing: -blockdev driver=null-co,read-zeroes=on,node-name=null -device virtio-b
{
"device": "",
"stats": {
"unmap_operations": 0,
"unmap_merged": 0,
"flush_total_time_ns": 0,
"wr_highest_offset": 0,
"wr_total_time_ns": 0,
@ -172,13 +186,17 @@ Testing: -blockdev driver=null-co,read-zeroes=on,node-name=null -device virtio-b
"wr_bytes": 0,
"timed_stats": [
],
"failed_unmap_operations": 0,
"failed_flush_operations": 0,
"account_invalid": false,
"rd_total_time_ns": 0,
"invalid_unmap_operations": 0,
"flush_operations": 0,
"wr_operations": 0,
"unmap_bytes": 0,
"rd_merged": 0,
"rd_bytes": 0,
"unmap_total_time_ns": 0,
"invalid_flush_operations": 0,
"account_failed": false,
"rd_operations": 0,

View File

@ -148,11 +148,6 @@ class Drive:
self.fmt = None
self.size = None
self.node = None
self.device = None
@property
def name(self):
return self.node or self.device
def img_create(self, fmt, size):
self.fmt = fmt
@ -188,25 +183,6 @@ class Drive:
self.size = size
self.node = name
def query_bitmaps(vm):
res = vm.qmp("query-block")
return {"bitmaps": {device['device'] or device['qdev']:
device.get('dirty-bitmaps', []) for
device in res['return']}}
def get_bitmap(bitmaps, drivename, name, recording=None):
"""
get a specific bitmap from the object returned by query_bitmaps.
:param recording: If specified, filter results by the specified value.
"""
for bitmap in bitmaps['bitmaps'][drivename]:
if bitmap.get('name', '') == name:
if recording is None:
return bitmap
elif bitmap.get('recording') == recording:
return bitmap
return None
def blockdev_backup(vm, device, target, sync, **kwargs):
# Strip any arguments explicitly nulled by the caller:
kwargs = {key: val for key, val in kwargs.items() if val is not None}
@ -214,13 +190,14 @@ def blockdev_backup(vm, device, target, sync, **kwargs):
device=device,
target=target,
sync=sync,
filter_node_name='backup-top',
**kwargs)
return result
def blockdev_backup_mktarget(drive, target_id, filepath, sync, **kwargs):
target_drive = Drive(filepath, vm=drive.vm)
target_drive.create_target(target_id, drive.fmt, drive.size)
blockdev_backup(drive.vm, drive.name, target_id, sync, **kwargs)
blockdev_backup(drive.vm, drive.node, target_id, sync, **kwargs)
def reference_backup(drive, n, filepath):
log("--- Reference Backup #{:d} ---\n".format(n))
@ -240,7 +217,7 @@ def backup(drive, n, filepath, sync, **kwargs):
job_id=job_id, **kwargs)
return job_id
def perform_writes(drive, n):
def perform_writes(drive, n, filter_node_name=None):
log("--- Write #{:d} ---\n".format(n))
for pattern in GROUPS[n].patterns:
cmd = "write -P{:s} 0x{:07x} 0x{:x}".format(
@ -248,9 +225,9 @@ def perform_writes(drive, n):
pattern.offset,
pattern.size)
log(cmd)
log(drive.vm.hmp_qemu_io(drive.name, cmd))
bitmaps = query_bitmaps(drive.vm)
log(bitmaps, indent=2)
log(drive.vm.hmp_qemu_io(filter_node_name or drive.node, cmd))
bitmaps = drive.vm.query_bitmaps()
log({'bitmaps': bitmaps}, indent=2)
log('')
return bitmaps
@ -343,26 +320,19 @@ def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
}]
}
drive0.node = 'drive0'
vm.qmp_log('blockdev-add',
filters=[iotests.filter_qmp_testfiles],
node_name="drive0",
node_name=drive0.node,
driver=drive0.fmt,
file=file_config)
drive0.node = 'drive0'
drive0.device = 'device0'
# Use share-rw to allow writes directly to the node;
# The anonymous block-backend for this configuration prevents us
# from using HMP's qemu-io commands to address the device.
vm.qmp_log("device_add", id=drive0.device,
drive=drive0.name, driver="scsi-hd",
share_rw=True)
log('')
# 0 - Writes and Reference Backup
perform_writes(drive0, 0)
reference_backup(drive0, 0, fbackup0)
log('--- Add Bitmap ---\n')
vm.qmp_log("block-dirty-bitmap-add", node=drive0.name,
vm.qmp_log("block-dirty-bitmap-add", node=drive0.node,
name="bitmap0", granularity=GRANULARITY)
log('')
ebitmap = EmulatedBitmap()
@ -370,14 +340,14 @@ def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
# 1 - Writes and Reference Backup
bitmaps = perform_writes(drive0, 1)
ebitmap.dirty_group(1)
bitmap = get_bitmap(bitmaps, drive0.device, 'bitmap0')
bitmap = vm.get_bitmap(drive0.node, 'bitmap0', bitmaps=bitmaps)
ebitmap.compare(bitmap)
reference_backup(drive0, 1, fbackup1)
# 1 - Test Backup (w/ Optional induced failure)
if failure == 'intermediate':
# Activate blkdebug induced failure for second-to-next read
log(vm.hmp_qemu_io(drive0.name, 'flush'))
log(vm.hmp_qemu_io(drive0.node, 'flush'))
log('')
job = backup(drive0, 1, bsync1, msync_mode,
bitmap="bitmap0", bitmap_mode=bsync_mode)
@ -386,14 +356,15 @@ def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
"""Issue writes while the job is open to test bitmap divergence."""
# Note: when `failure` is 'intermediate', this isn't called.
log('')
bitmaps = perform_writes(drive0, 2)
bitmaps = perform_writes(drive0, 2, filter_node_name='backup-top')
# Named bitmap (static, should be unchanged)
ebitmap.compare(get_bitmap(bitmaps, drive0.device, 'bitmap0'))
ebitmap.compare(vm.get_bitmap(drive0.node, 'bitmap0',
bitmaps=bitmaps))
# Anonymous bitmap (dynamic, shows new writes)
anonymous = EmulatedBitmap()
anonymous.dirty_group(2)
anonymous.compare(get_bitmap(bitmaps, drive0.device, '',
recording=True))
anonymous.compare(vm.get_bitmap(drive0.node, '', recording=True,
bitmaps=bitmaps))
# Simulate the order in which this will happen:
# group 1 gets cleared first, then group two gets written.
@ -405,8 +376,8 @@ def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
vm.run_job(job, auto_dismiss=True, auto_finalize=False,
pre_finalize=_callback,
cancel=(failure == 'simulated'))
bitmaps = query_bitmaps(vm)
log(bitmaps, indent=2)
bitmaps = vm.query_bitmaps()
log({'bitmaps': bitmaps}, indent=2)
log('')
if bsync_mode == 'always' and failure == 'intermediate':
@ -423,29 +394,30 @@ def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
ebitmap.clear()
ebitmap.dirty_bits(range(fail_bit, SIZE // GRANULARITY))
ebitmap.compare(get_bitmap(bitmaps, drive0.device, 'bitmap0'))
ebitmap.compare(vm.get_bitmap(drive0.node, 'bitmap0', bitmaps=bitmaps))
# 2 - Writes and Reference Backup
bitmaps = perform_writes(drive0, 3)
ebitmap.dirty_group(3)
ebitmap.compare(get_bitmap(bitmaps, drive0.device, 'bitmap0'))
ebitmap.compare(vm.get_bitmap(drive0.node, 'bitmap0', bitmaps=bitmaps))
reference_backup(drive0, 2, fbackup2)
# 2 - Bitmap Backup (In failure modes, this is a recovery.)
job = backup(drive0, 2, bsync2, "bitmap",
bitmap="bitmap0", bitmap_mode=bsync_mode)
vm.run_job(job, auto_dismiss=True, auto_finalize=False)
bitmaps = query_bitmaps(vm)
log(bitmaps, indent=2)
bitmaps = vm.query_bitmaps()
log({'bitmaps': bitmaps}, indent=2)
log('')
if bsync_mode != 'never':
ebitmap.clear()
ebitmap.compare(get_bitmap(bitmaps, drive0.device, 'bitmap0'))
ebitmap.compare(vm.get_bitmap(drive0.node, 'bitmap0', bitmaps=bitmaps))
log('--- Cleanup ---\n')
vm.qmp_log("block-dirty-bitmap-remove",
node=drive0.name, name="bitmap0")
log(query_bitmaps(vm), indent=2)
node=drive0.node, name="bitmap0")
bitmaps = vm.query_bitmaps()
log({'bitmaps': bitmaps}, indent=2)
vm.shutdown()
log('')
@ -484,22 +456,19 @@ def test_backup_api():
'filename': drive0.path
}
drive0.node = 'drive0'
vm.qmp_log('blockdev-add',
filters=[iotests.filter_qmp_testfiles],
node_name="drive0",
node_name=drive0.node,
driver=drive0.fmt,
file=file_config)
drive0.node = 'drive0'
drive0.device = 'device0'
vm.qmp_log("device_add", id=drive0.device,
drive=drive0.name, driver="scsi-hd")
log('')
target0 = Drive(backup_path, vm=vm)
target0.create_target("backup_target", drive0.fmt, drive0.size)
log('')
vm.qmp_log("block-dirty-bitmap-add", node=drive0.name,
vm.qmp_log("block-dirty-bitmap-add", node=drive0.node,
name="bitmap0", granularity=GRANULARITY)
log('')
@ -538,7 +507,7 @@ def test_backup_api():
log("-- Sync mode {:s} tests --\n".format(sync_mode))
for bitmap in (None, 'bitmap404', 'bitmap0'):
for policy in error_cases[sync_mode][bitmap]:
blockdev_backup(drive0.vm, drive0.name, "backup_target",
blockdev_backup(drive0.vm, drive0.node, "backup_target",
sync_mode, job_id='api_job',
bitmap=bitmap, bitmap_mode=policy)
log('')

File diff suppressed because it is too large

View File

@ -405,6 +405,23 @@ _check_test_img()
$QEMU_IMG check "$@" -f $IMGFMT "$TEST_IMG" 2>&1
fi
) | _filter_testdir | _filter_qemu_img_check
# return real qemu_img check status, to analyze in
# _check_test_img_ignore_leaks
return ${PIPESTATUS[0]}
}
_check_test_img_ignore_leaks()
{
out=$(_check_test_img "$@")
status=$?
if [ $status = 3 ]; then
# This must correspond to success output in dump_human_image_check()
echo "No errors were found on the image."
return 0
fi
echo "$out"
return $status
}
_img_info()

View File

@ -641,6 +641,33 @@ class VM(qtest.QEMUQtestMachine):
return x
return None
def query_bitmaps(self):
res = self.qmp("query-named-block-nodes")
return {device['node-name']: device['dirty-bitmaps']
for device in res['return'] if 'dirty-bitmaps' in device}
def get_bitmap(self, node_name, bitmap_name, recording=None, bitmaps=None):
"""
get a specific bitmap from the object returned by query_bitmaps.
:param recording: If specified, filter results by the specified value.
:param bitmaps: If specified, use it instead of call query_bitmaps()
"""
if bitmaps is None:
bitmaps = self.query_bitmaps()
for bitmap in bitmaps[node_name]:
if bitmap.get('name', '') == bitmap_name:
if recording is None:
return bitmap
elif bitmap.get('recording') == recording:
return bitmap
return None
def check_bitmap_status(self, node_name, bitmap_name, fields):
ret = self.get_bitmap(node_name, bitmap_name)
return fields.items() <= ret.items()
index_re = re.compile(r'([^\[]+)\[([^\]]+)\]')
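
Taken together, query_bitmaps(), get_bitmap() and check_bitmap_status() replace the per-test helpers that 257 previously carried. A minimal usage sketch, assuming a running VM with a node named 'drive0' that already has a bitmap 'bitmap0' added via block-dirty-bitmap-add (the node and bitmap names are illustrative):

    bitmaps = vm.query_bitmaps()          # {node-name: [bitmap dicts, ...]}
    bitmap = vm.get_bitmap('drive0', 'bitmap0', bitmaps=bitmaps)
    if bitmap is not None:
        print(bitmap['count'], bitmap.get('recording'))
    # Or assert selected fields in one call:
    assert vm.check_bitmap_status('drive0', 'bitmap0', {'recording': True})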