block/backup: move in-flight requests handling from backup to block-copy

Move the synchronization mechanism to block-copy, so that one
block-copy instance can be used by the backup job and the backup-top
filter in parallel.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-id: 20191001131409.14202-2-vsementsov@virtuozzo.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
Vladimir Sementsov-Ogievskiy 2019-10-01 16:14:05 +03:00 committed by Max Reitz
parent f2d86ade4d
commit a6ffe1998c
3 changed files with 51 additions and 52 deletions
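Before reading the diff, it may help to see the pattern being moved in isolation: wait until no in-flight request overlaps the range, register the new request, perform the copy, then unregister and wake any waiters. The sketch below is a minimal, self-contained model of that idea in plain C, using POSIX threads and a condition variable in place of QEMU coroutines and CoQueue; every name in it (InFlightReq, wait_overlapping, req_begin, req_end, copy_range) is illustrative and not part of the QEMU API.

/*
 * Stand-alone model of the in-flight request serialization that this
 * patch moves from backup into block-copy.  Build with: cc demo.c -lpthread
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

typedef struct InFlightReq {
    int64_t start, end;                 /* byte range [start, end) being copied */
    struct InFlightReq *next;
} InFlightReq;

static InFlightReq *inflight;           /* list of requests in flight */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t req_done = PTHREAD_COND_INITIALIZER;

/* Block (lock held) until no in-flight request overlaps [start, end). */
static void wait_overlapping(int64_t start, int64_t end)
{
    bool retry = true;

    while (retry) {
        retry = false;
        for (InFlightReq *r = inflight; r; r = r->next) {
            if (end > r->start && start < r->end) {
                pthread_cond_wait(&req_done, &lock);
                retry = true;           /* list changed, rescan from scratch */
                break;
            }
        }
    }
}

/* Register a request so later callers serialize against it. */
static void req_begin(InFlightReq *req, int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    req->next = inflight;
    inflight = req;
}

/* Unregister a finished request and wake everyone waiting on overlaps. */
static void req_end(InFlightReq *req)
{
    InFlightReq **p = &inflight;

    while (*p != req) {
        p = &(*p)->next;
    }
    *p = req->next;
    pthread_cond_broadcast(&req_done);
}

/* "Copy" a byte range, serializing against overlapping in-flight copies. */
static void *copy_range(void *opaque)
{
    int64_t *range = opaque;
    InFlightReq req;

    pthread_mutex_lock(&lock);
    wait_overlapping(range[0], range[1]);
    req_begin(&req, range[0], range[1]);
    pthread_mutex_unlock(&lock);

    printf("copying [%" PRId64 ", %" PRId64 ")\n", range[0], range[1]);
    usleep(100 * 1000);                 /* pretend to do the actual I/O */

    pthread_mutex_lock(&lock);
    req_end(&req);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    int64_t r1[2] = {0, 1 << 16};
    int64_t r2[2] = {1 << 15, 1 << 17};  /* overlaps r1, so it must wait */

    pthread_create(&a, NULL, copy_range, r1);
    pthread_create(&b, NULL, copy_range, r2);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}

In QEMU itself no mutex is needed around the list: block_copy() runs in coroutines that yield cooperatively within an AioContext, so the per-request CoQueue (qemu_co_queue_wait() / qemu_co_queue_restart_all()) plays the role of the condition variable here.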

block/backup.c

@@ -29,13 +29,6 @@
 
 #define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
 
-typedef struct CowRequest {
-    int64_t start_byte;
-    int64_t end_byte;
-    QLIST_ENTRY(CowRequest) list;
-    CoQueue wait_queue; /* coroutines blocked on this request */
-} CowRequest;
-
 typedef struct BackupBlockJob {
     BlockJob common;
     BlockDriverState *source_bs;
@@ -51,50 +44,12 @@ typedef struct BackupBlockJob {
     uint64_t bytes_read;
     int64_t cluster_size;
     NotifierWithReturn before_write;
-    QLIST_HEAD(, CowRequest) inflight_reqs;
 
     BlockCopyState *bcs;
 } BackupBlockJob;
 
 static const BlockJobDriver backup_job_driver;
 
-/* See if in-flight requests overlap and wait for them to complete */
-static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
-                                                        int64_t start,
-                                                        int64_t end)
-{
-    CowRequest *req;
-    bool retry;
-
-    do {
-        retry = false;
-        QLIST_FOREACH(req, &job->inflight_reqs, list) {
-            if (end > req->start_byte && start < req->end_byte) {
-                qemu_co_queue_wait(&req->wait_queue, NULL);
-                retry = true;
-                break;
-            }
-        }
-    } while (retry);
-}
-
-/* Keep track of an in-flight request */
-static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
-                              int64_t start, int64_t end)
-{
-    req->start_byte = start;
-    req->end_byte = end;
-    qemu_co_queue_init(&req->wait_queue);
-    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
-}
-
-/* Forget about a completed request */
-static void cow_request_end(CowRequest *req)
-{
-    QLIST_REMOVE(req, list);
-    qemu_co_queue_restart_all(&req->wait_queue);
-}
-
 static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
 {
     BackupBlockJob *s = opaque;
@@ -116,7 +71,6 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                       bool *error_is_read,
                                       bool is_write_notifier)
 {
-    CowRequest cow_request;
     int ret = 0;
     int64_t start, end; /* bytes */
 
@@ -127,14 +81,9 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
 
     trace_backup_do_cow_enter(job, start, offset, bytes);
 
-    wait_for_overlapping_requests(job, start, end);
-    cow_request_begin(&cow_request, job, start, end);
-
     ret = block_copy(job->bcs, start, end - start, error_is_read,
                      is_write_notifier);
 
-    cow_request_end(&cow_request);
-
     trace_backup_do_cow_return(job, offset, bytes, ret);
 
     qemu_co_rwlock_unlock(&job->flush_rwlock);
@@ -316,7 +265,6 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     int ret = 0;
 
-    QLIST_INIT(&s->inflight_reqs);
     qemu_co_rwlock_init(&s->flush_rwlock);
 
     backup_init_copy_bitmap(s);

block/block-copy.c

@@ -19,6 +19,41 @@
 #include "block/block-copy.h"
 #include "sysemu/block-backend.h"
 
+static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
+                                                       int64_t start,
+                                                       int64_t end)
+{
+    BlockCopyInFlightReq *req;
+    bool waited;
+
+    do {
+        waited = false;
+        QLIST_FOREACH(req, &s->inflight_reqs, list) {
+            if (end > req->start_byte && start < req->end_byte) {
+                qemu_co_queue_wait(&req->wait_queue, NULL);
+                waited = true;
+                break;
+            }
+        }
+    } while (waited);
+}
+
+static void block_copy_inflight_req_begin(BlockCopyState *s,
+                                          BlockCopyInFlightReq *req,
+                                          int64_t start, int64_t end)
+{
+    req->start_byte = start;
+    req->end_byte = end;
+    qemu_co_queue_init(&req->wait_queue);
+    QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
+}
+
+static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
+{
+    QLIST_REMOVE(req, list);
+    qemu_co_queue_restart_all(&req->wait_queue);
+}
+
 void block_copy_state_free(BlockCopyState *s)
 {
     if (!s) {
@@ -79,6 +114,8 @@ BlockCopyState *block_copy_state_new(
     s->use_copy_range =
         !(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;
 
+    QLIST_INIT(&s->inflight_reqs);
+
     /*
      * We just allow aio context change on our block backends. block_copy() user
      * (now it's only backup) is responsible for source and target being in same
@@ -266,6 +303,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
     int64_t end = bytes + start; /* bytes */
     void *bounce_buffer = NULL;
     int64_t status_bytes;
+    BlockCopyInFlightReq req;
 
     /*
      * block_copy() user is responsible for keeping source and target in same
@@ -276,6 +314,9 @@ int coroutine_fn block_copy(BlockCopyState *s,
     assert(QEMU_IS_ALIGNED(start, s->cluster_size));
     assert(QEMU_IS_ALIGNED(end, s->cluster_size));
 
+    block_copy_wait_inflight_reqs(s, start, bytes);
+    block_copy_inflight_req_begin(s, &req, start, end);
+
     while (start < end) {
         int64_t dirty_end;
 
@@ -329,5 +370,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
         qemu_vfree(bounce_buffer);
     }
 
+    block_copy_inflight_req_end(&req);
+
     return ret;
 }

include/block/block-copy.h

@@ -17,6 +17,13 @@
 
 #include "block/block.h"
 
+typedef struct BlockCopyInFlightReq {
+    int64_t start_byte;
+    int64_t end_byte;
+    QLIST_ENTRY(BlockCopyInFlightReq) list;
+    CoQueue wait_queue; /* coroutines blocked on this request */
+} BlockCopyInFlightReq;
+
 typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
 typedef void (*ProgressResetCallbackFunc)(void *opaque);
 typedef struct BlockCopyState {
@@ -27,6 +34,7 @@ typedef struct BlockCopyState {
     bool use_copy_range;
     int64_t copy_range_size;
     uint64_t len;
+    QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
 
     BdrvRequestFlags write_flags;