block/block-copy: refactor task creation

Instead of just relying on the comment "Called only on full-dirty
region" in block_copy_task_create(), let's move the initial dirty-area
search directly into block_copy_task_create(). Let's also use the more
efficient bdrv_dirty_bitmap_next_dirty_area() instead of looping
through all non-dirty clusters.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20200429130847.28124-5-vsementsov@virtuozzo.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
Author: Vladimir Sementsov-Ogievskiy, 2020-04-29 16:08:46 +03:00 (committed by Max Reitz)
parent 1348a65774
commit 42ac214406
1 changed file with 46 additions and 34 deletions
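For readers unfamiliar with the bitmap API involved: the old loop skipped clean clusters one at a time with bdrv_dirty_bitmap_get(), while the refactored code fetches the whole first dirty area in a single bdrv_dirty_bitmap_next_dirty_area() call. The following self-contained toy model (plain C with a bool array and cluster units, not QEMU's BdrvDirtyBitmap byte-based API) is only a sketch of the semantics the patch relies on: find the first dirty run inside the requested range, capped at a maximum length, or report that nothing dirty is left.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy bitmap: one bool per cluster; clusters 5-7 and 12 are dirty. */
#define CLUSTERS 16
static bool dirty[CLUSTERS] = { [5] = true, [6] = true, [7] = true, [12] = true };

/*
 * Toy model of the bdrv_dirty_bitmap_next_dirty_area() semantics used by
 * the patch: search [*start, end) for the first dirty run, clamp it to
 * max, and return it through *start/*bytes.  Returns false if the range
 * contains no dirty clusters at all.
 */
static bool next_dirty_area(int64_t end, int64_t max, int64_t *start, int64_t *bytes)
{
    int64_t i = *start;

    while (i < end && !dirty[i]) {
        i++;                          /* skip the clean prefix in one call */
    }
    if (i == end) {
        return false;                 /* nothing dirty left in the range */
    }
    *start = i;
    while (i < end && dirty[i] && i - *start < max) {
        i++;                          /* extend over the dirty run, up to max */
    }
    *bytes = i - *start;
    return true;
}

int main(void)
{
    int64_t offset = 0, bytes = 0;

    /* One call replaces the old cluster-by-cluster "skip if clean" loop. */
    if (next_dirty_area(CLUSTERS, 4, &offset, &bytes)) {
        printf("first dirty area: offset=%" PRId64 " bytes=%" PRId64 "\n",
               offset, bytes);        /* prints offset=5 bytes=3 */
    }
    return 0;
}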

block/block-copy.c

@@ -32,6 +32,11 @@ typedef struct BlockCopyTask {
     CoQueue wait_queue; /* coroutines blocked on this task */
 } BlockCopyTask;
 
+static int64_t task_end(BlockCopyTask *task)
+{
+    return task->offset + task->bytes;
+}
+
 typedef struct BlockCopyState {
     /*
      * BdrvChild objects are not owned or managed by block-copy. They are
@@ -106,17 +111,29 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
     return true;
 }
 
-/* Called only on full-dirty region */
+/*
+ * Search for the first dirty area in offset/bytes range and create task at
+ * the beginning of it.
+ */
 static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
                                              int64_t offset, int64_t bytes)
 {
-    BlockCopyTask *task = g_new(BlockCopyTask, 1);
+    BlockCopyTask *task;
+
+    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
+                                           offset, offset + bytes,
+                                           s->copy_size, &offset, &bytes))
+    {
+        return NULL;
+    }
 
+    /* region is dirty, so no existent tasks possible in it */
     assert(!find_conflicting_task(s, offset, bytes));
 
     bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
     s->in_flight_bytes += bytes;
 
+    task = g_new(BlockCopyTask, 1);
     *task = (BlockCopyTask) {
         .s = s,
         .offset = offset,
@@ -466,6 +483,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
 {
     int ret = 0;
     bool found_dirty = false;
+    int64_t end = offset + bytes;
 
     /*
      * block_copy() user is responsible for keeping source and target in same
@@ -479,58 +497,52 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
 
     while (bytes) {
         g_autofree BlockCopyTask *task = NULL;
-        int64_t next_zero, cur_bytes, status_bytes;
+        int64_t status_bytes;
 
-        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
-            trace_block_copy_skip(s, offset);
-            offset += s->cluster_size;
-            bytes -= s->cluster_size;
-            continue; /* already copied */
+        task = block_copy_task_create(s, offset, bytes);
+        if (!task) {
+            /* No more dirty bits in the bitmap */
+            trace_block_copy_skip_range(s, offset, bytes);
+            break;
+        }
+        if (task->offset > offset) {
+            trace_block_copy_skip_range(s, offset, task->offset - offset);
         }
 
         found_dirty = true;
 
-        cur_bytes = MIN(bytes, s->copy_size);
-
-        next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
-                                                cur_bytes);
-        if (next_zero >= 0) {
-            assert(next_zero > offset); /* offset is dirty */
-            assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
-            cur_bytes = next_zero - offset;
-        }
-        task = block_copy_task_create(s, offset, cur_bytes);
-
-        ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
+        ret = block_copy_block_status(s, task->offset, task->bytes,
+                                      &status_bytes);
         assert(ret >= 0); /* never fail */
-        cur_bytes = MIN(cur_bytes, status_bytes);
-        block_copy_task_shrink(task, cur_bytes);
+        if (status_bytes < task->bytes) {
+            block_copy_task_shrink(task, status_bytes);
+        }
         if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
             block_copy_task_end(task, 0);
             progress_set_remaining(s->progress,
                                    bdrv_get_dirty_count(s->copy_bitmap) +
                                    s->in_flight_bytes);
-            trace_block_copy_skip_range(s, offset, status_bytes);
-            offset += status_bytes;
-            bytes -= status_bytes;
+            trace_block_copy_skip_range(s, task->offset, task->bytes);
+            offset = task_end(task);
+            bytes = end - offset;
             continue;
         }
 
-        trace_block_copy_process(s, offset);
+        trace_block_copy_process(s, task->offset);
 
-        co_get_from_shres(s->mem, cur_bytes);
-        ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
-                                 error_is_read);
-        co_put_to_shres(s->mem, cur_bytes);
+        co_get_from_shres(s->mem, task->bytes);
+        ret = block_copy_do_copy(s, task->offset, task->bytes,
                                 ret & BDRV_BLOCK_ZERO, error_is_read);
+        co_put_to_shres(s->mem, task->bytes);
         block_copy_task_end(task, ret);
         if (ret < 0) {
             return ret;
         }
 
-        progress_work_done(s->progress, cur_bytes);
-        s->progress_bytes_callback(cur_bytes, s->progress_opaque);
-        offset += cur_bytes;
-        bytes -= cur_bytes;
+        progress_work_done(s->progress, task->bytes);
+        s->progress_bytes_callback(task->bytes, s->progress_opaque);
+        offset = task_end(task);
+        bytes = end - offset;
     }
 
     return found_dirty;
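
One detail worth spelling out: because the task may start past the requested offset (the clean gap in front of it is skipped inside block_copy_task_create()) and may later be shrunk by block_copy_task_shrink(), the loop can no longer simply subtract a pre-computed chunk size from bytes. Instead it jumps to task_end(task) and recomputes the remainder against the saved end. A minimal standalone sketch with made-up numbers:

#include <assert.h>
#include <stdint.h>

/* Hypothetical numbers, only to illustrate the bookkeeping above. */
int main(void)
{
    const int64_t MiB = 1024 * 1024;

    int64_t offset = 0, bytes = 10 * MiB;     /* caller's request              */
    int64_t end = offset + bytes;             /* fixed once, as in the hunk    */

    /* Say the first dirty area found during task creation is [3 MiB, 5 MiB). */
    int64_t task_offset = 3 * MiB, task_bytes = 2 * MiB;

    /* New scheme: jump to the end of the task, recompute the remainder. */
    offset = task_offset + task_bytes;        /* task_end(task) == 5 MiB       */
    bytes = end - offset;                     /* 5 MiB still left to examine   */
    assert(bytes == 5 * MiB);

    /*
     * A plain "bytes -= task_bytes" would have left 8 MiB and re-counted the
     * clean 3 MiB gap that the dirty-area search already skipped over.
     */
    return 0;
}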