block: Protect bs->backing with graph_lock

Almost all functions that access bs->backing already take the graph
lock now. Add locking to the remaining users and finally annotate the
struct field itself as protected by the graph lock.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20231027155333.420094-18-kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
commit 004915a96a (branch: master)
parent ccd6a37947
Kevin Wolf, 2023-10-27 17:53:26 +02:00
9 changed files with 70 additions and 27 deletions
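All of the hunks below apply variants of one pattern: a read of bs->backing that is not yet covered by a graph-lock annotation gets an explicit reader lock. A minimal sketch of the simplest variant, assuming QEMU's block/graph-lock.h; example_backing_bs() is a hypothetical helper, not code from this commit:

#include "qemu/osdep.h"
#include "block/block_int.h"
#include "block/graph-lock.h"

/* Hypothetical helper mirroring the new locking in bdrv_set_backing_hd() */
static BlockDriverState *example_backing_bs(BlockDriverState *bs)
{
    BlockDriverState *backing_bs;

    GLOBAL_STATE_CODE();

    /* Hold the graph reader lock only around the bs->backing dereference */
    bdrv_graph_rdlock_main_loop();
    backing_bs = bs->backing ? bs->backing->bs : NULL;
    bdrv_graph_rdunlock_main_loop();

    return backing_bs;
}

A caller that keeps using the returned node after dropping the lock would also take a reference, as bdrv_set_backing_hd() does with bdrv_ref() in the first hunk below.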

block.c

@@ -3560,10 +3560,14 @@ out:
int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
Error **errp)
{
BlockDriverState *drain_bs = bs->backing ? bs->backing->bs : bs;
BlockDriverState *drain_bs;
int ret;
GLOBAL_STATE_CODE();
bdrv_graph_rdlock_main_loop();
drain_bs = bs->backing ? bs->backing->bs : bs;
bdrv_graph_rdunlock_main_loop();
bdrv_ref(drain_bs);
bdrv_drained_begin(drain_bs);
bdrv_graph_wrlock(backing_hd);
@@ -3602,6 +3606,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
Error *local_err = NULL;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bs->backing != NULL) {
goto free_exit;
@@ -3643,10 +3648,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
implicit_backing = !strcmp(bs->auto_backing_file, bs->backing_file);
}
bdrv_graph_rdlock_main_loop();
backing_filename = bdrv_get_full_backing_filename(bs, &local_err);
bdrv_graph_rdunlock_main_loop();
if (local_err) {
ret = -EINVAL;
error_propagate(errp, local_err);
@@ -3677,9 +3679,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
}
if (implicit_backing) {
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(backing_hd);
bdrv_graph_rdunlock_main_loop();
pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
backing_hd->filename);
}
@@ -4750,8 +4750,8 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
{
BlockDriverState *bs = reopen_state->bs;
BlockDriverState *new_child_bs;
BlockDriverState *old_child_bs = is_backing ? child_bs(bs->backing) :
child_bs(bs->file);
BlockDriverState *old_child_bs;
const char *child_name = is_backing ? "backing" : "file";
QObject *value;
const char *str;
@@ -4797,6 +4797,7 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
g_assert_not_reached();
}
old_child_bs = is_backing ? child_bs(bs->backing) : child_bs(bs->file);
if (old_child_bs == new_child_bs) {
ret = 0;
goto out_rdlock;
@@ -5008,13 +5009,16 @@ bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
* file or if the image file has a backing file name as part of
* its metadata. Otherwise the 'backing' option can be omitted.
*/
bdrv_graph_rdlock_main_loop();
if (drv->supports_backing && reopen_state->backing_missing &&
(reopen_state->bs->backing || reopen_state->bs->backing_file[0])) {
error_setg(errp, "backing is missing for '%s'",
reopen_state->bs->node_name);
bdrv_graph_rdunlock_main_loop();
ret = -EINVAL;
goto error;
}
bdrv_graph_rdunlock_main_loop();
/*
* Allow changing the 'backing' option. The new value can be
@@ -5204,10 +5208,11 @@ static void bdrv_close(BlockDriverState *bs)
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
bdrv_unref_child(bs, child);
}
bdrv_graph_wrunlock();
assert(!bs->backing);
assert(!bs->file);
bdrv_graph_wrunlock();
g_free(bs->opaque);
bs->opaque = NULL;
qatomic_set(&bs->copy_on_read, 0);
@@ -5531,7 +5536,9 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
GLOBAL_STATE_CODE();
bdrv_graph_rdlock_main_loop();
assert(!bs_new->backing);
bdrv_graph_rdunlock_main_loop();
old_context = bdrv_get_aio_context(bs_top);
bdrv_drained_begin(bs_top);
@@ -8115,7 +8122,7 @@ static bool append_strong_runtime_options(QDict *d, BlockDriverState *bs)
/* Note: This function may return false positives; it may return true
* even if opening the backing file specified by bs's image header
* would result in exactly bs->backing. */
static bool bdrv_backing_overridden(BlockDriverState *bs)
static bool GRAPH_RDLOCK bdrv_backing_overridden(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
if (bs->backing) {

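The block.c hunks above also use two other variants; a short sketch under the same assumptions (both helper names are made up): GRAPH_RDLOCK_GUARD_MAINLOOP() keeps the reader lock until the end of the scope, and a GRAPH_RDLOCK annotation moves the obligation to the caller, where clang's Thread Safety Analysis checks it.

#include "qemu/osdep.h"
#include "block/block_int.h"
#include "block/graph-lock.h"

/* Callers must already hold the graph reader lock (checked by clang TSA) */
static bool GRAPH_RDLOCK example_has_backing(BlockDriverState *bs)
{
    return bs->backing != NULL;
}

static int example_check_backing(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();  /* reader lock held for the rest of the scope */

    return example_has_backing(bs) ? 0 : -ENOENT;
}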
block/commit.c

@@ -95,7 +95,10 @@ static void commit_abort(Job *job)
* XXX Can (or should) we somehow keep 'consistent read' blocked even
* after the failed/cancelled commit job is gone? If we already wrote
* something to base, the intermediate images aren't valid any more. */
bdrv_graph_rdlock_main_loop();
commit_top_backing_bs = s->commit_top_bs->backing->bs;
bdrv_graph_rdunlock_main_loop();
bdrv_drained_begin(commit_top_backing_bs);
bdrv_graph_wrlock(commit_top_backing_bs);
bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort);
@@ -219,7 +222,7 @@ bdrv_commit_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
static void bdrv_commit_top_refresh_filename(BlockDriverState *bs)
static GRAPH_RDLOCK void bdrv_commit_top_refresh_filename(BlockDriverState *bs)
{
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
bs->backing->bs->filename);

block/mirror.c

@@ -479,7 +479,7 @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
return bytes_handled;
}
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s)
{
BlockDriverState *source = s->mirror_top_bs->backing->bs;
MirrorOp *pseudo_op;
@@ -839,14 +839,18 @@ static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
}
}
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
{
int64_t offset;
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
BlockDriverState *bs;
BlockDriverState *target_bs = blk_bs(s->target);
int ret;
int64_t count;
bdrv_graph_co_rdlock();
bs = s->mirror_top_bs->backing->bs;
bdrv_graph_co_rdunlock();
if (s->zero_target) {
if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
@@ -926,7 +930,7 @@ static int coroutine_fn mirror_flush(MirrorBlockJob *s)
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
BlockDriverState *bs;
MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
BlockDriverState *target_bs = blk_bs(s->target);
bool need_drain = true;
@@ -938,6 +942,10 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
checking for a NULL string */
int ret = 0;
bdrv_graph_co_rdlock();
bs = bdrv_filter_bs(s->mirror_top_bs);
bdrv_graph_co_rdunlock();
if (job_is_cancelled(&s->common.job)) {
goto immediate_exit;
}
@@ -1070,7 +1078,9 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
mirror_wait_for_free_in_flight_slot(s);
continue;
} else if (cnt != 0) {
bdrv_graph_co_rdlock();
mirror_iteration(s);
bdrv_graph_co_rdunlock();
}
}
@@ -1640,7 +1650,7 @@ bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
offset, bytes, NULL, 0);
}
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
static void GRAPH_RDLOCK bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
if (bs->backing == NULL) {
/* we can be here after failed bdrv_attach_child in

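The mirror hunks above run in coroutine context, where the main-loop helpers cannot be used; the coroutine variants take the same reader lock instead. A minimal sketch (the helper name is hypothetical):

#include "qemu/osdep.h"
#include "block/block_int.h"
#include "block/graph-lock.h"

static BlockDriverState * coroutine_fn example_co_backing_bs(BlockDriverState *bs)
{
    BlockDriverState *backing_bs;

    /* Coroutine counterpart of bdrv_graph_rdlock_main_loop() */
    bdrv_graph_co_rdlock();
    backing_bs = bs->backing ? bs->backing->bs : NULL;
    bdrv_graph_co_rdunlock();

    return backing_bs;
}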
block/qed.c

@@ -1138,7 +1138,7 @@ out:
/**
* Check if the QED_F_NEED_CHECK bit should be set during allocating write
*/
static bool qed_should_set_need_check(BDRVQEDState *s)
static bool GRAPH_RDLOCK qed_should_set_need_check(BDRVQEDState *s)
{
/* The flush before L2 update path ensures consistency */
if (s->bs->backing) {

block/replication.c

@@ -363,6 +363,9 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
BdrvChild *hidden_disk, *secondary_disk;
BlockReopenQueue *reopen_queue = NULL;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
/*
* s->hidden_disk and s->secondary_disk may not be set yet, as they will
* only be set after the children are writable.
@@ -496,9 +499,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
case REPLICATION_MODE_PRIMARY:
break;
case REPLICATION_MODE_SECONDARY:
bdrv_graph_rdlock_main_loop();
active_disk = bs->file;
if (!active_disk || !active_disk->bs || !active_disk->bs->backing) {
error_setg(errp, "Active disk doesn't have backing file");
bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
return;
}
@@ -506,11 +511,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
hidden_disk = active_disk->bs->backing;
if (!hidden_disk->bs || !hidden_disk->bs->backing) {
error_setg(errp, "Hidden disk doesn't have backing file");
bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
return;
}
bdrv_graph_rdlock_main_loop();
secondary_disk = hidden_disk->bs->backing;
if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) {
error_setg(errp, "The secondary disk doesn't have block backend");

block/vmdk.c

@@ -380,7 +380,7 @@ out:
return ret;
}
static int coroutine_fn vmdk_is_cid_valid(BlockDriverState *bs)
static int coroutine_fn GRAPH_RDLOCK vmdk_is_cid_valid(BlockDriverState *bs)
{
BDRVVmdkState *s = bs->opaque;
uint32_t cur_pcid;
@@ -3044,8 +3044,9 @@ vmdk_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
return 0;
}
static void vmdk_gather_child_options(BlockDriverState *bs, QDict *target,
bool backing_overridden)
static void GRAPH_RDLOCK
vmdk_gather_child_options(BlockDriverState *bs, QDict *target,
bool backing_overridden)
{
/* No children but file and backing can be explicitly specified (TODO) */
qdict_put(target, "file",

include/block/block_int-common.h

@@ -1178,7 +1178,7 @@ struct BlockDriverState {
* are connected with BdrvChildRole.
*/
QLIST_HEAD(, BdrvChild GRAPH_RDLOCK_PTR) children;
BdrvChild *backing;
BdrvChild * GRAPH_RDLOCK_PTR backing;
BdrvChild *file;
QLIST_HEAD(, BdrvChild GRAPH_RDLOCK_PTR) parents;

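The GRAPH_RDLOCK_PTR annotation above is what turns unlocked accesses to bs->backing into compile-time diagnostics when QEMU is built with clang's Thread Safety Analysis. A sketch of the effect on a made-up structure (only the macros and lock functions are QEMU's; the types and helper are hypothetical):

#include "qemu/osdep.h"
#include "block/graph-lock.h"

typedef struct ExampleChild ExampleChild;

typedef struct ExampleNode {
    /* May only be accessed while the graph reader (or writer) lock is held */
    ExampleChild * GRAPH_RDLOCK_PTR backing;
} ExampleNode;

static ExampleChild *example_peek_backing(ExampleNode *node)
{
    ExampleChild *child;

    /* Removing this lock/unlock pair makes clang TSA flag the access below */
    bdrv_graph_rdlock_main_loop();
    child = node->backing;
    bdrv_graph_rdunlock_main_loop();

    return child;
}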
tests/unit/test-bdrv-drain.c

@@ -218,8 +218,14 @@ static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *
}
}
static void test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type,
bool recursive)
/*
* Locking the block graph would be a bit cumbersome here because this function
* is called both in coroutine and non-coroutine context. We know this is a test
* and nothing else is running, so don't bother with TSA.
*/
static void coroutine_mixed_fn TSA_NO_TSA
test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type,
bool recursive)
{
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *backing = bs->backing->bs;
@@ -307,8 +313,14 @@ static void test_drv_cb_co_drain(void)
blk_unref(blk);
}
static void test_quiesce_common(BlockBackend *blk, enum drain_type drain_type,
bool recursive)
/*
* Locking the block graph would be a bit cumbersome here because this function
* is called both in coroutine and non-coroutine context. We know this is a test
* and nothing else is running, so don't bother with TSA.
*/
static void coroutine_mixed_fn TSA_NO_TSA
test_quiesce_common(BlockBackend *blk, enum drain_type drain_type,
bool recursive)
{
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *backing = bs->backing->bs;
@@ -1868,6 +1880,8 @@ static void bdrv_replace_test_drain_end(BlockDriverState *bs)
{
BDRVReplaceTestState *s = bs->opaque;
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!s->setup_completed) {
return;
}

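The drain test helpers above opt out instead of locking: TSA_NO_TSA (from qemu/clang-tsa.h) disables the analysis for a single function, which is tolerable here because the helpers run both inside and outside coroutine context and nothing else touches the graph during the test. A sketch with a hypothetical helper:

#include "qemu/osdep.h"
#include "qemu/clang-tsa.h"
#include "block/block_int.h"

/* Unlocked on purpose: single-threaded test code, analysis disabled */
static BlockDriverState * TSA_NO_TSA
example_unlocked_backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}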
tests/unit/test-bdrv-graph-mod.c

@@ -206,15 +206,18 @@ static void test_should_update_child(void)
bdrv_set_backing_hd(target, bs, &error_abort);
g_assert(target->backing->bs == bs);
bdrv_graph_wrlock(NULL);
g_assert(target->backing->bs == bs);
bdrv_attach_child(filter, target, "target", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock();
aio_context_acquire(qemu_get_aio_context());
bdrv_append(filter, bs, &error_abort);
aio_context_release(qemu_get_aio_context());
bdrv_graph_rdlock_main_loop();
g_assert(target->backing->bs == bs);
bdrv_graph_rdunlock_main_loop();
bdrv_unref(filter);
bdrv_unref(bs);