migration: split migration_incoming_co

Originally, migration_incoming_co was introduced by
25d0c16f62
   "migration: Switch to COLO process after finishing loadvm"
so that COLO code could enter the incoming coroutine at one specific
yield point, added by that same commit.

Later, in 923709896b
 "migration: poll the cm event for destination qemu"
we reused this variable to wake the migration incoming coroutine from
RDMA code.

That was a doubtful idea. Entering a coroutine is a very fragile thing:
you must be absolutely sure which yield point you are going to enter.
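
For illustration only (this sketch is not part of the patch; the variable
and function names are made up, only the qemu_coroutine_* API is real):

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    static Coroutine *incoming;          /* hypothetical global */

    static void coroutine_fn incoming_co(void *opaque)
    {
        qemu_coroutine_yield();          /* yield point A: e.g. inside loadvm */
        qemu_coroutine_yield();          /* yield point B: e.g. COLO failover wait */
    }

    static void waker(void)
    {
        /*
         * qemu_coroutine_enter() resumes the coroutine at whichever yield
         * point it is currently suspended on; the pointer alone does not
         * tell the waker whether that is A or B.
         */
        if (incoming) {
            qemu_coroutine_enter(incoming);
        }
    }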

I don't know how safe it is to enter the coroutine during
qemu_loadvm_state(), which I think is what RDMA wants to do. But RDMA
certainly shouldn't enter the special COLO-related yield point. Likewise,
COLO code doesn't want to enter the coroutine during qemu_loadvm_state();
it wants to enter its own specific yield point.

Also, when 8e48ac9586
 "COLO: Add block replication into colo process" added the
bdrv_invalidate_cache_all() call (now called bdrv_activate_all()),
it became possible to enter the migration incoming coroutine during
that call, which is wrong too.

So, let's make these things separate and disjoint: loadvm_co for RDMA,
non-NULL during qemu_loadvm_state(), and colo_incoming_co for COLO,
non-NULL only around its specific yield point.
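
Condensed, the incoming coroutine then looks like this (abridged from the
hunks below; unrelated code omitted):

    mis->loadvm_co = qemu_coroutine_self();
    ret = qemu_loadvm_state(mis->from_src_file);  /* RDMA may wake us only here */
    mis->loadvm_co = NULL;

    ...

    mis->colo_incoming_co = qemu_coroutine_self();
    qemu_coroutine_yield();                       /* COLO failover wakes us only here */
    mis->colo_incoming_co = NULL;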

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Message-Id: <20230515130640.46035-3-vsementsov@yandex-team.ru>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Vladimir Sementsov-Ogievskiy 2023-05-15 16:06:39 +03:00 committed by Juan Quintela
parent 6c1e3906ce
commit dd42ce24a3
4 changed files with 18 additions and 8 deletions

migration/colo.c

@@ -145,8 +145,8 @@ static void secondary_vm_do_failover(void)
     qemu_sem_post(&mis->colo_incoming_sem);
     /* For Secondary VM, jump to incoming co */
-    if (mis->migration_incoming_co) {
-        qemu_coroutine_enter(mis->migration_incoming_co);
+    if (mis->colo_incoming_co) {
+        qemu_coroutine_enter(mis->colo_incoming_co);
     }
 }

migration/migration.c

@@ -520,12 +520,14 @@ process_incoming_migration_co(void *opaque)
         goto fail;
     }
-    mis->migration_incoming_co = qemu_coroutine_self();
     mis->largest_page_size = qemu_ram_pagesize_largest();
     postcopy_state_set(POSTCOPY_INCOMING_NONE);
     migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                       MIGRATION_STATUS_ACTIVE);
+    mis->loadvm_co = qemu_coroutine_self();
     ret = qemu_loadvm_state(mis->from_src_file);
+    mis->loadvm_co = NULL;
     ps = postcopy_state_get();
     trace_process_incoming_migration_co_end(ret, ps);
@@ -566,7 +568,10 @@ process_incoming_migration_co(void *opaque)
         qemu_thread_create(&colo_incoming_thread, "COLO incoming",
                            colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
+        mis->colo_incoming_co = qemu_coroutine_self();
         qemu_coroutine_yield();
+        mis->colo_incoming_co = NULL;
         qemu_mutex_unlock_iothread();
         /* Wait checkpoint incoming thread exit before free resource */
@@ -578,7 +583,6 @@ process_incoming_migration_co(void *opaque)
     mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
     qemu_bh_schedule(mis->bh);
-    mis->migration_incoming_co = NULL;
     return;
 fail:
     local_err = NULL;

migration/migration.h

@@ -162,8 +162,15 @@ struct MigrationIncomingState {
     int state;
+    /*
+     * The incoming migration coroutine, non-NULL during qemu_loadvm_state().
+     * Used to wake the migration incoming coroutine from rdma code. How much is
+     * it safe - it's a question.
+     */
+    Coroutine *loadvm_co;
     /* The coroutine we should enter (back) after failover */
-    Coroutine *migration_incoming_co;
+    Coroutine *colo_incoming_co;
     QemuSemaphore colo_incoming_sem;
     /*

View File

@@ -3342,9 +3342,8 @@ static void rdma_cm_poll_handler(void *opaque)
            }
        }
        rdma_ack_cm_event(cm_event);
-       if (mis->migration_incoming_co) {
-           qemu_coroutine_enter(mis->migration_incoming_co);
+       if (mis->loadvm_co) {
+           qemu_coroutine_enter(mis->loadvm_co);
        }
        return;
    }