migration: move vm_old_running into global state

Previously, this flag was passed around as a function parameter.  Let's
move it into MigrationState just like many other variables that hold
migration state, renaming it to vm_was_running.

One thing to mention is that for postcopy, we actually don't need this
knowledge at all since postcopy can't resume a VM even if it fails (we
can see that from the old code too: when we try to resume we also check
against "entered_postcopy" variable).  So further we do this:

- in postcopy_start(), we don't update vm_old_running since it is
  useless there
- in migration_thread(), we don't need to check entered_postcopy when
  resuming, since it's only used for precopy.

A comment is also added to the variable definition to document this.

Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
master
Peter Xu 2018-01-03 20:20:09 +08:00 committed by Juan Quintela
parent 4af246a34e
commit 7287cbd46e
2 changed files with 13 additions and 10 deletions

View File

@ -1302,6 +1302,7 @@ MigrationState *migrate_init(void)
s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
s->total_time = 0; s->total_time = 0;
s->vm_was_running = false;
return s; return s;
} }
@ -1885,7 +1886,7 @@ static int await_return_path_close_on_source(MigrationState *ms)
* Switch from normal iteration to postcopy * Switch from normal iteration to postcopy
* Returns non-0 on error * Returns non-0 on error
*/ */
static int postcopy_start(MigrationState *ms, bool *old_vm_running) static int postcopy_start(MigrationState *ms)
{ {
int ret; int ret;
QIOChannelBuffer *bioc; QIOChannelBuffer *bioc;
@ -1903,7 +1904,6 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
trace_postcopy_start_set_run(); trace_postcopy_start_set_run();
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
*old_vm_running = runstate_is_running();
global_state_store(); global_state_store();
ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
if (ret < 0) { if (ret < 0) {
@ -2094,11 +2094,9 @@ static int migration_maybe_pause(MigrationState *s,
* *
* @s: Current migration state * @s: Current migration state
* @current_active_state: The migration state we expect to be in * @current_active_state: The migration state we expect to be in
* @*old_vm_running: Pointer to old_vm_running flag
* @*start_time: Pointer to time to update * @*start_time: Pointer to time to update
*/ */
static void migration_completion(MigrationState *s, int current_active_state, static void migration_completion(MigrationState *s, int current_active_state,
bool *old_vm_running,
int64_t *start_time) int64_t *start_time)
{ {
int ret; int ret;
@ -2107,7 +2105,7 @@ static void migration_completion(MigrationState *s, int current_active_state,
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
*start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
*old_vm_running = runstate_is_running(); s->vm_was_running = runstate_is_running();
ret = global_state_store(); ret = global_state_store();
if (!ret) { if (!ret) {
@ -2213,7 +2211,6 @@ static void *migration_thread(void *opaque)
int64_t threshold_size = 0; int64_t threshold_size = 0;
int64_t start_time = initial_time; int64_t start_time = initial_time;
int64_t end_time; int64_t end_time;
bool old_vm_running = false;
bool entered_postcopy = false; bool entered_postcopy = false;
/* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */ /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE; enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
@ -2272,7 +2269,7 @@ static void *migration_thread(void *opaque)
pend_nonpost <= threshold_size && pend_nonpost <= threshold_size &&
atomic_read(&s->start_postcopy)) { atomic_read(&s->start_postcopy)) {
if (!postcopy_start(s, &old_vm_running)) { if (!postcopy_start(s)) {
current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE; current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
entered_postcopy = true; entered_postcopy = true;
} }
@ -2284,7 +2281,7 @@ static void *migration_thread(void *opaque)
} else { } else {
trace_migration_thread_low_pending(pending_size); trace_migration_thread_low_pending(pending_size);
migration_completion(s, current_active_state, migration_completion(s, current_active_state,
&old_vm_running, &start_time); &start_time);
break; break;
} }
} }
@ -2357,9 +2354,9 @@ static void *migration_thread(void *opaque)
* Fixme: we will run VM in COLO no matter its old running state. * Fixme: we will run VM in COLO no matter its old running state.
* After exited COLO, we will keep running. * After exited COLO, we will keep running.
*/ */
old_vm_running = true; s->vm_was_running = true;
} }
if (old_vm_running && !entered_postcopy) { if (s->vm_was_running) {
vm_start(); vm_start();
} else { } else {
if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {

View File

@ -123,6 +123,12 @@ struct MigrationState
int64_t expected_downtime; int64_t expected_downtime;
bool enabled_capabilities[MIGRATION_CAPABILITY__MAX]; bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
int64_t setup_time; int64_t setup_time;
/*
 * Whether the guest was running when we entered the completion
 * stage.  If migration is interrupted for any reason, we need to
 * continue running the guest on the source.
*/
bool vm_was_running;
/* Flag set once the migration has been asked to enter postcopy */ /* Flag set once the migration has been asked to enter postcopy */
bool start_postcopy; bool start_postcopy;