diff --git a/migration/migration.c b/migration/migration.c
index 0e7f101d64..d5f705ceef 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -199,8 +199,39 @@ void migration_object_init(void)
     dirty_bitmap_mig_init();
 }
 
-static void migration_bh_schedule(MigrationState *s, QEMUBH *bh)
+typedef struct {
+    QEMUBH *bh;
+    QEMUBHFunc *cb;
+    void *opaque;
+} MigrationBH;
+
+static void migration_bh_dispatch_bh(void *opaque)
 {
+    MigrationState *s = migrate_get_current();
+    MigrationBH *migbh = opaque;
+
+    /* cleanup this BH */
+    qemu_bh_delete(migbh->bh);
+    migbh->bh = NULL;
+
+    /* dispatch the other one */
+    migbh->cb(migbh->opaque);
+    object_unref(OBJECT(s));
+
+    g_free(migbh);
+}
+
+void migration_bh_schedule(QEMUBHFunc *cb, void *opaque)
+{
+    MigrationState *s = migrate_get_current();
+    MigrationBH *migbh = g_new0(MigrationBH, 1);
+    QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh);
+
+    /* Store these to dispatch when the BH runs */
+    migbh->bh = bh;
+    migbh->cb = cb;
+    migbh->opaque = opaque;
+
     /*
      * Ref the state for bh, because it may be called when
      * there're already no other refs
@@ -656,9 +687,7 @@ static void process_incoming_migration_bh(void *opaque)
      */
     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                       MIGRATION_STATUS_COMPLETED);
-    qemu_bh_delete(mis->bh);
     migration_incoming_state_destroy();
-    object_unref(OBJECT(migrate_get_current()));
 }
 
 static void coroutine_fn
@@ -723,8 +752,7 @@ process_incoming_migration_co(void *opaque)
         goto fail;
     }
 
-    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
-    migration_bh_schedule(migrate_get_current(), mis->bh);
+    migration_bh_schedule(process_incoming_migration_bh, mis);
     return;
 fail:
     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
@@ -1285,9 +1313,6 @@ void migrate_set_state(int *state, int old_state, int new_state)
 
 static void migrate_fd_cleanup(MigrationState *s)
 {
-    qemu_bh_delete(s->cleanup_bh);
-    s->cleanup_bh = NULL;
-
     g_free(s->hostname);
     s->hostname = NULL;
     json_writer_free(s->vmdesc);
@@ -1343,9 +1368,7 @@ static void migrate_fd_cleanup(MigrationState *s)
 
 static void migrate_fd_cleanup_bh(void *opaque)
 {
-    MigrationState *s = opaque;
-    migrate_fd_cleanup(s);
-    object_unref(OBJECT(s));
+    migrate_fd_cleanup(opaque);
 }
 
 void migrate_set_error(MigrationState *s, const Error *error)
@@ -1568,8 +1591,6 @@ int migrate_init(MigrationState *s, Error **errp)
      * parameters/capabilities that the user set, and
      * locks.
      */
-    s->cleanup_bh = 0;
-    s->vm_start_bh = 0;
     s->to_dst_file = NULL;
     s->state = MIGRATION_STATUS_NONE;
     s->rp_state.from_dst_file = NULL;
@@ -3139,7 +3160,8 @@ static void migration_iteration_finish(MigrationState *s)
         error_report("%s: Unknown ending state %d", __func__, s->state);
         break;
     }
-    migration_bh_schedule(s, s->cleanup_bh);
+
+    migration_bh_schedule(migrate_fd_cleanup_bh, s);
     bql_unlock();
 }
 
@@ -3170,7 +3192,7 @@ static void bg_migration_iteration_finish(MigrationState *s)
         break;
     }
 
-    migration_bh_schedule(s, s->cleanup_bh);
+    migration_bh_schedule(migrate_fd_cleanup_bh, s);
     bql_unlock();
 }
 
@@ -3376,12 +3398,8 @@ static void bg_migration_vm_start_bh(void *opaque)
 {
     MigrationState *s = opaque;
 
-    qemu_bh_delete(s->vm_start_bh);
-    s->vm_start_bh = NULL;
-
     vm_resume(s->vm_old_state);
     migration_downtime_end(s);
-    object_unref(OBJECT(s));
 }
 
 /**
@@ -3485,8 +3503,7 @@ static void *bg_migration_thread(void *opaque)
      * calling VM state change notifiers from vm_start() would initiate
      * writes to virtio VQs memory which is in write-protected region.
      */
-    s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
-    migration_bh_schedule(s, s->vm_start_bh);
+    migration_bh_schedule(bg_migration_vm_start_bh, s);
     bql_unlock();
 
     while (migration_is_active(s)) {
@@ -3542,12 +3559,6 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
     migrate_error_free(s);
 
     s->expected_downtime = migrate_downtime_limit();
-    if (resume) {
-        assert(s->cleanup_bh);
-    } else {
-        assert(!s->cleanup_bh);
-        s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
-    }
     if (error_in) {
         migrate_fd_error(s, error_in);
         if (resume) {
diff --git a/migration/migration.h b/migration/migration.h
index a589ae8650..f2c8b8f286 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -159,8 +159,6 @@ struct MigrationIncomingState {
     /* PostCopyFD's for external userfaultfds & handlers of shared memory */
     GArray *postcopy_remote_fds;
 
-    QEMUBH *bh;
-
     int state;
 
     /*
@@ -255,8 +253,6 @@ struct MigrationState {
 
     /*< public >*/
     QemuThread thread;
-    QEMUBH *vm_start_bh;
-    QEMUBH *cleanup_bh;
     /* Protected by qemu_file_lock */
     QEMUFile *to_dst_file;
     /* Postcopy specific transfer channel */
@@ -528,6 +524,7 @@ int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);
 void migration_make_urgent_request(void);
 void migration_consume_urgent_request(void);
 bool migration_rate_limit(void);
+void migration_bh_schedule(QEMUBHFunc *cb, void *opaque);
 void migration_cancel(const Error *error);
 
 void migration_populate_vfio_info(MigrationInfo *info);
diff --git a/migration/savevm.c b/migration/savevm.c
index 93387350c7..d612c8a902 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2171,10 +2171,7 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
         runstate_set(RUN_STATE_PAUSED);
     }
 
-    qemu_bh_delete(mis->bh);
-
     trace_vmstate_downtime_checkpoint("dst-postcopy-bh-vm-started");
-    object_unref(OBJECT(migration_get_current()));
 }
 
 /* After all discards we can start running and asking for pages */
@@ -2189,9 +2186,7 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
     }
 
     postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
-    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis);
-    object_ref(OBJECT(migration_get_current()));
-    qemu_bh_schedule(mis->bh);
+    migration_bh_schedule(loadvm_postcopy_handle_run_bh, mis);
 
     /* We need to finish reading the stream from the package
      * and also stop reading anything more from the stream that loaded the