diff --git a/migration/migration.c b/migration/migration.c
index 3a50d4c35c..108c3d7142 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2852,6 +2852,16 @@ static void migration_iteration_finish(MigrationState *s)
     qemu_mutex_unlock_iothread();
 }
 
+void migration_make_urgent_request(void)
+{
+    qemu_sem_post(&migrate_get_current()->rate_limit_sem);
+}
+
+void migration_consume_urgent_request(void)
+{
+    qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
+}
+
 /*
  * Master migration thread on the source VM.
  * It drives the migration and pumps the data down the outgoing channel.
@@ -2861,6 +2871,7 @@ static void *migration_thread(void *opaque)
     MigrationState *s = opaque;
     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
     MigThrError thr_error;
+    bool urgent = false;
 
     rcu_register_thread();
 
@@ -2901,7 +2912,7 @@
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
         int64_t current_time;
 
-        if (!qemu_file_rate_limit(s->to_dst_file)) {
+        if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
             MigIterateState iter_state = migration_iteration_run(s);
             if (iter_state == MIG_ITERATE_SKIP) {
                 continue;
@@ -2932,10 +2943,24 @@
 
         migration_update_counters(s, current_time);
 
+        urgent = false;
         if (qemu_file_rate_limit(s->to_dst_file)) {
-            /* usleep expects microseconds */
-            g_usleep((s->iteration_start_time + BUFFER_DELAY -
-                      current_time) * 1000);
+            /* Wait for a delay to do rate limiting OR
+             * something urgent to post the semaphore.
+             */
+            int ms = s->iteration_start_time + BUFFER_DELAY - current_time;
+            trace_migration_thread_ratelimit_pre(ms);
+            if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
+                /* We were woken by one or more urgent things but
+                 * the timedwait will have consumed one of them.
+                 * The service routine for the urgent wake will dec
+                 * the semaphore itself for each item it consumes,
+                 * so add back the one we just consumed.
+                 */
+                qemu_sem_post(&s->rate_limit_sem);
+                urgent = true;
+            }
+            trace_migration_thread_ratelimit_post(urgent);
         }
     }
 
@@ -3109,6 +3134,7 @@ static void migration_instance_finalize(Object *obj)
     qemu_mutex_destroy(&ms->qemu_file_lock);
     g_free(params->tls_hostname);
     g_free(params->tls_creds);
+    qemu_sem_destroy(&ms->rate_limit_sem);
     qemu_sem_destroy(&ms->pause_sem);
     qemu_sem_destroy(&ms->postcopy_pause_sem);
     qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
@@ -3147,6 +3173,7 @@ static void migration_instance_init(Object *obj)
     qemu_sem_init(&ms->postcopy_pause_sem, 0);
     qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
     qemu_sem_init(&ms->rp_state.rp_sem, 0);
+    qemu_sem_init(&ms->rate_limit_sem, 0);
     qemu_mutex_init(&ms->qemu_file_lock);
 }
 
diff --git a/migration/migration.h b/migration/migration.h
index 31d3ed12dc..64a7b33735 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -121,6 +121,11 @@ struct MigrationState
      */
     QemuMutex qemu_file_lock;
 
+    /*
+     * Used to allow urgent requests to override rate limiting.
+     */
+    QemuSemaphore rate_limit_sem;
+
     /* bytes already send at the beggining of current interation */
     uint64_t iteration_initial_bytes;
     /* time at the start of current iteration */
@@ -287,4 +292,7 @@ void init_dirty_bitmap_incoming_migration(void);
 #define qemu_ram_foreach_block \
   #warning "Use qemu_ram_foreach_block_migratable in migration code"
 
+void migration_make_urgent_request(void);
+void migration_consume_urgent_request(void);
+
 #endif
diff --git a/migration/trace-events b/migration/trace-events
index 4a768eaaeb..3f67758893 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -108,6 +108,8 @@ migration_return_path_end_before(void) ""
 migration_return_path_end_after(int rp_error) "%d"
 migration_thread_after_loop(void) ""
 migration_thread_file_err(void) ""
+migration_thread_ratelimit_pre(int ms) "%d ms"
+migration_thread_ratelimit_post(int urgent) "urgent: %d"
 migration_thread_setup_complete(void) ""
 open_return_path_on_source(void) ""
 open_return_path_on_source_continue(void) ""
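
Usage note (illustrative, not part of the patch): the pair is designed so that
counts on rate_limit_sem stay balanced. A producer queues an item and posts
the semaphore via migration_make_urgent_request(); the migration thread's
qemu_sem_timedwait() then returns early, re-posts the count it just consumed,
and sets urgent so the next iteration runs without waiting for the rate-limit
window. The service routine that drains the queue then calls
migration_consume_urgent_request() once per item, so the semaphore count again
matches the queue depth when it finishes. A minimal sketch of such a caller
follows; UrgentQueue, urgent_queue_push(), urgent_queue_pop() and
send_one_item() are hypothetical stand-ins for a real consumer (something like
a postcopy page-request queue):

    /* Producer side, e.g. the return-path thread: queue the work,
     * then wake the migration thread out of its rate-limit sleep.
     */
    static void queue_urgent_item(UrgentQueue *q, UrgentItem *item)
    {
        urgent_queue_push(q, item);             /* hypothetical queue helper */
        migration_make_urgent_request();        /* posts rate_limit_sem */
    }

    /* Consumer side, run from the migration thread's iteration: for
     * every item drained, consume one semaphore count so that posts
     * and queued items stay in step.
     */
    static void service_urgent_items(UrgentQueue *q)
    {
        UrgentItem *item;

        while ((item = urgent_queue_pop(q))) {  /* hypothetical queue helper */
            send_one_item(item);                /* hypothetical send helper */
            migration_consume_urgent_request(); /* waits on rate_limit_sem */
        }
    }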