From 118b07308a8cedc16ef63d7ab243a95f1701db40 Mon Sep 17 00:00:00 2001
From: Sergey Fedorov <serge.fdrv@gmail.com>
Date: Fri, 15 Jul 2016 20:58:44 +0300
Subject: [PATCH] tcg: Prepare safe access to tb_flushed out of tb_lock
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Ensure atomicity and ordering of the CPU's 'tb_flushed' accesses in
preparation for translation block lookup out of 'tb_lock'.

In user mode emulation this field can only be touched from another
thread by tb_flush(), so the only accesses that need to be sequentially
atomic are:
 * the single write in tb_flush();
 * reads/writes done out of 'tb_lock'.

In the future, before MTTCG can be enabled in system mode, tb_flush()
must be made thread-safe, at which point this field becomes
unnecessary.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20160715175852.30749-5-sergey.fedorov@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 cpu-exec.c      | 16 +++++++---------
 translate-all.c |  4 ++--
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index 32b58edb31..877ff8ed70 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -338,13 +338,6 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
-    if (cpu->tb_flushed) {
-        /* Ensure that no TB jump will be modified as the
-         * translation buffer has been flushed.
-         */
-        last_tb = NULL;
-        cpu->tb_flushed = false;
-    }
 #ifndef CONFIG_USER_ONLY
     /* We don't take care of direct jumps when address mapping changes in
      * system emulation. So it's not safe to make a direct jump to a TB
@@ -356,7 +349,12 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
 #endif
     /* See if we can patch the calling TB. */
     if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        tb_add_jump(last_tb, tb_exit, tb);
+        /* Check if translation buffer has been flushed */
+        if (cpu->tb_flushed) {
+            cpu->tb_flushed = false;
+        } else {
+            tb_add_jump(last_tb, tb_exit, tb);
+        }
     }
     tb_unlock();
     return tb;
@@ -617,7 +615,7 @@ int cpu_exec(CPUState *cpu)
             break;
         }

-        cpu->tb_flushed = false; /* reset before first TB lookup */
+        atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
         for(;;) {
             cpu_handle_interrupt(cpu, &last_tb);
             tb = tb_find_fast(cpu, last_tb, tb_exit);
diff --git a/translate-all.c b/translate-all.c
index 77ae59d7e9..e753a50640 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -848,7 +848,6 @@ void tb_flush(CPUState *cpu)
         > tcg_ctx.code_gen_buffer_size) {
         cpu_abort(cpu, "Internal error: code buffer overflow\n");
     }
-    tcg_ctx.tb_ctx.nb_tbs = 0;

     CPU_FOREACH(cpu) {
         int i;
@@ -856,9 +855,10 @@ void tb_flush(CPUState *cpu)
         for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
             atomic_set(&cpu->tb_jmp_cache[i], NULL);
         }
-        cpu->tb_flushed = true;
+        atomic_mb_set(&cpu->tb_flushed, true);
     }

+    tcg_ctx.tb_ctx.nb_tbs = 0;
     qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
     page_flush_tb();
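
The sketch below is a standalone model of the 'tb_flushed' protocol this
patch sets up, not QEMU code: it substitutes C11 <stdatomic.h> seq_cst
operations for QEMU's atomic_mb_set(), and the names flusher(),
executor() and tb_generation are invented for illustration.

/* Standalone C11 model of the 'tb_flushed' flag protocol (illustrative,
 * not QEMU code). Build with: cc -std=c11 -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tb_flushed;    /* models cpu->tb_flushed */
static atomic_int tb_generation;  /* stands in for the translation buffer */

/* Models tb_flush() in user-mode emulation: invalidate the buffer, then
 * publish the flag with a sequentially consistent store, which is the
 * ordering atomic_mb_set() provides. */
static void *flusher(void *arg)
{
    (void)arg;
    atomic_fetch_add(&tb_generation, 1);  /* "flush" all TBs */
    atomic_store(&tb_flushed, true);      /* ~ atomic_mb_set(&cpu->tb_flushed, true) */
    return NULL;
}

/* Models the lookup loop in tb_find_fast(): only patch a direct jump if
 * no flush has been observed since the last lookup. The patch still does
 * the read-then-clear under tb_lock; a single atomic exchange models the
 * lock-free variant this series is preparing for. */
static void *executor(void *arg)
{
    (void)arg;
    for (int i = 0; i < 1000000; i++) {
        if (atomic_exchange(&tb_flushed, false)) {
            continue;  /* flush seen: skip tb_add_jump(), drop last_tb */
        }
        /* flag was clear: safe to chain the previous TB to this one */
    }
    return NULL;
}

int main(void)
{
    pthread_t f, e;
    pthread_create(&e, NULL, executor, NULL);
    pthread_create(&f, NULL, flusher, NULL);
    pthread_join(f, NULL);
    pthread_join(e, NULL);
    printf("flush generation: %d\n", atomic_load(&tb_generation));
    return 0;
}

The exchange makes "test the flag and clear it" one indivisible step, so
a flush that lands between the test and the clear cannot be lost once
the lookup path no longer runs under tb_lock.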