tcg: Avoid bouncing tb_lock between tb_gen_code() and tb_add_jump()

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20160715175852.30749-11-sergey.fedorov@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
branch master
Sergey Fedorov 2016-07-15 20:58:50 +03:00 committed by Paolo Bonzini
parent 518615c650
commit 74d356dd48
1 changed file with 11 additions and 4 deletions

cpu-exec.c

@@ -282,7 +282,8 @@ static TranslationBlock *tb_find_physical(CPUState *cpu,
 static TranslationBlock *tb_find_slow(CPUState *cpu,
                                       target_ulong pc,
                                       target_ulong cs_base,
-                                      uint32_t flags)
+                                      uint32_t flags,
+                                      bool *have_tb_lock)
 {
     TranslationBlock *tb;
 
@@ -295,6 +296,7 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
          */
         mmap_lock();
         tb_lock();
+        *have_tb_lock = true;
 
         /* There's a chance that our desired tb has been translated while
          * taking the locks so we check again inside the lock.
@@ -305,7 +307,6 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
             tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
         }
 
-        tb_unlock();
         mmap_unlock();
     }
 
@@ -322,6 +323,7 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
+    bool have_tb_lock = false;
 
     /* we record a subset of the CPU state. It will
        always be the same before a given translated block
@@ -330,7 +332,7 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
     tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
-        tb = tb_find_slow(cpu, pc, cs_base, flags);
+        tb = tb_find_slow(cpu, pc, cs_base, flags, &have_tb_lock);
     }
 #ifndef CONFIG_USER_ONLY
     /* We don't take care of direct jumps when address mapping changes in
@@ -343,13 +345,18 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
 #endif
     /* See if we can patch the calling TB. */
     if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        tb_lock();
+        if (!have_tb_lock) {
+            tb_lock();
+            have_tb_lock = true;
+        }
         /* Check if translation buffer has been flushed */
         if (cpu->tb_flushed) {
             cpu->tb_flushed = false;
         } else if (!tb->invalid) {
             tb_add_jump(last_tb, tb_exit, tb);
         }
+    }
+    if (have_tb_lock) {
         tb_unlock();
     }
     return tb;
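
The effect of the patch: the slow path no longer drops tb_lock on its way out; instead it reports lock ownership back to the caller through *have_tb_lock, so tb_find_fast() can patch the calling TB inside the same critical section and release the lock exactly once at the end. Below is a minimal standalone sketch of this lock-handoff pattern using a plain pthread mutex; the find_fast()/find_slow() names and the integer "tb" are hypothetical stand-ins, not the actual QEMU code.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Slow path: takes the lock to do its work and, instead of dropping
     * it, tells the caller that the lock is still held. */
    static int find_slow(bool *have_lock)
    {
        pthread_mutex_lock(&lock);
        *have_lock = true;   /* hand lock ownership back to the caller */
        return 42;           /* stand-in for tb_gen_code() */
    }

    /* Fast path: may or may not enter the slow path; either way the lock
     * is taken at most once and released exactly once at the end. */
    static int find_fast(bool need_slow_path, bool need_patching)
    {
        bool have_lock = false;
        int tb = 0;

        if (need_slow_path) {
            tb = find_slow(&have_lock);  /* may return with lock held */
        }
        if (need_patching) {
            if (!have_lock) {            /* don't re-take a held lock */
                pthread_mutex_lock(&lock);
                have_lock = true;
            }
            /* ... patch the calling TB, cf. tb_add_jump() ... */
        }
        if (have_lock) {
            pthread_mutex_unlock(&lock);
        }
        return tb;
    }

    int main(void)
    {
        /* Lock is taken once across both steps, not unlocked/relocked
         * in between. */
        printf("%d\n", find_fast(true, true));
        return 0;
    }

Either path through find_fast() acquires the mutex at most once; the unlock/relock cycle between the two steps is exactly the "bouncing" between tb_gen_code() and tb_add_jump() that the commit title refers to avoiding.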