accel/tcg: Move can_do_io to CPUNegativeOffsetState

Minimize the displacement to can_do_io, since it may
be touched at the start of each TranslationBlock.
It fits into other padding within the substructure.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Branch: master
Author: Richard Henderson, 2023-09-15 15:41:39 -07:00
Parent: e62de98111
Commit: 464dacf609
14 changed files with 21 additions and 21 deletions
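The layout trick is easiest to see from how generated code reaches the field: TCG addresses CPU state relative to env, and CPUNegativeOffsetState sits just below env in ArchCPU, so anything placed there needs only a small negative displacement. Below is a minimal, standalone sketch of the same offsetof arithmetic that set_can_do_io() uses, written with mock types (MockTLB, MockCPUState, and so on are illustrative stand-ins, not the real QEMU definitions):

/*
 * Illustrative sketch only: mock types standing in for CPUTLB, IcountDecr,
 * CPUState and ArchCPU.  It shows why a field in the negative-offset
 * substructure is reachable from &cpu->env with a small negative offset.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { char pad[192]; } MockTLB;
typedef struct { unsigned u32; } MockIcountDecr;

typedef struct {
    MockTLB tlb;
    MockIcountDecr icount_decr;
    bool can_do_io;                 /* slots into existing padding */
} MockNegState;

typedef struct {
    char lots_of_other_state[4096]; /* stand-in for the rest of CPUState */
    MockNegState neg;               /* kept last, right before env */
} MockCPUState;

typedef struct {
    MockCPUState parent_obj;
    char env[1];                    /* stand-in for the target CPUArchState */
} MockArchCPU;

int main(void)
{
    /* Same displacement computation that the translator performs. */
    long disp = (long)offsetof(MockArchCPU, parent_obj.neg.can_do_io)
              - (long)offsetof(MockArchCPU, env);
    printf("can_do_io sits at env%+ld\n", disp);   /* small negative value */
    return 0;
}

Compiled and run, this prints a small negative displacement (env-4 for these mock sizes), which is the property the commit wants for can_do_io since it is touched at the start of each TranslationBlock.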


@@ -27,7 +27,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
 #ifndef _WIN32


@@ -428,7 +428,7 @@ static void *hvf_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
     hvf_init_vcpu(cpu);


@@ -36,7 +36,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
     r = kvm_init_vcpu(cpu, &error_fatal);


@@ -36,7 +36,7 @@ void cpu_loop_exit_noexc(CPUState *cpu)
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec. */
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     /* Undo any setting in generated code. */
     qemu_plugin_disable_mem_helpers(cpu);
     siglongjmp(cpu->jmp_env, 1);


@@ -455,7 +455,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     qemu_thread_jit_execute();
     ret = tcg_qemu_tb_exec(env, tb_ptr);
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     qemu_plugin_disable_mem_helpers(cpu);
     /*
      * TODO: Delay swapping back to the read-write region of the TB


@@ -1361,7 +1361,7 @@ io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
     section = iotlb_to_section(cpu, xlat, attrs);
     mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
-    if (!cpu->can_do_io) {
+    if (!cpu->neg.can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }


@@ -153,7 +153,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask)
     tcg_handle_interrupt(cpu, mask);
     if (qemu_cpu_is_self(cpu) &&
-        !cpu->can_do_io
+        !cpu->neg.can_do_io
         && (mask & ~old_mask) != 0) {
         cpu_abort(cpu, "Raised interrupt while not in I/O function");
     }


@@ -80,7 +80,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
     cpu_thread_signal_created(cpu);
     qemu_guest_random_seed_thread_part2(cpu->random_seed);


@@ -192,7 +192,7 @@ static void *rr_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     cpu_thread_signal_created(cpu);
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
@@ -334,7 +334,7 @@ void rr_start_vcpu_thread(CPUState *cpu)
         cpu->thread = single_tcg_cpu_thread;
         cpu->halt_cond = single_tcg_halt_cond;
         cpu->thread_id = first_cpu->thread_id;
-        cpu->can_do_io = 1;
+        cpu->neg.can_do_io = 1;
         cpu->created = true;
     }
 }


@@ -20,9 +20,11 @@ static void set_can_do_io(DisasContextBase *db, bool val)
 {
     if (db->saved_can_do_io != val) {
         db->saved_can_do_io = val;
-        tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
-                       offsetof(ArchCPU, parent_obj.can_do_io) -
-                       offsetof(ArchCPU, env));
+        QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
+        tcg_gen_st8_i32(tcg_constant_i32(val), cpu_env,
+                        offsetof(ArchCPU, parent_obj.neg.can_do_io) -
+                        offsetof(ArchCPU, env));
     }
 }
@@ -83,7 +85,7 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
     }
     /*
-     * cpu->can_do_io is set automatically here at the beginning of
+     * cpu->neg.can_do_io is set automatically here at the beginning of
      * each translation block. The cost is minimal, plus it would be
      * very easy to forget doing it in the translator.
      */
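The set_can_do_io() hunk above narrows the store to a single byte (tcg_gen_st8_i32) and adds a QEMU_BUILD_BUG_ON as a compile-time guard, since a byte store is only correct while the field really is one byte wide. As a rough standalone illustration of that guard (NegStateSketch is a hypothetical stand-in; this is not code from the tree and uses C11 static_assert rather than QEMU's macros):

#include <assert.h>     /* static_assert (C11) */
#include <stdbool.h>

/* Hypothetical stand-in for the negative-offset substructure. */
typedef struct {
    bool can_do_io;
} NegStateSketch;

/*
 * Compile-time check mirroring QEMU_BUILD_BUG_ON(sizeof_field(...) != 1):
 * if can_do_io ever grew past one byte, a single-byte store emitted by the
 * translator would silently update only part of the field.
 */
static_assert(sizeof(((NegStateSketch *)0)->can_do_io) == 1,
              "can_do_io must remain one byte wide");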


@@ -131,7 +131,7 @@ static void cpu_common_reset_hold(Object *obj)
     cpu->mem_io_pc = 0;
     cpu->icount_extra = 0;
     qatomic_set(&cpu->neg.icount_decr.u32, 0);
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     cpu->exception_index = -1;
     cpu->crash_occurred = false;
     cpu->cflags_next_tb = -1;


@@ -351,6 +351,7 @@ typedef union IcountDecr {
 typedef struct CPUNegativeOffsetState {
     CPUTLB tlb;
     IcountDecr icount_decr;
+    bool can_do_io;
 } CPUNegativeOffsetState;
 typedef struct CPUBreakpoint {
@@ -420,9 +421,7 @@ struct qemu_work_item;
  * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
  * @singlestep_enabled: Flags for single-stepping.
  * @icount_extra: Instructions until next timer event.
- * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
- *   requires that IO only be performed on the last instruction of a TB
- *   so that interrupts take effect immediately.
+ * @neg.can_do_io: True if memory-mapped IO is allowed.
  * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
  *   AddressSpaces this CPU has)
  * @num_ases: number of CPUAddressSpaces in @cpu_ases
@@ -547,7 +546,6 @@ struct CPUState {
     int cluster_index;
     uint32_t tcg_cflags;
     uint32_t halted;
-    uint32_t can_do_io;
     int32_t exception_index;
     AccelCPUState *accel;


@@ -111,7 +111,7 @@ static int64_t icount_get_raw_locked(void)
     CPUState *cpu = current_cpu;
     if (cpu && cpu->running) {
-        if (!cpu->can_do_io) {
+        if (!cpu->neg.can_do_io) {
             error_report("Bad icount read");
             exit(1);
         }


@@ -177,7 +177,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                  * Force recompile to succeed, because icount may
                  * be read only at the end of the block.
                  */
-                if (!cpu->can_do_io) {
+                if (!cpu->neg.can_do_io) {
                     /* Force execution of one insn next time. */
                     cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
                                         | curr_cflags(cpu);