cpus: pass CPUState to run_on_cpu helpers

CPUState is a fairly common pointer to pass to these helpers. This means
that if you need other arguments for the async_run_on_cpu case you end up
having to g_malloc a structure just to stuff the additional data into the
routine. For the current users this isn't a massive deal, but for MTTCG it
gets cumbersome when the only other parameter is often an address.

This adds the typedef run_on_cpu_func for helper functions which has an
explicit CPUState * passed as the first parameter. All the users of
run_on_cpu and async_run_on_cpu have had their helpers updated to use
CPUState where available.
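
To illustrate the new calling convention (this sketch is not part of the
patch: do_flush_page and queue_page_flush are made-up names, while
run_on_cpu_func, async_run_on_cpu() and tlb_flush_page() are the existing
QEMU interfaces), a helper whose only extra parameter is an address can
now be queued without g_malloc'ing a { cpu, addr } wrapper struct:

    /* Typedef added by this patch in include/qom/cpu.h: */
    typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);

    /* Hypothetical helper: the target vCPU now arrives as the first
     * argument, so the opaque pointer only has to carry the address. */
    static void do_flush_page(CPUState *cpu, void *data)
    {
        target_ulong addr = (target_ulong)(uintptr_t)data;

        tlb_flush_page(cpu, addr);
    }

    /* Hypothetical caller: no heap-allocated wrapper needed. */
    static void queue_page_flush(CPUState *cpu, target_ulong addr)
    {
        async_run_on_cpu(cpu, do_flush_page, (void *)(uintptr_t)addr);
    }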

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
[Sergey Fedorov:
 - eliminate more CPUState in user data;
 - remove unnecessary user data passing;
 - fix target-s390x/kvm.c and target-s390x/misc_helper.c]
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Acked-by: David Gibson <david@gibson.dropbear.id.au> (ppc parts)
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com> (s390 parts)
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1470158864-17651-3-git-send-email-alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Branch: master
Author: Alex Bennée <alex.bennee@linaro.org>, 2016-08-02 18:27:33 +01:00 (committed by Paolo Bonzini)
Commit: e0eeb4a21a, parent 4a0588996a
14 changed files with 109 additions and 138 deletions

cpus.c

@@ -557,9 +557,8 @@ static const VMStateDescription vmstate_timers = {
     }
 };
 
-static void cpu_throttle_thread(void *opaque)
+static void cpu_throttle_thread(CPUState *cpu, void *opaque)
 {
-    CPUState *cpu = opaque;
     double pct;
     double throttle_ratio;
     long sleeptime_ns;
@@ -589,7 +588,7 @@ static void cpu_throttle_timer_tick(void *opaque)
     }
 
     CPU_FOREACH(cpu) {
         if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
-            async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
+            async_run_on_cpu(cpu, cpu_throttle_thread, NULL);
         }
     }
@@ -917,12 +916,12 @@ void qemu_init_cpu_loop(void)
     qemu_thread_get_self(&io_thread);
 }
 
-void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
 {
     struct qemu_work_item wi;
 
     if (qemu_cpu_is_self(cpu)) {
-        func(data);
+        func(cpu, data);
         return;
     }
@@ -950,12 +949,12 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     }
 }
 
-void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
 {
     struct qemu_work_item *wi;
 
     if (qemu_cpu_is_self(cpu)) {
-        func(data);
+        func(cpu, data);
         return;
     }
@@ -1006,7 +1005,7 @@ static void flush_queued_work(CPUState *cpu)
             cpu->queued_work_last = NULL;
         }
         qemu_mutex_unlock(&cpu->work_mutex);
-        wi->func(wi->data);
+        wi->func(cpu, wi->data);
         qemu_mutex_lock(&cpu->work_mutex);
         if (wi->free) {
            g_free(wi);

hw/i386/kvm/apic.c

@@ -125,7 +125,7 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
     }
 }
 
-static void kvm_apic_put(void *data)
+static void kvm_apic_put(CPUState *cs, void *data)
 {
     APICCommonState *s = data;
     struct kvm_lapic_state kapic;
@@ -146,10 +146,9 @@ static void kvm_apic_post_load(APICCommonState *s)
     run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
 }
 
-static void do_inject_external_nmi(void *data)
+static void do_inject_external_nmi(CPUState *cpu, void *data)
 {
     APICCommonState *s = data;
-    CPUState *cpu = CPU(s->cpu);
     uint32_t lvt;
     int ret;

hw/i386/kvmvapic.c

@@ -483,7 +483,7 @@ typedef struct VAPICEnableTPRReporting {
     bool enable;
 } VAPICEnableTPRReporting;
 
-static void vapic_do_enable_tpr_reporting(void *data)
+static void vapic_do_enable_tpr_reporting(CPUState *cpu, void *data)
 {
     VAPICEnableTPRReporting *info = data;
@@ -734,10 +734,10 @@ static void vapic_realize(DeviceState *dev, Error **errp)
     nb_option_roms++;
 }
 
-static void do_vapic_enable(void *data)
+static void do_vapic_enable(CPUState *cs, void *data)
 {
     VAPICROMState *s = data;
-    X86CPU *cpu = X86_CPU(first_cpu);
+    X86CPU *cpu = X86_CPU(cs);
 
     static const uint8_t enabled = 1;
     cpu_physical_memory_write(s->vapic_paddr + offsetof(VAPICState, enabled),

hw/ppc/ppce500_spin.c

@@ -54,11 +54,6 @@ typedef struct SpinState {
     SpinInfo spin[MAX_CPUS];
 } SpinState;
 
-typedef struct spin_kick {
-    PowerPCCPU *cpu;
-    SpinInfo *spin;
-} SpinKick;
-
 static void spin_reset(void *opaque)
 {
     SpinState *s = opaque;
@@ -89,16 +84,15 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
     env->tlb_dirty = true;
 }
 
-static void spin_kick(void *data)
+static void spin_kick(CPUState *cs, void *data)
 {
-    SpinKick *kick = data;
-    CPUState *cpu = CPU(kick->cpu);
-    CPUPPCState *env = &kick->cpu->env;
-    SpinInfo *curspin = kick->spin;
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
+    SpinInfo *curspin = data;
     hwaddr map_size = 64 * 1024 * 1024;
     hwaddr map_start;
 
-    cpu_synchronize_state(cpu);
+    cpu_synchronize_state(cs);
     stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]);
     env->nip = ldq_p(&curspin->addr) & (map_size - 1);
     env->gpr[3] = ldq_p(&curspin->r3);
@@ -112,10 +106,10 @@ static void spin_kick(void *data)
     map_start = ldq_p(&curspin->addr) & ~(map_size - 1);
     mmubooke_create_initial_mapping(env, 0, map_start, map_size);
 
-    cpu->halted = 0;
-    cpu->exception_index = -1;
-    cpu->stopped = false;
-    qemu_cpu_kick(cpu);
+    cs->halted = 0;
+    cs->exception_index = -1;
+    cs->stopped = false;
+    qemu_cpu_kick(cs);
 }
 
 static void spin_write(void *opaque, hwaddr addr, uint64_t value,
@@ -153,12 +147,7 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,
     if (!(ldq_p(&curspin->addr) & 1)) {
         /* run CPU */
-        SpinKick kick = {
-            .cpu = POWERPC_CPU(cpu),
-            .spin = curspin,
-        };
-
-        run_on_cpu(cpu, spin_kick, &kick);
+        run_on_cpu(cpu, spin_kick, curspin);
     }
 }

hw/ppc/spapr.c

@@ -2134,10 +2134,8 @@ static void spapr_machine_finalizefn(Object *obj)
     g_free(spapr->kvm_type);
 }
 
-static void ppc_cpu_do_nmi_on_cpu(void *arg)
+static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, void *arg)
 {
-    CPUState *cs = arg;
-
     cpu_synchronize_state(cs);
     ppc_cpu_do_system_reset(cs);
 }
@@ -2147,7 +2145,7 @@ static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
     CPUState *cs;
 
     CPU_FOREACH(cs) {
-        async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, cs);
+        async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, NULL);
     }
 }

hw/ppc/spapr_hcall.c

@@ -13,19 +13,18 @@
 #include "kvm_ppc.h"
 
 struct SPRSyncState {
-    CPUState *cs;
     int spr;
     target_ulong value;
     target_ulong mask;
 };
 
-static void do_spr_sync(void *arg)
+static void do_spr_sync(CPUState *cs, void *arg)
 {
     struct SPRSyncState *s = arg;
-    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
     CPUPPCState *env = &cpu->env;
 
-    cpu_synchronize_state(s->cs);
+    cpu_synchronize_state(cs);
     env->spr[s->spr] &= ~s->mask;
     env->spr[s->spr] |= s->value;
 }
@@ -34,7 +33,6 @@ static void set_spr(CPUState *cs, int spr, target_ulong value,
                     target_ulong mask)
 {
     struct SPRSyncState s = {
-        .cs = cs,
         .spr = spr,
         .value = value,
         .mask = mask
@@ -909,17 +907,17 @@ static target_ulong cas_get_option_vector(int vector, target_ulong table)
 }
 
 typedef struct {
-    PowerPCCPU *cpu;
     uint32_t cpu_version;
     Error *err;
 } SetCompatState;
 
-static void do_set_compat(void *arg)
+static void do_set_compat(CPUState *cs, void *arg)
 {
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
     SetCompatState *s = arg;
 
-    cpu_synchronize_state(CPU(s->cpu));
-    ppc_set_compat(s->cpu, s->cpu_version, &s->err);
+    cpu_synchronize_state(cs);
+    ppc_set_compat(cpu, s->cpu_version, &s->err);
 }
 
 #define get_compat_level(cpuver) ( \
@@ -1015,7 +1013,6 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
     if (old_cpu_version != cpu_version) {
         CPU_FOREACH(cs) {
             SetCompatState s = {
-                .cpu = POWERPC_CPU(cs),
                 .cpu_version = cpu_version,
                 .err = NULL,
             };

include/qom/cpu.h

@@ -232,9 +232,11 @@ struct kvm_run;
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 
 /* work queue */
+typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);
+
 struct qemu_work_item {
     struct qemu_work_item *next;
-    void (*func)(void *data);
+    run_on_cpu_func func;
     void *data;
     int done;
     bool free;
@@ -623,7 +625,7 @@ bool cpu_is_stopped(CPUState *cpu);
  *
  * Schedules the function @func for execution on the vCPU @cpu.
  */
-void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
 
 /**
  * async_run_on_cpu:
@@ -633,7 +635,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
  *
  * Schedules the function @func for execution on the vCPU @cpu asynchronously.
  */
-void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
 
 /**
  * qemu_get_cpu:

kvm-all.c

@@ -1847,10 +1847,8 @@ void kvm_flush_coalesced_mmio_buffer(void)
     s->coalesced_flush_in_progress = false;
 }
 
-static void do_kvm_cpu_synchronize_state(void *arg)
+static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
 {
-    CPUState *cpu = arg;
-
     if (!cpu->kvm_vcpu_dirty) {
         kvm_arch_get_registers(cpu);
         cpu->kvm_vcpu_dirty = true;
@@ -1860,34 +1858,30 @@ static void do_kvm_cpu_synchronize_state(void *arg)
 void kvm_cpu_synchronize_state(CPUState *cpu)
 {
     if (!cpu->kvm_vcpu_dirty) {
-        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
+        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, NULL);
     }
 }
 
-static void do_kvm_cpu_synchronize_post_reset(void *arg)
+static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg)
 {
-    CPUState *cpu = arg;
-
     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
     cpu->kvm_vcpu_dirty = false;
 }
 
 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
 {
-    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu);
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, NULL);
 }
 
-static void do_kvm_cpu_synchronize_post_init(void *arg)
+static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg)
 {
-    CPUState *cpu = arg;
-
     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
     cpu->kvm_vcpu_dirty = false;
 }
 
 void kvm_cpu_synchronize_post_init(CPUState *cpu)
 {
-    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, NULL);
 }
 
 int kvm_cpu_exec(CPUState *cpu)
@@ -2216,7 +2210,7 @@ struct kvm_set_guest_debug_data {
     int err;
 };
 
-static void kvm_invoke_set_guest_debug(void *data)
+static void kvm_invoke_set_guest_debug(CPUState *unused_cpu, void *data)
 {
     struct kvm_set_guest_debug_data *dbg_data = data;
 
@@ -2234,7 +2228,6 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
         data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
     }
     kvm_arch_update_guest_debug(cpu, &data.dbg);
-    data.cpu = cpu;
 
     run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
     return data.err;

target-i386/helper.c

@@ -1113,7 +1113,6 @@ out:
 
 typedef struct MCEInjectionParams {
     Monitor *mon;
-    X86CPU *cpu;
     int bank;
     uint64_t status;
     uint64_t mcg_status;
@@ -1122,14 +1121,14 @@ typedef struct MCEInjectionParams {
     int flags;
 } MCEInjectionParams;
 
-static void do_inject_x86_mce(void *data)
+static void do_inject_x86_mce(CPUState *cs, void *data)
 {
     MCEInjectionParams *params = data;
-    CPUX86State *cenv = &params->cpu->env;
-    CPUState *cpu = CPU(params->cpu);
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *cenv = &cpu->env;
     uint64_t *banks = cenv->mce_banks + 4 * params->bank;
 
-    cpu_synchronize_state(cpu);
+    cpu_synchronize_state(cs);
 
     /*
      * If there is an MCE exception being processed, ignore this SRAO MCE
@@ -1149,7 +1148,7 @@ static void do_inject_x86_mce(void *data)
         if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
             monitor_printf(params->mon,
                            "CPU %d: Uncorrected error reporting disabled\n",
-                           cpu->cpu_index);
+                           cs->cpu_index);
             return;
         }
@@ -1161,7 +1160,7 @@ static void do_inject_x86_mce(void *data)
             monitor_printf(params->mon,
                            "CPU %d: Uncorrected error reporting disabled for"
                            " bank %d\n",
-                           cpu->cpu_index, params->bank);
+                           cs->cpu_index, params->bank);
             return;
         }
@@ -1170,7 +1169,7 @@ static void do_inject_x86_mce(void *data)
             monitor_printf(params->mon,
                            "CPU %d: Previous MCE still in progress, raising"
                            " triple fault\n",
-                           cpu->cpu_index);
+                           cs->cpu_index);
             qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
             qemu_system_reset_request();
             return;
@@ -1182,7 +1181,7 @@ static void do_inject_x86_mce(void *data)
         banks[3] = params->misc;
         cenv->mcg_status = params->mcg_status;
         banks[1] = params->status;
-        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
+        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
     } else if (!(banks[1] & MCI_STATUS_VAL)
                || !(banks[1] & MCI_STATUS_UC)) {
         if (banks[1] & MCI_STATUS_VAL) {
@@ -1204,7 +1203,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
     CPUX86State *cenv = &cpu->env;
     MCEInjectionParams params = {
         .mon = mon,
-        .cpu = cpu,
         .bank = bank,
         .status = status,
         .mcg_status = mcg_status,
@@ -1245,7 +1243,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
             if (other_cs == cs) {
                 continue;
             }
-            params.cpu = X86_CPU(other_cs);
             run_on_cpu(other_cs, do_inject_x86_mce, &params);
         }
     }

target-i386/kvm.c

@@ -156,10 +156,8 @@ static int kvm_get_tsc(CPUState *cs)
     return 0;
 }
 
-static inline void do_kvm_synchronize_tsc(void *arg)
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, void *arg)
 {
-    CPUState *cpu = arg;
-
     kvm_get_tsc(cpu);
 }
 
@@ -169,7 +167,7 @@ void kvm_synchronize_all_tsc(void)
 
     if (kvm_enabled()) {
         CPU_FOREACH(cpu) {
-            run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu);
+            run_on_cpu(cpu, do_kvm_synchronize_tsc, NULL);
         }
     }
 }

target-s390x/cpu.c

@@ -164,7 +164,7 @@ static void s390_cpu_machine_reset_cb(void *opaque)
 {
     S390CPU *cpu = opaque;
 
-    run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, CPU(cpu));
+    run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, NULL);
 }
 #endif
@@ -220,7 +220,7 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
     s390_cpu_gdb_init(cs);
     qemu_init_vcpu(cs);
 #if !defined(CONFIG_USER_ONLY)
-    run_on_cpu(cs, s390_do_cpu_full_reset, cs);
+    run_on_cpu(cs, s390_do_cpu_full_reset, NULL);
 #else
     cpu_reset(cs);
 #endif

target-s390x/cpu.h

@@ -502,17 +502,14 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
 #define decode_basedisp_rs decode_basedisp_s
 
 /* helper functions for run_on_cpu() */
-static inline void s390_do_cpu_reset(void *arg)
+static inline void s390_do_cpu_reset(CPUState *cs, void *arg)
 {
-    CPUState *cs = arg;
     S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
 
     scc->cpu_reset(cs);
 }
 
-static inline void s390_do_cpu_full_reset(void *arg)
+static inline void s390_do_cpu_full_reset(CPUState *cs, void *arg)
 {
-    CPUState *cs = arg;
-
     cpu_reset(cs);
 }

target-s390x/kvm.c

@@ -1385,7 +1385,6 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
 }
 
 typedef struct SigpInfo {
-    S390CPU *cpu;
     uint64_t param;
     int cc;
     uint64_t *status_reg;
@@ -1398,38 +1397,40 @@ static void set_sigp_status(SigpInfo *si, uint64_t status)
     si->cc = SIGP_CC_STATUS_STORED;
 }
 
-static void sigp_start(void *arg)
+static void sigp_start(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
     SigpInfo *si = arg;
 
-    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
         si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
         return;
     }
 
-    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
+    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static void sigp_stop(void *arg)
+static void sigp_stop(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
     SigpInfo *si = arg;
     struct kvm_s390_irq irq = {
         .type = KVM_S390_SIGP_STOP,
     };
 
-    if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
+    if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
         si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
         return;
     }
 
     /* disabled wait - sleeping in user space */
-    if (CPU(si->cpu)->halted) {
-        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
+    if (cs->halted) {
+        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
     } else {
         /* execute the stop function */
-        si->cpu->env.sigp_order = SIGP_STOP;
-        kvm_s390_vcpu_interrupt(si->cpu, &irq);
+        cpu->env.sigp_order = SIGP_STOP;
+        kvm_s390_vcpu_interrupt(cpu, &irq);
     }
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
 }
@@ -1496,56 +1497,58 @@ static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
     return 0;
 }
 
-static void sigp_stop_and_store_status(void *arg)
+static void sigp_stop_and_store_status(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
     SigpInfo *si = arg;
     struct kvm_s390_irq irq = {
         .type = KVM_S390_SIGP_STOP,
     };
 
     /* disabled wait - sleeping in user space */
-    if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
-        CPU(si->cpu)->halted) {
-        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
+    if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
+        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
     }
 
-    switch (s390_cpu_get_state(si->cpu)) {
+    switch (s390_cpu_get_state(cpu)) {
     case CPU_STATE_OPERATING:
-        si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
-        kvm_s390_vcpu_interrupt(si->cpu, &irq);
+        cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
+        kvm_s390_vcpu_interrupt(cpu, &irq);
         /* store will be performed when handling the stop intercept */
         break;
     case CPU_STATE_STOPPED:
         /* already stopped, just store the status */
-        cpu_synchronize_state(CPU(si->cpu));
-        kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
+        cpu_synchronize_state(cs);
+        kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
         break;
     }
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static void sigp_store_status_at_address(void *arg)
+static void sigp_store_status_at_address(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
     SigpInfo *si = arg;
     uint32_t address = si->param & 0x7ffffe00u;
 
     /* cpu has to be stopped */
-    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
         set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
         return;
     }
 
-    cpu_synchronize_state(CPU(si->cpu));
-    if (kvm_s390_store_status(si->cpu, address, false)) {
+    cpu_synchronize_state(cs);
+    if (kvm_s390_store_status(cpu, address, false)) {
         set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
         return;
     }
 
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static void sigp_store_adtl_status(void *arg)
+static void sigp_store_adtl_status(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
     SigpInfo *si = arg;
 
     if (!s390_has_feat(S390_FEAT_VECTOR)) {
@@ -1554,7 +1557,7 @@ static void sigp_store_adtl_status(void *arg)
     }
 
     /* cpu has to be stopped */
-    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
         set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
         return;
     }
@@ -1565,31 +1568,32 @@ static void sigp_store_adtl_status(void *arg)
         return;
     }
 
-    cpu_synchronize_state(CPU(si->cpu));
-    if (kvm_s390_store_adtl_status(si->cpu, si->param)) {
+    cpu_synchronize_state(cs);
+    if (kvm_s390_store_adtl_status(cpu, si->param)) {
         set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
         return;
     }
 
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static void sigp_restart(void *arg)
+static void sigp_restart(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
     SigpInfo *si = arg;
     struct kvm_s390_irq irq = {
         .type = KVM_S390_RESTART,
     };
 
-    switch (s390_cpu_get_state(si->cpu)) {
+    switch (s390_cpu_get_state(cpu)) {
     case CPU_STATE_STOPPED:
         /* the restart irq has to be delivered prior to any other pending irq */
-        cpu_synchronize_state(CPU(si->cpu));
-        do_restart_interrupt(&si->cpu->env);
-        s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
+        cpu_synchronize_state(cs);
+        do_restart_interrupt(&cpu->env);
+        s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
         break;
     case CPU_STATE_OPERATING:
-        kvm_s390_vcpu_interrupt(si->cpu, &irq);
+        kvm_s390_vcpu_interrupt(cpu, &irq);
         break;
     }
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
@@ -1597,20 +1601,18 @@ static void sigp_restart(void *arg)
 
 int kvm_s390_cpu_restart(S390CPU *cpu)
 {
-    SigpInfo si = {
-        .cpu = cpu,
-    };
+    SigpInfo si = {};
 
     run_on_cpu(CPU(cpu), sigp_restart, &si);
     DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
     return 0;
 }
 
-static void sigp_initial_cpu_reset(void *arg)
+static void sigp_initial_cpu_reset(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
+    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
     SigpInfo *si = arg;
-    CPUState *cs = CPU(si->cpu);
-    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
 
     cpu_synchronize_state(cs);
     scc->initial_cpu_reset(cs);
@@ -1618,11 +1620,11 @@ static void sigp_initial_cpu_reset(void *arg)
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static void sigp_cpu_reset(void *arg)
+static void sigp_cpu_reset(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
+    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
     SigpInfo *si = arg;
-    CPUState *cs = CPU(si->cpu);
-    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
 
     cpu_synchronize_state(cs);
     scc->cpu_reset(cs);
@@ -1630,12 +1632,13 @@ static void sigp_cpu_reset(void *arg)
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static void sigp_set_prefix(void *arg)
+static void sigp_set_prefix(CPUState *cs, void *arg)
 {
+    S390CPU *cpu = S390_CPU(cs);
     SigpInfo *si = arg;
     uint32_t addr = si->param & 0x7fffe000u;
 
-    cpu_synchronize_state(CPU(si->cpu));
+    cpu_synchronize_state(cs);
 
     if (!address_space_access_valid(&address_space_memory, addr,
                                     sizeof(struct LowCore), false)) {
@@ -1644,13 +1647,13 @@ static void sigp_set_prefix(void *arg)
     }
 
     /* cpu has to be stopped */
-    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
         set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
         return;
     }
 
-    si->cpu->env.psa = addr;
-    cpu_synchronize_post_init(CPU(si->cpu));
+    cpu->env.psa = addr;
+    cpu_synchronize_post_init(cs);
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
 }
@@ -1658,7 +1661,6 @@ static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                   uint64_t param, uint64_t *status_reg)
 {
     SigpInfo si = {
-        .cpu = dst_cpu,
         .param = param,
         .status_reg = status_reg,
     };

target-s390x/misc_helper.c

@@ -126,7 +126,7 @@ static int modified_clear_reset(S390CPU *cpu)
     pause_all_vcpus();
     cpu_synchronize_all_states();
     CPU_FOREACH(t) {
-        run_on_cpu(t, s390_do_cpu_full_reset, t);
+        run_on_cpu(t, s390_do_cpu_full_reset, NULL);
     }
     s390_cmma_reset();
     subsystem_reset();
@@ -145,7 +145,7 @@ static int load_normal_reset(S390CPU *cpu)
     pause_all_vcpus();
     cpu_synchronize_all_states();
     CPU_FOREACH(t) {
-        run_on_cpu(t, s390_do_cpu_reset, t);
+        run_on_cpu(t, s390_do_cpu_reset, NULL);
     }
     s390_cmma_reset();
     subsystem_reset();