x86: use caller supplied CPUState for interrupt related stuff

Several x86-specific functions are called from cpu-exec.c with the
assumption that the global env register is valid. This will be changed
later, so make the functions take a caller-supplied CPUState parameter.

It would be cleaner to move the functions to helper.c, but there are
quite a lot of dependencies between do_interrupt() and other functions.

Add svm_check_intercept() and cpu_cc_compute_all() wrappers instead of
calling the helpers (which use the global env, AREG0) directly.

Signed-off-by: Blue Swirl <blauwirbel@gmail.com>

Branch: master
Author: Blue Swirl
Date:   2011-05-16 19:38:48 +00:00
Parent: e67768d0f1
Commit: e694d4e289
4 changed files with 106 additions and 44 deletions
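
As context for the diffs below, here is a minimal standalone C sketch (not part of the commit) of the save/switch/restore pattern that the new do_interrupt(), do_interrupt_x86_hardirq(), do_smm_enter(), svm_check_intercept() and cpu_cc_compute_all() wrappers all follow. CPUState, helper_do_work() and cpu_do_work() are simplified stand-ins invented for this sketch, not the real QEMU definitions, and the plain global pointer stands in for the AREG0 register variable.

/* Sketch only: a helper that implicitly uses a global env, wrapped by a
 * function that takes the CPU state from the caller instead. */
#include <stdio.h>

typedef struct CPUState {
    int exception_index;
} CPUState;

/* stand-in for the global env register variable (AREG0) used by helpers */
static CPUState *env;

static void helper_do_work(void)
{
    /* helpers operate implicitly on the global env */
    printf("exception_index = %d\n", env->exception_index);
}

/* wrapper: save the global env, switch to the caller's CPU state,
 * call the helper, then restore the previous global env */
void cpu_do_work(CPUState *env1)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    helper_do_work();
    env = saved_env;
}

int main(void)
{
    CPUState cpu = { .exception_index = 2 };

    cpu_do_work(&cpu);  /* the caller no longer relies on env being set up */
    return 0;
}

This is what lets cpu-exec.c pass its CPUState explicitly instead of depending on the global register being valid at every call site.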

cpu-exec.c

@@ -266,26 +266,13 @@ int cpu_exec(CPUState *env1)
                        which will be handled outside the cpu execution
                        loop */
 #if defined(TARGET_I386)
-                    do_interrupt_user(env->exception_index,
-                                      env->exception_is_int,
-                                      env->error_code,
-                                      env->exception_next_eip);
-                    /* successfully delivered */
-                    env->old_exception = -1;
+                    do_interrupt(env);
 #endif
                     ret = env->exception_index;
                     break;
 #else
 #if defined(TARGET_I386)
-                    /* simulate a real cpu exception. On i386, it can
-                       trigger new exceptions, but we do not handle
-                       double or triple faults yet. */
-                    do_interrupt(env->exception_index,
-                                 env->exception_is_int,
-                                 env->error_code,
-                                 env->exception_next_eip, 0);
-                    /* successfully delivered */
-                    env->old_exception = -1;
+                    do_interrupt(env);
 #elif defined(TARGET_PPC)
                     do_interrupt(env);
 #elif defined(TARGET_LM32)
@@ -341,7 +328,7 @@ int cpu_exec(CPUState *env1)
 #endif
 #if defined(TARGET_I386)
                 if (interrupt_request & CPU_INTERRUPT_INIT) {
-                    svm_check_intercept(SVM_EXIT_INIT);
+                    svm_check_intercept(env, SVM_EXIT_INIT);
                     do_cpu_init(env);
                     env->exception_index = EXCP_HALTED;
                     cpu_loop_exit(env);
@@ -350,19 +337,19 @@ int cpu_exec(CPUState *env1)
                 } else if (env->hflags2 & HF2_GIF_MASK) {
                     if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                         !(env->hflags & HF_SMM_MASK)) {
-                        svm_check_intercept(SVM_EXIT_SMI);
+                        svm_check_intercept(env, SVM_EXIT_SMI);
                         env->interrupt_request &= ~CPU_INTERRUPT_SMI;
-                        do_smm_enter();
+                        do_smm_enter(env);
                         next_tb = 0;
                     } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                !(env->hflags2 & HF2_NMI_MASK)) {
                         env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                         env->hflags2 |= HF2_NMI_MASK;
-                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
+                        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                         next_tb = 0;
                     } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                         env->interrupt_request &= ~CPU_INTERRUPT_MCE;
-                        do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
+                        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                         next_tb = 0;
                     } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                (((env->hflags2 & HF2_VINTR_MASK) &&
@@ -371,7 +358,7 @@ int cpu_exec(CPUState *env1)
                                  (env->eflags & IF_MASK &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                         int intno;
-                        svm_check_intercept(SVM_EXIT_INTR);
+                        svm_check_intercept(env, SVM_EXIT_INTR);
                         env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                         intno = cpu_get_pic_interrupt(env);
                         qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
@@ -380,7 +367,7 @@ int cpu_exec(CPUState *env1)
                         env = cpu_single_env;
 #define env cpu_single_env
 #endif
-                        do_interrupt(intno, 0, 0, 0, 1);
+                        do_interrupt_x86_hardirq(env, intno, 1);
                         /* ensure that no TB jump will be modified as
                            the program flow was changed */
                         next_tb = 0;
@@ -390,10 +377,10 @@ int cpu_exec(CPUState *env1)
                                !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                         int intno;
                         /* FIXME: this should respect TPR */
-                        svm_check_intercept(SVM_EXIT_VINTR);
+                        svm_check_intercept(env, SVM_EXIT_VINTR);
                         intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                         qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
-                        do_interrupt(intno, 0, 0, 0, 1);
+                        do_interrupt_x86_hardirq(env, intno, 1);
                         env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                         next_tb = 0;
 #endif
@@ -570,7 +557,8 @@ int cpu_exec(CPUState *env1)
         if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
             /* restore flags in standard format */
 #if defined(TARGET_I386)
-            env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
+            env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
+                | (DF & DF_MASK);
             log_cpu_state(env, X86_DUMP_CCOP);
             env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_M68K)
@@ -661,7 +649,8 @@ int cpu_exec(CPUState *env1)
 #if defined(TARGET_I386)
     /* restore flags in standard format */
-    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
+    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
+        | (DF & DF_MASK);
 #elif defined(TARGET_ARM)
     /* XXX: Save/restore host fpu exception state?. */
 #elif defined(TARGET_UNICORE32)

target-i386/cpu.h

@@ -1000,4 +1000,14 @@ void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                         uint64_t status, uint64_t mcg_status, uint64_t addr,
                         uint64_t misc, int flags);
 
+/* op_helper.c */
+void do_interrupt(CPUState *env);
+void do_interrupt_x86_hardirq(CPUState *env, int intno, int is_hw);
+
+void do_smm_enter(CPUState *env1);
+
+void svm_check_intercept(CPUState *env1, uint32_t type);
+
+uint32_t cpu_cc_compute_all(CPUState *env1, int op);
+
 #endif /* CPU_I386_H */

target-i386/exec.h

@@ -67,14 +67,9 @@ register struct CPUX86State *env asm(AREG0);
 #include "exec-all.h"
 
 /* op_helper.c */
-void do_interrupt(int intno, int is_int, int error_code,
-                  target_ulong next_eip, int is_hw);
-void do_interrupt_user(int intno, int is_int, int error_code,
-                       target_ulong next_eip);
 void QEMU_NORETURN raise_exception_err(int exception_index, int error_code);
 void QEMU_NORETURN raise_exception(int exception_index);
 void QEMU_NORETURN raise_exception_env(int exception_index, CPUState *nenv);
-void do_smm_enter(void);
 
 /* n must be a constant to be efficient */
 static inline target_long lshift(target_long x, int n)
@@ -87,11 +82,6 @@ static inline target_long lshift(target_long x, int n)
 #include "helper.h"
 
-static inline void svm_check_intercept(uint32_t type)
-{
-    helper_svm_check_intercept_param(type, 0);
-}
-
 #if !defined(CONFIG_USER_ONLY)
 
 #include "softmmu_exec.h"

target-i386/op_helper.c

@@ -1150,9 +1150,10 @@ static void do_interrupt_real(int intno, int is_int, int error_code,
     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
 }
 
 #if defined(CONFIG_USER_ONLY)
 /* fake user mode interrupt */
-void do_interrupt_user(int intno, int is_int, int error_code,
-                       target_ulong next_eip)
+static void do_interrupt_user(int intno, int is_int, int error_code,
+                              target_ulong next_eip)
 {
     SegmentCache *dt;
     target_ulong ptr;
@@ -1181,7 +1182,8 @@ void do_interrupt_user(int intno, int is_int, int error_code,
     EIP = next_eip;
 }
 
-#if !defined(CONFIG_USER_ONLY)
+#else
+
 static void handle_even_inj(int intno, int is_int, int error_code,
                             int is_hw, int rm)
 {
@@ -1207,8 +1209,8 @@ static void handle_even_inj(int intno, int is_int, int error_code,
  * the int instruction. next_eip is the EIP value AFTER the interrupt
  * instruction. It is only relevant if is_int is TRUE.
  */
-void do_interrupt(int intno, int is_int, int error_code,
-                  target_ulong next_eip, int is_hw)
+static void do_interrupt_all(int intno, int is_int, int error_code,
+                             target_ulong next_eip, int is_hw)
 {
     if (qemu_loglevel_mask(CPU_LOG_INT)) {
         if ((env->cr[0] & CR0_PE_MASK)) {
@@ -1270,6 +1272,46 @@ void do_interrupt(int intno, int is_int, int error_code,
 #endif
 }
 
+void do_interrupt(CPUState *env1)
+{
+    CPUState *saved_env;
+
+    saved_env = env;
+    env = env1;
+#if defined(CONFIG_USER_ONLY)
+    /* if user mode only, we simulate a fake exception
+       which will be handled outside the cpu execution
+       loop */
+    do_interrupt_user(env->exception_index,
+                      env->exception_is_int,
+                      env->error_code,
+                      env->exception_next_eip);
+    /* successfully delivered */
+    env->old_exception = -1;
+#else
+    /* simulate a real cpu exception. On i386, it can
+       trigger new exceptions, but we do not handle
+       double or triple faults yet. */
+    do_interrupt_all(env->exception_index,
+                     env->exception_is_int,
+                     env->error_code,
+                     env->exception_next_eip, 0);
+    /* successfully delivered */
+    env->old_exception = -1;
+#endif
+    env = saved_env;
+}
+
+void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
+{
+    CPUState *saved_env;
+
+    saved_env = env;
+    env = env1;
+    do_interrupt_all(intno, 0, 0, 0, is_hw);
+    env = saved_env;
+}
+
 /* This should come from sysemu.h - if we could include it here... */
 void qemu_system_reset_request(void);
@@ -1359,7 +1401,7 @@ void raise_exception_env(int exception_index, CPUState *nenv)
 #if defined(CONFIG_USER_ONLY)
 
-void do_smm_enter(void)
+void do_smm_enter(CPUState *env1)
 {
 }
@@ -1375,11 +1417,15 @@ void helper_rsm(void)
 #define SMM_REVISION_ID 0x00020000
 #endif
 
-void do_smm_enter(void)
+void do_smm_enter(CPUState *env1)
 {
     target_ulong sm_state;
     SegmentCache *dt;
     int i, offset;
+    CPUState *saved_env;
 
+    saved_env = env;
+    env = env1;
+
     qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
     log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
@@ -1506,6 +1552,7 @@ void do_smm_enter(void)
     cpu_x86_update_cr4(env, 0);
     env->dr[7] = 0x00000400;
     CC_OP = CC_OP_EFLAGS;
+    env = saved_env;
 }
 
 void helper_rsm(void)
@@ -4859,6 +4906,10 @@ void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
 {
 }
 
+void svm_check_intercept(CPUState *env1, uint32_t type)
+{
+}
+
 void helper_svm_check_io(uint32_t port, uint32_t param,
                          uint32_t next_eip_addend)
 {
@@ -5040,7 +5091,7 @@ void helper_vmrun(int aflag, int next_eip_addend)
                 env->exception_next_eip = -1;
                 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                 /* XXX: is it always correct ? */
-                do_interrupt(vector, 0, 0, 0, 1);
+                do_interrupt_all(vector, 0, 0, 0, 1);
                 break;
         case SVM_EVTINJ_TYPE_NMI:
                 env->exception_index = EXCP02_NMI;
@@ -5249,6 +5300,16 @@ void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
     }
 }
 
+void svm_check_intercept(CPUState *env1, uint32_t type)
+{
+    CPUState *saved_env;
+
+    saved_env = env;
+    env = env1;
+    helper_svm_check_intercept_param(type, 0);
+    env = saved_env;
+}
+
 void helper_svm_check_io(uint32_t port, uint32_t param,
                          uint32_t next_eip_addend)
 {
@@ -5575,6 +5636,18 @@ uint32_t helper_cc_compute_all(int op)
     }
 }
 
+uint32_t cpu_cc_compute_all(CPUState *env1, int op)
+{
+    CPUState *saved_env;
+    uint32_t ret;
+
+    saved_env = env;
+    env = env1;
+    ret = helper_cc_compute_all(op);
+    env = saved_env;
+    return ret;
+}
+
 uint32_t helper_cc_compute_c(int op)
 {
     switch (op) {