target-arm: Move setting of exception info into tlb_fill

Move the code which sets exception information out of
arm_cpu_handle_mmu_fault and into tlb_fill. tlb_fill
is the only caller which wants to raise_exception()
so it makes more sense for it to handle the whole of
the exception setup.

As part of this cleanup, move the user-mode-only
implementation function for the handle_mmu_fault CPU
method into cpu.c so we don't need to make it globally
visible, and rename the softmmu-only utility function
arm_cpu_handle_mmu_fault to arm_tlb_fill so it's clear
that it's not the same thing.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Branch: master
Author: Peter Maydell <peter.maydell@linaro.org>, 2015-05-29 11:28:51 +01:00
Parent: f2932df777
Commit: 8c6084bf10
5 changed files with 52 additions and 44 deletions
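The commit message describes the new split in prose; condensed into a standalone sketch it looks like this (not the QEMU code itself: every demo_-prefixed name is invented for illustration and the mapping check is faked). The page-table walker returns 0 on success or a fault-status code on failure, and only the softmmu tlb_fill() turns that code into exception state and raises the exception.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for arm_tlb_fill(): 0 on success, nonzero DFSR/IFSR-style
 * fault code on failure. The mapping check below is invented. */
static int demo_tlb_walk(unsigned long addr)
{
    return (addr < 0x1000) ? 0x7 : 0;   /* pretend the low page is unmapped */
}

/* Stand-in for the softmmu tlb_fill(): after this patch it owns all of
 * the exception setup instead of the walker doing it. */
static void demo_tlb_fill(unsigned long addr, int access_type)
{
    int ret = demo_tlb_walk(addr);

    if (ret) {
        printf("raise %s abort: fsr=0x%x vaddr=0x%lx\n",
               access_type == 2 ? "prefetch" : "data", (unsigned)ret, addr);
        exit(1);                        /* models raise_exception() not returning */
    }
    printf("mapping installed for 0x%lx\n", addr);
}

int main(void)
{
    demo_tlb_fill(0x2000, 0);           /* read of a mapped page: succeeds */
    demo_tlb_fill(0x0010, 2);           /* insn fetch from unmapped page: aborts */
    return 0;
}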

target-arm/cpu.c

@@ -1197,6 +1197,23 @@ static Property arm_cpu_properties[] = {
     DEFINE_PROP_END_OF_LIST()
 };
 
+#ifdef CONFIG_USER_ONLY
+static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+                                    int mmu_idx)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    env->exception.vaddress = address;
+    if (rw == 2) {
+        cs->exception_index = EXCP_PREFETCH_ABORT;
+    } else {
+        cs->exception_index = EXCP_DATA_ABORT;
+    }
+    return 1;
+}
+#endif
+
 static void arm_cpu_class_init(ObjectClass *oc, void *data)
 {
     ARMCPUClass *acc = ARM_CPU_CLASS(oc);

target-arm/cpu.h

@@ -504,8 +504,6 @@ static inline bool is_a64(CPUARMState *env)
    is returned if the signal was handled by the virtual CPU. */
 int cpu_arm_signal_handler(int host_signum, void *pinfo,
                            void *puc);
-int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
-                             int mmu_idx);
 
 /**
  * pmccntr_sync

target-arm/helper.c

@@ -4047,21 +4047,6 @@ uint32_t HELPER(rbit)(uint32_t x)
 
 #if defined(CONFIG_USER_ONLY)
 
-int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
-                             int mmu_idx)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-
-    env->exception.vaddress = address;
-    if (rw == 2) {
-        cs->exception_index = EXCP_PREFETCH_ABORT;
-    } else {
-        cs->exception_index = EXCP_DATA_ABORT;
-    }
-    return 1;
-}
-
 /* These should probably raise undefined insn exceptions. */
 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
 {
@@ -5826,8 +5811,12 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address,
     }
 }
 
-int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
-                             int access_type, int mmu_idx)
+/* Walk the page table and (if the mapping exists) add the page
+ * to the TLB. Return 0 on success, or an ARM DFSR/IFSR fault
+ * register format value on failure.
+ */
+int arm_tlb_fill(CPUState *cs, vaddr address,
+                 int access_type, int mmu_idx)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
@@ -5835,8 +5824,6 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
     target_ulong page_size;
     int prot;
     int ret;
-    uint32_t syn;
-    bool same_el = (arm_current_el(env) != 0);
     MemTxAttrs attrs = {};
 
     ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
@@ -5850,27 +5837,7 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
         return 0;
     }
 
-    /* AArch64 syndrome does not have an LPAE bit */
-    syn = ret & ~(1 << 9);
-
-    /* For insn and data aborts we assume there is no instruction syndrome
-     * information; this is always true for exceptions reported to EL1.
-     */
-    if (access_type == 2) {
-        syn = syn_insn_abort(same_el, 0, 0, syn);
-        cs->exception_index = EXCP_PREFETCH_ABORT;
-    } else {
-        syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn);
-        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
-            ret |= (1 << 11);
-        }
-        cs->exception_index = EXCP_DATA_ABORT;
-    }
-
-    env->exception.syndrome = syn;
-    env->exception.vaddress = address;
-    env->exception.fsr = ret;
-    return 1;
+    return ret;
 }
 
 hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
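The new comment on arm_tlb_fill() says that on failure the return value is in ARM DFSR/IFSR fault register format rather than the bare "1" the old function returned. A minimal illustration of the caller's view (plain C, not QEMU code; 0x7 is one plausible example value, the short-descriptor "translation fault, page" status):

#include <stdio.h>

int main(void)
{
    /* Zero means the walk succeeded and the page was put in the TLB;
     * any nonzero value is a fault code in FSR layout. */
    unsigned int ret = 0x7;

    printf("%s (FSR-format value 0x%x)\n", ret ? "fault" : "mapped", ret);
    return 0;
}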

target-arm/internals.h

@@ -387,4 +387,7 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
 void arm_handle_psci_call(ARMCPU *cpu);
 #endif
 
+/* Do a page table walk and add page to TLB if possible */
+int arm_tlb_fill(CPUState *cpu, vaddr address, int rw, int mmu_idx);
+
 #endif

target-arm/op_helper.c

@@ -80,16 +80,39 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
     int ret;
 
-    ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
+    ret = arm_tlb_fill(cs, addr, is_write, mmu_idx);
     if (unlikely(ret)) {
         ARMCPU *cpu = ARM_CPU(cs);
         CPUARMState *env = &cpu->env;
+        uint32_t syn, exc;
+        bool same_el = (arm_current_el(env) != 0);
+
         if (retaddr) {
             /* now we have a real cpu fault */
             cpu_restore_state(cs, retaddr);
         }
-        raise_exception(env, cs->exception_index);
+
+        /* AArch64 syndrome does not have an LPAE bit */
+        syn = ret & ~(1 << 9);
+
+        /* For insn and data aborts we assume there is no instruction syndrome
+         * information; this is always true for exceptions reported to EL1.
+         */
+        if (is_write == 2) {
+            syn = syn_insn_abort(same_el, 0, 0, syn);
+            exc = EXCP_PREFETCH_ABORT;
+        } else {
+            syn = syn_data_abort(same_el, 0, 0, 0, is_write == 1, syn);
+            if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
+                ret |= (1 << 11);
+            }
+            exc = EXCP_DATA_ABORT;
+        }
+        env->exception.syndrome = syn;
+        env->exception.vaddress = addr;
+        env->exception.fsr = ret;
+        raise_exception(env, exc);
     }
 }
 #endif
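Finally, the two bit manipulations that moved into tlb_fill(), as a self-contained sketch (not QEMU code; the starting fsr value is invented): bit 9 of the fault code is the LPAE-format flag, which the AArch64 syndrome has no equivalent for, so it is masked off before the syndrome is built, and bit 11 is the short-descriptor DFSR WnR (write-not-read) flag, which the patched code sets for faulting writes on ARMv6 and later.

#include <stdio.h>

int main(void)
{
    unsigned int fsr = (1u << 9) | 0x7; /* invented example: LPAE bit + status */
    int is_write = 1;                   /* 0 = read, 1 = write, 2 = insn fetch */

    unsigned int syn = fsr & ~(1u << 9);    /* syndrome payload: LPAE bit dropped */
    if (is_write == 1) {
        fsr |= 1u << 11;                    /* DFSR.WnR: record that it was a write */
    }

    printf("reported fsr=0x%x, syndrome fault status=0x%x\n", fsr, syn);
    return 0;
}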