target-microblaze: Make compute_ldst_addr always use a temp

Make compute_ldst_addr always use a temp. This simplifies
the code a bit in preparation for adding support for
64bit addresses.

No functional change.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
master
Edgar E. Iglesias 2018-04-13 16:12:56 +02:00
parent a2de5ca451
commit 0dc4af5c1a
1 changed file with 37 additions and 74 deletions

View File

@@ -848,7 +848,7 @@ static void dec_imm(DisasContext *dc)
dc->clear_imm = 0; dc->clear_imm = 0;
} }
static inline TCGv_i32 *compute_ldst_addr(DisasContext *dc, TCGv_i32 *t) static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
{ {
bool extimm = dc->tb_flags & IMM_FLAG; bool extimm = dc->tb_flags & IMM_FLAG;
/* Should be set to true if r1 is used by loadstores. */ /* Should be set to true if r1 is used by loadstores. */
@@ -861,47 +861,47 @@ static inline TCGv_i32 *compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
/* Treat the common cases first. */ /* Treat the common cases first. */
if (!dc->type_b) { if (!dc->type_b) {
/* If any of the regs is r0, return a ptr to the other. */ /* If any of the regs is r0, set t to the value of the other reg. */
if (dc->ra == 0) { if (dc->ra == 0) {
return &cpu_R[dc->rb]; tcg_gen_mov_i32(*t, cpu_R[dc->rb]);
return;
} else if (dc->rb == 0) { } else if (dc->rb == 0) {
return &cpu_R[dc->ra]; tcg_gen_mov_i32(*t, cpu_R[dc->ra]);
return;
} }
if (dc->rb == 1 && dc->cpu->cfg.stackprot) { if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
stackprot = true; stackprot = true;
} }
*t = tcg_temp_new_i32();
tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]); tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
if (stackprot) { if (stackprot) {
gen_helper_stackprot(cpu_env, *t); gen_helper_stackprot(cpu_env, *t);
} }
return t; return;
} }
/* Immediate. */ /* Immediate. */
if (!extimm) { if (!extimm) {
if (dc->imm == 0) { if (dc->imm == 0) {
return &cpu_R[dc->ra]; tcg_gen_mov_i32(*t, cpu_R[dc->ra]);
return;
} }
*t = tcg_temp_new_i32();
tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm)); tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm));
tcg_gen_add_i32(*t, cpu_R[dc->ra], *t); tcg_gen_add_i32(*t, cpu_R[dc->ra], *t);
} else { } else {
*t = tcg_temp_new_i32();
tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc))); tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
} }
if (stackprot) { if (stackprot) {
gen_helper_stackprot(cpu_env, *t); gen_helper_stackprot(cpu_env, *t);
} }
return t; return;
} }
static void dec_load(DisasContext *dc) static void dec_load(DisasContext *dc)
{ {
TCGv_i32 t, v, *addr; TCGv_i32 v, addr;
unsigned int size; unsigned int size;
bool rev = false, ex = false; bool rev = false, ex = false;
TCGMemOp mop; TCGMemOp mop;
@@ -928,7 +928,8 @@ static void dec_load(DisasContext *dc)
ex ? "x" : ""); ex ? "x" : "");
t_sync_flags(dc); t_sync_flags(dc);
addr = compute_ldst_addr(dc, &t); addr = tcg_temp_new_i32();
compute_ldst_addr(dc, &addr);
/* /*
* When doing reverse accesses we need to do two things. * When doing reverse accesses we need to do two things.
@@ -947,17 +948,10 @@ static void dec_load(DisasContext *dc)
11 -> 00 */ 11 -> 00 */
TCGv_i32 low = tcg_temp_new_i32(); TCGv_i32 low = tcg_temp_new_i32();
/* Force addr into the temp. */ tcg_gen_andi_i32(low, addr, 3);
if (addr != &t) {
t = tcg_temp_new_i32();
tcg_gen_mov_i32(t, *addr);
addr = &t;
}
tcg_gen_andi_i32(low, t, 3);
tcg_gen_sub_i32(low, tcg_const_i32(3), low); tcg_gen_sub_i32(low, tcg_const_i32(3), low);
tcg_gen_andi_i32(t, t, ~3); tcg_gen_andi_i32(addr, addr, ~3);
tcg_gen_or_i32(t, t, low); tcg_gen_or_i32(addr, addr, low);
tcg_temp_free_i32(low); tcg_temp_free_i32(low);
break; break;
} }
@@ -965,14 +959,7 @@ static void dec_load(DisasContext *dc)
case 2: case 2:
/* 00 -> 10 /* 00 -> 10
10 -> 00. */ 10 -> 00. */
/* Force addr into the temp. */ tcg_gen_xori_i32(addr, addr, 2);
if (addr != &t) {
t = tcg_temp_new_i32();
tcg_gen_xori_i32(t, *addr, 2);
addr = &t;
} else {
tcg_gen_xori_i32(t, t, 2);
}
break; break;
default: default:
cpu_abort(CPU(dc->cpu), "Invalid reverse size\n"); cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
@@ -982,13 +969,7 @@ static void dec_load(DisasContext *dc)
/* lwx does not throw unaligned access errors, so force alignment */ /* lwx does not throw unaligned access errors, so force alignment */
if (ex) { if (ex) {
/* Force addr into the temp. */ tcg_gen_andi_i32(addr, addr, ~3);
if (addr != &t) {
t = tcg_temp_new_i32();
tcg_gen_mov_i32(t, *addr);
addr = &t;
}
tcg_gen_andi_i32(t, t, ~3);
} }
/* If we get a fault on a dslot, the jmpstate better be in sync. */ /* If we get a fault on a dslot, the jmpstate better be in sync. */
@@ -1002,16 +983,16 @@ static void dec_load(DisasContext *dc)
* address and if that succeeds we write into the destination reg. * address and if that succeeds we write into the destination reg.
*/ */
v = tcg_temp_new_i32(); v = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop); tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) { if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc); tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd), gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
tcg_const_i32(0), tcg_const_i32(size - 1)); tcg_const_i32(0), tcg_const_i32(size - 1));
} }
if (ex) { if (ex) {
tcg_gen_mov_i32(env_res_addr, *addr); tcg_gen_mov_i32(env_res_addr, addr);
tcg_gen_mov_i32(env_res_val, v); tcg_gen_mov_i32(env_res_val, v);
} }
if (dc->rd) { if (dc->rd) {
@@ -1024,13 +1005,12 @@ static void dec_load(DisasContext *dc)
write_carryi(dc, 0); write_carryi(dc, 0);
} }
if (addr == &t) tcg_temp_free_i32(addr);
tcg_temp_free_i32(t);
} }
static void dec_store(DisasContext *dc) static void dec_store(DisasContext *dc)
{ {
TCGv_i32 t, *addr, swx_addr; TCGv_i32 addr;
TCGLabel *swx_skip = NULL; TCGLabel *swx_skip = NULL;
unsigned int size; unsigned int size;
bool rev = false, ex = false; bool rev = false, ex = false;
@@ -1059,21 +1039,19 @@ static void dec_store(DisasContext *dc)
t_sync_flags(dc); t_sync_flags(dc);
/* If we get a fault on a dslot, the jmpstate better be in sync. */ /* If we get a fault on a dslot, the jmpstate better be in sync. */
sync_jmpstate(dc); sync_jmpstate(dc);
addr = compute_ldst_addr(dc, &t); /* SWX needs a temp_local. */
addr = ex ? tcg_temp_local_new_i32() : tcg_temp_new_i32();
compute_ldst_addr(dc, &addr);
swx_addr = tcg_temp_local_new_i32();
if (ex) { /* swx */ if (ex) { /* swx */
TCGv_i32 tval; TCGv_i32 tval;
/* Force addr into the swx_addr. */
tcg_gen_mov_i32(swx_addr, *addr);
addr = &swx_addr;
/* swx does not throw unaligned access errors, so force alignment */ /* swx does not throw unaligned access errors, so force alignment */
tcg_gen_andi_i32(swx_addr, swx_addr, ~3); tcg_gen_andi_i32(addr, addr, ~3);
write_carryi(dc, 1); write_carryi(dc, 1);
swx_skip = gen_new_label(); swx_skip = gen_new_label();
tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, swx_addr, swx_skip); tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, addr, swx_skip);
/* Compare the value loaded at lwx with current contents of /* Compare the value loaded at lwx with current contents of
the reserved location. the reserved location.
@@ -1081,8 +1059,8 @@ static void dec_store(DisasContext *dc)
this compare and the following write to be atomic. For user this compare and the following write to be atomic. For user
emulation we need to add atomicity between threads. */ emulation we need to add atomicity between threads. */
tval = tcg_temp_new_i32(); tval = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false), tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
MO_TEUL); MO_TEUL);
tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip); tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
write_carryi(dc, 0); write_carryi(dc, 0);
tcg_temp_free_i32(tval); tcg_temp_free_i32(tval);
@@ -1099,17 +1077,10 @@ static void dec_store(DisasContext *dc)
11 -> 00 */ 11 -> 00 */
TCGv_i32 low = tcg_temp_new_i32(); TCGv_i32 low = tcg_temp_new_i32();
/* Force addr into the temp. */ tcg_gen_andi_i32(low, addr, 3);
if (addr != &t) {
t = tcg_temp_new_i32();
tcg_gen_mov_i32(t, *addr);
addr = &t;
}
tcg_gen_andi_i32(low, t, 3);
tcg_gen_sub_i32(low, tcg_const_i32(3), low); tcg_gen_sub_i32(low, tcg_const_i32(3), low);
tcg_gen_andi_i32(t, t, ~3); tcg_gen_andi_i32(addr, addr, ~3);
tcg_gen_or_i32(t, t, low); tcg_gen_or_i32(addr, addr, low);
tcg_temp_free_i32(low); tcg_temp_free_i32(low);
break; break;
} }
@@ -1118,20 +1089,14 @@ static void dec_store(DisasContext *dc)
/* 00 -> 10 /* 00 -> 10
10 -> 00. */ 10 -> 00. */
/* Force addr into the temp. */ /* Force addr into the temp. */
if (addr != &t) { tcg_gen_xori_i32(addr, addr, 2);
t = tcg_temp_new_i32();
tcg_gen_xori_i32(t, *addr, 2);
addr = &t;
} else {
tcg_gen_xori_i32(t, t, 2);
}
break; break;
default: default:
cpu_abort(CPU(dc->cpu), "Invalid reverse size\n"); cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
break; break;
} }
} }
tcg_gen_qemu_st_i32(cpu_R[dc->rd], *addr, tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr,
cpu_mmu_index(&dc->cpu->env, false), mop); cpu_mmu_index(&dc->cpu->env, false), mop);
/* Verify alignment if needed. */ /* Verify alignment if needed. */
@@ -1143,17 +1108,15 @@ static void dec_store(DisasContext *dc)
* the alignment checks in between the probe and the mem * the alignment checks in between the probe and the mem
* access. * access.
*/ */
gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd), gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
tcg_const_i32(1), tcg_const_i32(size - 1)); tcg_const_i32(1), tcg_const_i32(size - 1));
} }
if (ex) { if (ex) {
gen_set_label(swx_skip); gen_set_label(swx_skip);
} }
tcg_temp_free_i32(swx_addr);
if (addr == &t) tcg_temp_free_i32(addr);
tcg_temp_free_i32(t);
} }
static inline void eval_cc(DisasContext *dc, unsigned int cc, static inline void eval_cc(DisasContext *dc, unsigned int cc,