tcg/s390: Use softmmu fast path for unaligned accesses

Signed-off-by: Richard Henderson <rth@twiddle.net>
Richard Henderson 2015-07-23 13:32:35 -07:00
parent 68d45bb61c
commit a5e39810b9
1 changed file with 21 additions and 5 deletions


@@ -1504,20 +1504,36 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                                int mem_index, bool is_ld)
 {
-    TCGMemOp s_bits = opc & MO_SIZE;
-    uint64_t tlb_mask = TARGET_PAGE_MASK | ((1 << s_bits) - 1);
-    int ofs;
+    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    int ofs, a_off;
+    uint64_t tlb_mask;
+
+    /* For aligned accesses, we check the first byte and include the alignment
+       bits within the address. For unaligned access, we check that we don't
+       cross pages using the address of the last byte of the access. */
+    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+        a_off = 0;
+        tlb_mask = TARGET_PAGE_MASK | s_mask;
+    } else {
+        a_off = s_mask;
+        tlb_mask = TARGET_PAGE_MASK;
+    }
 
     if (facilities & FACILITY_GEN_INST_EXT) {
         tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                       64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                       63 - CPU_TLB_ENTRY_BITS,
                       64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
-        tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
+        if (a_off) {
+            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
+            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
+        } else {
+            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
+        }
     } else {
         tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                      TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
-        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, addr_reg);
+        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
         tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                   (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
         tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
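The heart of the change is the comparator setup described in the new comment: an aligned access keeps its alignment bits in the TLB comparison, while an unaligned access is compared using the page of its last byte (addr + s_mask), so only accesses that truly cross a page boundary leave the fast path. The following is a minimal standalone sketch of that logic, not QEMU code: tlb_compare_value is a hypothetical name and a fixed 4 KiB page size is assumed.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's target constants (assumed 4 KiB pages). */
#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~(uint64_t)((1u << TARGET_PAGE_BITS) - 1))

/* Value compared against the TLB tag for an access of (1 << size_log2)
   bytes at addr; 'aligned' mirrors the MO_ALIGN case in the patch. */
static uint64_t tlb_compare_value(uint64_t addr, int size_log2, int aligned)
{
    uint64_t s_mask = (1ULL << size_log2) - 1;

    if (aligned || s_mask == 0) {
        /* Keep the alignment bits in the comparator: a misaligned address
           can never equal the page-aligned tag, forcing the slow path. */
        return addr & (TARGET_PAGE_MASK | s_mask);
    }
    /* Unaligned: compare using the page of the access's last byte, so a
       page-crossing access mismatches while a contained one still hits. */
    return (addr + s_mask) & TARGET_PAGE_MASK;
}

int main(void)
{
    /* 4-byte load crossing from page 0x1000 into 0x2000: comparator 0x2000. */
    printf("%#llx\n", (unsigned long long)tlb_compare_value(0x1ffe, 2, 0));
    /* Same load fully inside page 0x1000: comparator 0x1000. */
    printf("%#llx\n", (unsigned long long)tlb_compare_value(0x1ff8, 2, 0));
    return 0;
}

In the first case the comparator lands on the next page, so it cannot match a TLB tag filled in for page 0x1000 and the access falls through to the slow path; in the second it compares as 0x1000 and stays on the fast path, which is exactly what the emitted LA (load address, adding a_off) plus AND sequence achieves.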