target-ppc: convert FPU load/store to TCG

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5786 c046a42c-6fe2-441c-8c8c-71466251a162
aurel32 2008-11-23 16:30:50 +00:00
parent a7859e892b
commit a0d7d5a776
4 changed files with 159 additions and 216 deletions


@@ -35,6 +35,8 @@ DEF_HELPER_0(reset_fpstatus, void)
DEF_HELPER_2(compute_fprf, i32, i64, i32)
DEF_HELPER_2(store_fpscr, void, i64, i32)
DEF_HELPER_1(fpscr_setbit, void, i32)
DEF_HELPER_1(float64_to_float32, i32, i64)
DEF_HELPER_1(float32_to_float64, i64, i32)
DEF_HELPER_1(fctiw, i64, i64)
DEF_HELPER_1(fctiwz, i64, i64)
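Not part of the patch, for context: the DEF_HELPER_1/DEF_HELPER_2 lines above declare TCG helpers; the same list is macro-expanded more than once, producing both the C prototypes of the helper_* functions added to op_helper.c below and the gen_helper_* emitters that translate.c calls in the later hunks (gen_helper_float32_to_float64, gen_helper_float64_to_float32). A toy, self-contained C sketch of that multiple-expansion technique follows; the names (DEMO_HELPERS, demo_square, demo_negate) are purely illustrative and QEMU's real def-helper.h machinery differs in detail.

#include <stdint.h>
#include <stdio.h>

/* One list of helpers, expanded more than once. */
#define DEMO_HELPERS \
    DEF_DEMO(square, int32_t, int32_t) \
    DEF_DEMO(negate, int64_t, int64_t)

/* Expansion 1: prototypes, analogous to the helper_* prototypes. */
#define DEF_DEMO(name, ret, arg) static ret demo_##name(arg x);
DEMO_HELPERS
#undef DEF_DEMO

/* Expansion 2: a name table, standing in for the generated gen_helper_* glue. */
#define DEF_DEMO(name, ret, arg) #name,
static const char *demo_names[] = { DEMO_HELPERS };
#undef DEF_DEMO

static int32_t demo_square(int32_t x) { return x * x; }
static int64_t demo_negate(int64_t x) { return -x; }

int main(void)
{
    printf("%s(7) = %d\n", demo_names[0], demo_square(7));
    printf("%s(7) = %lld\n", demo_names[1], (long long)demo_negate(7));
    return 0;
}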


@@ -237,6 +237,24 @@ target_ulong helper_popcntb_64 (target_ulong val)
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
CPU_FloatU f;
CPU_DoubleU d;
f.l = arg;
d.d = float32_to_float64(f.f, &env->fp_status);
return d.ll;
}
uint32_t helper_float64_to_float32(uint64_t arg)
{
CPU_FloatU f;
CPU_DoubleU d;
d.ll = arg;
f.f = float64_to_float32(d.d, &env->fp_status);
return f.l;
}
static always_inline int fpisneg (float64 d)
{
CPU_DoubleU u;

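Not part of the patch, for reference: the two new helpers above only shuffle bit patterns between a 32-bit and a 64-bit container around the conversion; in QEMU the conversion itself goes through softfloat (float32_to_float64/float64_to_float32 with env->fp_status) so that rounding mode and exception flags are honoured. A minimal standalone sketch of the same plumbing using the host's native float/double, assuming an IEEE-754 host; names such as demo_float32_to_float64 are illustrative only.

#include <stdint.h>
#include <stdio.h>

typedef union { float f;  uint32_t l;  } FloatU;   /* stands in for CPU_FloatU  */
typedef union { double d; uint64_t ll; } DoubleU;  /* stands in for CPU_DoubleU */

/* lfs-style widening: 32-bit memory image -> 64-bit register image */
static uint64_t demo_float32_to_float64(uint32_t arg)
{
    FloatU f;
    DoubleU d;
    f.l = arg;
    d.d = (double)f.f;
    return d.ll;
}

/* stfs-style narrowing: 64-bit register image -> 32-bit memory image */
static uint32_t demo_float64_to_float32(uint64_t arg)
{
    FloatU f;
    DoubleU d;
    d.ll = arg;
    f.f = (float)d.d;
    return f.l;
}

int main(void)
{
    uint32_t one_f32 = 0x3f800000;                      /* 1.0f */
    uint64_t one_f64 = demo_float32_to_float64(one_f32);
    printf("0x%016llx\n", (unsigned long long)one_f64);  /* 0x3ff0000000000000 */
    printf("0x%08x\n", (unsigned)demo_float64_to_float32(one_f64)); /* 0x3f800000 */
    return 0;
}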

@@ -145,139 +145,6 @@ void OPPROTO glue(op_stsw_64, MEMSUFFIX) (void)
}
#endif
/*** Floating-point store ***/
#define PPC_STF_OP(name, op) \
void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)((uint32_t)T0, FT0); \
RETURN(); \
}
#if defined(TARGET_PPC64)
#define PPC_STF_OP_64(name, op) \
void OPPROTO glue(glue(glue(op_st, name), _64), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)((uint64_t)T0, FT0); \
RETURN(); \
}
#endif
static always_inline void glue(stfs, MEMSUFFIX) (target_ulong EA, float64 d)
{
glue(stfl, MEMSUFFIX)(EA, float64_to_float32(d, &env->fp_status));
}
static always_inline void glue(stfiw, MEMSUFFIX) (target_ulong EA, float64 d)
{
CPU_DoubleU u;
/* Store the low order 32 bits without any conversion */
u.d = d;
glue(st32, MEMSUFFIX)(EA, u.l.lower);
}
PPC_STF_OP(fd, stfq);
PPC_STF_OP(fs, stfs);
PPC_STF_OP(fiw, stfiw);
#if defined(TARGET_PPC64)
PPC_STF_OP_64(fd, stfq);
PPC_STF_OP_64(fs, stfs);
PPC_STF_OP_64(fiw, stfiw);
#endif
static always_inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, float64 d)
{
CPU_DoubleU u;
u.d = d;
u.ll = bswap64(u.ll);
glue(stfq, MEMSUFFIX)(EA, u.d);
}
static always_inline void glue(stfsr, MEMSUFFIX) (target_ulong EA, float64 d)
{
CPU_FloatU u;
u.f = float64_to_float32(d, &env->fp_status);
u.l = bswap32(u.l);
glue(stfl, MEMSUFFIX)(EA, u.f);
}
static always_inline void glue(stfiwr, MEMSUFFIX) (target_ulong EA, float64 d)
{
CPU_DoubleU u;
/* Store the low order 32 bits without any conversion */
u.d = d;
u.l.lower = bswap32(u.l.lower);
glue(st32, MEMSUFFIX)(EA, u.l.lower);
}
PPC_STF_OP(fd_le, stfqr);
PPC_STF_OP(fs_le, stfsr);
PPC_STF_OP(fiw_le, stfiwr);
#if defined(TARGET_PPC64)
PPC_STF_OP_64(fd_le, stfqr);
PPC_STF_OP_64(fs_le, stfsr);
PPC_STF_OP_64(fiw_le, stfiwr);
#endif
/*** Floating-point load ***/
#define PPC_LDF_OP(name, op) \
void OPPROTO glue(glue(op_l, name), MEMSUFFIX) (void) \
{ \
FT0 = glue(op, MEMSUFFIX)((uint32_t)T0); \
RETURN(); \
}
#if defined(TARGET_PPC64)
#define PPC_LDF_OP_64(name, op) \
void OPPROTO glue(glue(glue(op_l, name), _64), MEMSUFFIX) (void) \
{ \
FT0 = glue(op, MEMSUFFIX)((uint64_t)T0); \
RETURN(); \
}
#endif
static always_inline float64 glue(ldfs, MEMSUFFIX) (target_ulong EA)
{
return float32_to_float64(glue(ldfl, MEMSUFFIX)(EA), &env->fp_status);
}
PPC_LDF_OP(fd, ldfq);
PPC_LDF_OP(fs, ldfs);
#if defined(TARGET_PPC64)
PPC_LDF_OP_64(fd, ldfq);
PPC_LDF_OP_64(fs, ldfs);
#endif
static always_inline float64 glue(ldfqr, MEMSUFFIX) (target_ulong EA)
{
CPU_DoubleU u;
u.d = glue(ldfq, MEMSUFFIX)(EA);
u.ll = bswap64(u.ll);
return u.d;
}
static always_inline float64 glue(ldfsr, MEMSUFFIX) (target_ulong EA)
{
CPU_FloatU u;
u.f = glue(ldfl, MEMSUFFIX)(EA);
u.l = bswap32(u.l);
return float32_to_float64(u.f, &env->fp_status);
}
PPC_LDF_OP(fd_le, ldfqr);
PPC_LDF_OP(fs_le, ldfsr);
#if defined(TARGET_PPC64)
PPC_LDF_OP_64(fd_le, ldfqr);
PPC_LDF_OP_64(fs_le, ldfsr);
#endif
/* Load and set reservation */
void OPPROTO glue(op_lwarx, MEMSUFFIX) (void)
{


@@ -2524,17 +2524,6 @@ static always_inline void gen_addr_register (TCGv EA,
#endif
/*** Integer load ***/
#define op_ldst(name) (*gen_op_##name[ctx->mem_idx])()
#define OP_LD_TABLE(width) \
static GenOpFunc *gen_op_l##width[NB_MEM_FUNCS] = { \
GEN_MEM_FUNCS(l##width), \
};
#define OP_ST_TABLE(width) \
static GenOpFunc *gen_op_st##width[NB_MEM_FUNCS] = { \
GEN_MEM_FUNCS(st##width), \
};
#if defined(TARGET_PPC64)
#define GEN_QEMU_LD_PPC64(width) \
static always_inline void gen_qemu_ld##width##_ppc64(TCGv t0, TCGv t1, int flags)\
@@ -2700,10 +2689,10 @@ static always_inline void gen_qemu_st64(TCGv arg0, TCGv arg1, int flags)
#else /* defined(TARGET_PPC64) */
#define GEN_QEMU_LD_PPC32(width) \
static always_inline void gen_qemu_ld##width##_ppc32(TCGv arg0, TCGv arg1, int flags) \
{ \
tcg_gen_qemu_ld##width(arg0, arg1, flags >> 1); \
}
GEN_QEMU_LD_PPC32(8u)
GEN_QEMU_LD_PPC32(8s)
@@ -2711,15 +2700,23 @@ GEN_QEMU_LD_PPC32(16u)
GEN_QEMU_LD_PPC32(16s)
GEN_QEMU_LD_PPC32(32u)
GEN_QEMU_LD_PPC32(32s)
static always_inline void gen_qemu_ld64_ppc32(TCGv_i64 arg0, TCGv arg1, int flags)
{
tcg_gen_qemu_ld64(arg0, arg1, flags >> 1);
}
#define GEN_QEMU_ST_PPC32(width) \
static always_inline void gen_qemu_st##width##_ppc32(TCGv arg0, TCGv arg1, int flags) \
{ \
tcg_gen_qemu_st##width(arg0, arg1, flags >> 1); \
}
GEN_QEMU_ST_PPC32(8)
GEN_QEMU_ST_PPC32(16)
GEN_QEMU_ST_PPC32(32)
static always_inline void gen_qemu_st64_ppc32(TCGv_i64 arg0, TCGv arg1, int flags)
{
tcg_gen_qemu_st64(arg0, arg1, flags >> 1);
}
static always_inline void gen_qemu_ld8u(TCGv arg0, TCGv arg1, int flags)
{
@@ -2755,6 +2752,13 @@ static always_inline void gen_qemu_ld32u(TCGv arg0, TCGv arg1, int flags)
tcg_gen_bswap_i32(arg0, arg0);
}
static always_inline void gen_qemu_ld64(TCGv_i64 arg0, TCGv arg1, int flags)
{
gen_qemu_ld64_ppc32(arg0, arg1, flags);
if (unlikely(flags & 1))
tcg_gen_bswap_i64(arg0, arg0);
}
static always_inline void gen_qemu_st8(TCGv arg0, TCGv arg1, int flags)
{
gen_qemu_st8_ppc32(arg0, arg1, flags);
@@ -2783,6 +2787,16 @@ static always_inline void gen_qemu_st32(TCGv arg0, TCGv arg1, int flags)
gen_qemu_st32_ppc32(arg0, arg1, flags);
}
static always_inline void gen_qemu_st64(TCGv_i64 arg0, TCGv arg1, int flags)
{
if (unlikely(flags & 1)) {
TCGv_i64 temp = tcg_temp_new_i64();
tcg_gen_bswap_i64(temp, arg0);
gen_qemu_st64_ppc32(temp, arg1, flags);
tcg_temp_free_i64(temp);
} else
gen_qemu_st64_ppc32(arg0, arg1, flags);
}
#endif
#define GEN_LD(name, ldop, opc, type) \
@@ -3325,22 +3339,25 @@ GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT)
}
/*** Floating-point load ***/
#define GEN_LDF(width, opc, type) \
GEN_HANDLER(l##width, opc, 0xFF, 0xFF, 0x00000000, type) \
#define GEN_LDF(name, ldop, opc, type) \
GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
gen_set_access_type(ACCESS_FLOAT); \
gen_addr_imm_index(cpu_T[0], ctx, 0); \
op_ldst(l##width); \
tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
EA = tcg_temp_new(); \
gen_addr_imm_index(EA, ctx, 0); \
gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
tcg_temp_free(EA); \
}
#define GEN_LDUF(width, opc, type) \
GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
#define GEN_LDUF(name, ldop, opc, type) \
GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
@@ -3350,15 +3367,17 @@ GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
return; \
} \
gen_set_access_type(ACCESS_FLOAT); \
gen_addr_imm_index(cpu_T[0], ctx, 0); \
op_ldst(l##width); \
tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \
EA = tcg_temp_new(); \
gen_addr_imm_index(EA, ctx, 0); \
gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
#define GEN_LDUXF(width, opc, type) \
GEN_HANDLER(l##width##ux, 0x1F, 0x17, opc, 0x00000001, type) \
#define GEN_LDUXF(name, ldop, opc, type) \
GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type) \
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
@@ -3368,54 +3387,70 @@ GEN_HANDLER(l##width##ux, 0x1F, 0x17, opc, 0x00000001, type) \
return; \
} \
gen_set_access_type(ACCESS_FLOAT); \
gen_addr_reg_index(cpu_T[0], ctx); \
op_ldst(l##width); \
tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \
EA = tcg_temp_new(); \
gen_addr_reg_index(EA, ctx); \
gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
#define GEN_LDXF(width, opc2, opc3, type) \
GEN_HANDLER(l##width##x, 0x1F, opc2, opc3, 0x00000001, type) \
#define GEN_LDXF(name, ldop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
gen_set_access_type(ACCESS_FLOAT); \
gen_addr_reg_index(cpu_T[0], ctx); \
op_ldst(l##width); \
tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
EA = tcg_temp_new(); \
gen_addr_reg_index(EA, ctx); \
gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
tcg_temp_free(EA); \
}
#define GEN_LDFS(width, op, type) \
OP_LD_TABLE(width); \
GEN_LDF(width, op | 0x20, type); \
GEN_LDUF(width, op | 0x21, type); \
GEN_LDUXF(width, op | 0x01, type); \
GEN_LDXF(width, 0x17, op | 0x00, type)
#define GEN_LDFS(name, ldop, op, type) \
GEN_LDF(name, ldop, op | 0x20, type); \
GEN_LDUF(name, ldop, op | 0x21, type); \
GEN_LDUXF(name, ldop, op | 0x01, type); \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
/* lfd lfdu lfdux lfdx */
GEN_LDFS(fd, 0x12, PPC_FLOAT);
/* lfs lfsu lfsux lfsx */
GEN_LDFS(fs, 0x10, PPC_FLOAT);
static always_inline void gen_qemu_ld32fs(TCGv_i64 arg1, TCGv arg2, int flags)
{
TCGv t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_temp_new_i32();
gen_qemu_ld32u(t0, arg2, flags);
tcg_gen_trunc_tl_i32(t1, t0);
tcg_temp_free(t0);
gen_helper_float32_to_float64(arg1, t1);
tcg_temp_free_i32(t1);
}
/* lfd lfdu lfdux lfdx */
GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT);
/* lfs lfsu lfsux lfsx */
GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
/*** Floating-point store ***/
#define GEN_STF(width, opc, type) \
GEN_HANDLER(st##width, opc, 0xFF, 0xFF, 0x00000000, type) \
#define GEN_STF(name, stop, opc, type) \
GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
gen_set_access_type(ACCESS_FLOAT); \
gen_addr_imm_index(cpu_T[0], ctx, 0); \
tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]); \
op_ldst(st##width); \
EA = tcg_temp_new(); \
gen_addr_imm_index(EA, ctx, 0); \
gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
tcg_temp_free(EA); \
}
#define GEN_STUF(width, opc, type) \
GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
#define GEN_STUF(name, stop, opc, type) \
GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
@@ -3425,15 +3460,17 @@ GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
return; \
} \
gen_set_access_type(ACCESS_FLOAT); \
gen_addr_imm_index(cpu_T[0], ctx, 0); \
tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]); \
op_ldst(st##width); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \
EA = tcg_temp_new(); \
gen_addr_imm_index(EA, ctx, 0); \
gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
#define GEN_STUXF(width, opc, type) \
GEN_HANDLER(st##width##ux, 0x1F, 0x17, opc, 0x00000001, type) \
#define GEN_STUXF(name, stop, opc, type) \
GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type) \
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
@@ -3443,41 +3480,60 @@ GEN_HANDLER(st##width##ux, 0x1F, 0x17, opc, 0x00000001, type) \
return; \
} \
gen_set_access_type(ACCESS_FLOAT); \
gen_addr_reg_index(cpu_T[0], ctx); \
tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]); \
op_ldst(st##width); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \
EA = tcg_temp_new(); \
gen_addr_reg_index(EA, ctx); \
gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
#define GEN_STXF(width, opc2, opc3, type) \
GEN_HANDLER(st##width##x, 0x1F, opc2, opc3, 0x00000001, type) \
#define GEN_STXF(name, stop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
gen_set_access_type(ACCESS_FLOAT); \
gen_addr_reg_index(cpu_T[0], ctx); \
tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]); \
op_ldst(st##width); \
EA = tcg_temp_new(); \
gen_addr_reg_index(EA, ctx); \
gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
tcg_temp_free(EA); \
}
#define GEN_STFS(width, op, type) \
OP_ST_TABLE(width); \
GEN_STF(width, op | 0x20, type); \
GEN_STUF(width, op | 0x21, type); \
GEN_STUXF(width, op | 0x01, type); \
GEN_STXF(width, 0x17, op | 0x00, type)
#define GEN_STFS(name, stop, op, type) \
GEN_STF(name, stop, op | 0x20, type); \
GEN_STUF(name, stop, op | 0x21, type); \
GEN_STUXF(name, stop, op | 0x01, type); \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
static always_inline void gen_qemu_st32fs(TCGv_i64 arg1, TCGv arg2, int flags)
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv t1 = tcg_temp_new();
gen_helper_float64_to_float32(t0, arg1);
tcg_gen_extu_i32_tl(t1, t0);
tcg_temp_free_i32(t0);
gen_qemu_st32(t1, arg2, flags);
tcg_temp_free(t1);
}
/* stfd stfdu stfdux stfdx */
GEN_STFS(fd, 0x16, PPC_FLOAT);
GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
/* stfs stfsu stfsux stfsx */
GEN_STFS(fs, 0x14, PPC_FLOAT);
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
/* Optional: */
static always_inline void gen_qemu_st32fiw(TCGv_i64 arg1, TCGv arg2, int flags)
{
TCGv t0 = tcg_temp_new();
tcg_gen_trunc_i64_tl(t0, arg1);
gen_qemu_st32(t0, arg2, flags);
tcg_temp_free(t0);
}
/* stfiwx */
OP_ST_TABLE(fiw);
GEN_STXF(fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
/*** Branch ***/
static always_inline void gen_goto_tb (DisasContext *ctx, int n,