Make it safe to use 64-bit GPRs and/or 64-bit host registers.

For "symetry", add 64 bits versions of all modified functions.
As a side effect, add a lot of code provision for PowerPC 64 support.
Move overflow and carry checks in common routines for simple cases.
Add the isel and popcntb instructions from the PowerPC 2.03 specification.
Remove the remaining micro-operation helper prototypes from op.c.
Fix the XER_BC field to be 7 bits long.
Add power management support for the PowerPC 603 & 604.
Fix compilation warnings.
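
As a reference for the isel addition: isel (integer select) picks between two GPR operands on a single CR bit; the decode itself lands in one of the large diffs suppressed below, so here is a minimal C model of the 2.03 semantics (illustrative only, not the translate.c implementation):

#include <stdint.h>

/* isel rD,rA,rB,crb: rD = CR[crb] ? (rA) : (rB).
 * When the rA field is 0, the first operand reads as zero, not GPR 0. */
static uint64_t isel(uint64_t ra_val, int ra_field_is_zero,
                     uint64_t rb_val, int cr_bit_set)
{
    if (cr_bit_set)
        return ra_field_is_zero ? 0 : ra_val;
    return rb_val;
}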


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2482 c046a42c-6fe2-441c-8c8c-71466251a162
master
j_mayer 2007-03-17 14:02:15 +00:00
parent 5fd46862e5
commit d9bce9d99f
9 changed files with 3045 additions and 639 deletions


@ -365,6 +365,8 @@ enum {
PPC_E500_VECTOR = 0x20000000,
/* PowerPC 4xx dedicated instructions */
PPC_4xx_COMMON = 0x40000000,
/* PowerPC 2.03 specification extensions */
PPC_203 = 0x80000000,
};
/* CPU run-time flags (MMU and exception model) */
@ -385,6 +387,8 @@ enum {
PPC_FLAGS_MMU_403 = 0x00000005,
/* Freescale e500 MMU model */
PPC_FLAGS_MMU_e500 = 0x00000006,
/* BookE MMU model */
PPC_FLAGS_MMU_BOOKE = 0x00000007,
/* Exception model */
PPC_FLAGS_EXCP_MASK = 0x000000F0,
/* Standard PowerPC exception model */
@ -407,6 +411,8 @@ enum {
PPC_FLAGS_EXCP_74xx = 0x00000080,
/* PowerPC 970 exception model */
PPC_FLAGS_EXCP_970 = 0x00000090,
/* BookE exception model */
PPC_FLAGS_EXCP_BOOKE = 0x000000A0,
};
#define PPC_MMU(env) (env->flags & PPC_FLAGS_MMU_MASK)
@ -437,11 +443,11 @@ enum {
/* PowerPC 440 */
#define PPC_INSNS_440 (PPC_INSNS_EMB | PPC_CACHE_OPT | PPC_BOOKE | \
PPC_4xx_COMMON | PPC_405_MAC | PPC_440_SPEC)
#define PPC_FLAGS_440 (PPC_FLAGS_TODO)
#define PPC_FLAGS_440 (PPC_FLAGS_MMU_BOOKE | PPC_FLAGS_EXCP_BOOKE)
/* Generic BookE PowerPC */
#define PPC_INSNS_BOOKE (PPC_INSNS_EMB | PPC_BOOKE | PPC_MEM_EIEIO | \
PPC_FLOAT | PPC_FLOAT_OPT | PPC_CACHE_OPT)
#define PPC_FLAGS_BOOKE (PPC_FLAGS_MMU_SOFT_4xx | PPC_FLAGS_EXCP_40x)
#define PPC_FLAGS_BOOKE (PPC_FLAGS_MMU_BOOKE | PPC_FLAGS_EXCP_BOOKE)
/* e500 core */
#define PPC_INSNS_E500 (PPC_INSNS_EMB | PPC_BOOKE | PPC_MEM_EIEIO | \
PPC_CACHE_OPT | PPC_E500_VECTOR)
@ -502,7 +508,6 @@ typedef struct ppc_dcr_t ppc_dcr_t;
typedef struct ppc_avr_t ppc_avr_t;
typedef struct ppc_tlb_t ppc_tlb_t;
/* SPR access micro-ops generations callbacks */
struct ppc_spr_t {
void (*uea_read)(void *opaque, int spr_num);
@ -619,6 +624,8 @@ struct CPUPPCState {
*/
target_ulong t0, t1, t2;
#endif
ppc_avr_t t0_avr, t1_avr, t2_avr;
/* general purpose registers */
ppc_gpr_t gpr[32];
/* LR */
@ -674,6 +681,9 @@ struct CPUPPCState {
/* Altivec registers */
ppc_avr_t avr[32];
uint32_t vscr;
/* SPE registers */
ppc_gpr_t spe_acc;
uint32_t spe_fscr;
/* Internal devices resources */
/* Time base and decrementer */
@ -762,8 +772,10 @@ void do_store_dbatu (CPUPPCState *env, int nr, target_ulong value);
void do_store_dbatl (CPUPPCState *env, int nr, target_ulong value);
target_ulong do_load_sdr1 (CPUPPCState *env);
void do_store_sdr1 (CPUPPCState *env, target_ulong value);
target_ulong do_load_asr (CPUPPCState *env);
void do_store_asr (CPUPPCState *env, target_ulong value);
#if defined(TARGET_PPC64)
target_ulong ppc_load_asr (CPUPPCState *env);
void ppc_store_asr (CPUPPCState *env, target_ulong value);
#endif
target_ulong do_load_sr (CPUPPCState *env, int srnum);
void do_store_sr (CPUPPCState *env, int srnum, target_ulong value);
#endif
@ -771,6 +783,7 @@ uint32_t ppc_load_xer (CPUPPCState *env);
void ppc_store_xer (CPUPPCState *env, uint32_t value);
target_ulong do_load_msr (CPUPPCState *env);
void do_store_msr (CPUPPCState *env, target_ulong value);
void ppc_store_msr_32 (CPUPPCState *env, uint32_t value);
void do_compute_hflags (CPUPPCState *env);
@ -787,6 +800,16 @@ void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value);
void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value);
uint32_t cpu_ppc_load_decr (CPUPPCState *env);
void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value);
uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env);
uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env);
#if !defined(CONFIG_USER_ONLY)
void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value);
void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value);
target_ulong load_40x_pit (CPUPPCState *env);
void store_40x_pit (CPUPPCState *env, target_ulong val);
void store_booke_tcr (CPUPPCState *env, target_ulong val);
void store_booke_tsr (CPUPPCState *env, target_ulong val);
#endif
#endif
#define TARGET_PAGE_BITS 12


@ -34,19 +34,25 @@ register struct CPUPPCState *env asm(AREG0);
#define T1 (env->t1)
#define T2 (env->t2)
#else
/* This may be more efficient if HOST_LONG_BITS > TARGET_LONG_BITS.
* Enable it once we are sure it does not cause bugs.
*/
#if 0
register unsigned long T0 asm(AREG1);
register unsigned long T1 asm(AREG2);
register unsigned long T2 asm(AREG3);
#endif
/* We may sometimes need 64-bit temporaries on a 32-bit host */
#if defined(TARGET_PPC64) || (HOST_LONG_BITS == 64)
#define T0_64 T0
#define T1_64 T1
#define T2_64 T2
#else
register target_ulong T0 asm(AREG1);
register target_ulong T1 asm(AREG2);
register target_ulong T2 asm(AREG3);
#endif
/* no registers can be used */
#define T0_64 (env->t0)
#define T1_64 (env->t1)
#define T2_64 (env->t2)
#endif
/* Provision for Altivec */
#define T0_avr (env->t0_avr)
#define T1_avr (env->t1_avr)
#define T2_avr (env->t2_avr)
/* XXX: to clean: remove this mess */
#define PARAM(n) ((uint32_t)PARAM##n)
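
The point of the T0_64/T1_64/T2_64 aliases: micro-ops that need 64-bit intermediates always write T*_64, which collapses to the ordinary (possibly host-register-backed) temporaries when those are already 64 bits wide, and falls back to the env slots otherwise. A standalone mock of the aliasing scheme (names reused for illustration only, not the QEMU definitions):

#include <stdint.h>
#include <stdio.h>

#define HOST_LONG_BITS 32            /* pretend: 32-bit host, 64-bit value */

static struct { uint64_t t0, t1; } env_slots;

#if HOST_LONG_BITS == 64
static uint64_t T0, T1;
#define T0_64 T0
#define T1_64 T1
#else
static uint32_t T0, T1;              /* candidates for host registers */
#define T0_64 (env_slots.t0)         /* 64-bit temporaries live in memory */
#define T1_64 (env_slots.t1)
#endif

int main(void)
{
    (void)T0; (void)T1;
    T0_64 = 0x100000000ULL;          /* needs 64 bits even on this host */
    T1_64 = 1;
    T0_64 += T1_64;
    printf("0x%llx\n", (unsigned long long)T0_64);
    return 0;
}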


@ -37,12 +37,12 @@
/*****************************************************************************/
/* PowerPC MMU emulation */
#if defined(CONFIG_USER_ONLY)
int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
int is_user, int is_softmmu)
{
int exception, error_code;
if (rw == 2) {
exception = EXCP_ISI;
error_code = 0;
@ -277,7 +277,7 @@ static int ppc6xx_tlb_check (CPUState *env, mmu_ctx_t *ctx,
ppc_tlb_t *tlb;
int nr, best, way;
int ret;
best = -1;
ret = -1; /* No TLB found */
for (way = 0; way < env->nb_ways; way++) {
@ -672,7 +672,7 @@ int get_physical_address (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr,
if (loglevel > 0) {
fprintf(logfile, "%s\n", __func__);
}
#endif
if ((access_type == ACCESS_CODE && msr_ir == 0) ||
(access_type != ACCESS_CODE && msr_dr == 0)) {
/* No address translation */
@ -693,7 +693,7 @@ int get_physical_address (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr,
__func__, eaddr, ctx->raddr);
}
#endif
return ret;
}
@ -715,7 +715,7 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
int exception = 0, error_code = 0;
int access_type;
int ret = 0;
if (rw == 2) {
/* code access */
rw = 0;
@ -975,6 +975,21 @@ void do_store_dbatl (CPUPPCState *env, int nr, target_ulong value)
/*****************************************************************************/
/* Special registers manipulation */
#if defined(TARGET_PPC64)
target_ulong ppc_load_asr (CPUPPCState *env)
{
return env->asr;
}
void ppc_store_asr (CPUPPCState *env, target_ulong value)
{
if (env->asr != value) {
env->asr = value;
tlb_flush(env, 1);
}
}
#endif
target_ulong do_load_sdr1 (CPUPPCState *env)
{
return env->sdr1;
@ -1039,7 +1054,7 @@ void ppc_store_xer (CPUPPCState *env, uint32_t value)
xer_ov = (value >> XER_OV) & 0x01;
xer_ca = (value >> XER_CA) & 0x01;
xer_cmp = (value >> XER_CMP) & 0xFF;
xer_bc = (value >> XER_BC) & 0x3F;
xer_bc = (value >> XER_BC) & 0x7F;
}
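
The widened mask matters because XER[25:31] holds the 7-bit byte count used by the string instructions (lswx/stswx), which ranges over 0..127; a 6-bit mask (0x3F) silently truncates counts of 64 and above. A standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t bc = 127;           /* lswx can move up to 127 bytes */
    assert((bc & 0x7F) == 127);  /* 7-bit mask keeps the full count */
    assert((bc & 0x3F) == 63);   /* 6-bit mask corrupts it */
    return 0;
}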
/* Swap temporary saved registers with GPRs */
@ -1066,34 +1081,34 @@ target_ulong do_load_msr (CPUPPCState *env)
{
return
#if defined (TARGET_PPC64)
(msr_sf << MSR_SF) |
(msr_isf << MSR_ISF) |
(msr_hv << MSR_HV) |
((target_ulong)msr_sf << MSR_SF) |
((target_ulong)msr_isf << MSR_ISF) |
((target_ulong)msr_hv << MSR_HV) |
#endif
(msr_ucle << MSR_UCLE) |
(msr_vr << MSR_VR) | /* VR / SPE */
(msr_ap << MSR_AP) |
(msr_sa << MSR_SA) |
(msr_key << MSR_KEY) |
(msr_pow << MSR_POW) | /* POW / WE */
(msr_tlb << MSR_TLB) | /* TLB / TGPE / CE */
(msr_ile << MSR_ILE) |
(msr_ee << MSR_EE) |
(msr_pr << MSR_PR) |
(msr_fp << MSR_FP) |
(msr_me << MSR_ME) |
(msr_fe0 << MSR_FE0) |
(msr_se << MSR_SE) | /* SE / DWE / UBLE */
(msr_be << MSR_BE) | /* BE / DE */
(msr_fe1 << MSR_FE1) |
(msr_al << MSR_AL) |
(msr_ip << MSR_IP) |
(msr_ir << MSR_IR) | /* IR / IS */
(msr_dr << MSR_DR) | /* DR / DS */
(msr_pe << MSR_PE) | /* PE / EP */
(msr_px << MSR_PX) | /* PX / PMM */
(msr_ri << MSR_RI) |
(msr_le << MSR_LE);
((target_ulong)msr_ucle << MSR_UCLE) |
((target_ulong)msr_vr << MSR_VR) | /* VR / SPE */
((target_ulong)msr_ap << MSR_AP) |
((target_ulong)msr_sa << MSR_SA) |
((target_ulong)msr_key << MSR_KEY) |
((target_ulong)msr_pow << MSR_POW) | /* POW / WE */
((target_ulong)msr_tlb << MSR_TLB) | /* TLB / TGPE / CE */
((target_ulong)msr_ile << MSR_ILE) |
((target_ulong)msr_ee << MSR_EE) |
((target_ulong)msr_pr << MSR_PR) |
((target_ulong)msr_fp << MSR_FP) |
((target_ulong)msr_me << MSR_ME) |
((target_ulong)msr_fe0 << MSR_FE0) |
((target_ulong)msr_se << MSR_SE) | /* SE / DWE / UBLE */
((target_ulong)msr_be << MSR_BE) | /* BE / DE */
((target_ulong)msr_fe1 << MSR_FE1) |
((target_ulong)msr_al << MSR_AL) |
((target_ulong)msr_ip << MSR_IP) |
((target_ulong)msr_ir << MSR_IR) | /* IR / IS */
((target_ulong)msr_dr << MSR_DR) | /* DR / DS */
((target_ulong)msr_pe << MSR_PE) | /* PE / EP */
((target_ulong)msr_px << MSR_PX) | /* PX / PMM */
((target_ulong)msr_ri << MSR_RI) |
((target_ulong)msr_le << MSR_LE);
}
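
The new (target_ulong) casts in do_load_msr are not cosmetic: the msr_* fields are int-sized, and on a 64-bit target MSR_SF is bit 63, so msr_sf << MSR_SF would shift a 32-bit int out of range (undefined behaviour, and in practice the bit is lost). A minimal illustration, with target_ulong assumed 64-bit as on a TARGET_PPC64 build:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;   /* as on a TARGET_PPC64 build */

int main(void)
{
    int msr_sf = 1;
    /* msr_sf << 63 would overflow the 32-bit int before widening;
     * casting first makes the shift well-defined. */
    target_ulong msr = (target_ulong)msr_sf << 63;
    printf("MSR = 0x%016llx\n", (unsigned long long)msr);
    return 0;
}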
void do_store_msr (CPUPPCState *env, target_ulong value)
@ -1156,6 +1171,17 @@ void do_store_msr (CPUPPCState *env, target_ulong value)
enter_pm = 0;
switch (PPC_EXCP(env)) {
case PPC_FLAGS_EXCP_603:
/* Don't handle SLEEP mode: we should disable all clocks...
* No dynamic power-management.
*/
if (msr_pow == 1 && (env->spr[SPR_HID0] & 0x00C00000) != 0)
enter_pm = 1;
break;
case PPC_FLAGS_EXCP_604:
if (msr_pow == 1)
enter_pm = 1;
break;
case PPC_FLAGS_EXCP_7x0:
if (msr_pow == 1 && (env->spr[SPR_HID0] & 0x00E00000) != 0)
enter_pm = 1;
@ -1171,15 +1197,22 @@ void do_store_msr (CPUPPCState *env, target_ulong value)
}
}
#if defined(TARGET_PPC64)
void ppc_store_msr_32 (CPUPPCState *env, uint32_t value)
{
do_store_msr(env, (uint32_t)value);
}
#endif
void do_compute_hflags (CPUPPCState *env)
{
/* Compute current hflags */
env->hflags = (msr_pr << MSR_PR) | (msr_le << MSR_LE) |
(msr_fp << MSR_FP) | (msr_fe0 << MSR_FE0) | (msr_fe1 << MSR_FE1) |
(msr_vr << MSR_VR) | (msr_ap << MSR_AP) | (msr_sa << MSR_SA) |
(msr_se << MSR_SE) | (msr_be << MSR_BE);
#if defined (TARGET_PPC64)
env->hflags |= (msr_sf << MSR_SF) | (msr_hv << MSR_HV);
env->hflags |= (msr_sf << (MSR_SF - 32)) | (msr_hv << (MSR_HV - 32));
#endif
}
@ -1193,8 +1226,8 @@ void do_interrupt (CPUState *env)
#else /* defined (CONFIG_USER_ONLY) */
static void dump_syscall(CPUState *env)
{
fprintf(logfile, "syscall r0=0x%08x r3=0x%08x r4=0x%08x "
"r5=0x%08x r6=0x%08x nip=0x%08x\n",
fprintf(logfile, "syscall r0=0x" REGX " r3=0x" REGX " r4=0x" REGX
" r5=0x" REGX " r6=0x" REGX " nip=0x" REGX "\n",
env->gpr[0], env->gpr[3], env->gpr[4],
env->gpr[5], env->gpr[6], env->nip);
}

File diff suppressed because it is too large.


@ -33,10 +33,6 @@
//#define DEBUG_SOFTWARE_TLB
//#define FLUSH_ALL_TLBS
#define Ts0 (long)((target_long)T0)
#define Ts1 (long)((target_long)T1)
#define Ts2 (long)((target_long)T2)
/*****************************************************************************/
/* Exceptions processing helpers */
void cpu_loop_exit (void)
@ -106,7 +102,7 @@ void do_store_xer (void)
xer_ov = (T0 >> XER_OV) & 0x01;
xer_ca = (T0 >> XER_CA) & 0x01;
xer_cmp = (T0 >> XER_CMP) & 0xFF;
xer_bc = (T0 >> XER_BC) & 0x3F;
xer_bc = (T0 >> XER_BC) & 0x7F;
}
void do_load_fpscr (void)
@ -122,7 +118,7 @@ void do_load_fpscr (void)
} u;
int i;
#ifdef WORDS_BIGENDIAN
#if defined(WORDS_BIGENDIAN)
#define WORD0 0
#define WORD1 1
#else
@ -182,68 +178,110 @@ void do_store_fpscr (uint32_t mask)
/*****************************************************************************/
/* Fixed point operations helpers */
void do_addo (void)
#if defined(TARGET_PPC64)
static void add128 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
T2 = T0;
T0 += T1;
if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
*plow += a;
/* carry test */
if (*plow < a)
(*phigh)++;
*phigh += b;
}
static void neg128 (uint64_t *plow, uint64_t *phigh)
{
*plow = ~ *plow;
*phigh = ~ *phigh;
add128(plow, phigh, 1, 0);
}
static void mul64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
uint32_t a0, a1, b0, b1;
uint64_t v;
a0 = a;
a1 = a >> 32;
b0 = b;
b1 = b >> 32;
v = (uint64_t)a0 * (uint64_t)b0;
*plow = v;
*phigh = 0;
v = (uint64_t)a0 * (uint64_t)b1;
add128(plow, phigh, v << 32, v >> 32);
v = (uint64_t)a1 * (uint64_t)b0;
add128(plow, phigh, v << 32, v >> 32);
v = (uint64_t)a1 * (uint64_t)b1;
*phigh += v;
#if defined(DEBUG_MULDIV)
printf("mul: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n",
a, b, *phigh, *plow);
#endif
}
void do_mul64 (uint64_t *plow, uint64_t *phigh)
{
mul64(plow, phigh, T0, T1);
}
static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
int sa, sb;
sa = (a < 0);
if (sa)
a = -a;
sb = (b < 0);
if (sb)
b = -b;
mul64(plow, phigh, a, b);
if (sa ^ sb) {
neg128(plow, phigh);
}
}
void do_addco (void)
void do_imul64 (uint64_t *plow, uint64_t *phigh)
{
T2 = T0;
T0 += T1;
if (likely(T0 >= T2)) {
xer_ca = 0;
} else {
xer_ca = 1;
}
if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
imul64(plow, phigh, T0, T1);
}
#endif
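
add128/mul64 build a 64x64->128 multiply out of four 32-bit partial products (schoolbook), and imul64 reduces the signed case to the unsigned one by negating the inputs and, when exactly one was negative, the 128-bit result. A self-contained harness with the same limb decomposition, checked against a product whose high word is known:

#include <assert.h>
#include <stdint.h>

static void add128_ref(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    if (*plow < a)               /* carry out of the low doubleword */
        (*phigh)++;
    *phigh += b;
}

static void mul64_ref(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint32_t a0 = a, a1 = a >> 32, b0 = b, b1 = b >> 32;
    uint64_t v;

    *plow = (uint64_t)a0 * b0;
    *phigh = 0;
    v = (uint64_t)a0 * b1;       /* middle partial products, shifted by 32 */
    add128_ref(plow, phigh, v << 32, v >> 32);
    v = (uint64_t)a1 * b0;
    add128_ref(plow, phigh, v << 32, v >> 32);
    *phigh += (uint64_t)a1 * b1;
}

int main(void)
{
    uint64_t lo, hi;

    /* (2^32 + 1)^2 = 2^64 + 2^33 + 1 */
    mul64_ref(&lo, &hi, 0x100000001ULL, 0x100000001ULL);
    assert(hi == 1 && lo == 0x200000001ULL);
    return 0;
}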
void do_adde (void)
{
T2 = T0;
T0 += T1 + xer_ca;
if (likely(!(T0 < T2 || (xer_ca == 1 && T0 == T2)))) {
if (likely(!((uint32_t)T0 < (uint32_t)T2 ||
(xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
void do_addeo (void)
#if defined(TARGET_PPC64)
void do_adde_64 (void)
{
T2 = T0;
T0 += T1 + xer_ca;
if (likely(!(T0 < T2 || (xer_ca == 1 && T0 == T2)))) {
if (likely(!((uint64_t)T0 < (uint64_t)T2 ||
(xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) {
xer_ca = 0;
} else {
xer_ca = 1;
}
if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
}
#endif
void do_addmeo (void)
{
T1 = T0;
T0 += xer_ca + (-1);
if (likely(!(T1 & (T1 ^ T0) & (1 << 31)))) {
if (likely(!((uint32_t)T1 &
((uint32_t)T1 ^ (uint32_t)T0) & (1UL << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
@ -253,28 +291,29 @@ void do_addmeo (void)
xer_ca = 1;
}
void do_addzeo (void)
#if defined(TARGET_PPC64)
void do_addmeo_64 (void)
{
T1 = T0;
T0 += xer_ca;
if (likely(!((T1 ^ (-1)) & (T1 ^ T0) & (1 << 31)))) {
T0 += xer_ca + (-1);
if (likely(!((uint64_t)T1 &
((uint64_t)T1 ^ (uint64_t)T0) & (1ULL << 63)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
if (likely(T0 >= T1)) {
xer_ca = 0;
} else {
if (likely(T1 != 0))
xer_ca = 1;
}
}
#endif
void do_divwo (void)
{
if (likely(!((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0))) {
if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) ||
(int32_t)T1 == 0))) {
xer_ov = 0;
T0 = (Ts0 / Ts1);
T0 = (int32_t)T0 / (int32_t)T1;
} else {
xer_so = 1;
xer_ov = 1;
@ -282,6 +321,21 @@ void do_divwo (void)
}
}
#if defined(TARGET_PPC64)
void do_divdo (void)
{
if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == -1ULL) ||
(int64_t)T1 == 0))) {
xer_ov = 0;
T0 = (int64_t)T0 / (int64_t)T1;
} else {
xer_so = 1;
xer_ov = 1;
T0 = (-1ULL) * ((uint64_t)T0 >> 63);
}
}
#endif
void do_divwuo (void)
{
if (likely((uint32_t)T1 != 0)) {
@ -294,9 +348,23 @@ void do_divwuo (void)
}
}
#if defined(TARGET_PPC64)
void do_divduo (void)
{
if (likely((uint64_t)T1 != 0)) {
xer_ov = 0;
T0 = (uint64_t)T0 / (uint64_t)T1;
} else {
xer_so = 1;
xer_ov = 1;
T0 = 0;
}
}
#endif
void do_mullwo (void)
{
int64_t res = (int64_t)Ts0 * (int64_t)Ts1;
int64_t res = (int64_t)T0 * (int64_t)T1;
if (likely((int32_t)res == res)) {
xer_ov = 0;
@ -307,112 +375,148 @@ void do_mullwo (void)
T0 = (int32_t)res;
}
#if defined(TARGET_PPC64)
void do_mulldo (void)
{
int64_t th;
uint64_t tl;
do_imul64(&tl, &th);
if (likely(th == 0)) {
xer_ov = 0;
} else {
xer_ov = 1;
xer_so = 1;
}
T0 = (int64_t)tl;
}
#endif
void do_nego (void)
{
if (likely(T0 != INT32_MIN)) {
if (likely((int32_t)T0 != INT32_MIN)) {
xer_ov = 0;
T0 = -Ts0;
T0 = -(int32_t)T0;
} else {
xer_ov = 1;
xer_so = 1;
}
}
void do_subfo (void)
#if defined(TARGET_PPC64)
void do_nego_64 (void)
{
T2 = T0;
T0 = T1 - T0;
if (likely(!(((~T2) ^ T1 ^ (-1)) & ((~T2) ^ T0) & (1 << 31)))) {
if (likely((int64_t)T0 != INT64_MIN)) {
xer_ov = 0;
T0 = -(int64_t)T0;
} else {
xer_so = 1;
xer_ov = 1;
}
RETURN();
}
void do_subfco (void)
{
T2 = T0;
T0 = T1 - T0;
if (likely(T0 > T1)) {
xer_ca = 0;
} else {
xer_ca = 1;
}
if (likely(!(((~T2) ^ T1 ^ (-1)) & ((~T2) ^ T0) & (1 << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
}
#endif
void do_subfe (void)
{
T0 = T1 + ~T0 + xer_ca;
if (likely(T0 >= T1 && (xer_ca == 0 || T0 != T1))) {
if (likely((uint32_t)T0 >= (uint32_t)T1 &&
(xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
void do_subfeo (void)
#if defined(TARGET_PPC64)
void do_subfe_64 (void)
{
T2 = T0;
T0 = T1 + ~T0 + xer_ca;
if (likely(!((~T2 ^ T1 ^ (-1)) & (~T2 ^ T0) & (1 << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
if (likely(T0 >= T1 && (xer_ca == 0 || T0 != T1))) {
if (likely((uint64_t)T0 >= (uint64_t)T1 &&
(xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
#endif
void do_subfmeo (void)
{
T1 = T0;
T0 = ~T0 + xer_ca - 1;
if (likely(!(~T1 & (~T1 ^ T0) & (1 << 31)))) {
if (likely(!((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0) &
(1UL << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
if (likely(T1 != -1))
if (likely((uint32_t)T1 != UINT32_MAX))
xer_ca = 1;
}
#if defined(TARGET_PPC64)
void do_subfmeo_64 (void)
{
T1 = T0;
T0 = ~T0 + xer_ca - 1;
if (likely(!((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0) &
(1ULL << 63)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
if (likely((uint64_t)T1 != UINT64_MAX))
xer_ca = 1;
}
#endif
void do_subfzeo (void)
{
T1 = T0;
T0 = ~T0 + xer_ca;
if (likely(!((~T1 ^ (-1)) & ((~T1) ^ T0) & (1 << 31)))) {
if (likely(!(((uint32_t)~T1 ^ UINT32_MAX) &
((uint32_t)(~T1) ^ (uint32_t)T0) & (1UL << 31)))) {
xer_ov = 0;
} else {
xer_ov = 1;
xer_so = 1;
}
if (likely(T0 >= ~T1)) {
if (likely((uint32_t)T0 >= (uint32_t)~T1)) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
#if defined(TARGET_PPC64)
void do_subfzeo_64 (void)
{
T1 = T0;
T0 = ~T0 + xer_ca;
if (likely(!(((uint64_t)~T1 ^ UINT64_MAX) &
((uint64_t)(~T1) ^ (uint64_t)T0) & (1ULL << 63)))) {
xer_ov = 0;
} else {
xer_ov = 1;
xer_so = 1;
}
if (likely((uint64_t)T0 >= (uint64_t)~T1)) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
#endif
/* shift right arithmetic helper */
void do_sraw (void)
{
int32_t ret;
if (likely(!(T1 & 0x20UL))) {
if (likely(T1 != 0)) {
if (likely((uint32_t)T1 != 0)) {
ret = (int32_t)T0 >> (T1 & 0x1fUL);
if (likely(ret >= 0 || ((int32_t)T0 & ((1 << T1) - 1)) == 0)) {
xer_ca = 0;
@ -434,6 +538,69 @@ void do_sraw (void)
T0 = ret;
}
#if defined(TARGET_PPC64)
void do_srad (void)
{
int64_t ret;
if (likely(!(T1 & 0x40UL))) {
if (likely((uint64_t)T1 != 0)) {
ret = (int64_t)T0 >> (T1 & 0x3FUL);
if (likely(ret >= 0 || ((int64_t)T0 & ((1ULL << T1) - 1)) == 0)) {
xer_ca = 0;
} else {
xer_ca = 1;
}
} else {
ret = T0;
xer_ca = 0;
}
} else {
ret = (-1) * ((uint64_t)T0 >> 63);
if (likely(ret >= 0 || ((uint64_t)T0 & ~0x8000000000000000ULL) == 0)) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
T0 = ret;
}
#endif
static inline int popcnt (uint32_t val)
{
int i;
for (i = 0; val != 0; i++)
val &= val - 1;
return i;
}
void do_popcntb (void)
{
uint32_t ret;
int i;
ret = 0;
for (i = 0; i < 32; i += 8)
ret |= popcnt((T0 >> i) & 0xFF) << i;
T0 = ret;
}
#if defined(TARGET_PPC64)
void do_popcntb_64 (void)
{
uint64_t ret;
int i;
ret = 0;
for (i = 0; i < 64; i += 8)
ret |= (uint64_t)popcnt((T0 >> i) & 0xFF) << i;
T0 = ret;
}
#endif
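
popcntb counts the set bits independently in each byte of the source and deposits each count (0..8) in the corresponding byte of the result. A quick standalone check using the same Kernighan-style bit-clearing loop as popcnt above:

#include <assert.h>
#include <stdint.h>

static int popcnt8(uint32_t val)
{
    int n;
    for (n = 0; val != 0; n++)
        val &= val - 1;          /* clear the lowest set bit */
    return n;
}

int main(void)
{
    uint32_t src = 0xFF010300, dst = 0;
    int i;
    for (i = 0; i < 32; i += 8)
        dst |= (uint32_t)popcnt8((src >> i) & 0xFF) << i;
    assert(dst == 0x08010200);   /* per-byte counts: 8, 1, 2, 0 */
    return 0;
}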
/*****************************************************************************/
/* Floating point operations helpers */
void do_fctiw (void)
@ -459,7 +626,7 @@ void do_fctiwz (void)
} p;
/* XXX: higher bits are not supposed to be significant.
* to make tests easier, return the same as a real PowerPC 750 (aka G3)
*/
p.i = float64_to_int32_round_to_zero(FT0, &env->fp_status);
p.i |= 0xFFF80000ULL << 32;
@ -596,8 +763,20 @@ void do_fcmpo (void)
#if !defined (CONFIG_USER_ONLY)
void do_rfi (void)
{
env->nip = env->spr[SPR_SRR0] & ~0x00000003;
T0 = env->spr[SPR_SRR1] & ~0xFFFF0000UL;
env->nip = (target_ulong)(env->spr[SPR_SRR0] & ~0x00000003);
T0 = (target_ulong)(env->spr[SPR_SRR1] & ~0xFFFF0000UL);
do_store_msr(env, T0);
#if defined (DEBUG_OP)
dump_rfi();
#endif
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
#if defined(TARGET_PPC64)
void do_rfi_32 (void)
{
env->nip = (uint32_t)(env->spr[SPR_SRR0] & ~0x00000003);
T0 = (uint32_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL);
do_store_msr(env, T0);
#if defined (DEBUG_OP)
dump_rfi();
@ -605,17 +784,30 @@ void do_rfi (void)
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
#endif
#endif
void do_tw (int flags)
{
if (!likely(!((Ts0 < Ts1 && (flags & 0x10)) ||
(Ts0 > Ts1 && (flags & 0x08)) ||
(Ts0 == Ts1 && (flags & 0x04)) ||
(T0 < T1 && (flags & 0x02)) ||
(T0 > T1 && (flags & 0x01)))))
if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01)))))
do_raise_exception_err(EXCP_PROGRAM, EXCP_TRAP);
}
#if defined(TARGET_PPC64)
void do_td (int flags)
{
if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
do_raise_exception_err(EXCP_PROGRAM, EXCP_TRAP);
}
#endif
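
In do_tw/do_td the flags argument is the instruction's TO field: 0x10 traps on signed less-than, 0x08 on signed greater-than, 0x04 on equality, and 0x02/0x01 on unsigned less/greater-than; any matching condition fires, and the unconditional trap is TO = 31 (all five bits). The predicate in isolation:

#include <assert.h>
#include <stdint.h>

static int trap_taken(int32_t a, int32_t b, int to)
{
    return ((a < b)                     && (to & 0x10)) ||
           ((a > b)                     && (to & 0x08)) ||
           ((a == b)                    && (to & 0x04)) ||
           (((uint32_t)a < (uint32_t)b) && (to & 0x02)) ||
           (((uint32_t)a > (uint32_t)b) && (to & 0x01));
}

int main(void)
{
    assert(trap_taken(0, 0, 31));     /* tw 31,r0,r0 always fires */
    assert(!trap_taken(1, 2, 0x04));  /* tweq: no trap when operands differ */
    assert(trap_taken(-1, 0, 0x10));  /* twlt: signed less-than fires */
    return 0;
}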
/* Instruction cache invalidation helper */
void do_icbi (void)
{
@ -625,20 +817,31 @@ void do_icbi (void)
* (not a fetch) by the MMU. To be sure it will be so,
* do the load "by hand".
*/
#if defined(TARGET_PPC64)
if (!msr_sf)
T0 &= 0xFFFFFFFFULL;
#endif
tmp = ldl_kernel(T0);
tmp = ldl_kernel((uint32_t)T0);
T0 &= ~(ICACHE_LINE_SIZE - 1);
tb_invalidate_page_range(T0, T0 + ICACHE_LINE_SIZE);
tb_invalidate_page_range((uint32_t)T0, (uint32_t)(T0 + ICACHE_LINE_SIZE));
}
#if defined(TARGET_PPC64)
void do_icbi_64 (void)
{
uint64_t tmp;
/* Invalidate one cache line:
* PowerPC specification says this is to be treated like a load
* (not a fetch) by the MMU. To be sure it will be so,
* do the load "by hand".
*/
tmp = ldq_kernel((uint64_t)T0);
T0 &= ~(ICACHE_LINE_SIZE - 1);
tb_invalidate_page_range((uint64_t)T0, (uint64_t)(T0 + ICACHE_LINE_SIZE));
}
#endif
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */
void do_POWER_abso (void)
{
if (T0 == INT32_MIN) {
if ((uint32_t)T0 == INT32_MIN) {
T0 = INT32_MAX;
xer_ov = 1;
xer_so = 1;
@ -679,13 +882,13 @@ void do_POWER_div (void)
{
uint64_t tmp;
if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
T0 = (long)((-1) * (T0 >> 31));
env->spr[SPR_MQ] = 0;
} else {
tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
env->spr[SPR_MQ] = tmp % T1;
T0 = tmp / Ts1;
T0 = tmp / (int32_t)T1;
}
}
@ -693,7 +896,7 @@ void do_POWER_divo (void)
{
int64_t tmp;
if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
T0 = (long)((-1) * (T0 >> 31));
env->spr[SPR_MQ] = 0;
xer_ov = 1;
@ -701,7 +904,7 @@ void do_POWER_divo (void)
} else {
tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
env->spr[SPR_MQ] = tmp % T1;
tmp /= Ts1;
tmp /= (int32_t)T1;
if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
xer_ov = 1;
xer_so = 1;
@ -714,35 +917,36 @@ void do_POWER_divo (void)
void do_POWER_divs (void)
{
if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
T0 = (long)((-1) * (T0 >> 31));
env->spr[SPR_MQ] = 0;
} else {
env->spr[SPR_MQ] = T0 % T1;
T0 = Ts0 / Ts1;
T0 = (int32_t)T0 / (int32_t)T1;
}
}
void do_POWER_divso (void)
{
if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
T0 = (long)((-1) * (T0 >> 31));
env->spr[SPR_MQ] = 0;
xer_ov = 1;
xer_so = 1;
} else {
T0 = Ts0 / Ts1;
env->spr[SPR_MQ] = Ts0 % Ts1;
env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
T0 = (int32_t)T0 / (int32_t)T1;
xer_ov = 0;
}
}
void do_POWER_dozo (void)
{
if (Ts1 > Ts0) {
if ((int32_t)T1 > (int32_t)T0) {
T2 = T0;
T0 = T1 - T0;
if (((~T2) ^ T1 ^ (-1)) & ((~T2) ^ T0) & (1 << 31)) {
if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
xer_so = 1;
xer_ov = 1;
} else {
@ -758,12 +962,12 @@ void do_POWER_maskg (void)
{
uint32_t ret;
if (T0 == T1 + 1) {
if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
ret = -1;
} else {
ret = (((uint32_t)(-1)) >> (T0)) ^
(((uint32_t)(-1) >> (T1)) >> 1);
if (T0 > T1)
ret = (((uint32_t)(-1)) >> ((uint32_t)T0)) ^
(((uint32_t)(-1) >> ((uint32_t)T1)) >> 1);
if ((uint32_t)T0 > (uint32_t)T1)
ret = ~ret;
}
T0 = ret;
@ -812,7 +1016,7 @@ void do_POWER_rfsvc (void)
/* PowerPC 601 BAT management helper */
void do_store_601_batu (int nr)
{
do_store_ibatu(env, nr, T0);
do_store_ibatu(env, nr, (uint32_t)T0);
env->DBAT[0][nr] = env->IBAT[0][nr];
env->DBAT[1][nr] = env->IBAT[1][nr];
}
@ -826,7 +1030,7 @@ void do_store_601_batu (int nr)
void do_op_602_mfrom (void)
{
if (likely(T0 < 602)) {
#ifdef USE_MFROM_ROM_TABLE
#if defined(USE_MFROM_ROM_TABLE)
#include "mfrom_table.c"
T0 = mfrom_ROM_table[T0];
#else
@ -854,7 +1058,8 @@ void do_op_602_mfrom (void)
/* Embedded PowerPC specific helpers */
void do_405_check_ov (void)
{
if (likely(((T1 ^ T2) >> 31) || !((T0 ^ T2) >> 31))) {
if (likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
!(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
xer_ov = 0;
} else {
xer_ov = 1;
@ -864,7 +1069,8 @@ void do_405_check_ov (void)
void do_405_check_sat (void)
{
if (!likely(((T1 ^ T2) >> 31) || !((T0 ^ T2) >> 31))) {
if (!likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
!(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
/* Saturate result */
if (T2 >> 31) {
T0 = INT32_MIN;
@ -1010,6 +1216,7 @@ void do_tlbia (void)
void do_tlbie (void)
{
T0 = (uint32_t)T0;
#if !defined(FLUSH_ALL_TLBS)
if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_6xx)) {
ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 0);
@ -1050,13 +1257,78 @@ void do_tlbie (void)
#endif
}
#if defined(TARGET_PPC64)
void do_tlbie_64 (void)
{
T0 = (uint64_t)T0;
#if !defined(FLUSH_ALL_TLBS)
if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_6xx)) {
ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 0);
if (env->id_tlbs == 1)
ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 1);
} else if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_4xx)) {
/* XXX: TODO */
#if 0
ppcbooke_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK,
env->spr[SPR_BOOKE_PID]);
#endif
} else {
/* tlbie invalidates TLBs for all segments.
* As we have 2^36 segments, invalidate all qemu TLBs.
*/
#if 0
T0 &= TARGET_PAGE_MASK;
T0 &= ~((target_ulong)-1 << 28);
/* XXX: this case should be optimized,
* giving a mask to tlb_flush_page
*/
tlb_flush_page(env, T0 | (0x0 << 28));
tlb_flush_page(env, T0 | (0x1 << 28));
tlb_flush_page(env, T0 | (0x2 << 28));
tlb_flush_page(env, T0 | (0x3 << 28));
tlb_flush_page(env, T0 | (0x4 << 28));
tlb_flush_page(env, T0 | (0x5 << 28));
tlb_flush_page(env, T0 | (0x6 << 28));
tlb_flush_page(env, T0 | (0x7 << 28));
tlb_flush_page(env, T0 | (0x8 << 28));
tlb_flush_page(env, T0 | (0x9 << 28));
tlb_flush_page(env, T0 | (0xA << 28));
tlb_flush_page(env, T0 | (0xB << 28));
tlb_flush_page(env, T0 | (0xC << 28));
tlb_flush_page(env, T0 | (0xD << 28));
tlb_flush_page(env, T0 | (0xE << 28));
tlb_flush_page(env, T0 | (0xF << 28));
#else
tlb_flush(env, 1);
#endif
}
#else
do_tlbia();
#endif
}
#endif
#if defined(TARGET_PPC64)
void do_slbia (void)
{
/* XXX: TODO */
tlb_flush(env, 1);
}
void do_slbie (void)
{
/* XXX: TODO */
tlb_flush(env, 1);
}
#endif
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
void do_load_6xx_tlb (int is_code)
{
target_ulong RPN, CMP, EPN;
int way;
RPN = env->spr[SPR_RPA];
if (is_code) {
CMP = env->spr[SPR_ICMP];
@ -1074,7 +1346,8 @@ void do_load_6xx_tlb (int is_code)
}
#endif
/* Store this TLB */
ppc6xx_tlb_store(env, T0 & TARGET_PAGE_MASK, way, is_code, CMP, RPN);
ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
way, is_code, CMP, RPN);
}
/* Helpers for 4xx TLB management */


@ -35,6 +35,17 @@ void glue(do_POWER2_lfq_le, MEMSUFFIX) (void);
void glue(do_POWER2_stfq, MEMSUFFIX) (void);
void glue(do_POWER2_stfq_le, MEMSUFFIX) (void);
#if defined(TARGET_PPC64)
void glue(do_lsw_64, MEMSUFFIX) (int dst);
void glue(do_lsw_le_64, MEMSUFFIX) (int dst);
void glue(do_stsw_64, MEMSUFFIX) (int src);
void glue(do_stsw_le_64, MEMSUFFIX) (int src);
void glue(do_lmw_64, MEMSUFFIX) (int dst);
void glue(do_lmw_le_64, MEMSUFFIX) (int dst);
void glue(do_stmw_64, MEMSUFFIX) (int src);
void glue(do_stmw_le_64, MEMSUFFIX) (int src);
#endif
#else
/* Registers load and stores */
@ -46,23 +57,34 @@ void do_load_fpscr (void);
void do_store_fpscr (uint32_t mask);
/* Integer arithmetic helpers */
void do_addo (void);
void do_addco (void);
void do_adde (void);
void do_addeo (void);
void do_addmeo (void);
void do_addzeo (void);
void do_divwo (void);
void do_divwuo (void);
void do_mullwo (void);
void do_nego (void);
void do_subfo (void);
void do_subfco (void);
void do_subfe (void);
void do_subfeo (void);
void do_subfmeo (void);
void do_subfzeo (void);
void do_sraw (void);
#if defined(TARGET_PPC64)
void do_adde_64 (void);
void do_addmeo_64 (void);
void do_imul64 (uint64_t *tl, uint64_t *th);
void do_mul64 (uint64_t *tl, uint64_t *th);
void do_divdo (void);
void do_divduo (void);
void do_mulldo (void);
void do_nego_64 (void);
void do_subfe_64 (void);
void do_subfmeo_64 (void);
void do_subfzeo_64 (void);
void do_srad (void);
#endif
void do_popcntb (void);
#if defined(TARGET_PPC64)
void do_popcntb_64 (void);
#endif
/* Floating-point arithmetic helpers */
void do_fsqrt (void);
@ -77,13 +99,29 @@ void do_fcmpu (void);
void do_fcmpo (void);
void do_tw (int flags);
#if defined(TARGET_PPC64)
void do_td (int flags);
#endif
void do_icbi (void);
#if defined(TARGET_PPC64)
void do_icbi_64 (void);
#endif
#if !defined(CONFIG_USER_ONLY)
void do_rfi (void);
#if defined(TARGET_PPC64)
void do_rfi_32 (void);
#endif
void do_tlbia (void);
void do_tlbie (void);
#if defined(TARGET_PPC64)
void do_tlbie_64 (void);
#endif
void do_load_6xx_tlb (int is_code);
#if defined(TARGET_PPC64)
void do_slbia (void);
void do_slbie (void);
#endif
#endif
/* POWER / PowerPC 601 specific helpers */


@ -1,6 +1,6 @@
/*
* PowerPC emulation micro-operations helpers for qemu.
*
* Copyright (c) 2003-2007 Jocelyn Mayer
*
* This library is free software; you can redistribute it and/or
@ -37,99 +37,211 @@ static inline void glue(st32r, MEMSUFFIX) (target_ulong EA, target_ulong data)
void glue(do_lmw, MEMSUFFIX) (int dst)
{
for (; dst < 32; dst++, T0 += 4) {
ugpr(dst) = glue(ldl, MEMSUFFIX)(T0);
ugpr(dst) = glue(ldl, MEMSUFFIX)((uint32_t)T0);
}
}
#if defined(TARGET_PPC64)
void glue(do_lmw_64, MEMSUFFIX) (int dst)
{
for (; dst < 32; dst++, T0 += 4) {
ugpr(dst) = glue(ldl, MEMSUFFIX)((uint64_t)T0);
}
}
#endif
void glue(do_stmw, MEMSUFFIX) (int src)
{
for (; src < 32; src++, T0 += 4) {
glue(stl, MEMSUFFIX)(T0, ugpr(src));
glue(stl, MEMSUFFIX)((uint32_t)T0, ugpr(src));
}
}
#if defined(TARGET_PPC64)
void glue(do_stmw_64, MEMSUFFIX) (int src)
{
for (; src < 32; src++, T0 += 4) {
glue(stl, MEMSUFFIX)((uint64_t)T0, ugpr(src));
}
}
#endif
void glue(do_lmw_le, MEMSUFFIX) (int dst)
{
for (; dst < 32; dst++, T0 += 4) {
ugpr(dst) = glue(ld32r, MEMSUFFIX)(T0);
ugpr(dst) = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
}
}
#if defined(TARGET_PPC64)
void glue(do_lmw_le_64, MEMSUFFIX) (int dst)
{
for (; dst < 32; dst++, T0 += 4) {
ugpr(dst) = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
}
}
#endif
void glue(do_stmw_le, MEMSUFFIX) (int src)
{
for (; src < 32; src++, T0 += 4) {
glue(st32r, MEMSUFFIX)(T0, ugpr(src));
glue(st32r, MEMSUFFIX)((uint32_t)T0, ugpr(src));
}
}
#if defined(TARGET_PPC64)
void glue(do_stmw_le_64, MEMSUFFIX) (int src)
{
for (; src < 32; src++, T0 += 4) {
glue(st32r, MEMSUFFIX)((uint64_t)T0, ugpr(src));
}
}
#endif
void glue(do_lsw, MEMSUFFIX) (int dst)
{
uint32_t tmp;
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
ugpr(dst++) = glue(ldl, MEMSUFFIX)(T0);
ugpr(dst++) = glue(ldl, MEMSUFFIX)((uint32_t)T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
tmp |= glue(ldub, MEMSUFFIX)(T0) << sh;
tmp |= glue(ldub, MEMSUFFIX)((uint32_t)T0) << sh;
}
ugpr(dst) = tmp;
}
}
#if defined(TARGET_PPC64)
void glue(do_lsw_64, MEMSUFFIX) (int dst)
{
uint32_t tmp;
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
ugpr(dst++) = glue(ldl, MEMSUFFIX)((uint64_t)T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
tmp |= glue(ldub, MEMSUFFIX)((uint64_t)T0) << sh;
}
ugpr(dst) = tmp;
}
}
#endif
void glue(do_stsw, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
glue(stl, MEMSUFFIX)(T0, ugpr(src++));
glue(stl, MEMSUFFIX)((uint32_t)T0, ugpr(src++));
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
glue(stb, MEMSUFFIX)(T0, (ugpr(src) >> sh) & 0xFF);
glue(stb, MEMSUFFIX)((uint32_t)T0, (ugpr(src) >> sh) & 0xFF);
}
}
#if defined(TARGET_PPC64)
void glue(do_stsw_64, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
glue(stl, MEMSUFFIX)((uint64_t)T0, ugpr(src++));
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
glue(stb, MEMSUFFIX)((uint64_t)T0, (ugpr(src) >> sh) & 0xFF);
}
}
#endif
void glue(do_lsw_le, MEMSUFFIX) (int dst)
{
uint32_t tmp;
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
ugpr(dst++) = glue(ld32r, MEMSUFFIX)(T0);
ugpr(dst++) = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
tmp |= glue(ldub, MEMSUFFIX)(T0) << sh;
tmp |= glue(ldub, MEMSUFFIX)((uint32_t)T0) << sh;
}
ugpr(dst) = tmp;
}
}
#if defined(TARGET_PPC64)
void glue(do_lsw_le_64, MEMSUFFIX) (int dst)
{
uint32_t tmp;
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
ugpr(dst++) = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
tmp |= glue(ldub, MEMSUFFIX)((uint64_t)T0) << sh;
}
ugpr(dst) = tmp;
}
}
#endif
void glue(do_stsw_le, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
glue(st32r, MEMSUFFIX)(T0, ugpr(src++));
glue(st32r, MEMSUFFIX)((uint32_t)T0, ugpr(src++));
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
glue(stb, MEMSUFFIX)(T0, (ugpr(src) >> sh) & 0xFF);
glue(stb, MEMSUFFIX)((uint32_t)T0, (ugpr(src) >> sh) & 0xFF);
}
}
#if defined(TARGET_PPC64)
void glue(do_stsw_le_64, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
glue(st32r, MEMSUFFIX)((uint64_t)T0, ugpr(src++));
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
glue(stb, MEMSUFFIX)((uint64_t)T0, (ugpr(src) >> sh) & 0xFF);
}
}
#endif
/* PPC 601 specific instructions (POWER bridge) */
// XXX: to be tested
void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb)
@ -139,7 +251,7 @@ void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb)
d = 24;
reg = dest;
for (i = 0; i < T1; i++) {
c = glue(ldub, MEMSUFFIX)(T0++);
c = glue(ldub, MEMSUFFIX)((uint32_t)T0++);
/* ra (if not 0) and rb are never modified */
if (likely(reg != rb && (ra == 0 || reg != ra))) {
ugpr(reg) = (ugpr(reg) & ~(0xFF << d)) | (c << d);
@ -160,8 +272,8 @@ void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb)
/* XXX: TAGs are not managed */
void glue(do_POWER2_lfq, MEMSUFFIX) (void)
{
FT0 = glue(ldfq, MEMSUFFIX)(T0);
FT1 = glue(ldfq, MEMSUFFIX)(T0 + 4);
FT0 = glue(ldfq, MEMSUFFIX)((uint32_t)T0);
FT1 = glue(ldfq, MEMSUFFIX)((uint32_t)(T0 + 4));
}
static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
@ -186,14 +298,14 @@ static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
void glue(do_POWER2_lfq_le, MEMSUFFIX) (void)
{
FT0 = glue(ldfqr, MEMSUFFIX)(T0 + 4);
FT1 = glue(ldfqr, MEMSUFFIX)(T0);
FT0 = glue(ldfqr, MEMSUFFIX)((uint32_t)(T0 + 4));
FT1 = glue(ldfqr, MEMSUFFIX)((uint32_t)T0);
}
void glue(do_POWER2_stfq, MEMSUFFIX) (void)
{
glue(stfq, MEMSUFFIX)(T0, FT0);
glue(stfq, MEMSUFFIX)(T0 + 4, FT1);
glue(stfq, MEMSUFFIX)((uint32_t)T0, FT0);
glue(stfq, MEMSUFFIX)((uint32_t)(T0 + 4), FT1);
}
static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
@ -217,8 +329,8 @@ static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
void glue(do_POWER2_stfq_le, MEMSUFFIX) (void)
{
glue(stfqr, MEMSUFFIX)(T0 + 4, FT0);
glue(stfqr, MEMSUFFIX)(T0, FT1);
glue(stfqr, MEMSUFFIX)((uint32_t)(T0 + 4), FT0);
glue(stfqr, MEMSUFFIX)((uint32_t)T0, FT1);
}
#undef MEMSUFFIX


@ -37,6 +37,33 @@ static inline uint32_t glue(ld32r, MEMSUFFIX) (target_ulong EA)
((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24);
}
#if defined(TARGET_PPC64)
static inline int64_t glue(ldsl, MEMSUFFIX) (target_ulong EA)
{
return (int32_t)glue(ldl, MEMSUFFIX)(EA);
}
static inline uint64_t glue(ld64r, MEMSUFFIX) (target_ulong EA)
{
uint64_t tmp = glue(ldq, MEMSUFFIX)(EA);
return ((tmp & 0xFF00000000000000ULL) >> 56) |
((tmp & 0x00FF000000000000ULL) >> 40) |
((tmp & 0x0000FF0000000000ULL) >> 24) |
((tmp & 0x000000FF00000000ULL) >> 8) |
((tmp & 0x00000000FF000000ULL) << 8) |
((tmp & 0x0000000000FF0000ULL) << 24) |
((tmp & 0x000000000000FF00ULL) << 40) |
((tmp & 0x00000000000000FFULL) << 56);
}
static inline int64_t glue(ld32rs, MEMSUFFIX) (target_ulong EA)
{
uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
return (int32_t)(((tmp & 0xFF000000) >> 24) | ((tmp & 0x00FF0000) >> 8) |
((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24));
}
#endif
static inline void glue(st16r, MEMSUFFIX) (target_ulong EA, uint16_t data)
{
uint16_t tmp = ((data & 0xFF00) >> 8) | ((data & 0x00FF) << 8);
@ -50,140 +77,328 @@ static inline void glue(st32r, MEMSUFFIX) (target_ulong EA, uint32_t data)
glue(stl, MEMSUFFIX)(EA, tmp);
}
#if defined(TARGET_PPC64)
static inline void glue(st64r, MEMSUFFIX) (target_ulong EA, uint64_t data)
{
uint64_t tmp = ((data & 0xFF00000000000000ULL) >> 56) |
((data & 0x00FF000000000000ULL) >> 40) |
((data & 0x0000FF0000000000ULL) >> 24) |
((data & 0x000000FF00000000ULL) >> 8) |
((data & 0x00000000FF000000ULL) << 8) |
((data & 0x0000000000FF0000ULL) << 24) |
((data & 0x000000000000FF00ULL) << 40) |
((data & 0x00000000000000FFULL) << 56);
glue(stq, MEMSUFFIX)(EA, tmp);
}
#endif
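
ld64r/st64r are full eight-byte reversals for the little-endian doubleword accesses; every byte moves by a multiple of 8 bit positions, with the low byte travelling by 56 (a shift of 54 would smear it across two result bytes). The swap is its own inverse, which makes it easy to check standalone:

#include <assert.h>
#include <stdint.h>

static uint64_t bswap64(uint64_t x)
{
    return ((x & 0xFF00000000000000ULL) >> 56) |
           ((x & 0x00FF000000000000ULL) >> 40) |
           ((x & 0x0000FF0000000000ULL) >> 24) |
           ((x & 0x000000FF00000000ULL) >>  8) |
           ((x & 0x00000000FF000000ULL) <<  8) |
           ((x & 0x0000000000FF0000ULL) << 24) |
           ((x & 0x000000000000FF00ULL) << 40) |
           ((x & 0x00000000000000FFULL) << 56);
}

int main(void)
{
    uint64_t v = 0x0123456789ABCDEFULL;
    assert(bswap64(v) == 0xEFCDAB8967452301ULL);
    assert(bswap64(bswap64(v)) == v);   /* the swap is an involution */
    return 0;
}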
/*** Integer load ***/
#define PPC_LD_OP(name, op) \
PPC_OP(glue(glue(l, name), MEMSUFFIX)) \
void OPPROTO glue(glue(op_l, name), MEMSUFFIX) (void) \
{ \
T1 = glue(op, MEMSUFFIX)(T0); \
T1 = glue(op, MEMSUFFIX)((uint32_t)T0); \
RETURN(); \
}
#define PPC_ST_OP(name, op) \
PPC_OP(glue(glue(st, name), MEMSUFFIX)) \
#if defined(TARGET_PPC64)
#define PPC_LD_OP_64(name, op) \
void OPPROTO glue(glue(glue(op_l, name), _64), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)(T0, T1); \
T1 = glue(op, MEMSUFFIX)((uint64_t)T0); \
RETURN(); \
}
#endif
#define PPC_ST_OP(name, op) \
void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)((uint32_t)T0, T1); \
RETURN(); \
}
#if defined(TARGET_PPC64)
#define PPC_ST_OP_64(name, op) \
void OPPROTO glue(glue(glue(op_st, name), _64), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)((uint64_t)T0, T1); \
RETURN(); \
}
#endif
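
Since every accessor below goes through two or three levels of glue (token pasting), here is what one instantiation is assumed to expand to — PPC_LD_OP_64(d, ldq) with MEMSUFFIX defined as _raw:

/* glue(glue(glue(op_l, d), _64), _raw) pastes into op_ld_64_raw: */
void OPPROTO op_ld_64_raw (void)
{
    T1 = ldq_raw((uint64_t)T0);
    RETURN();
}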
PPC_LD_OP(bz, ldub);
PPC_LD_OP(ha, ldsw);
PPC_LD_OP(hz, lduw);
PPC_LD_OP(wz, ldl);
#if defined(TARGET_PPC64)
PPC_LD_OP(d, ldq);
PPC_LD_OP(wa, ldsl);
PPC_LD_OP_64(d, ldq);
PPC_LD_OP_64(wa, ldsl);
PPC_LD_OP_64(bz, ldub);
PPC_LD_OP_64(ha, ldsw);
PPC_LD_OP_64(hz, lduw);
PPC_LD_OP_64(wz, ldl);
#endif
PPC_LD_OP(ha_le, ld16rs);
PPC_LD_OP(hz_le, ld16r);
PPC_LD_OP(wz_le, ld32r);
#if defined(TARGET_PPC64)
PPC_LD_OP(d_le, ld64r);
PPC_LD_OP(wa_le, ld32rs);
PPC_LD_OP_64(d_le, ld64r);
PPC_LD_OP_64(wa_le, ld32rs);
PPC_LD_OP_64(ha_le, ld16rs);
PPC_LD_OP_64(hz_le, ld16r);
PPC_LD_OP_64(wz_le, ld32r);
#endif
/*** Integer store ***/
PPC_ST_OP(b, stb);
PPC_ST_OP(h, stw);
PPC_ST_OP(w, stl);
#if defined(TARGET_PPC64)
PPC_ST_OP(d, stq);
PPC_ST_OP_64(d, stq);
PPC_ST_OP_64(b, stb);
PPC_ST_OP_64(h, stw);
PPC_ST_OP_64(w, stl);
#endif
PPC_ST_OP(h_le, st16r);
PPC_ST_OP(w_le, st32r);
#if defined(TARGET_PPC64)
PPC_ST_OP(d_le, st64r);
PPC_ST_OP_64(d_le, st64r);
PPC_ST_OP_64(h_le, st16r);
PPC_ST_OP_64(w_le, st32r);
#endif
/*** Integer load and store with byte reverse ***/
PPC_LD_OP(hbr, ld16r);
PPC_LD_OP(wbr, ld32r);
PPC_ST_OP(hbr, st16r);
PPC_ST_OP(wbr, st32r);
#if defined(TARGET_PPC64)
PPC_LD_OP_64(hbr, ld16r);
PPC_LD_OP_64(wbr, ld32r);
PPC_ST_OP_64(hbr, st16r);
PPC_ST_OP_64(wbr, st32r);
#endif
PPC_LD_OP(hbr_le, lduw);
PPC_LD_OP(wbr_le, ldl);
PPC_ST_OP(hbr_le, stw);
PPC_ST_OP(wbr_le, stl);
#if defined(TARGET_PPC64)
PPC_LD_OP_64(hbr_le, lduw);
PPC_LD_OP_64(wbr_le, ldl);
PPC_ST_OP_64(hbr_le, stw);
PPC_ST_OP_64(wbr_le, stl);
#endif
/*** Integer load and store multiple ***/
PPC_OP(glue(lmw, MEMSUFFIX))
void OPPROTO glue(op_lmw, MEMSUFFIX) (void)
{
glue(do_lmw, MEMSUFFIX)(PARAM1);
RETURN();
}
PPC_OP(glue(lmw_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lmw_64, MEMSUFFIX) (void)
{
glue(do_lmw_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
void OPPROTO glue(op_lmw_le, MEMSUFFIX) (void)
{
glue(do_lmw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
PPC_OP(glue(stmw, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lmw_le_64, MEMSUFFIX) (void)
{
glue(do_lmw_le_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
void OPPROTO glue(op_stmw, MEMSUFFIX) (void)
{
glue(do_stmw, MEMSUFFIX)(PARAM1);
RETURN();
}
PPC_OP(glue(stmw_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_stmw_64, MEMSUFFIX) (void)
{
glue(do_stmw_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
void OPPROTO glue(op_stmw_le, MEMSUFFIX) (void)
{
glue(do_stmw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
/*** Integer load and store strings ***/
PPC_OP(glue(lswi, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_stmw_le_64, MEMSUFFIX) (void)
{
glue(do_lsw, MEMSUFFIX)(PARAM(1));
glue(do_stmw_le_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
/*** Integer load and store strings ***/
void OPPROTO glue(op_lswi, MEMSUFFIX) (void)
{
glue(do_lsw, MEMSUFFIX)(PARAM1);
RETURN();
}
PPC_OP(glue(lswi_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lswi_64, MEMSUFFIX) (void)
{
glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
glue(do_lsw_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
void OPPROTO glue(op_lswi_le, MEMSUFFIX) (void)
{
glue(do_lsw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lswi_le_64, MEMSUFFIX) (void)
{
glue(do_lsw_le_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
/* PPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
* On the other hand, IBM says this is valid, but rA won't be loaded.
* For now, I'll follow the spec...
*/
PPC_OP(glue(lswx, MEMSUFFIX))
void OPPROTO glue(op_lswx, MEMSUFFIX) (void)
{
if (unlikely(T1 > 0)) {
/* Note: T1 comes from xer_bc, so no cast is needed */
if (likely(T1 != 0)) {
if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
(PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
} else {
glue(do_lsw, MEMSUFFIX)(PARAM(1));
glue(do_lsw, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
PPC_OP(glue(lswx_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lswx_64, MEMSUFFIX) (void)
{
if (unlikely(T1 > 0)) {
/* Note: T1 comes from xer_bc, so no cast is needed */
if (likely(T1 != 0)) {
if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
(PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
} else {
glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
glue(do_lsw_64, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
#endif
void OPPROTO glue(op_lswx_le, MEMSUFFIX) (void)
{
/* Note: T1 comes from xer_bc, so no cast is needed */
if (likely(T1 != 0)) {
if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
(PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
} else {
glue(do_lsw_le, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
PPC_OP(glue(stsw, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lswx_le_64, MEMSUFFIX) (void)
{
glue(do_stsw, MEMSUFFIX)(PARAM(1));
/* Note: T1 comes from xer_bc, so no cast is needed */
if (likely(T1 != 0)) {
if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
(PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
} else {
glue(do_lsw_le_64, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
#endif
void OPPROTO glue(op_stsw, MEMSUFFIX) (void)
{
glue(do_stsw, MEMSUFFIX)(PARAM1);
RETURN();
}
PPC_OP(glue(stsw_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_stsw_64, MEMSUFFIX) (void)
{
glue(do_stsw_le, MEMSUFFIX)(PARAM(1));
glue(do_stsw_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
void OPPROTO glue(op_stsw_le, MEMSUFFIX) (void)
{
glue(do_stsw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_stsw_le_64, MEMSUFFIX) (void)
{
glue(do_stsw_le_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
/*** Floating-point store ***/
#define PPC_STF_OP(name, op) \
PPC_OP(glue(glue(st, name), MEMSUFFIX)) \
void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)(T0, FT0); \
glue(op, MEMSUFFIX)((uint32_t)T0, FT0); \
RETURN(); \
}
#if defined(TARGET_PPC64)
#define PPC_STF_OP_64(name, op) \
void OPPROTO glue(glue(glue(op_st, name), _64), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)((uint64_t)T0, FT0); \
RETURN(); \
}
#endif
PPC_STF_OP(fd, stfq);
PPC_STF_OP(fs, stfl);
#if defined(TARGET_PPC64)
PPC_STF_OP_64(fd, stfq);
PPC_STF_OP_64(fs, stfl);
#endif
static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
{
@ -221,17 +436,34 @@ static inline void glue(stflr, MEMSUFFIX) (target_ulong EA, float f)
PPC_STF_OP(fd_le, stfqr);
PPC_STF_OP(fs_le, stflr);
#if defined(TARGET_PPC64)
PPC_STF_OP_64(fd_le, stfqr);
PPC_STF_OP_64(fs_le, stflr);
#endif
/*** Floating-point load ***/
#define PPC_LDF_OP(name, op) \
PPC_OP(glue(glue(l, name), MEMSUFFIX)) \
void OPPROTO glue(glue(op_l, name), MEMSUFFIX) (void) \
{ \
FT0 = glue(op, MEMSUFFIX)(T0); \
FT0 = glue(op, MEMSUFFIX)((uint32_t)T0); \
RETURN(); \
}
#if defined(TARGET_PPC64)
#define PPC_LDF_OP_64(name, op) \
void OPPROTO glue(glue(glue(op_l, name), _64), MEMSUFFIX) (void) \
{ \
FT0 = glue(op, MEMSUFFIX)((uint64_t)T0); \
RETURN(); \
}
#endif
PPC_LDF_OP(fd, ldfq);
PPC_LDF_OP(fs, ldfl);
#if defined(TARGET_PPC64)
PPC_LDF_OP_64(fd, ldfq);
PPC_LDF_OP_64(fs, ldfl);
#endif
static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
{
@ -271,40 +503,92 @@ static inline float glue(ldflr, MEMSUFFIX) (target_ulong EA)
PPC_LDF_OP(fd_le, ldfqr);
PPC_LDF_OP(fs_le, ldflr);
#if defined(TARGET_PPC64)
PPC_LDF_OP_64(fd_le, ldfqr);
PPC_LDF_OP_64(fs_le, ldflr);
#endif
/* Load and set reservation */
PPC_OP(glue(lwarx, MEMSUFFIX))
void OPPROTO glue(op_lwarx, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
T1 = glue(ldl, MEMSUFFIX)(T0);
regs->reserve = T0;
T1 = glue(ldl, MEMSUFFIX)((uint32_t)T0);
regs->reserve = (uint32_t)T0;
}
RETURN();
}
PPC_OP(glue(lwarx_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lwarx_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
T1 = glue(ld32r, MEMSUFFIX)(T0);
regs->reserve = T0;
T1 = glue(ldl, MEMSUFFIX)((uint64_t)T0);
regs->reserve = (uint64_t)T0;
}
RETURN();
}
void OPPROTO glue(op_ldarx_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x07)) {
do_raise_exception(EXCP_ALIGN);
} else {
T1 = glue(ldq, MEMSUFFIX)((uint64_t)T0);
regs->reserve = (uint64_t)T0;
}
RETURN();
}
#endif
void OPPROTO glue(op_lwarx_le, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
T1 = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
regs->reserve = (uint32_t)T0;
}
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lwarx_le_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
T1 = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
regs->reserve = (uint64_t)T0;
}
RETURN();
}
void OPPROTO glue(op_ldarx_le_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x07)) {
do_raise_exception(EXCP_ALIGN);
} else {
T1 = glue(ld64r, MEMSUFFIX)((uint64_t)T0);
regs->reserve = (uint64_t)T0;
}
RETURN();
}
#endif
/* Store with reservation */
PPC_OP(glue(stwcx, MEMSUFFIX))
void OPPROTO glue(op_stwcx, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
if (unlikely(regs->reserve != T0)) {
if (unlikely(regs->reserve != (uint32_t)T0)) {
env->crf[0] = xer_ov;
} else {
glue(stl, MEMSUFFIX)(T0, T1);
glue(stl, MEMSUFFIX)((uint32_t)T0, T1);
env->crf[0] = xer_ov | 0x02;
}
}
@ -312,15 +596,16 @@ PPC_OP(glue(stwcx, MEMSUFFIX))
RETURN();
}
PPC_OP(glue(stwcx_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_stwcx_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
if (unlikely(regs->reserve != T0)) {
if (unlikely(regs->reserve != (uint64_t)T0)) {
env->crf[0] = xer_ov;
} else {
glue(st32r, MEMSUFFIX)(T0, T1);
glue(stl, MEMSUFFIX)((uint64_t)T0, T1);
env->crf[0] = xer_ov | 0x02;
}
}
@ -328,61 +613,186 @@ PPC_OP(glue(stwcx_le, MEMSUFFIX))
RETURN();
}
PPC_OP(glue(dcbz, MEMSUFFIX))
void OPPROTO glue(op_stdcx_64, MEMSUFFIX) (void)
{
glue(stl, MEMSUFFIX)(T0 + 0x00, 0);
glue(stl, MEMSUFFIX)(T0 + 0x04, 0);
glue(stl, MEMSUFFIX)(T0 + 0x08, 0);
glue(stl, MEMSUFFIX)(T0 + 0x0C, 0);
glue(stl, MEMSUFFIX)(T0 + 0x10, 0);
glue(stl, MEMSUFFIX)(T0 + 0x14, 0);
glue(stl, MEMSUFFIX)(T0 + 0x18, 0);
glue(stl, MEMSUFFIX)(T0 + 0x1C, 0);
if (unlikely(T0 & 0x07)) {
do_raise_exception(EXCP_ALIGN);
} else {
if (unlikely(regs->reserve != (uint64_t)T0)) {
env->crf[0] = xer_ov;
} else {
glue(stq, MEMSUFFIX)((uint64_t)T0, T1);
env->crf[0] = xer_ov | 0x02;
}
}
regs->reserve = -1;
RETURN();
}
#endif
void OPPROTO glue(op_stwcx_le, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
if (unlikely(regs->reserve != (uint32_t)T0)) {
env->crf[0] = xer_ov;
} else {
glue(st32r, MEMSUFFIX)((uint32_t)T0, T1);
env->crf[0] = xer_ov | 0x02;
}
}
regs->reserve = -1;
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_stwcx_le_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
if (unlikely(regs->reserve != (uint64_t)T0)) {
env->crf[0] = xer_ov;
} else {
glue(st32r, MEMSUFFIX)((uint64_t)T0, T1);
env->crf[0] = xer_ov | 0x02;
}
}
regs->reserve = -1;
RETURN();
}
void OPPROTO glue(op_stdcx_le_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x07)) {
do_raise_exception(EXCP_ALIGN);
} else {
if (unlikely(regs->reserve != (uint64_t)T0)) {
env->crf[0] = xer_ov;
} else {
glue(st64r, MEMSUFFIX)((uint64_t)T0, T1);
env->crf[0] = xer_ov | 0x02;
}
}
regs->reserve = -1;
RETURN();
}
#endif
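
These micro-ops implement load-reserve/store-conditional: the l*arx forms record the effective address in regs->reserve, and the st*cx. forms store only while the reservation still names that address, reporting success via the 0x02 (EQ) bit placed in CR0 above. A toy model of the protocol, outside QEMU:

#include <assert.h>
#include <stdint.h>

/* Load-reserve remembers the address; store-conditional succeeds
 * only while the reservation still holds, then always consumes it. */
typedef struct {
    uint64_t reserve;                 /* like regs->reserve; -1 = none */
} cpu_t;

static uint32_t lwarx_model(cpu_t *cpu, uint32_t *mem, uint64_t ea)
{
    cpu->reserve = ea;
    return *mem;
}

static int stwcx_model(cpu_t *cpu, uint32_t *mem, uint64_t ea, uint32_t val)
{
    int ok = (cpu->reserve == ea);
    if (ok)
        *mem = val;
    cpu->reserve = (uint64_t)-1;      /* consumed, success or failure */
    return ok;                        /* the guest reads this in CR0[EQ] */
}

int main(void)
{
    cpu_t cpu = { (uint64_t)-1 };
    uint32_t word = 41;
    uint32_t v = lwarx_model(&cpu, &word, 0x1000);
    assert(stwcx_model(&cpu, &word, 0x1000, v + 1) && word == 42);
    assert(!stwcx_model(&cpu, &word, 0x1000, 0));   /* reservation gone */
    return 0;
}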
void OPPROTO glue(op_dcbz, MEMSUFFIX) (void)
{
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x00), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x04), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x08), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x0C), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x10), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x14), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x18), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x1C), 0);
#if DCACHE_LINE_SIZE == 64
/* XXX: cache line size should be 64 for POWER & PowerPC 601 */
glue(stl, MEMSUFFIX)(T0 + 0x20UL, 0);
glue(stl, MEMSUFFIX)(T0 + 0x24UL, 0);
glue(stl, MEMSUFFIX)(T0 + 0x28UL, 0);
glue(stl, MEMSUFFIX)(T0 + 0x2CUL, 0);
glue(stl, MEMSUFFIX)(T0 + 0x30UL, 0);
glue(stl, MEMSUFFIX)(T0 + 0x34UL, 0);
glue(stl, MEMSUFFIX)(T0 + 0x38UL, 0);
glue(stl, MEMSUFFIX)(T0 + 0x3CUL, 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x20UL), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x24UL), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x28UL), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x2CUL), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x30UL), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x34UL), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x38UL), 0);
glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x3CUL), 0);
#endif
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_dcbz_64, MEMSUFFIX) (void)
{
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x00), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x04), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x08), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x0C), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x10), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x14), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x18), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x1C), 0);
#if DCACHE_LINE_SIZE == 64
/* XXX: cache line size should be 64 for POWER & PowerPC 601 */
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x20UL), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x24UL), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x28UL), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x2CUL), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x30UL), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x34UL), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x38UL), 0);
glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x3CUL), 0);
#endif
RETURN();
}
#endif
/* External access */
PPC_OP(glue(eciwx, MEMSUFFIX))
void OPPROTO glue(op_eciwx, MEMSUFFIX) (void)
{
T1 = glue(ldl, MEMSUFFIX)(T0);
T1 = glue(ldl, MEMSUFFIX)((uint32_t)T0);
RETURN();
}
PPC_OP(glue(ecowx, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_eciwx_64, MEMSUFFIX) (void)
{
glue(stl, MEMSUFFIX)(T0, T1);
T1 = glue(ldl, MEMSUFFIX)((uint64_t)T0);
RETURN();
}
#endif
void OPPROTO glue(op_ecowx, MEMSUFFIX) (void)
{
glue(stl, MEMSUFFIX)((uint32_t)T0, T1);
RETURN();
}
PPC_OP(glue(eciwx_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_ecowx_64, MEMSUFFIX) (void)
{
T1 = glue(ld32r, MEMSUFFIX)(T0);
glue(stl, MEMSUFFIX)((uint64_t)T0, T1);
RETURN();
}
#endif
void OPPROTO glue(op_eciwx_le, MEMSUFFIX) (void)
{
T1 = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
RETURN();
}
PPC_OP(glue(ecowx_le, MEMSUFFIX))
#if defined(TARGET_PPC64)
void OPPROTO glue(op_eciwx_le_64, MEMSUFFIX) (void)
{
glue(st32r, MEMSUFFIX)(T0, T1);
T1 = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
RETURN();
}
#endif
void OPPROTO glue(op_ecowx_le, MEMSUFFIX) (void)
{
glue(st32r, MEMSUFFIX)((uint32_t)T0, T1);
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_ecowx_le_64, MEMSUFFIX) (void)
{
glue(st32r, MEMSUFFIX)((uint64_t)T0, T1);
RETURN();
}
#endif
/* XXX: those micro-ops need tests ! */
/* PowerPC 601 specific instructions (POWER bridge) */
void OPPROTO glue(op_POWER_lscbx, MEMSUFFIX) (void)
{
/* When byte count is 0, do nothing */
if (likely(T1 > 0)) {
if (likely(T1 != 0)) {
glue(do_POWER_lscbx, MEMSUFFIX)(PARAM1, PARAM2, PARAM3);
}
RETURN();

File diff suppressed because it is too large.