-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJYeeoXAAoJEPMMOL0/L7482jcQAJFoNya2rIDrK8ljb8XldnlJ
kvgywaceh6jZUuGTfgJ4S1k9CUvSvC3Ds9Y7qkseZ9kzktS9oIbvcuEi+INWiE6J
ZmjP4KkYeKQ2rhiglbirHkweFmEXONqu2Ca1DfV/O6DkhyF25y1HO26Zu7P1ai2/
oq7xNaQbot8+6fntatiRNvAcWOwrq2zHG/BHU8anaNVdgYqHViZWFOKl3oM/GF7M
jz26SWhSduCNg4dzgULlnQsin8mnd3k9U+r2Rdo5szUhly794EOay3eVNPCmN1RF
5tGP+pyx2m34ZiCNBxwQlcHVjNGq6vQMCrIWBo6x9wABeGStxX7HhrZQ88JtCjWU
DQ7nbhLKaE1t39I/Ax3MCbz1c+zREHC0ivnZEMiXwdr/leP1dj6cyLw2UVQ2uk7r
azgV6Z6AbdmCr1/kSJQQkWxTvQFzQDWTgfFXgC6LvYj+5wFp1JXphPSrGTYM72sz
e3UnKyDHiBIK00jJ16k5wAlGI1yvu7N8zxVjAQ7WCs896ki3ita1NsG1Mgs2QAHb
FWkinnfLZE0W7MKX9Iw3X7YUxI4r4LhODC31X7CCP/LeZPjAGV4qS96cICtm8VqM
9/4nFFhGH15eYVKNQu/vVgjdh1IMpcmqMBtmv9Ndr5f9c/IIHlWXKGlj5qkypMpL
14YzVoiBxP+1aUIo6mts
=iBam
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/vivier/tags/m68k-for-2.9-pull-request' into staging

# gpg: Signature made Sat 14 Jan 2017 09:06:31 GMT
# gpg:                using RSA key 0xF30C38BD3F2FBE3C
# gpg: Good signature from "Laurent Vivier <lvivier@redhat.com>"
# gpg:                 aka "Laurent Vivier <laurent@vivier.eu>"
# gpg:                 aka "Laurent Vivier (Red Hat) <lvivier@redhat.com>"
# Primary key fingerprint: CD2F 75DD C8E3 A4DC 2E4F  5173 F30C 38BD 3F2F BE3C

* remotes/vivier/tags/m68k-for-2.9-pull-request:
  target-m68k: increment/decrement with SP
  target-m68k: CAS doesn't need aligned access
  target-m68k: manage pre-dec et post-inc in CAS
  target-m68k: fix gen_flush_flags()
  target-m68k: fix bit operation with immediate value
  m68k: Remove PCI and USB from config file
  target-m68k: Implement bfffo
  target-m68k: Implement bitfield ops for memory
  target-m68k: Implement bitfield ops for registers

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 2ccede18bd (branch: master)
Peter Maydell, 2017-01-16 12:41:35 +00:00
5 changed files with 636 additions and 10 deletions
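
The series below implements the 680x0 bitfield instructions (bfextu/bfexts, bfins, bfchg, bfclr, bfset, bftst, bfffo) for both register and memory operands, plus several small fixes. As a reading aid, here is a standalone sketch (illustrative only, not taken from the patch) of the register-operand extract semantics: a field is named by a bit offset, counted from the most-significant bit and reduced modulo 32 for register operands, and a width of 1..32, where an encoded width of 0 means 32.

    #include <stdint.h>
    #include <stdio.h>

    /* Rotate left by r, reduced modulo 32, as the 68020 does for register
       operands.  The r == 0 guard avoids an undefined 32-bit shift. */
    static uint32_t rotl32(uint32_t x, unsigned r)
    {
        r &= 31;
        return r ? (x << r) | (x >> (32 - r)) : x;
    }

    /* BFEXTU Dn{ofs:len}: zero-extended extract, bit offset 0 = MSB. */
    static uint32_t bfextu32(uint32_t src, unsigned ofs, unsigned len)
    {
        len = ((len - 1) & 31) + 1;            /* encoded width 0 means 32 */
        return rotl32(src, ofs) >> (32 - len); /* field to the top, then down */
    }

    int main(void)
    {
        /* The 8-bit field at bit offset 4 of 0x12345678 is 0x23. */
        printf("%#x\n", bfextu32(0x12345678, 4, 8));
        return 0;
    }

The op_helper.c hunk below does the analogous thing for memory operands, where the offset is an arbitrary signed bit count and the field can span up to five bytes.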

@@ -1,6 +1,4 @@
 # Default configuration for m68k-softmmu
-include pci.mak
-include usb.mak
 CONFIG_COLDFIRE=y
 CONFIG_PTIMER=y

@@ -37,6 +37,7 @@
 #define OS_DOUBLE 4
 #define OS_EXTENDED 5
 #define OS_PACKED 6
+#define OS_UNSIZED 7
 #define MAX_QREGS 32
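
The new OS_UNSIZED value gives the decoder a way to ask for an effective address without implying a transfer size: the bitfield memory forms added below call gen_lea(env, s, insn, OS_UNSIZED), and the gen_lea_mode() changes reject the addressing modes whose side effects depend on the operand size. A rough sketch of that rule (illustrative only; the enum assumes the usual OS_BYTE..OS_SINGLE values 0..3 preceding the defines shown above, and lea_mode_allowed is a hypothetical name):

    /* Mirrors the defines above: OS_DOUBLE is 4 and OS_UNSIZED ends up as 7. */
    enum {
        OS_BYTE, OS_WORD, OS_LONG, OS_SINGLE,
        OS_DOUBLE, OS_EXTENDED, OS_PACKED, OS_UNSIZED
    };

    /* (An)+ and -(An) adjust An by the operand size, which is meaningless
       for an unsized access, so an unsized lea rejects them. */
    int lea_mode_allowed(int mode, int opsize)
    {
        if ((mode == 3 || mode == 4) && opsize == OS_UNSIZED) {
            return 0; /* post-increment / pre-decrement: not allowed */
        }
        return 1;
    }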

@@ -50,3 +50,13 @@ DEF_HELPER_2(flush_flags, void, env, i32)
 DEF_HELPER_2(set_ccr, void, env, i32)
 DEF_HELPER_FLAGS_1(get_ccr, TCG_CALL_NO_WG_SE, i32, env)
 DEF_HELPER_2(raise_exception, void, env, i32)
+DEF_HELPER_FLAGS_3(bfffo_reg, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_4(bfexts_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfextu_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32)
+DEF_HELPER_FLAGS_5(bfins_mem, TCG_CALL_NO_WG, i32, env, i32, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfchg_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfclr_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfset_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfffo_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32)
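
Each DEF_HELPER_FLAGS_n line declares a helper callable from generated code: the type letters map to C types (i32/s32/i64 roughly to uint32_t/int32_t/uint64_t, env to the CPU state pointer), and flags such as TCG_CALL_NO_WG or TCG_CALL_NO_RWG_SE describe how the call touches TCG globals so the optimizer can work around it. As a sketch, the bfexts_mem line corresponds to the prototype of the helper defined in op_helper.c below; the forward declaration of CPUM68KState is only there to keep the fragment self-contained.

    #include <stdint.h>

    typedef struct CPUM68KState CPUM68KState;

    uint32_t helper_bfexts_mem(CPUM68KState *env, uint32_t addr,
                               int32_t ofs, uint32_t len);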

@@ -469,3 +469,209 @@ void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
     env->dregs[Dc1] = l1;
     env->dregs[Dc2] = l2;
 }
+
+struct bf_data {
+    uint32_t addr;
+    uint32_t bofs;
+    uint32_t blen;
+    uint32_t len;
+};
+
+static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len)
+{
+    int bofs, blen;
+    /* Bound length; map 0 to 32. */
+    len = ((len - 1) & 31) + 1;
+    /* Note that ofs is signed. */
+    addr += ofs / 8;
+    bofs = ofs % 8;
+    if (bofs < 0) {
+        bofs += 8;
+        addr -= 1;
+    }
+    /* Compute the number of bytes required (minus one) to
+       satisfy the bitfield. */
+    blen = (bofs + len - 1) / 8;
+    /* Canonicalize the bit offset for data loaded into a 64-bit big-endian
+       word. For the cases where BLEN is not a power of 2, adjust ADDR so
+       that we can use the next power of two sized load without crossing a
+       page boundary, unless the field itself crosses the boundary. */
+    switch (blen) {
+    case 0:
+        bofs += 56;
+        break;
+    case 1:
+        bofs += 48;
+        break;
+    case 2:
+        if (addr & 1) {
+            bofs += 8;
+            addr -= 1;
+        }
+        /* fallthru */
+    case 3:
+        bofs += 32;
+        break;
+    case 4:
+        if (addr & 3) {
+            bofs += 8 * (addr & 3);
+            addr &= -4;
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return (struct bf_data){
+        .addr = addr,
+        .bofs = bofs,
+        .blen = blen,
+        .len = len,
+    };
+}
+
+static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen,
+                        uintptr_t ra)
+{
+    switch (blen) {
+    case 0:
+        return cpu_ldub_data_ra(env, addr, ra);
+    case 1:
+        return cpu_lduw_data_ra(env, addr, ra);
+    case 2:
+    case 3:
+        return cpu_ldl_data_ra(env, addr, ra);
+    case 4:
+        return cpu_ldq_data_ra(env, addr, ra);
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void bf_store(CPUM68KState *env, uint32_t addr, int blen,
+                     uint64_t data, uintptr_t ra)
+{
+    switch (blen) {
+    case 0:
+        cpu_stb_data_ra(env, addr, data, ra);
+        break;
+    case 1:
+        cpu_stw_data_ra(env, addr, data, ra);
+        break;
+    case 2:
+    case 3:
+        cpu_stl_data_ra(env, addr, data, ra);
+        break;
+    case 4:
+        cpu_stq_data_ra(env, addr, data, ra);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr,
+                            int32_t ofs, uint32_t len)
+{
+    uintptr_t ra = GETPC();
+    struct bf_data d = bf_prep(addr, ofs, len);
+    uint64_t data = bf_load(env, d.addr, d.blen, ra);
+    return (int64_t)(data << d.bofs) >> (64 - d.len);
+}
+
+uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr,
+                            int32_t ofs, uint32_t len)
+{
+    uintptr_t ra = GETPC();
+    struct bf_data d = bf_prep(addr, ofs, len);
+    uint64_t data = bf_load(env, d.addr, d.blen, ra);
+    /* Put CC_N at the top of the high word; put the zero-extended value
+       at the bottom of the low word. */
+    data <<= d.bofs;
+    data >>= 64 - d.len;
+    data |= data << (64 - d.len);
+    return data;
+}
+
+uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val,
+                           int32_t ofs, uint32_t len)
+{
+    uintptr_t ra = GETPC();
+    struct bf_data d = bf_prep(addr, ofs, len);
+    uint64_t data = bf_load(env, d.addr, d.blen, ra);
+    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+    data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs);
+    bf_store(env, d.addr, d.blen, data, ra);
+    /* The field at the top of the word is also CC_N for CC_OP_LOGIC. */
+    return val << (32 - d.len);
+}
+
+uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr,
+                           int32_t ofs, uint32_t len)
+{
+    uintptr_t ra = GETPC();
+    struct bf_data d = bf_prep(addr, ofs, len);
+    uint64_t data = bf_load(env, d.addr, d.blen, ra);
+    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+    bf_store(env, d.addr, d.blen, data ^ mask, ra);
+    return ((data & mask) << d.bofs) >> 32;
+}
+
+uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr,
+                           int32_t ofs, uint32_t len)
+{
+    uintptr_t ra = GETPC();
+    struct bf_data d = bf_prep(addr, ofs, len);
+    uint64_t data = bf_load(env, d.addr, d.blen, ra);
+    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+    bf_store(env, d.addr, d.blen, data & ~mask, ra);
+    return ((data & mask) << d.bofs) >> 32;
+}
+
+uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr,
+                           int32_t ofs, uint32_t len)
+{
+    uintptr_t ra = GETPC();
+    struct bf_data d = bf_prep(addr, ofs, len);
+    uint64_t data = bf_load(env, d.addr, d.blen, ra);
+    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+    bf_store(env, d.addr, d.blen, data | mask, ra);
+    return ((data & mask) << d.bofs) >> 32;
+}
+
+uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len)
+{
+    return (n ? clz32(n) : len) + ofs;
+}
+
+uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr,
+                           int32_t ofs, uint32_t len)
+{
+    uintptr_t ra = GETPC();
+    struct bf_data d = bf_prep(addr, ofs, len);
+    uint64_t data = bf_load(env, d.addr, d.blen, ra);
+    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+    uint64_t n = (data & mask) << d.bofs;
+    uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len);
+    /* Return FFO in the low word and N in the high word.
+       Note that because of MASK and the shift, the low word
+       is already zero. */
+    return n | ffo;
+}
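
All of the read-modify-write helpers above select the field with the same expression, mask = -1ull << (64 - len) >> bofs, applied to data that bf_prep() has left-aligned as a big-endian value in 64 bits. A standalone illustration (not QEMU code) of that mask for one sample field:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned bofs = 5, len = 12;                 /* example field */
        uint64_t mask = -1ull << (64 - len) >> bofs; /* as in the helpers above */
        /* 12 set bits starting 5 bits below the top: 0x07ff800000000000 */
        printf("mask = %#018" PRIx64 "\n", mask);
        return 0;
    }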

@@ -595,18 +595,19 @@ static void gen_flush_flags(DisasContext *s)
     case CC_OP_DYNAMIC:
         gen_helper_flush_flags(cpu_env, QREG_CC_OP);
+        s->cc_op_synced = 1;
         break;
     default:
         t0 = tcg_const_i32(s->cc_op);
         gen_helper_flush_flags(cpu_env, t0);
         tcg_temp_free(t0);
+        s->cc_op_synced = 1;
         break;
     }
     /* Note that flush_flags also assigned to env->cc_op. */
     s->cc_op = CC_OP_FLAGS;
-    s->cc_op_synced = 1;
 }
 
 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
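
Some context for the gen_flush_flags() hunk above: the m68k translator evaluates condition codes lazily, recording which operation last set them in s->cc_op and only materialising real CCR bits when needed; cc_op_synced tracks whether the cc_op value stored in the CPU state matches the translator's, which, judging from this change, is only guaranteed on the paths that call the flush_flags helper. A toy model of lazy flags (not the QEMU data structures):

    #include <stdint.h>

    /* Remember the last operation and its result; compute flags on demand. */
    enum cc_op { CC_OP_FLAGS, CC_OP_LOGIC_TOY };
    struct cc_state { enum cc_op op; uint32_t n; uint32_t flags; };

    /* m68k CCR bit positions: C=0, V=1, Z=2, N=3, X=4. */
    static void flush_flags_toy(struct cc_state *cc)
    {
        if (cc->op == CC_OP_LOGIC_TOY) {
            cc->flags = ((cc->n == 0) << 2) | ((cc->n >> 31) << 3); /* Z, N */
            cc->op = CC_OP_FLAGS; /* flags are now authoritative */
        }
    }
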
@@ -711,13 +712,25 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
     case 0: /* Data register direct. */
     case 1: /* Address register direct. */
         return NULL_QREG;
-    case 2: /* Indirect register */
     case 3: /* Indirect postincrement. */
+        if (opsize == OS_UNSIZED) {
+            return NULL_QREG;
+        }
+        /* fallthru */
+    case 2: /* Indirect register */
         return get_areg(s, reg0);
     case 4: /* Indirect predecrememnt. */
+        if (opsize == OS_UNSIZED) {
+            return NULL_QREG;
+        }
         reg = get_areg(s, reg0);
         tmp = tcg_temp_new();
-        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
+        if (reg0 == 7 && opsize == OS_BYTE &&
+            m68k_feature(s->env, M68K_FEATURE_M68000)) {
+            tcg_gen_subi_i32(tmp, reg, 2);
+        } else {
+            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
+        }
         return tmp;
     case 5: /* Indirect displacement. */
         reg = get_areg(s, reg0);
@@ -793,7 +806,12 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
         result = gen_ldst(s, opsize, reg, val, what);
         if (what == EA_STORE || !addrp) {
             TCGv tmp = tcg_temp_new();
-            tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
+            if (reg0 == 7 && opsize == OS_BYTE &&
+                m68k_feature(s->env, M68K_FEATURE_M68000)) {
+                tcg_gen_addi_i32(tmp, reg, 2);
+            } else {
+                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
+            }
             delay_set_areg(s, reg0, tmp, true);
         }
         return result;
@@ -1794,9 +1812,16 @@ DISAS_INSN(bitop_im)
     op = (insn >> 6) & 3;
     bitnum = read_im16(env, s);
-    if (bitnum & 0xff00) {
-        disas_undef(env, s, insn);
-        return;
+    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
+        if (bitnum & 0xfe00) {
+            disas_undef(env, s, insn);
+            return;
+        }
+    } else {
+        if (bitnum & 0xff00) {
+            disas_undef(env, s, insn);
+            return;
+        }
     }
 
     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
@@ -1919,7 +1944,6 @@ DISAS_INSN(cas)
     default:
         g_assert_not_reached();
     }
-    opc |= MO_ALIGN;
     ext = read_im16(env, s);
@@ -1948,6 +1972,15 @@ DISAS_INSN(cas)
     gen_partset_reg(opsize, DREG(ext, 0), load);
     tcg_temp_free(load);
+
+    switch (extract32(insn, 3, 3)) {
+    case 3: /* Indirect postincrement. */
+        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
+        break;
+    case 4: /* Indirect predecrememnt. */
+        tcg_gen_mov_i32(AREG(insn, 0), addr);
+        break;
+    }
 }
 
 DISAS_INSN(cas2w)
@@ -3504,6 +3537,370 @@ DISAS_INSN(rotate_mem)
     set_cc_op(s, CC_OP_FLAGS);
 }
 
+DISAS_INSN(bfext_reg)
+{
+    int ext = read_im16(env, s);
+    int is_sign = insn & 0x200;
+    TCGv src = DREG(insn, 0);
+    TCGv dst = DREG(ext, 12);
+    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+    int ofs = extract32(ext, 6, 5); /* big bit-endian */
+    int pos = 32 - ofs - len; /* little bit-endian */
+    TCGv tmp = tcg_temp_new();
+    TCGv shift;
+    /* In general, we're going to rotate the field so that it's at the
+       top of the word and then right-shift by the compliment of the
+       width to extend the field. */
+    if (ext & 0x20) {
+        /* Variable width. */
+        if (ext & 0x800) {
+            /* Variable offset. */
+            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+            tcg_gen_rotl_i32(tmp, src, tmp);
+        } else {
+            tcg_gen_rotli_i32(tmp, src, ofs);
+        }
+        shift = tcg_temp_new();
+        tcg_gen_neg_i32(shift, DREG(ext, 0));
+        tcg_gen_andi_i32(shift, shift, 31);
+        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
+        if (is_sign) {
+            tcg_gen_mov_i32(dst, QREG_CC_N);
+        } else {
+            tcg_gen_shr_i32(dst, tmp, shift);
+        }
+        tcg_temp_free(shift);
+    } else {
+        /* Immediate width. */
+        if (ext & 0x800) {
+            /* Variable offset */
+            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+            tcg_gen_rotl_i32(tmp, src, tmp);
+            src = tmp;
+            pos = 32 - len;
+        } else {
+            /* Immediate offset. If the field doesn't wrap around the
+               end of the word, rely on (s)extract completely. */
+            if (pos < 0) {
+                tcg_gen_rotli_i32(tmp, src, ofs);
+                src = tmp;
+                pos = 32 - len;
+            }
+        }
+        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
+        if (is_sign) {
+            tcg_gen_mov_i32(dst, QREG_CC_N);
+        } else {
+            tcg_gen_extract_i32(dst, src, pos, len);
+        }
+    }
+    tcg_temp_free(tmp);
+    set_cc_op(s, CC_OP_LOGIC);
+}
+
+DISAS_INSN(bfext_mem)
+{
+    int ext = read_im16(env, s);
+    int is_sign = insn & 0x200;
+    TCGv dest = DREG(ext, 12);
+    TCGv addr, len, ofs;
+    addr = gen_lea(env, s, insn, OS_UNSIZED);
+    if (IS_NULL_QREG(addr)) {
+        gen_addr_fault(s);
+        return;
+    }
+    if (ext & 0x20) {
+        len = DREG(ext, 0);
+    } else {
+        len = tcg_const_i32(extract32(ext, 0, 5));
+    }
+    if (ext & 0x800) {
+        ofs = DREG(ext, 6);
+    } else {
+        ofs = tcg_const_i32(extract32(ext, 6, 5));
+    }
+    if (is_sign) {
+        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
+        tcg_gen_mov_i32(QREG_CC_N, dest);
+    } else {
+        TCGv_i64 tmp = tcg_temp_new_i64();
+        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
+        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
+        tcg_temp_free_i64(tmp);
+    }
+    set_cc_op(s, CC_OP_LOGIC);
+    if (!(ext & 0x20)) {
+        tcg_temp_free(len);
+    }
+    if (!(ext & 0x800)) {
+        tcg_temp_free(ofs);
+    }
+}
+
+DISAS_INSN(bfop_reg)
+{
+    int ext = read_im16(env, s);
+    TCGv src = DREG(insn, 0);
+    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+    int ofs = extract32(ext, 6, 5); /* big bit-endian */
+    TCGv mask, tofs, tlen;
+    TCGV_UNUSED(tofs);
+    TCGV_UNUSED(tlen);
+    if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
+        tofs = tcg_temp_new();
+        tlen = tcg_temp_new();
+    }
+    if ((ext & 0x820) == 0) {
+        /* Immediate width and offset. */
+        uint32_t maski = 0x7fffffffu >> (len - 1);
+        if (ofs + len <= 32) {
+            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
+        } else {
+            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
+        }
+        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
+        mask = tcg_const_i32(ror32(maski, ofs));
+        if (!TCGV_IS_UNUSED(tofs)) {
+            tcg_gen_movi_i32(tofs, ofs);
+            tcg_gen_movi_i32(tlen, len);
+        }
+    } else {
+        TCGv tmp = tcg_temp_new();
+        if (ext & 0x20) {
+            /* Variable width */
+            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
+            tcg_gen_andi_i32(tmp, tmp, 31);
+            mask = tcg_const_i32(0x7fffffffu);
+            tcg_gen_shr_i32(mask, mask, tmp);
+            if (!TCGV_IS_UNUSED(tlen)) {
+                tcg_gen_addi_i32(tlen, tmp, 1);
+            }
+        } else {
+            /* Immediate width */
+            mask = tcg_const_i32(0x7fffffffu >> (len - 1));
+            if (!TCGV_IS_UNUSED(tlen)) {
+                tcg_gen_movi_i32(tlen, len);
+            }
+        }
+        if (ext & 0x800) {
+            /* Variable offset */
+            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
+            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
+            tcg_gen_rotr_i32(mask, mask, tmp);
+            if (!TCGV_IS_UNUSED(tofs)) {
+                tcg_gen_mov_i32(tofs, tmp);
+            }
+        } else {
+            /* Immediate offset (and variable width) */
+            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
+            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
+            tcg_gen_rotri_i32(mask, mask, ofs);
+            if (!TCGV_IS_UNUSED(tofs)) {
+                tcg_gen_movi_i32(tofs, ofs);
+            }
+        }
+        tcg_temp_free(tmp);
+    }
+    set_cc_op(s, CC_OP_LOGIC);
+    switch (insn & 0x0f00) {
+    case 0x0a00: /* bfchg */
+        tcg_gen_eqv_i32(src, src, mask);
+        break;
+    case 0x0c00: /* bfclr */
+        tcg_gen_and_i32(src, src, mask);
+        break;
+    case 0x0d00: /* bfffo */
+        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
+        tcg_temp_free(tlen);
+        tcg_temp_free(tofs);
+        break;
+    case 0x0e00: /* bfset */
+        tcg_gen_orc_i32(src, src, mask);
+        break;
+    case 0x0800: /* bftst */
+        /* flags already set; no other work to do. */
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    tcg_temp_free(mask);
+}
+
+DISAS_INSN(bfop_mem)
+{
+    int ext = read_im16(env, s);
+    TCGv addr, len, ofs;
+    TCGv_i64 t64;
+    addr = gen_lea(env, s, insn, OS_UNSIZED);
+    if (IS_NULL_QREG(addr)) {
+        gen_addr_fault(s);
+        return;
+    }
+    if (ext & 0x20) {
+        len = DREG(ext, 0);
+    } else {
+        len = tcg_const_i32(extract32(ext, 0, 5));
+    }
+    if (ext & 0x800) {
+        ofs = DREG(ext, 6);
+    } else {
+        ofs = tcg_const_i32(extract32(ext, 6, 5));
+    }
+    switch (insn & 0x0f00) {
+    case 0x0a00: /* bfchg */
+        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+        break;
+    case 0x0c00: /* bfclr */
+        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+        break;
+    case 0x0d00: /* bfffo */
+        t64 = tcg_temp_new_i64();
+        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
+        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
+        tcg_temp_free_i64(t64);
+        break;
+    case 0x0e00: /* bfset */
+        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+        break;
+    case 0x0800: /* bftst */
+        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    set_cc_op(s, CC_OP_LOGIC);
+    if (!(ext & 0x20)) {
+        tcg_temp_free(len);
+    }
+    if (!(ext & 0x800)) {
+        tcg_temp_free(ofs);
+    }
+}
+
+DISAS_INSN(bfins_reg)
+{
+    int ext = read_im16(env, s);
+    TCGv dst = DREG(insn, 0);
+    TCGv src = DREG(ext, 12);
+    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+    int ofs = extract32(ext, 6, 5); /* big bit-endian */
+    int pos = 32 - ofs - len; /* little bit-endian */
+    TCGv tmp;
+    tmp = tcg_temp_new();
+    if (ext & 0x20) {
+        /* Variable width */
+        tcg_gen_neg_i32(tmp, DREG(ext, 0));
+        tcg_gen_andi_i32(tmp, tmp, 31);
+        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
+    } else {
+        /* Immediate width */
+        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
+    }
+    set_cc_op(s, CC_OP_LOGIC);
+    /* Immediate width and offset */
+    if ((ext & 0x820) == 0) {
+        /* Check for suitability for deposit. */
+        if (pos >= 0) {
+            tcg_gen_deposit_i32(dst, dst, src, pos, len);
+        } else {
+            uint32_t maski = -2U << (len - 1);
+            uint32_t roti = (ofs + len) & 31;
+            tcg_gen_andi_i32(tmp, src, ~maski);
+            tcg_gen_rotri_i32(tmp, tmp, roti);
+            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
+            tcg_gen_or_i32(dst, dst, tmp);
+        }
+    } else {
+        TCGv mask = tcg_temp_new();
+        TCGv rot = tcg_temp_new();
+        if (ext & 0x20) {
+            /* Variable width */
+            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
+            tcg_gen_andi_i32(rot, rot, 31);
+            tcg_gen_movi_i32(mask, -2);
+            tcg_gen_shl_i32(mask, mask, rot);
+            tcg_gen_mov_i32(rot, DREG(ext, 0));
+            tcg_gen_andc_i32(tmp, src, mask);
+        } else {
+            /* Immediate width (variable offset) */
+            uint32_t maski = -2U << (len - 1);
+            tcg_gen_andi_i32(tmp, src, ~maski);
+            tcg_gen_movi_i32(mask, maski);
+            tcg_gen_movi_i32(rot, len & 31);
+        }
+        if (ext & 0x800) {
+            /* Variable offset */
+            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
+        } else {
+            /* Immediate offset (variable width) */
+            tcg_gen_addi_i32(rot, rot, ofs);
+        }
+        tcg_gen_andi_i32(rot, rot, 31);
+        tcg_gen_rotr_i32(mask, mask, rot);
+        tcg_gen_rotr_i32(tmp, tmp, rot);
+        tcg_gen_and_i32(dst, dst, mask);
+        tcg_gen_or_i32(dst, dst, tmp);
+        tcg_temp_free(rot);
+        tcg_temp_free(mask);
+    }
+    tcg_temp_free(tmp);
+}
+
+DISAS_INSN(bfins_mem)
+{
+    int ext = read_im16(env, s);
+    TCGv src = DREG(ext, 12);
+    TCGv addr, len, ofs;
+    addr = gen_lea(env, s, insn, OS_UNSIZED);
+    if (IS_NULL_QREG(addr)) {
+        gen_addr_fault(s);
+        return;
+    }
+    if (ext & 0x20) {
+        len = DREG(ext, 0);
+    } else {
+        len = tcg_const_i32(extract32(ext, 0, 5));
+    }
+    if (ext & 0x800) {
+        ofs = DREG(ext, 6);
+    } else {
+        ofs = tcg_const_i32(extract32(ext, 6, 5));
+    }
+    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
+    set_cc_op(s, CC_OP_LOGIC);
+    if (!(ext & 0x20)) {
+        tcg_temp_free(len);
+    }
+    if (!(ext & 0x800)) {
+        tcg_temp_free(ofs);
+    }
+}
+
 DISAS_INSN(ff1)
 {
     TCGv reg;
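
All of the bitfield decoders above parse the same extension word: bit 11 selects a register offset (Dn in bits 8..6) versus an immediate offset (bits 10..6), bit 5 selects a register width (Dn in bits 2..0) versus an immediate width (bits 4..0), and bits 14..12 name the data register used by bfext/bfins/bfffo. A small standalone decoder in the same spirit (illustrative only, not the QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Print the operands encoded in a bitfield extension word. */
    static void decode_bf_ext(uint16_t ext)
    {
        if (ext & 0x800) {
            printf("offset = D%d, ", (ext >> 6) & 7);
        } else {
            printf("offset = %d, ", (ext >> 6) & 31);
        }
        if (ext & 0x20) {
            printf("width = D%d, ", ext & 7);
        } else {
            printf("width = %d, ", (((ext & 31) - 1) & 31) + 1); /* 0 means 32 */
        }
        printf("data register = D%d\n", (ext >> 12) & 7);
    }

    int main(void)
    {
        decode_bf_ext(0x0104); /* offset 4, width 4, data register D0 */
        return 0;
    }
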
@@ -4595,6 +4992,20 @@ void register_m68k_insns (CPUM68KState *env)
     INSN(rotate8_reg, e030, f0f0, M68000);
     INSN(rotate16_reg, e070, f0f0, M68000);
     INSN(rotate_mem, e4c0, fcc0, M68000);
+    INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
+    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
+    INSN(bfins_mem, efc0, ffc0, BITFIELD);
+    INSN(bfins_reg, efc0, fff8, BITFIELD);
+    INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
+    INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
+    INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
+    INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
+    INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
+    INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
+    INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
+    INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
+    INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
+    INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
     INSN(undef_fpu, f000, f000, CF_ISA_A);
     INSN(fpu, f200, ffc0, CF_FPU);
     INSN(fbcc, f280, ffc0, CF_FPU);
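
Finally, for readers unfamiliar with this decode table: each INSN(name, opcode, mask, feature) entry registers the handler for every 16-bit instruction word w with (w & 0xmask) == 0xopcode, gated on the named CPU feature (BITFIELD here). With mask ffc0 the low six bits, the effective-address mode/register field, are left free; with fff8 only the three register bits are free (the data-register forms). A small sketch of the matching rule (illustrative only, not QEMU's dispatch code):

    #include <stdint.h>
    #include <stdio.h>

    /* An instruction word matches an entry when the masked bits equal the
       entry's pattern. */
    static int matches(uint16_t insn, uint16_t pattern, uint16_t mask)
    {
        return (insn & mask) == pattern;
    }

    int main(void)
    {
        printf("%d\n", matches(0xead0, 0xeac0, 0xffc0)); /* 1: bfchg <ea> form */
        printf("%d\n", matches(0xe9d0, 0xeac0, 0xffc0)); /* 0: that's bfextu   */
        return 0;
    }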