ppc patch queue 2018-07-03

Here's a last minute pull request before today's soft freeze.  Ideally
 I would have sent this earlier, but I was waiting for a couple of
 extra fixes I knew were close.  And the freeze crept up on me, like
 always.
 
 Most of the changes here are bugfixes in any case.  There are some
 cleanups as well, which have been in my staging tree for a little
 while.  There are a couple of truly new features (some extensions to
 the sam460ex platform), but these are low risk, since they only affect
 a new and not really stabilized machine type anyway.
 
 Highlights are:
   * Mac platform improvements from Mark Cave-Ayland
   * Sam460ex improvements from BALATON Zoltan et al.
   * XICS interrupt handler cleanups from Cédric Le Goater
   * TCG improvements for atomic loads and stores from Richard
     Henderson
   * Assorted other bugfixes
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAls7D8oACgkQbDjKyiDZ
 s5Lxmg//YzPfC/nKqTTKkyJPzh/NnSC+kRTMAT3mbxdRIc7yfgMqJtWGGbS1iKgK
 EeJ9hl5Qm0HfscfDuzf0xasU62ZEv3kNdLnWJEIgkqiXrxoO5KCnC0y4D8NN1W03
 mvINNCa8+QDg2OsirGmNUTkriiG3wLIrHTpLZ4+JuC2Bd9H3nTHZgJ0MXON/1VWY
 oRgr6kMZ5+IAzPhvYLFR6l3nPI883fgJOFyRo7YqYrkVBKFrFkfK0Xjw6vpsNxcx
 2dE/YCHhNIriLuBG5noewL7GuqZRtLnl6rjjee5VAKIe1EmFeR+jsXwNjzGOVOJg
 dhjOtsJsQQ3WdEw5uImJzE64kV228WCgmkeXzZd1010JBLr7sUkrd2EuoZ23vvat
 uvZAHVSBrJg5WvzMo1VMEoPU3VeeZQ5HL+MI80iKiU6oUgRK11gVJcebtA0sEKt+
 zhJC4JiUlHtZLTGIpMBmU8DJZ3Tyk1cBEm+Ky+SaPE+dsz16UHI0fazFQXJnXphE
 MLHEGAyQgzWYp7kIcAjUFev0Geq/Uovy4JKIGI6ISop1wRPEQDxkthfkfRyQxQkE
 zuse4EBcEH/Undw9KrmEQa0hCe+8BRkxklVbPesFPPdqH3PKNxtHYuWpSShQF0PW
 XMjw43O2Rbsl8kBUHCpy4pYSugD1hpfgaw/mVUOU1u/M1O6toTw=
 =AHrx
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-3.0-20180703' into staging

ppc patch queue 2018-07-03

Here's a last minute pull request before today's soft freeze.  Ideally
I would have sent this earlier, but I was waiting for a couple of
extra fixes I knew were close.  And the freeze crept up on me, like
always.

Most of the changes here are bugfixes in any case.  There are some
cleanups as well, which have been in my staging tree for a little
while.  There are a couple of truly new features (some extensions to
the sam460ex platform), but these are low risk, since they only affect
a new and not really stabilized machine type anyway.

Highlights are:
  * Mac platform improvements from Mark Cave-Ayland
  * Sam460ex improvements from BALATON Zoltan et al.
  * XICS interrupt handler cleanups from Cédric Le Goater
  * TCG improvements for atomic loads and stores from Richard
    Henderson
  * Assorted other bugfixes

# gpg: Signature made Tue 03 Jul 2018 06:55:22 BST
# gpg:                using RSA key 6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>"
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-3.0-20180703: (35 commits)
  ppc: Include vga cirrus card into the compiling process
  target/ppc: Relax reserved bitmask of indexed store instructions
  target/ppc: set is_jmp on ppc_tr_breakpoint_check
  spapr: compute default value of "hpt-max-page-size" later
  target/ppc/kvm: don't pass cpu to kvm_get_smmu_info()
  target/ppc/kvm: get rid of kvm_get_fallback_smmu_info()
  ppc440_uc: Basic emulation of PPC440 DMA controller
  sam460ex: Add RTC device
  hw/timer: Add basic M41T80 emulation
  ppc4xx_i2c: Rewrite to model hardware more closely
  hw/ppc: Give sam46ex its own config option
  fpu_helper.c: fix setting FPSCR[FI] bit
  target/ppc: Implement the rest of gen_st_atomic
  target/ppc: Implement the rest of gen_ld_atomic
  target/ppc: Use atomic min/max helpers
  target/ppc: Use MO_ALIGN for EXIWX and ECOWX
  target/ppc: Split out gen_st_atomic
  target/ppc: Split out gen_ld_atomic
  target/ppc: Split out gen_load_locked
  target/ppc: Tidy gen_conditional_store
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
#	hw/ppc/spapr.c
Peter Maydell 2018-07-03 14:59:27 +01:00
commit b07cd3e748
29 changed files with 1308 additions and 728 deletions


@ -851,6 +851,7 @@ M: BALATON Zoltan <balaton@eik.bme.hu>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ide/sii3112.c
F: hw/timer/m41t80.c
SH4 Machines
------------


@ -22,11 +22,14 @@ CONFIG_OPENPIC_KVM=$(call land,$(CONFIG_E500),$(CONFIG_KVM))
CONFIG_PLATFORM_BUS=y
CONFIG_ETSEC=y
# For Sam460ex
CONFIG_SAM460EX=y
CONFIG_USB_EHCI_SYSBUS=y
CONFIG_SM501=y
CONFIG_IDE_SII3112=y
CONFIG_I2C=y
CONFIG_BITBANG_I2C=y
CONFIG_M41T80=y
CONFIG_VGA_CIRRUS=y
# For Macs
CONFIG_MAC=y


@ -34,16 +34,50 @@
#define PPC4xx_I2C_MEM_SIZE 18
enum {
IIC_MDBUF = 0,
/* IIC_SDBUF = 2, */
IIC_LMADR = 4,
IIC_HMADR,
IIC_CNTL,
IIC_MDCNTL,
IIC_STS,
IIC_EXTSTS,
IIC_LSADR,
IIC_HSADR,
IIC_CLKDIV,
IIC_INTRMSK,
IIC_XFRCNT,
IIC_XTCNTLSS,
IIC_DIRECTCNTL
/* IIC_INTR */
};
#define IIC_CNTL_PT (1 << 0)
#define IIC_CNTL_READ (1 << 1)
#define IIC_CNTL_CHT (1 << 2)
#define IIC_CNTL_RPST (1 << 3)
#define IIC_CNTL_AMD (1 << 6)
#define IIC_CNTL_HMT (1 << 7)
#define IIC_MDCNTL_EINT (1 << 2)
#define IIC_MDCNTL_ESM (1 << 3)
#define IIC_MDCNTL_FMDB (1 << 6)
#define IIC_STS_PT (1 << 0)
#define IIC_STS_IRQA (1 << 1)
#define IIC_STS_ERR (1 << 2)
#define IIC_STS_MDBF (1 << 4)
#define IIC_STS_MDBS (1 << 5)
#define IIC_EXTSTS_XFRA (1 << 0)
#define IIC_EXTSTS_BCS_FREE (4 << 4)
#define IIC_EXTSTS_BCS_BUSY (5 << 4)
#define IIC_INTRMSK_EIMTC (1 << 0)
#define IIC_INTRMSK_EITA (1 << 1)
#define IIC_INTRMSK_EIIC (1 << 2)
#define IIC_INTRMSK_EIHE (1 << 3)
#define IIC_XTCNTLSS_SRST (1 << 0)
@ -56,130 +90,83 @@ static void ppc4xx_i2c_reset(DeviceState *s)
{
PPC4xxI2CState *i2c = PPC4xx_I2C(s);
/* FIXME: Should also reset bus?
*if (s->address != ADDR_RESET) {
* i2c_end_transfer(s->bus);
*}
*/
i2c->mdata = 0;
i2c->lmadr = 0;
i2c->hmadr = 0;
i2c->mdidx = -1;
memset(i2c->mdata, 0, ARRAY_SIZE(i2c->mdata));
/* [hl][ms]addr are not affected by reset */
i2c->cntl = 0;
i2c->mdcntl = 0;
i2c->sts = 0;
i2c->extsts = 0x8f;
i2c->lsadr = 0;
i2c->hsadr = 0;
i2c->extsts = IIC_EXTSTS_BCS_FREE;
i2c->clkdiv = 0;
i2c->intrmsk = 0;
i2c->xfrcnt = 0;
i2c->xtcntlss = 0;
i2c->directcntl = 0xf;
}
static inline bool ppc4xx_i2c_is_master(PPC4xxI2CState *i2c)
{
return true;
i2c->directcntl = 0xf; /* all non-reserved bits set */
}
static uint64_t ppc4xx_i2c_readb(void *opaque, hwaddr addr, unsigned int size)
{
PPC4xxI2CState *i2c = PPC4xx_I2C(opaque);
uint64_t ret;
int i;
switch (addr) {
case 0:
ret = i2c->mdata;
if (ppc4xx_i2c_is_master(i2c)) {
case IIC_MDBUF:
if (i2c->mdidx < 0) {
ret = 0xff;
if (!(i2c->sts & IIC_STS_MDBS)) {
qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to read "
"without starting transfer\n",
TYPE_PPC4xx_I2C, __func__);
} else {
int pending = (i2c->cntl >> 4) & 3;
/* get the next byte */
int byte = i2c_recv(i2c->bus);
if (byte < 0) {
qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: read failed "
"for device 0x%02x\n", TYPE_PPC4xx_I2C,
__func__, i2c->lmadr);
ret = 0xff;
} else {
ret = byte;
/* Raise interrupt if enabled */
/*ppc4xx_i2c_raise_interrupt(i2c)*/;
}
if (!pending) {
i2c->sts &= ~IIC_STS_MDBS;
/*i2c_end_transfer(i2c->bus);*/
/*} else if (i2c->cntl & (IIC_CNTL_RPST | IIC_CNTL_CHT)) {*/
} else if (pending) {
/* current smbus implementation doesn't like
multibyte xfer repeated start */
i2c_end_transfer(i2c->bus);
if (i2c_start_transfer(i2c->bus, i2c->lmadr >> 1, 1)) {
/* if non-zero is returned, the address is not valid */
i2c->sts &= ~IIC_STS_PT;
i2c->sts |= IIC_STS_ERR;
i2c->extsts |= IIC_EXTSTS_XFRA;
} else {
/*i2c->sts |= IIC_STS_PT;*/
i2c->sts |= IIC_STS_MDBS;
i2c->sts &= ~IIC_STS_ERR;
i2c->extsts = 0;
}
}
pending--;
i2c->cntl = (i2c->cntl & 0xcf) | (pending << 4);
}
} else {
qemu_log_mask(LOG_UNIMP, "[%s]%s: slave mode not implemented\n",
TYPE_PPC4xx_I2C, __func__);
break;
}
ret = i2c->mdata[0];
if (i2c->mdidx == 3) {
i2c->sts &= ~IIC_STS_MDBF;
} else if (i2c->mdidx == 0) {
i2c->sts &= ~IIC_STS_MDBS;
}
for (i = 0; i < i2c->mdidx; i++) {
i2c->mdata[i] = i2c->mdata[i + 1];
}
if (i2c->mdidx >= 0) {
i2c->mdidx--;
}
break;
case 4:
case IIC_LMADR:
ret = i2c->lmadr;
break;
case 5:
case IIC_HMADR:
ret = i2c->hmadr;
break;
case 6:
case IIC_CNTL:
ret = i2c->cntl;
break;
case 7:
case IIC_MDCNTL:
ret = i2c->mdcntl;
break;
case 8:
case IIC_STS:
ret = i2c->sts;
break;
case 9:
ret = i2c->extsts;
case IIC_EXTSTS:
ret = i2c_bus_busy(i2c->bus) ?
IIC_EXTSTS_BCS_BUSY : IIC_EXTSTS_BCS_FREE;
break;
case 10:
case IIC_LSADR:
ret = i2c->lsadr;
break;
case 11:
case IIC_HSADR:
ret = i2c->hsadr;
break;
case 12:
case IIC_CLKDIV:
ret = i2c->clkdiv;
break;
case 13:
case IIC_INTRMSK:
ret = i2c->intrmsk;
break;
case 14:
case IIC_XFRCNT:
ret = i2c->xfrcnt;
break;
case 15:
case IIC_XTCNTLSS:
ret = i2c->xtcntlss;
break;
case 16:
case IIC_DIRECTCNTL:
ret = i2c->directcntl;
break;
default:
@ -202,99 +189,127 @@ static void ppc4xx_i2c_writeb(void *opaque, hwaddr addr, uint64_t value,
PPC4xxI2CState *i2c = opaque;
switch (addr) {
case 0:
i2c->mdata = value;
if (!i2c_bus_busy(i2c->bus)) {
/* assume we start a write transfer */
if (i2c_start_transfer(i2c->bus, i2c->lmadr >> 1, 0)) {
/* if non-zero is returned, the address is not valid */
i2c->sts &= ~IIC_STS_PT;
i2c->sts |= IIC_STS_ERR;
i2c->extsts |= IIC_EXTSTS_XFRA;
} else {
i2c->sts |= IIC_STS_PT;
i2c->sts &= ~IIC_STS_ERR;
i2c->extsts = 0;
}
case IIC_MDBUF:
if (i2c->mdidx >= 3) {
break;
}
if (i2c_bus_busy(i2c->bus)) {
if (i2c_send(i2c->bus, i2c->mdata)) {
/* if the target returns non-zero, end the transfer */
i2c->sts &= ~IIC_STS_PT;
i2c->sts |= IIC_STS_ERR;
i2c->extsts |= IIC_EXTSTS_XFRA;
i2c_end_transfer(i2c->bus);
}
i2c->mdata[++i2c->mdidx] = value;
if (i2c->mdidx == 3) {
i2c->sts |= IIC_STS_MDBF;
} else if (i2c->mdidx == 0) {
i2c->sts |= IIC_STS_MDBS;
}
break;
case 4:
case IIC_LMADR:
i2c->lmadr = value;
if (i2c_bus_busy(i2c->bus)) {
i2c_end_transfer(i2c->bus);
}
break;
case 5:
case IIC_HMADR:
i2c->hmadr = value;
break;
case 6:
i2c->cntl = value;
if (i2c->cntl & IIC_CNTL_PT) {
if (i2c->cntl & IIC_CNTL_READ) {
if (i2c_bus_busy(i2c->bus)) {
/* end previous transfer */
i2c->sts &= ~IIC_STS_PT;
i2c_end_transfer(i2c->bus);
case IIC_CNTL:
i2c->cntl = value & ~IIC_CNTL_PT;
if (value & IIC_CNTL_AMD) {
qemu_log_mask(LOG_UNIMP, "%s: only 7 bit addresses supported\n",
__func__);
}
if (value & IIC_CNTL_HMT && i2c_bus_busy(i2c->bus)) {
i2c_end_transfer(i2c->bus);
if (i2c->mdcntl & IIC_MDCNTL_EINT &&
i2c->intrmsk & IIC_INTRMSK_EIHE) {
i2c->sts |= IIC_STS_IRQA;
qemu_irq_raise(i2c->irq);
}
} else if (value & IIC_CNTL_PT) {
int recv = (value & IIC_CNTL_READ) >> 1;
int tct = value >> 4 & 3;
int i;
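/* CNTL[TCT] holds the transfer count minus one, so the loop below
 * performs tct + 1 single-byte transfers.
 */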
if (recv && (i2c->lmadr >> 1) >= 0x50 && (i2c->lmadr >> 1) < 0x58) {
/* smbus emulation does not like multi-byte reads without restart */
value |= IIC_CNTL_RPST;
}
for (i = 0; i <= tct; i++) {
if (!i2c_bus_busy(i2c->bus)) {
i2c->extsts = IIC_EXTSTS_BCS_FREE;
if (i2c_start_transfer(i2c->bus, i2c->lmadr >> 1, recv)) {
i2c->sts |= IIC_STS_ERR;
i2c->extsts |= IIC_EXTSTS_XFRA;
break;
} else {
i2c->sts &= ~IIC_STS_ERR;
}
}
if (i2c_start_transfer(i2c->bus, i2c->lmadr >> 1, 1)) {
/* if non-zero is returned, the address is not valid */
i2c->sts &= ~IIC_STS_PT;
if (!(i2c->sts & IIC_STS_ERR) &&
i2c_send_recv(i2c->bus, &i2c->mdata[i], !recv)) {
i2c->sts |= IIC_STS_ERR;
i2c->extsts |= IIC_EXTSTS_XFRA;
} else {
/*i2c->sts |= IIC_STS_PT;*/
i2c->sts |= IIC_STS_MDBS;
i2c->sts &= ~IIC_STS_ERR;
i2c->extsts = 0;
break;
}
} else {
/* we actually already did the write transfer... */
i2c->sts &= ~IIC_STS_PT;
if (value & IIC_CNTL_RPST || !(value & IIC_CNTL_CHT)) {
i2c_end_transfer(i2c->bus);
}
}
i2c->xfrcnt = i;
i2c->mdidx = i - 1;
if (recv && i2c->mdidx >= 0) {
i2c->sts |= IIC_STS_MDBS;
}
if (recv && i2c->mdidx == 3) {
i2c->sts |= IIC_STS_MDBF;
}
if (i && i2c->mdcntl & IIC_MDCNTL_EINT &&
i2c->intrmsk & IIC_INTRMSK_EIMTC) {
i2c->sts |= IIC_STS_IRQA;
qemu_irq_raise(i2c->irq);
}
}
break;
case 7:
i2c->mdcntl = value & 0xdf;
case IIC_MDCNTL:
i2c->mdcntl = value & 0x3d;
if (value & IIC_MDCNTL_ESM) {
qemu_log_mask(LOG_UNIMP, "%s: slave mode not implemented\n",
__func__);
}
if (value & IIC_MDCNTL_FMDB) {
i2c->mdidx = -1;
memset(i2c->mdata, 0, ARRAY_SIZE(i2c->mdata));
i2c->sts &= ~(IIC_STS_MDBF | IIC_STS_MDBS);
}
break;
case 8:
i2c->sts &= ~(value & 0xa);
case IIC_STS:
i2c->sts &= ~(value & 0x0a);
if (value & IIC_STS_IRQA && i2c->mdcntl & IIC_MDCNTL_EINT) {
qemu_irq_lower(i2c->irq);
}
break;
case 9:
case IIC_EXTSTS:
i2c->extsts &= ~(value & 0x8f);
break;
case 10:
case IIC_LSADR:
i2c->lsadr = value;
break;
case 11:
case IIC_HSADR:
i2c->hsadr = value;
break;
case 12:
case IIC_CLKDIV:
i2c->clkdiv = value;
break;
case 13:
case IIC_INTRMSK:
i2c->intrmsk = value;
break;
case 14:
case IIC_XFRCNT:
i2c->xfrcnt = value & 0x77;
break;
case 15:
case IIC_XTCNTLSS:
i2c->xtcntlss &= ~(value & 0xf0);
if (value & IIC_XTCNTLSS_SRST) {
/* Is it actually a full reset? U-Boot sets some regs before */
ppc4xx_i2c_reset(DEVICE(i2c));
break;
}
i2c->xtcntlss = value;
break;
case 16:
case IIC_DIRECTCNTL:
i2c->directcntl = value & (IIC_DIRECTCNTL_SDAC & IIC_DIRECTCNTL_SCLC);
i2c->directcntl |= (value & IIC_DIRECTCNTL_SCLC ? 1 : 0);
bitbang_i2c_set(i2c->bitbang, BITBANG_I2C_SCL,


@ -294,7 +294,6 @@ static const VMStateDescription vmstate_icp_server = {
static void icp_reset(void *dev)
{
ICPState *icp = ICP(dev);
ICPStateClass *icpc = ICP_GET_CLASS(icp);
icp->xirr = 0;
icp->pending_priority = 0xff;
@ -302,16 +301,11 @@ static void icp_reset(void *dev)
/* Make sure all outputs are deasserted */
qemu_set_irq(icp->output, 0);
if (icpc->reset) {
icpc->reset(icp);
}
}
static void icp_realize(DeviceState *dev, Error **errp)
{
ICPState *icp = ICP(dev);
ICPStateClass *icpc = ICP_GET_CLASS(dev);
PowerPCCPU *cpu;
CPUPPCState *env;
Object *obj;
@ -351,10 +345,6 @@ static void icp_realize(DeviceState *dev, Error **errp)
return;
}
if (icpc->realize) {
icpc->realize(icp, errp);
}
qemu_register_reset(icp_reset, dev);
vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
}
@ -547,9 +537,61 @@ static void ics_simple_eoi(ICSState *ics, uint32_t nr)
}
}
static void ics_simple_reset(void *dev)
static void ics_simple_reset(DeviceState *dev)
{
ICSStateClass *icsc = ICS_BASE_GET_CLASS(dev);
icsc->parent_reset(dev);
}
static void ics_simple_reset_handler(void *dev)
{
ics_simple_reset(dev);
}
static void ics_simple_realize(DeviceState *dev, Error **errp)
{
ICSState *ics = ICS_SIMPLE(dev);
ICSStateClass *icsc = ICS_BASE_GET_CLASS(ics);
Error *local_err = NULL;
icsc->parent_realize(dev, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
ics->qirqs = qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs);
qemu_register_reset(ics_simple_reset_handler, ics);
}
static void ics_simple_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ICSStateClass *isc = ICS_BASE_CLASS(klass);
device_class_set_parent_realize(dc, ics_simple_realize,
&isc->parent_realize);
device_class_set_parent_reset(dc, ics_simple_reset,
&isc->parent_reset);
isc->reject = ics_simple_reject;
isc->resend = ics_simple_resend;
isc->eoi = ics_simple_eoi;
}
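/* QOM pattern note: the device_class_set_parent_realize/reset helpers
 * save the base class handlers in ICSStateClass so that
 * ics_simple_realize() and ics_simple_reset() above can chain to them
 * before layering the "simple" ICS behaviour on top.
 */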
static const TypeInfo ics_simple_info = {
.name = TYPE_ICS_SIMPLE,
.parent = TYPE_ICS_BASE,
.instance_size = sizeof(ICSState),
.class_init = ics_simple_class_init,
.class_size = sizeof(ICSStateClass),
};
static void ics_base_reset(DeviceState *dev)
{
ICSState *ics = ICS_BASE(dev);
int i;
uint8_t flags[ics->nr_irqs];
@ -566,7 +608,35 @@ static void ics_simple_reset(void *dev)
}
}
static int ics_simple_dispatch_pre_save(void *opaque)
static void ics_base_realize(DeviceState *dev, Error **errp)
{
ICSState *ics = ICS_BASE(dev);
Object *obj;
Error *err = NULL;
obj = object_property_get_link(OBJECT(dev), ICS_PROP_XICS, &err);
if (!obj) {
error_propagate(errp, err);
error_prepend(errp, "required link '" ICS_PROP_XICS "' not found: ");
return;
}
ics->xics = XICS_FABRIC(obj);
if (!ics->nr_irqs) {
error_setg(errp, "Number of interrupts needs to be greater than 0");
return;
}
ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
}
static void ics_base_instance_init(Object *obj)
{
ICSState *ics = ICS_BASE(obj);
ics->offset = XICS_IRQ_BASE;
}
static int ics_base_dispatch_pre_save(void *opaque)
{
ICSState *ics = opaque;
ICSStateClass *info = ICS_BASE_GET_CLASS(ics);
@ -578,7 +648,7 @@ static int ics_simple_dispatch_pre_save(void *opaque)
return 0;
}
static int ics_simple_dispatch_post_load(void *opaque, int version_id)
static int ics_base_dispatch_post_load(void *opaque, int version_id)
{
ICSState *ics = opaque;
ICSStateClass *info = ICS_BASE_GET_CLASS(ics);
@ -590,7 +660,7 @@ static int ics_simple_dispatch_post_load(void *opaque, int version_id)
return 0;
}
static const VMStateDescription vmstate_ics_simple_irq = {
static const VMStateDescription vmstate_ics_base_irq = {
.name = "ics/irq",
.version_id = 2,
.minimum_version_id = 1,
@ -604,95 +674,36 @@ static const VMStateDescription vmstate_ics_simple_irq = {
},
};
static const VMStateDescription vmstate_ics_simple = {
static const VMStateDescription vmstate_ics_base = {
.name = "ics",
.version_id = 1,
.minimum_version_id = 1,
.pre_save = ics_simple_dispatch_pre_save,
.post_load = ics_simple_dispatch_post_load,
.pre_save = ics_base_dispatch_pre_save,
.post_load = ics_base_dispatch_post_load,
.fields = (VMStateField[]) {
/* Sanity check */
VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL),
VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
vmstate_ics_simple_irq,
vmstate_ics_base_irq,
ICSIRQState),
VMSTATE_END_OF_LIST()
},
};
static void ics_simple_initfn(Object *obj)
{
ICSState *ics = ICS_SIMPLE(obj);
ics->offset = XICS_IRQ_BASE;
}
static void ics_simple_realize(ICSState *ics, Error **errp)
{
if (!ics->nr_irqs) {
error_setg(errp, "Number of interrupts needs to be greater than 0");
return;
}
ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
ics->qirqs = qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs);
qemu_register_reset(ics_simple_reset, ics);
}
static Property ics_simple_properties[] = {
static Property ics_base_properties[] = {
DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
DEFINE_PROP_END_OF_LIST(),
};
static void ics_simple_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ICSStateClass *isc = ICS_BASE_CLASS(klass);
isc->realize = ics_simple_realize;
dc->props = ics_simple_properties;
dc->vmsd = &vmstate_ics_simple;
isc->reject = ics_simple_reject;
isc->resend = ics_simple_resend;
isc->eoi = ics_simple_eoi;
}
static const TypeInfo ics_simple_info = {
.name = TYPE_ICS_SIMPLE,
.parent = TYPE_ICS_BASE,
.instance_size = sizeof(ICSState),
.class_init = ics_simple_class_init,
.class_size = sizeof(ICSStateClass),
.instance_init = ics_simple_initfn,
};
static void ics_base_realize(DeviceState *dev, Error **errp)
{
ICSStateClass *icsc = ICS_BASE_GET_CLASS(dev);
ICSState *ics = ICS_BASE(dev);
Object *obj;
Error *err = NULL;
obj = object_property_get_link(OBJECT(dev), ICS_PROP_XICS, &err);
if (!obj) {
error_propagate(errp, err);
error_prepend(errp, "required link '" ICS_PROP_XICS "' not found: ");
return;
}
ics->xics = XICS_FABRIC(obj);
if (icsc->realize) {
icsc->realize(ics, errp);
}
}
static void ics_base_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = ics_base_realize;
dc->props = ics_base_properties;
dc->reset = ics_base_reset;
dc->vmsd = &vmstate_ics_base;
}
static const TypeInfo ics_base_info = {
@ -700,6 +711,7 @@ static const TypeInfo ics_base_info = {
.parent = TYPE_DEVICE,
.abstract = true,
.instance_size = sizeof(ICSState),
.instance_init = ics_base_instance_init,
.class_init = ics_base_class_init,
.class_size = sizeof(ICSStateClass),
};


@ -114,22 +114,38 @@ static int icp_set_kvm_state(ICPState *icp, int version_id)
return 0;
}
static void icp_kvm_reset(ICPState *icp)
static void icp_kvm_reset(DeviceState *dev)
{
icp_set_kvm_state(icp, 1);
ICPStateClass *icpc = ICP_GET_CLASS(dev);
icpc->parent_reset(dev);
icp_set_kvm_state(ICP(dev), 1);
}
static void icp_kvm_realize(ICPState *icp, Error **errp)
static void icp_kvm_realize(DeviceState *dev, Error **errp)
{
CPUState *cs = icp->cs;
ICPState *icp = ICP(dev);
ICPStateClass *icpc = ICP_GET_CLASS(icp);
Error *local_err = NULL;
CPUState *cs;
KVMEnabledICP *enabled_icp;
unsigned long vcpu_id = kvm_arch_vcpu_id(cs);
unsigned long vcpu_id;
int ret;
if (kernel_xics_fd == -1) {
abort();
}
icpc->parent_realize(dev, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
cs = icp->cs;
vcpu_id = kvm_arch_vcpu_id(cs);
/*
* If we are reusing a parked vCPU fd corresponding to the CPU
* which was hot-removed earlier we don't have to re-enable
@ -154,12 +170,16 @@ static void icp_kvm_realize(ICPState *icp, Error **errp)
static void icp_kvm_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ICPStateClass *icpc = ICP_CLASS(klass);
device_class_set_parent_realize(dc, icp_kvm_realize,
&icpc->parent_realize);
device_class_set_parent_reset(dc, icp_kvm_reset,
&icpc->parent_reset);
icpc->pre_save = icp_get_kvm_state;
icpc->post_load = icp_set_kvm_state;
icpc->realize = icp_kvm_realize;
icpc->reset = icp_kvm_reset;
icpc->synchronize_state = icp_synchronize_state;
}
@ -304,44 +324,46 @@ static void ics_kvm_set_irq(void *opaque, int srcno, int val)
}
}
static void ics_kvm_reset(void *dev)
static void ics_kvm_reset(DeviceState *dev)
{
ICSState *ics = ICS_SIMPLE(dev);
int i;
uint8_t flags[ics->nr_irqs];
ICSStateClass *icsc = ICS_BASE_GET_CLASS(dev);
for (i = 0; i < ics->nr_irqs; i++) {
flags[i] = ics->irqs[i].flags;
}
icsc->parent_reset(dev);
memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
for (i = 0; i < ics->nr_irqs; i++) {
ics->irqs[i].priority = 0xff;
ics->irqs[i].saved_priority = 0xff;
ics->irqs[i].flags = flags[i];
}
ics_set_kvm_state(ics, 1);
ics_set_kvm_state(ICS_KVM(dev), 1);
}
static void ics_kvm_realize(ICSState *ics, Error **errp)
static void ics_kvm_reset_handler(void *dev)
{
if (!ics->nr_irqs) {
error_setg(errp, "Number of interrupts needs to be greater 0");
ics_kvm_reset(dev);
}
static void ics_kvm_realize(DeviceState *dev, Error **errp)
{
ICSState *ics = ICS_KVM(dev);
ICSStateClass *icsc = ICS_BASE_GET_CLASS(ics);
Error *local_err = NULL;
icsc->parent_realize(dev, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
ics->qirqs = qemu_allocate_irqs(ics_kvm_set_irq, ics, ics->nr_irqs);
qemu_register_reset(ics_kvm_reset, ics);
qemu_register_reset(ics_kvm_reset_handler, ics);
}
static void ics_kvm_class_init(ObjectClass *klass, void *data)
{
ICSStateClass *icsc = ICS_BASE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_parent_realize(dc, ics_kvm_realize,
&icsc->parent_realize);
device_class_set_parent_reset(dc, ics_kvm_reset,
&icsc->parent_reset);
icsc->realize = ics_kvm_realize;
icsc->pre_save = ics_get_kvm_state;
icsc->post_load = ics_set_kvm_state;
icsc->synchronize_state = ics_synchronize_state;
@ -349,7 +371,7 @@ static void ics_kvm_class_init(ObjectClass *klass, void *data)
static const TypeInfo ics_kvm_info = {
.name = TYPE_ICS_KVM,
.parent = TYPE_ICS_SIMPLE,
.parent = TYPE_ICS_BASE,
.instance_size = sizeof(ICSState),
.class_init = ics_kvm_class_init,
};


@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "qemu/log.h"
#include "hw/ppc/xics.h"
@ -158,9 +159,18 @@ static const MemoryRegionOps pnv_icp_ops = {
},
};
static void pnv_icp_realize(ICPState *icp, Error **errp)
static void pnv_icp_realize(DeviceState *dev, Error **errp)
{
ICPState *icp = ICP(dev);
PnvICPState *pnv_icp = PNV_ICP(icp);
ICPStateClass *icpc = ICP_GET_CLASS(icp);
Error *local_err = NULL;
icpc->parent_realize(dev, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
memory_region_init_io(&pnv_icp->mmio, OBJECT(icp), &pnv_icp_ops,
icp, "icp-thread", 0x1000);
@ -171,7 +181,8 @@ static void pnv_icp_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
ICPStateClass *icpc = ICP_CLASS(klass);
icpc->realize = pnv_icp_realize;
device_class_set_parent_realize(dc, pnv_icp_realize,
&icpc->parent_realize);
dc->desc = "PowerNV ICP";
}


@ -71,18 +71,19 @@ static DBDMAState *dbdma_from_ch(DBDMA_channel *ch)
}
#if DEBUG_DBDMA
static void dump_dbdma_cmd(dbdma_cmd *cmd)
static void dump_dbdma_cmd(DBDMA_channel *ch, dbdma_cmd *cmd)
{
printf("dbdma_cmd %p\n", cmd);
printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
printf(" command 0x%04x\n", le16_to_cpu(cmd->command));
printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
DBDMA_DPRINTFCH(ch, "dbdma_cmd %p\n", cmd);
DBDMA_DPRINTFCH(ch, " req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
DBDMA_DPRINTFCH(ch, " command 0x%04x\n", le16_to_cpu(cmd->command));
DBDMA_DPRINTFCH(ch, " phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
DBDMA_DPRINTFCH(ch, " cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
DBDMA_DPRINTFCH(ch, " res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
DBDMA_DPRINTFCH(ch, " xfer_status 0x%04x\n",
le16_to_cpu(cmd->xfer_status));
}
#else
static void dump_dbdma_cmd(dbdma_cmd *cmd)
static void dump_dbdma_cmd(DBDMA_channel *ch, dbdma_cmd *cmd)
{
}
#endif
@ -448,7 +449,7 @@ static void channel_run(DBDMA_channel *ch)
uint32_t phy_addr;
DBDMA_DPRINTFCH(ch, "channel_run\n");
dump_dbdma_cmd(current);
dump_dbdma_cmd(ch, current);
/* clear WAKE flag at command fetch */


@ -14,7 +14,8 @@ obj-$(CONFIG_PSERIES) += spapr_rtas_ddw.o
# PowerPC 4xx boards
obj-y += ppc4xx_devs.o ppc405_uc.o
obj-$(CONFIG_PPC4XX) += ppc4xx_pci.o ppc405_boards.o
obj-$(CONFIG_PPC4XX) += ppc440_bamboo.o ppc440_pcix.o ppc440_uc.o sam460ex.o
obj-$(CONFIG_PPC4XX) += ppc440_bamboo.o ppc440_pcix.o ppc440_uc.o
obj-$(CONFIG_SAM460EX) += sam460ex.o
# PReP
obj-$(CONFIG_PREP) += prep.o
obj-$(CONFIG_PREP) += prep_systemio.o


@ -406,11 +406,11 @@ static void ppc_core99_init(MachineState *machine)
adb_bus = qdev_get_child_bus(dev, "adb.0");
dev = qdev_create(adb_bus, TYPE_ADB_KEYBOARD);
qdev_prop_set_bit(dev, "disable-direct-reg3-writes", has_pmu);
qdev_prop_set_bit(dev, "disable-direct-reg3-writes", true);
qdev_init_nofail(dev);
dev = qdev_create(adb_bus, TYPE_ADB_MOUSE);
qdev_prop_set_bit(dev, "disable-direct-reg3-writes", has_pmu);
qdev_prop_set_bit(dev, "disable-direct-reg3-writes", true);
qdev_init_nofail(dev);
}


@ -150,6 +150,7 @@ static void pnv_core_realize(DeviceState *dev, Error **errp)
if (!chip) {
error_propagate(errp, local_err);
error_prepend(errp, "required link 'chip' not found: ");
return;
}
pc->threads = g_new(PowerPCCPU *, cc->nr_threads);


@ -21,6 +21,7 @@ void ppc440_sdram_init(CPUPPCState *env, int nbanks,
hwaddr *ram_bases, hwaddr *ram_sizes,
int do_init);
void ppc4xx_ahb_init(CPUPPCState *env);
void ppc4xx_dma_init(CPUPPCState *env, int dcr_base);
void ppc460ex_pcie_init(CPUPPCState *env);
#endif /* PPC440_H */


@ -13,6 +13,7 @@
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "cpu.h"
#include "hw/hw.h"
#include "exec/address-spaces.h"
@ -802,6 +803,227 @@ void ppc4xx_ahb_init(CPUPPCState *env)
qemu_register_reset(ppc4xx_ahb_reset, ahb);
}
/*****************************************************************************/
/* DMA controller */
#define DMA0_CR_CE (1 << 31)
#define DMA0_CR_PW (1 << 26 | 1 << 25)
#define DMA0_CR_DAI (1 << 24)
#define DMA0_CR_SAI (1 << 23)
#define DMA0_CR_DEC (1 << 2)
enum {
DMA0_CR = 0x00,
DMA0_CT,
DMA0_SAH,
DMA0_SAL,
DMA0_DAH,
DMA0_DAL,
DMA0_SGH,
DMA0_SGL,
DMA0_SR = 0x20,
DMA0_SGC = 0x23,
DMA0_SLP = 0x25,
DMA0_POL = 0x26,
};
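/* Each of the four channels occupies a stride of 8 DCRs from dcr_base;
 * dcr_read_dma()/dcr_write_dma() below recover the channel as addr / 8
 * and the per-channel register as addr % 8.
 */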
typedef struct {
uint32_t cr;
uint32_t ct;
uint64_t sa;
uint64_t da;
uint64_t sg;
} PPC4xxDmaChnl;
typedef struct {
int base;
PPC4xxDmaChnl ch[4];
uint32_t sr;
} PPC4xxDmaState;
static uint32_t dcr_read_dma(void *opaque, int dcrn)
{
PPC4xxDmaState *dma = opaque;
uint32_t val = 0;
int addr = dcrn - dma->base;
int chnl = addr / 8;
switch (addr) {
case 0x00 ... 0x1f:
switch (addr % 8) {
case DMA0_CR:
val = dma->ch[chnl].cr;
break;
case DMA0_CT:
val = dma->ch[chnl].ct;
break;
case DMA0_SAH:
val = dma->ch[chnl].sa >> 32;
break;
case DMA0_SAL:
val = dma->ch[chnl].sa;
break;
case DMA0_DAH:
val = dma->ch[chnl].da >> 32;
break;
case DMA0_DAL:
val = dma->ch[chnl].da;
break;
case DMA0_SGH:
val = dma->ch[chnl].sg >> 32;
break;
case DMA0_SGL:
val = dma->ch[chnl].sg;
break;
}
break;
case DMA0_SR:
val = dma->sr;
break;
default:
qemu_log_mask(LOG_UNIMP, "%s: unimplemented register %x (%d, %x)\n",
__func__, dcrn, chnl, addr);
}
return val;
}
static void dcr_write_dma(void *opaque, int dcrn, uint32_t val)
{
PPC4xxDmaState *dma = opaque;
int addr = dcrn - dma->base;
int chnl = addr / 8;
switch (addr) {
case 0x00 ... 0x1f:
switch (addr % 8) {
case DMA0_CR:
dma->ch[chnl].cr = val;
if (val & DMA0_CR_CE) {
int count = dma->ch[chnl].ct & 0xffff;
if (count) {
int width, i, sidx, didx;
uint8_t *rptr, *wptr;
hwaddr rlen, wlen;
sidx = didx = 0;
width = 1 << ((val & DMA0_CR_PW) >> 25);
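/* The 2-bit PW field selects the transfer width: values 0..3 give
 * 1, 2, 4 or 8 byte units.
 */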
rlen = wlen = count * width; /* cpu_physical_memory_map() reads *plen */
rptr = cpu_physical_memory_map(dma->ch[chnl].sa, &rlen, 0);
wptr = cpu_physical_memory_map(dma->ch[chnl].da, &wlen, 1);
if (rptr && wptr) {
if (!(val & DMA0_CR_DEC) &&
val & DMA0_CR_SAI && val & DMA0_CR_DAI) {
/* optimise common case */
memmove(wptr, rptr, count * width);
sidx = didx = count * width;
} else {
/* do it the slow way */
for (sidx = didx = i = 0; i < count; i++) {
uint64_t v = ldn_le_p(rptr + sidx, width);
stn_le_p(wptr + didx, width, v);
if (val & DMA0_CR_SAI) {
sidx += width;
}
if (val & DMA0_CR_DAI) {
didx += width;
}
}
}
}
if (wptr) {
cpu_physical_memory_unmap(wptr, wlen, 1, didx);
}
if (rptr) {
cpu_physical_memory_unmap(rptr, rlen, 0, sidx);
}
}
}
break;
case DMA0_CT:
dma->ch[chnl].ct = val;
break;
case DMA0_SAH:
dma->ch[chnl].sa &= 0xffffffffULL;
dma->ch[chnl].sa |= (uint64_t)val << 32;
break;
case DMA0_SAL:
dma->ch[chnl].sa &= 0xffffffff00000000ULL;
dma->ch[chnl].sa |= val;
break;
case DMA0_DAH:
dma->ch[chnl].da &= 0xffffffffULL;
dma->ch[chnl].da |= (uint64_t)val << 32;
break;
case DMA0_DAL:
dma->ch[chnl].da &= 0xffffffff00000000ULL;
dma->ch[chnl].da |= val;
break;
case DMA0_SGH:
dma->ch[chnl].sg &= 0xffffffffULL;
dma->ch[chnl].sg |= (uint64_t)val << 32;
break;
case DMA0_SGL:
dma->ch[chnl].sg &= 0xffffffff00000000ULL;
dma->ch[chnl].sg |= val;
break;
}
break;
case DMA0_SR:
dma->sr &= ~val;
break;
default:
qemu_log_mask(LOG_UNIMP, "%s: unimplemented register %x (%d, %x)\n",
__func__, dcrn, chnl, addr);
}
}
static void ppc4xx_dma_reset(void *opaque)
{
PPC4xxDmaState *dma = opaque;
int dma_base = dma->base;
memset(dma, 0, sizeof(*dma));
dma->base = dma_base;
}
void ppc4xx_dma_init(CPUPPCState *env, int dcr_base)
{
PPC4xxDmaState *dma;
int i;
dma = g_malloc0(sizeof(*dma));
dma->base = dcr_base;
qemu_register_reset(&ppc4xx_dma_reset, dma);
for (i = 0; i < 4; i++) {
ppc_dcr_register(env, dcr_base + i * 8 + DMA0_CR,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + i * 8 + DMA0_CT,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + i * 8 + DMA0_SAH,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + i * 8 + DMA0_SAL,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + i * 8 + DMA0_DAH,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + i * 8 + DMA0_DAL,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + i * 8 + DMA0_SGH,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + i * 8 + DMA0_SGL,
dma, &dcr_read_dma, &dcr_write_dma);
}
ppc_dcr_register(env, dcr_base + DMA0_SR,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + DMA0_SGC,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + DMA0_SLP,
dma, &dcr_read_dma, &dcr_write_dma);
ppc_dcr_register(env, dcr_base + DMA0_POL,
dma, &dcr_read_dma, &dcr_write_dma);
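/* DMA0_SGC, DMA0_SLP and DMA0_POL have no cases in the handlers above,
 * so accesses land in the LOG_UNIMP defaults; registering them
 * presumably avoids an invalid-DCR exception for the guest.
 */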
}
/*****************************************************************************/
/* PCI Express controller */
/* FIXME: This is not complete and does not work, only implemented partially


@ -37,6 +37,8 @@
#include "hw/i2c/smbus.h"
#include "hw/usb/hcd-ehci.h"
#include <libfdt.h>
#define BINARY_DEVICE_TREE_FILE "canyonlands.dtb"
#define UBOOT_FILENAME "u-boot-sam460-20100605.bin"
/* to extract the official U-Boot bin from the updater: */
@ -67,6 +69,10 @@
*/
#define CPU_FREQ 1150000000
#define PLB_FREQ 230000000
#define OPB_FREQ 115000000
#define EBC_FREQ 115000000
#define UART_FREQ 11059200
#define SDRAM_NR_BANKS 4
/* FIXME: See u-boot.git 8ac41e, also fix in ppc440_uc.c */
@ -255,6 +261,7 @@ static int sam460ex_load_device_tree(hwaddr addr,
void *fdt;
uint32_t tb_freq = CPU_FREQ;
uint32_t clock_freq = CPU_FREQ;
int offset;
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
if (!filename) {
@ -308,6 +315,27 @@ static int sam460ex_load_device_tree(hwaddr addr,
qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "timebase-frequency",
tb_freq);
/* Remove cpm node if it exists (it is not emulated) */
offset = fdt_path_offset(fdt, "/cpm");
if (offset >= 0) {
fdt_nop_node(fdt, offset);
}
/* set serial port clocks */
offset = fdt_node_offset_by_compatible(fdt, -1, "ns16550");
while (offset >= 0) {
fdt_setprop_cell(fdt, offset, "clock-frequency", UART_FREQ);
offset = fdt_node_offset_by_compatible(fdt, offset, "ns16550");
}
/* some more clocks */
qemu_fdt_setprop_cell(fdt, "/plb", "clock-frequency",
PLB_FREQ);
qemu_fdt_setprop_cell(fdt, "/plb/opb", "clock-frequency",
OPB_FREQ);
qemu_fdt_setprop_cell(fdt, "/plb/opb/ebc", "clock-frequency",
EBC_FREQ);
rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr);
g_free(fdt);
ret = fdt_size;
@ -457,6 +485,7 @@ static void sam460ex_init(MachineState *machine)
object_property_set_bool(OBJECT(dev), true, "realized", NULL);
smbus_eeprom_init(i2c[0]->bus, 8, smbus_eeprom_buf, smbus_eeprom_size);
g_free(smbus_eeprom_buf);
i2c_create_slave(i2c[0]->bus, "m41t80", 0x68);
dev = sysbus_create_simple(TYPE_PPC4xx_I2C, 0x4ef600800, uic[0][3]);
i2c[1] = PPC4xx_I2C(dev);
@ -476,6 +505,9 @@ static void sam460ex_init(MachineState *machine)
/* MAL */
ppc4xx_mal_init(env, 4, 16, &uic[2][3]);
/* DMA */
ppc4xx_dma_init(env, 0x200);
/* 256K of L2 cache as memory */
ppc4xx_l2sram_init(env);
/* FIXME: remove this after fixing l2sram mapping in ppc440_uc.c? */


@ -137,7 +137,7 @@ static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
goto error;
}
return ICS_SIMPLE(obj);
return ICS_BASE(obj);
error:
error_propagate(errp, local_err);
@ -3962,6 +3962,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
mc->no_parallel = 1;
mc->default_boot_order = "";
mc->default_ram_size = 512 * MiB;
mc->default_display = "std";
mc->kvm_type = spapr_kvm_type;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
mc->pci_allow_0_address = true;
@ -4095,17 +4096,16 @@ static void spapr_machine_2_12_instance_options(MachineState *machine)
static void spapr_machine_2_12_class_options(MachineClass *mc)
{
sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
uint8_t mps;
spapr_machine_3_0_class_options(mc);
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_12);
if (kvmppc_hpt_needs_host_contiguous_pages()) {
mps = ctz64(qemu_getrampagesize());
} else {
mps = 34; /* allow everything up to 16GiB, i.e. everything */
}
smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = mps;
/* We depend on kvm_enabled() to choose a default value for the
* hpt-max-page-size capability. Of course we can't do it here
* because this is too early and the HW accelerator isn't initialized
* yet. Postpone this to machine init (see default_caps_with_cpu()).
*/
smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
}
DEFINE_SPAPR_MACHINE(2_12, "2.12", false);


@ -465,6 +465,19 @@ static sPAPRCapabilities default_caps_with_cpu(sPAPRMachineState *spapr,
caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
}
/* This is for pseries-2.12 and older */
if (smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] == 0) {
uint8_t mps;
if (kvmppc_hpt_needs_host_contiguous_pages()) {
mps = ctz64(qemu_getrampagesize());
} else {
mps = 34; /* allow everything up to 16GiB, i.e. everything */
}
caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = mps;
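/* Worked example: with 16 MiB hugepages backing guest RAM,
 * qemu_getrampagesize() returns 1 << 24, so ctz64() yields mps = 24
 * and HPT page sizes up to 16 MiB are advertised.
 */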
}
return caps;
}


@ -6,6 +6,7 @@ common-obj-$(CONFIG_CADENCE) += cadence_ttc.o
common-obj-$(CONFIG_DS1338) += ds1338.o
common-obj-$(CONFIG_HPET) += hpet.o
common-obj-$(CONFIG_I8254) += i8254_common.o i8254.o
common-obj-$(CONFIG_M41T80) += m41t80.o
common-obj-$(CONFIG_M48T59) += m48t59.o
ifeq ($(CONFIG_ISA_BUS),y)
common-obj-$(CONFIG_M48T59) += m48t59-isa.o

hw/timer/m41t80.c (new file, 117 lines)

@ -0,0 +1,117 @@
/*
* M41T80 serial rtc emulation
*
* Copyright (c) 2018 BALATON Zoltan
*
* This work is licensed under the GNU GPL license version 2 or later.
*
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "qemu/bcd.h"
#include "hw/i2c/i2c.h"
#define TYPE_M41T80 "m41t80"
#define M41T80(obj) OBJECT_CHECK(M41t80State, (obj), TYPE_M41T80)
typedef struct M41t80State {
I2CSlave parent_obj;
int8_t addr;
} M41t80State;
static void m41t80_realize(DeviceState *dev, Error **errp)
{
M41t80State *s = M41T80(dev);
s->addr = -1;
}
static int m41t80_send(I2CSlave *i2c, uint8_t data)
{
M41t80State *s = M41T80(i2c);
if (s->addr < 0) {
s->addr = data;
} else {
s->addr++;
}
return 0;
}
static int m41t80_recv(I2CSlave *i2c)
{
M41t80State *s = M41T80(i2c);
struct tm now;
qemu_timeval tv;
if (s->addr < 0) {
s->addr = 0;
}
if (s->addr >= 1 && s->addr <= 7) {
qemu_get_timedate(&now, -1);
}
switch (s->addr++) {
case 0:
qemu_gettimeofday(&tv);
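/* register 0 is hundredths of a second in BCD: e.g. tv_usec == 250000
 * reads back as 0x25 */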
return to_bcd(tv.tv_usec / 10000);
case 1:
return to_bcd(now.tm_sec);
case 2:
return to_bcd(now.tm_min);
case 3:
return to_bcd(now.tm_hour);
case 4:
return to_bcd(now.tm_wday);
case 5:
return to_bcd(now.tm_mday);
case 6:
return to_bcd(now.tm_mon + 1);
case 7:
return to_bcd(now.tm_year % 100);
case 8 ... 19:
qemu_log_mask(LOG_UNIMP, "%s: unimplemented register: %d\n",
__func__, s->addr - 1);
return 0;
default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid register: %d\n",
__func__, s->addr - 1);
return 0;
}
}
static int m41t80_event(I2CSlave *i2c, enum i2c_event event)
{
M41t80State *s = M41T80(i2c);
if (event == I2C_START_SEND) {
s->addr = -1;
}
return 0;
}
static void m41t80_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
dc->realize = m41t80_realize;
sc->send = m41t80_send;
sc->recv = m41t80_recv;
sc->event = m41t80_event;
}
static const TypeInfo m41t80_info = {
.name = TYPE_M41T80,
.parent = TYPE_I2C_SLAVE,
.instance_size = sizeof(M41t80State),
.class_init = m41t80_class_init,
};
static void m41t80_register_types(void)
{
type_register_static(&m41t80_info);
}
type_init(m41t80_register_types)
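/* Usage sketch: a board instantiates the RTC with a single call, as the
 * sam460ex hunk above does at the chip's usual address 0x68 (the
 * "i2c_bus" variable here is hypothetical):
 *
 *     i2c_create_slave(i2c_bus, "m41t80", 0x68);
 */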


@ -46,7 +46,8 @@ typedef struct PPC4xxI2CState {
qemu_irq irq;
MemoryRegion iomem;
bitbang_i2c_interface *bitbang;
uint8_t mdata;
int mdidx;
uint8_t mdata[4];
uint8_t lmadr;
uint8_t hmadr;
uint8_t cntl;


@ -65,10 +65,11 @@ typedef struct XICSFabric XICSFabric;
struct ICPStateClass {
DeviceClass parent_class;
void (*realize)(ICPState *icp, Error **errp);
DeviceRealize parent_realize;
DeviceReset parent_reset;
void (*pre_save)(ICPState *icp);
int (*post_load)(ICPState *icp, int version_id);
void (*reset)(ICPState *icp);
void (*synchronize_state)(ICPState *icp);
};
@ -114,7 +115,9 @@ struct PnvICPState {
struct ICSStateClass {
DeviceClass parent_class;
void (*realize)(ICSState *s, Error **errp);
DeviceRealize parent_realize;
DeviceReset parent_reset;
void (*pre_save)(ICSState *s);
int (*post_load)(ICSState *s, int version_id);
void (*reject)(ICSState *s, uint32_t irq);


@ -65,99 +65,23 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
return -1;
}
static int do_store_exclusive(CPUPPCState *env)
{
target_ulong addr;
target_ulong page_addr;
target_ulong val, val2 __attribute__((unused)) = 0;
int flags;
int segv = 0;
addr = env->reserve_ea;
page_addr = addr & TARGET_PAGE_MASK;
start_exclusive();
mmap_lock();
flags = page_get_flags(page_addr);
if ((flags & PAGE_READ) == 0) {
segv = 1;
} else {
int reg = env->reserve_info & 0x1f;
int size = env->reserve_info >> 5;
int stored = 0;
if (addr == env->reserve_addr) {
switch (size) {
case 1: segv = get_user_u8(val, addr); break;
case 2: segv = get_user_u16(val, addr); break;
case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
case 8: segv = get_user_u64(val, addr); break;
case 16: {
segv = get_user_u64(val, addr);
if (!segv) {
segv = get_user_u64(val2, addr + 8);
}
break;
}
#endif
default: abort();
}
if (!segv && val == env->reserve_val) {
val = env->gpr[reg];
switch (size) {
case 1: segv = put_user_u8(val, addr); break;
case 2: segv = put_user_u16(val, addr); break;
case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
case 8: segv = put_user_u64(val, addr); break;
case 16: {
if (val2 == env->reserve_val2) {
if (msr_le) {
val2 = val;
val = env->gpr[reg+1];
} else {
val2 = env->gpr[reg+1];
}
segv = put_user_u64(val, addr);
if (!segv) {
segv = put_user_u64(val2, addr + 8);
}
}
break;
}
#endif
default: abort();
}
if (!segv) {
stored = 1;
}
}
}
env->crf[0] = (stored << 1) | xer_so;
env->reserve_addr = (target_ulong)-1;
}
if (!segv) {
env->nip += 4;
}
mmap_unlock();
end_exclusive();
return segv;
}
void cpu_loop(CPUPPCState *env)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
target_siginfo_t info;
int trapnr;
int trapnr, sig;
target_ulong ret;
for(;;) {
bool arch_interrupt;
cpu_exec_start(cs);
trapnr = cpu_exec(cs);
cpu_exec_end(cs);
process_queued_cpu_work(cs);
switch(trapnr) {
arch_interrupt = true;
switch (trapnr) {
case POWERPC_EXCP_NONE:
/* Just go on */
break;
@ -524,26 +448,15 @@ void cpu_loop(CPUPPCState *env)
}
env->gpr[3] = ret;
break;
case POWERPC_EXCP_STCX:
if (do_store_exclusive(env)) {
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->nip;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
break;
case EXCP_DEBUG:
{
int sig;
sig = gdb_handlesig(cs, TARGET_SIGTRAP);
if (sig) {
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
sig = gdb_handlesig(cs, TARGET_SIGTRAP);
if (sig) {
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
} else {
arch_interrupt = false;
}
break;
case EXCP_INTERRUPT:
@ -551,12 +464,22 @@ void cpu_loop(CPUPPCState *env)
break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
arch_interrupt = false;
break;
default:
cpu_abort(cs, "Unknown exception 0x%x. Aborting\n", trapnr);
break;
}
process_pending_signals(env);
/* Most of the traps imply a transition through kernel mode,
* which implies an REI instruction has been executed. Which
* means that RX and LOCK_ADDR should be cleared. But there
* are a few exceptions for traps internal to QEMU.
*/
if (arch_interrupt) {
env->reserve_addr = -1;
}
}
}


@ -196,7 +196,6 @@ enum {
/* QEMU exceptions: special cases we want to stop translation */
POWERPC_EXCP_SYNC = 0x202, /* context synchronizing instruction */
POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
POWERPC_EXCP_STCX = 0x204 /* Conditional stores in user mode */
};
/* Exceptions error codes */
@ -994,10 +993,6 @@ struct CPUPPCState {
/* Reservation value */
target_ulong reserve_val;
target_ulong reserve_val2;
/* Reservation store address */
target_ulong reserve_ea;
/* Reserved store source register and size */
target_ulong reserve_info;
/* Those ones are used in supervisor mode only */
/* machine state register */
@ -1015,6 +1010,9 @@ struct CPUPPCState {
/* Next instruction pointer */
target_ulong nip;
/* High part of 128-bit helper return. */
uint64_t retxh;
int access_type; /* when a memory exception occurs, the access
type is stored here */


@ -22,7 +22,7 @@
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "helper_regs.h"
//#define DEBUG_OP
@ -1198,3 +1198,19 @@ void helper_book3s_msgsnd(target_ulong rb)
qemu_mutex_unlock_iothread();
}
#endif
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
CPUPPCState *env = cs->env_ptr;
uint32_t insn;
/* Restore state and reload the insn we executed, for filling in DSISR. */
cpu_restore_state(cs, retaddr, true);
insn = cpu_ldl_code(env, env->nip);
cs->exception_index = POWERPC_EXCP_ALIGN;
env->error_code = insn & 0x03FF0000;
cpu_loop_exit(cs);
}


@ -274,6 +274,7 @@ static inline void float_inexact_excp(CPUPPCState *env)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
env->fpscr |= 1 << FPSCR_FI;
env->fpscr |= 1 << FPSCR_XX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
@ -533,6 +534,7 @@ static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
int status = get_float_exception_flags(&env->fp_status);
bool inexact_happened = false;
if (status & float_flag_divbyzero) {
float_zero_divide_excp(env, raddr);
@ -542,6 +544,12 @@ static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
float_underflow_excp(env);
} else if (status & float_flag_inexact) {
float_inexact_excp(env);
inexact_happened = true;
}
/* if the inexact flag was not set */
if (inexact_happened == false) {
env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
}
if (cs->exception_index == POWERPC_EXCP_PROGRAM &&


@ -799,3 +799,14 @@ DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
DEF_HELPER_1(tbegin, void, env)
DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128)
DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG,
void, env, tl, i64, i64, i32)
DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG,
void, env, tl, i64, i64, i32)
DEF_HELPER_5(stqcx_le_parallel, i32, env, tl, i64, i64, i32)
DEF_HELPER_5(stqcx_be_parallel, i32, env, tl, i64, i64, i32)
#endif


@ -252,4 +252,9 @@ static inline void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);
/* Raise a data fault alignment exception for the specified virtual address */
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
#endif /* PPC_INTERNAL_H */


@ -248,107 +248,25 @@ static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
#if defined(TARGET_PPC64)
static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
struct kvm_ppc_smmu_info *info)
static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
{
CPUPPCState *env = &cpu->env;
CPUState *cs = CPU(cpu);
memset(info, 0, sizeof(*info));
/* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
* need to "guess" what the supported page sizes are.
*
* For that to work we make a few assumptions:
*
* - Check whether we are running "PR" KVM which only supports 4K
* and 16M pages, but supports them regardless of the backing
* store characteristics. We also don't support 1T segments.
*
* This is safe as if HV KVM ever supports that capability or PR
* KVM grows support for more page/segment sizes, those versions
* will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
* will not hit this fallback
*
* - Else we are running HV KVM. This means we only support page
* sizes that fit in the backing store. Additionally we only
* advertise 64K pages if the processor is ARCH 2.06 and we assume
* P7 encodings for the SLB and hash table. Here too, we assume
* support for any newer processor will mean a kernel that
* implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
* this fallback.
*/
if (kvmppc_is_pr(cs->kvm_state)) {
/* No flags */
info->flags = 0;
info->slb_size = 64;
/* Standard 4k base page size segment */
info->sps[0].page_shift = 12;
info->sps[0].slb_enc = 0;
info->sps[0].enc[0].page_shift = 12;
info->sps[0].enc[0].pte_enc = 0;
/* Standard 16M large page size segment */
info->sps[1].page_shift = 24;
info->sps[1].slb_enc = SLB_VSID_L;
info->sps[1].enc[0].page_shift = 24;
info->sps[1].enc[0].pte_enc = 0;
} else {
int i = 0;
/* HV KVM has backing store size restrictions */
info->flags = KVM_PPC_PAGE_SIZES_REAL;
if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
info->flags |= KVM_PPC_1T_SEGMENTS;
}
if (env->mmu_model == POWERPC_MMU_2_06 ||
env->mmu_model == POWERPC_MMU_2_07) {
info->slb_size = 32;
} else {
info->slb_size = 64;
}
/* Standard 4k base page size segment */
info->sps[i].page_shift = 12;
info->sps[i].slb_enc = 0;
info->sps[i].enc[0].page_shift = 12;
info->sps[i].enc[0].pte_enc = 0;
i++;
/* 64K on MMU 2.06 and later */
if (env->mmu_model == POWERPC_MMU_2_06 ||
env->mmu_model == POWERPC_MMU_2_07) {
info->sps[i].page_shift = 16;
info->sps[i].slb_enc = 0x110;
info->sps[i].enc[0].page_shift = 16;
info->sps[i].enc[0].pte_enc = 1;
i++;
}
/* Standard 16M large page size segment */
info->sps[i].page_shift = 24;
info->sps[i].slb_enc = SLB_VSID_L;
info->sps[i].enc[0].page_shift = 24;
info->sps[i].enc[0].pte_enc = 0;
}
}
static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
{
CPUState *cs = CPU(cpu);
int ret;
if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
if (ret == 0) {
return;
}
assert(kvm_state != NULL);
if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
error_setg(errp, "KVM doesn't expose the MMU features it supports");
error_append_hint(errp, "Consider switching to a newer KVM\n");
return;
}
kvm_get_fallback_smmu_info(cpu, info);
ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
if (ret == 0) {
return;
}
error_setg_errno(errp, -ret,
"KVM failed to provide the MMU features it supports");
}
struct ppc_radix_page_info *kvm_get_radix_page_info(void)
@ -408,14 +326,13 @@ target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
static struct kvm_ppc_smmu_info smmu_info;
if (!kvm_enabled()) {
return false;
}
kvm_get_smmu_info(cpu, &smmu_info);
kvm_get_smmu_info(&smmu_info, &error_fatal);
return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
}
@ -423,13 +340,18 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
struct kvm_ppc_smmu_info smmu_info;
int iq, ik, jq, jk;
Error *local_err = NULL;
/* For now, we only have anything to check on hash64 MMUs */
if (!cpu->hash64_opts || !kvm_enabled()) {
return;
}
kvm_get_smmu_info(cpu, &smmu_info);
kvm_get_smmu_info(&smmu_info, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
&& !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
@ -2168,7 +2090,7 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
/* Find the largest hardware supported page size that's less than
* or equal to the (logical) backing page size of guest RAM */
kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
kvm_get_smmu_info(&info, &error_fatal);
rampagesize = qemu_getrampagesize();
best_page_shift = 0;


@ -21,9 +21,9 @@
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg.h"
#include "internal.h"
//#define DEBUG_OP
@ -215,6 +215,76 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
return i;
}
#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128)
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
uint32_t opidx)
{
Int128 ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
env->retxh = int128_gethi(ret);
return int128_getlo(ret);
}
uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
uint32_t opidx)
{
Int128 ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
env->retxh = int128_gethi(ret);
return int128_getlo(ret);
}
void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
uint64_t lo, uint64_t hi, uint32_t opidx)
{
Int128 val = int128_make128(lo, hi);
helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}
void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
uint64_t lo, uint64_t hi, uint32_t opidx)
{
Int128 val = int128_make128(lo, hi);
helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
uint64_t new_lo, uint64_t new_hi,
uint32_t opidx)
{
bool success = false;
if (likely(addr == env->reserve_addr)) {
Int128 oldv, cmpv, newv;
cmpv = int128_make128(env->reserve_val2, env->reserve_val);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
opidx, GETPC());
success = int128_eq(oldv, cmpv);
}
env->reserve_addr = -1;
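/* The returned CR0 field folds in the sticky SO bit and sets EQ iff
 * the conditional store succeeded. */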
return env->so + success * CRF_EQ_BIT;
}
uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
uint64_t new_lo, uint64_t new_hi,
uint32_t opidx)
{
bool success = false;
if (likely(addr == env->reserve_addr)) {
Int128 oldv, cmpv, newv;
cmpv = int128_make128(env->reserve_val2, env->reserve_val);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
opidx, GETPC());
success = int128_eq(oldv, cmpv);
}
env->reserve_addr = -1;
return env->so + success * CRF_EQ_BIT;
}
#endif
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)


@ -2388,23 +2388,6 @@ static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
}
}
static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
{
TCGLabel *l1 = gen_new_label();
TCGv t0 = tcg_temp_new();
TCGv_i32 t1, t2;
tcg_gen_andi_tl(t0, EA, mask);
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
t2 = tcg_const_i32(ctx->opcode & 0x03FF0000);
gen_update_nip(ctx, ctx->base.pc_next - 4);
gen_helper_raise_exception_err(cpu_env, t1, t2);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
gen_set_label(l1);
tcg_temp_free(t0);
}
static inline void gen_align_no_le(DisasContext *ctx)
{
gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
@ -2607,7 +2590,7 @@ static void gen_ld(DisasContext *ctx)
static void gen_lq(DisasContext *ctx)
{
int ra, rd;
TCGv EA;
TCGv EA, hi, lo;
/* lq is a legal user mode instruction starting in ISA 2.07 */
bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
@ -2633,16 +2616,35 @@ static void gen_lq(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F);
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
/* Note that the low part is always in RD+1, even in LE mode. */
lo = cpu_gpr[rd + 1];
hi = cpu_gpr[rd];
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
#ifdef CONFIG_ATOMIC128
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
} else {
tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
}
tcg_temp_free_i32(oi);
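/* The helper returns the low doubleword directly; the high doubleword
 * comes back through the new env->retxh field (see the cpu.h hunk
 * above). */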
tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
#else
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
#endif
} else if (ctx->le_mode) {
tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
} else {
gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
}
@ -2741,6 +2743,7 @@ static void gen_std(DisasContext *ctx)
if ((ctx->opcode & 0x3) == 0x2) { /* stq */
bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
TCGv hi, lo;
if (!(ctx->insns_flags & PPC_64BX)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
@@ -2764,20 +2767,38 @@ static void gen_std(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
/* Note that the low part is always in RS+1, even in LE mode. */
lo = cpu_gpr[rs + 1];
hi = cpu_gpr[rs];
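/* As with lq: one 16-byte atomic helper under parallel translation,
   else two 8-byte stores ordered for LE or BE. */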
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
#ifdef CONFIG_ATOMIC128
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
} else {
tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
}
tcg_temp_free_i32(oi);
#else
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
#endif
} else if (ctx->le_mode) {
tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ);
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ);
} else {
tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ);
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
} else {
/* std / stdu */
if (Rc(ctx->opcode)) {
if (unlikely(rA(ctx->opcode) == 0)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
@@ -3032,23 +3053,24 @@ static void gen_isync(DisasContext *ctx)
#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
static void gen_load_locked(DisasContext *ctx, TCGMemOp memop)
{
TCGv gpr = cpu_gpr[rD(ctx->opcode)];
TCGv t0 = tcg_temp_new();
gen_set_access_type(ctx, ACCESS_RES);
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
tcg_gen_mov_tl(cpu_reserve, t0);
tcg_gen_mov_tl(cpu_reserve_val, gpr);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
tcg_temp_free(t0);
}
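/* The MO_ALIGN flag replaces the old explicit gen_check_align() call;
   unaligned addresses now fault through the memory subsystem instead. */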
#define LARX(name, memop) \
static void gen_##name(DisasContext *ctx) \
{ \
gen_load_locked(ctx, memop); \
}
/* lwarx */
@@ -3056,134 +3078,239 @@ LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop,
TCGv EA, TCGCond cond, int addend)
{
TCGv t = tcg_temp_new();
TCGv t2 = tcg_temp_new();
TCGv u = tcg_temp_new();
tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
tcg_gen_addi_tl(u, t, addend);
/* E.g. for fetch and increment bounded... */
/* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
tcg_gen_movcond_tl(cond, u, t, t2, u, t);
tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
/* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
tcg_temp_free(t);
tcg_temp_free(t2);
tcg_temp_free(u);
}
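/* lwat/ldat: EA comes from RA, the operand from RT+1, and RT receives
   the value previously in memory. */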
static void gen_ld_atomic(DisasContext *ctx, TCGMemOp memop)
{
uint32_t gpr_FC = FC(ctx->opcode);
TCGv EA = tcg_temp_new();
int rt = rD(ctx->opcode);
bool need_serial;
TCGv src, dst;
gen_addr_register(ctx, EA);
dst = cpu_gpr[rt];
src = cpu_gpr[(rt + 1) & 31];
need_serial = false;
memop |= MO_ALIGN;
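/* FC values with a matching TCG atomic op are emitted inline; the rest
   are either emulated when single-threaded or restarted serialized. */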
switch (gpr_FC) {
case 0: /* Fetch and add */
tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 1: /* Fetch and xor */
tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 2: /* Fetch and or */
tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 3: /* Fetch and 'and' */
tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 4: /* Fetch and max unsigned */
tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 5: /* Fetch and max signed */
tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 6: /* Fetch and min unsigned */
tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 7: /* Fetch and min signed */
tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 8: /* Swap */
tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
break;
case 16: /* Compare and swap not equal */
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
need_serial = true;
} else {
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
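/* Single-threaded CAS-not-equal: if mem != RT+1 (32-bit forms compare
   zero-extended), store RT+2; RT always receives the old value. */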
tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
tcg_gen_mov_tl(t1, src);
} else {
tcg_gen_ext32u_tl(t1, src);
}
tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
cpu_gpr[(rt + 2) & 31], t0);
tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
tcg_gen_mov_tl(dst, t0);
tcg_temp_free(t0);
tcg_temp_free(t1);
}
break;
case 24: /* Fetch and increment bounded */
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
need_serial = true;
} else {
gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
}
break;
case 25: /* Fetch and increment equal */
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
need_serial = true;
} else {
gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
}
break;
case 28: /* Fetch and decrement bounded */
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
need_serial = true;
} else {
gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
}
break;
default:
/* invoke data storage error handler */
gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
}
tcg_temp_free(EA);
if (need_serial) {
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
}
static void gen_lwat(DisasContext *ctx)
{
gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}
#ifdef TARGET_PPC64
static void gen_ldat(DisasContext *ctx)
{
gen_ld_atomic(ctx, DEF_MEMOP(MO_Q));
}
#endif
static void gen_st_atomic(DisasContext *ctx, TCGMemOp memop)
{
uint32_t gpr_FC = FC(ctx->opcode);
TCGv EA = tcg_temp_new();
TCGv src, discard;
gen_addr_register(ctx, EA);
src = cpu_gpr[rD(ctx->opcode)];
discard = tcg_temp_new();
memop |= MO_ALIGN;
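/* TCG atomic ops always produce the fetched value; the store-only AT
   forms sink it into 'discard'. */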
switch (gpr_FC) {
case 0: /* add and Store */
tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
break;
case 1: /* xor and Store */
tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
break;
case 2: /* Or and Store */
tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
break;
case 3: /* 'and' and Store */
tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
break;
case 4: /* Store max unsigned */
tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
break;
case 5: /* Store max signed */
tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
break;
case 6: /* Store min unsigned */
tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
break;
case 7: /* Store min signed */
tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
break;
case 24: /* Store twin */
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
} else {
TCGv t = tcg_temp_new();
TCGv t2 = tcg_temp_new();
TCGv s = tcg_temp_new();
TCGv s2 = tcg_temp_new();
TCGv ea_plus_s = tcg_temp_new();
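/* Store twin: only when the two adjacent words match does each get
   replaced by RS; otherwise memory is left unchanged. */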
tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
tcg_temp_free(ea_plus_s);
tcg_temp_free(s2);
tcg_temp_free(s);
tcg_temp_free(t2);
tcg_temp_free(t);
}
break;
default:
/* invoke data storage error handler */
gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
}
tcg_temp_free(discard);
tcg_temp_free(EA);
}
static void gen_stwat(DisasContext *ctx)
{
gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}
#ifdef TARGET_PPC64
static void gen_stdat(DisasContext *ctx)
{
gen_st_atomic(ctx, DEF_MEMOP(MO_Q));
}
#endif
static void gen_conditional_store(DisasContext *ctx, TCGMemOp memop)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
TCGv t0 = tcg_temp_new();
int reg = rS(ctx->opcode);
gen_set_access_type(ctx, ACCESS_RES);
gen_addr_reg_index(ctx, t0);
tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
tcg_temp_free(t0);
t0 = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
@@ -3206,21 +3333,11 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
gen_set_label(l2);
tcg_gen_movi_tl(cpu_reserve, -1);
}
#endif
#define STCX(name, memop) \
static void gen_##name(DisasContext *ctx) \
{ \
gen_conditional_store(ctx, memop); \
}
STCX(stbcx_, DEF_MEMOP(MO_UB))
@@ -3236,9 +3353,8 @@ STCX(stdcx_, DEF_MEMOP(MO_Q))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
{
int rd = rD(ctx->opcode);
TCGv EA, hi, lo;
if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
(rd == rB(ctx->opcode)))) {
@@ -3247,73 +3363,125 @@ static void gen_lqarx(DisasContext *ctx)
}
gen_set_access_type(ctx, ACCESS_RES);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
/* Note that the low part is always in RD+1, even in LE mode. */
lo = cpu_gpr[rd + 1];
hi = cpu_gpr[rd];
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
#ifdef CONFIG_ATOMIC128
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
ctx->mem_idx));
gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
} else {
tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
ctx->mem_idx));
gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
}
tcg_temp_free_i32(oi);
tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
#else
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
tcg_temp_free(EA);
return;
#endif
} else if (ctx->le_mode) {
tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
tcg_gen_mov_tl(cpu_reserve, EA);
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
} else {
tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16);
tcg_gen_mov_tl(cpu_reserve, EA);
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
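/* reserve_val/reserve_val2 hold the expected hi/lo halves that a later
   stqcx. compares against before storing. */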
}
/* stqcx. */
static void gen_stqcx_(DisasContext *ctx)
{
int rs = rS(ctx->opcode);
TCGv EA, hi, lo;
if (unlikely(rs & 1)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
gen_set_access_type(ctx, ACCESS_RES);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
/* Note that the low part is always in RS+1, even in LE mode. */
lo = cpu_gpr[rs + 1];
hi = cpu_gpr[rs];
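/* Parallel translation delegates to a 128-bit cmpxchg helper; the
   serial path checks the reservation inline below. */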
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
#ifdef CONFIG_ATOMIC128
if (ctx->le_mode) {
gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi);
} else {
gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi);
}
#else
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
#endif
tcg_temp_free(EA);
tcg_temp_free_i32(oi);
} else {
TCGLabel *lab_fail = gen_new_label();
TCGLabel *lab_over = gen_new_label();
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
tcg_temp_free(EA);
gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
? offsetof(CPUPPCState, reserve_val2)
: offsetof(CPUPPCState, reserve_val)));
tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
tcg_gen_addi_i64(t0, cpu_reserve, 8);
gen_qemu_ld64_i64(ctx, t0, t0);
tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
? offsetof(CPUPPCState, reserve_val)
: offsetof(CPUPPCState, reserve_val2)));
tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
/* Success */
gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
tcg_gen_addi_i64(t0, cpu_reserve, 8);
gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
tcg_gen_br(lab_over);
gen_set_label(lab_fail);
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
gen_set_label(lab_over);
tcg_gen_movi_tl(cpu_reserve, -1);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
}
}
#endif /* defined(TARGET_PPC64) */
/* sync */
@@ -4636,8 +4804,8 @@ static void gen_eciwx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_EXT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
DEF_MEMOP(MO_UL | MO_ALIGN));
tcg_temp_free(t0);
}
@@ -4649,8 +4817,8 @@ static void gen_ecowx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_EXT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
DEF_MEMOP(MO_UL | MO_ALIGN));
tcg_temp_free(t0);
}
@@ -6886,7 +7054,7 @@ GEN_HANDLER(stop##u, opc, 0xFF, 0xFF, 0x00000000, type),
#define GEN_STUX(name, stop, opc2, opc3, type) \
GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type),
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),
#define GEN_STS(name, stop, op, type) \
GEN_ST(name, stop, op | 0x20, type) \
GEN_STU(name, stop, op | 0x21, type) \
@@ -7314,6 +7482,7 @@ static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
DisasContext *ctx = container_of(dcbase, DisasContext, base);
gen_debug_exception(ctx);
dcbase->is_jmp = DISAS_NORETURN;
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order for it to be
properly cleared -- thus we increment the PC here so that


@@ -10457,6 +10457,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = ppc_cpu_set_pc;
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
cc->do_unaligned_access = ppc_cpu_do_unaligned_access;
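/* With gen_check_align() gone from the translator, alignment faults
   from MO_ALIGN accesses are delivered through this hook. */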
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = ppc_cpu_handle_mmu_fault;
#else