ppc patch queue for 2016-03-24

Accumulated patches for target-ppc, pseries machine type and related
 devices.
 
 * Preliminary patches from BenH & Cédric Le Goater's powernv code
     * We don't want the full machine type before 2.7
     * Adding some of the SPRs also fixes migration corner cases for
       spapr (when qemu has no knowledge of the registers, they're
       obviously not migrated)
     * We include some patches that aren't strictly fixes, but make
       applying the others easier, and they're low risk
 * Fix to buffer management which significantly improves throughput in
   the spapr-llan virtual network device
 * Start with 64-bit mode enabled on spapr.  This is the way it's
   supposed to be but we broke it a while back and didn't notice
   because Linux guests cope anyway.
     * Picked up by kvm-unit-tests
     * Still some bugs here that I'm working on
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJW8202AAoJEGw4ysog2bOSHgkP/RyHdyxjiQzzbQO9iVo37tvJ
 toJLMN771Eaxy9jizejHZtqwGuxHipHMLDs/lKqZahe5UFoUIn6zeNpSepbhRuDT
 95UmPgmLq3PPZpBWF38uMKSHFXpcudkwMCEn7G/nKyTAEc+CXo8sGJ+71rI0CKL7
 K0hMCjArkDqJHaxbGpSIB/OnbMEEFEd8aB3MNwLAPaKDP34Yjr7xvLodTHtye7fX
 6F2PlkAxzlreFfkSx6/3/omoVBOn0K6HgJRW15t5mhE9+y4JdhzquvSnrh/+96Cu
 JLKMo9HUKdEr15amjp8vEgO3Qq5T9RPw3snCSU0U6zGWKMXhNjnhbJrjsu2S8ayH
 zKSF96WbJJxSBV1Imd0Co5vZ0VG0ODNRk3H+fvCPFtHu6x0TJxKz7tEbCCOyzeMZ
 Kxsnq2nBVSji1jbBcMVtQT2T9Azoh6CHE3Mf2398Lk7Iv3cC+W2HNDstf6FJ5ITk
 4pTeqF2x3wkxRRmTO25cJHEO1v///xQR6EE/BUgLmq5fGDZjk0/plZskgIOjRxOO
 svGRlWQu5lPYNoitDEOEWEfw73kYGZqAk8zzQs9q0HKqOuPavN0WQbaz+WPw7kgt
 crK7nyFsrixiwiOD3ZYtQLkKnrRKndlBBw4NRtHbw78MIUj+3Pd9ILzi/vHHUwjn
 rXT/XpFy8Daf6HtXyuS6
 =AuUu
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.6-20160324' into staging

ppc patch queue for 2016-03-24

Accumulated patches for target-ppc, pseries machine type and related
devices.

* Preliminary patches from BenH & Cédric Le Goater's powernv code
    * We don't want the full machine type before 2.7
    * Adding some of the SPRs also fixes migration corner cases for
      spapr (when qemu has no knowledge of the registers, they're
      obviously not migrated)
    * We include some patches that aren't strictly fixes, but make
      applying the others easier, and they're low risk
* Fix to buffer management which significantly improves throughput in
  the spapr-llan virtual network device
* Start with 64-bit mode enabled on spapr.  This is the way it's
  supposed to be but we broke it a while back and didn't notice
  because Linux guests cope anyway.
    * Picked up by kvm-unit-tests
    * Still some bugs here that I'm working on

# gpg: Signature made Thu 24 Mar 2016 04:29:42 GMT using RSA key ID 20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.6-20160324:
  ppc: move POWER8 Book4 regs in their own routine
  hw/net/spapr_llan: Enable the RX buffer pools by default for new machines
  hw/net/spapr_llan: Fix receive buffer handling for better performance
  hw/net/spapr_llan: Extract rx buffer code into separate functions
  ppc: A couple more dummy POWER8 Book4 regs
  ppc: Add dummy CIABR SPR
  ppc: Add POWER8 IAMR register
  ppc: Fix writing to AMR/UAMOR
  ppc: Initialize AMOR in PAPR mode
  ppc: Add dummy SPR_IC for POWER8
  ppc: Create cpu_ppc_set_papr() helper
  ppc: Add a bunch of hypervisor SPRs to Book3s
  ppc: Add macros to register hypervisor mode SPRs
  ppc: Update SPR definitions
  spapr/target-ppc/kvm: Only add hcall-instructions if KVM supports it
  ppc64: set MSR_SF bit

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
master
Peter Maydell 2016-03-24 15:22:39 +00:00
commit 490dda053e
6 changed files with 590 additions and 99 deletions

View File

@ -45,6 +45,10 @@
#define DPRINTF(fmt...)
#endif
/* Compatibility flags for migration */
#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT 0
#define SPAPRVLAN_FLAG_RX_BUF_POOLS (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT)
/*
* Virtual LAN device
*/
@ -86,6 +90,15 @@ typedef uint64_t vlan_bd_t;
#define VIO_SPAPR_VLAN_DEVICE(obj) \
OBJECT_CHECK(VIOsPAPRVLANDevice, (obj), TYPE_VIO_SPAPR_VLAN_DEVICE)
#define RX_POOL_MAX_BDS 4096
#define RX_MAX_POOLS 5
typedef struct {
int32_t bufsize;
int32_t count;
vlan_bd_t bds[RX_POOL_MAX_BDS];
} RxBufPool;
typedef struct VIOsPAPRVLANDevice {
VIOsPAPRDevice sdev;
NICConf nicconf;
@ -94,6 +107,8 @@ typedef struct VIOsPAPRVLANDevice {
target_ulong buf_list;
uint32_t add_buf_ptr, use_buf_ptr, rx_bufs;
target_ulong rxq_ptr;
uint32_t compat_flags; /* Compatibility flags for migration */
RxBufPool *rx_pool[RX_MAX_POOLS]; /* Receive buffer descriptor pools */
} VIOsPAPRVLANDevice;
static int spapr_vlan_can_receive(NetClientState *nc)
@ -103,6 +118,73 @@ static int spapr_vlan_can_receive(NetClientState *nc)
return (dev->isopen && dev->rx_bufs > 0);
}
/**
 * Get buffer descriptor from one of our receive buffer pools
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(VIOsPAPRVLANDevice *dev,
                                                size_t size)
{
    RxBufPool *rxp = NULL;
    vlan_bd_t bd;
    int i;

    /* Look for the first pool that has a buffer available and whose
     * buffers are big enough for the frame plus 8 bytes of header.
     */
    for (i = 0; i < RX_MAX_POOLS; i++) {
        if (dev->rx_pool[i]->count > 0 &&
            dev->rx_pool[i]->bufsize >= size + 8) {
            rxp = dev->rx_pool[i];
            break;
        }
    }
    if (!rxp) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    DPRINTF("Found buffer: pool=%d count=%d rxbufs=%d\n", i,
            rxp->count, dev->rx_bufs);

    /* Pop the most recently added descriptor off the pool */
    rxp->count--;
    bd = rxp->bds[rxp->count];
    rxp->bds[rxp->count] = 0;

    return bd;
}
/**
 * Get buffer descriptor from the receive buffer list page that has been
 * supplied by the guest with the H_REGISTER_LOGICAL_LAN call
 *
 * Returns the descriptor, or 0 if no buffer on the page is both valid and
 * large enough for the frame (plus 8 bytes of header).
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_page(VIOsPAPRVLANDevice *dev,
                                                size_t size)
{
    int buf_ptr = dev->use_buf_ptr;
    vlan_bd_t bd;

    /* Scan the descriptor slots, starting right after the last one we
     * used and wrapping at the end of the descriptor area.  The loop
     * stops either on a usable descriptor or after one full circle
     * (when buf_ptr is back at dev->use_buf_ptr).
     */
    do {
        buf_ptr += 8;
        if (buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr);
        DPRINTF("use_buf_ptr=%d bd=0x%016llx\n",
                buf_ptr, (unsigned long long)bd);
    } while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8)
             && buf_ptr != dev->use_buf_ptr);

    if (!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    /* Remove the buffer from the pool: clear the slot in guest memory so
     * it is no longer marked valid, and remember where we stopped.
     */
    dev->use_buf_ptr = buf_ptr;
    vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);

    DPRINTF("Found buffer: ptr=%d rxbufs=%d\n", dev->use_buf_ptr, dev->rx_bufs);

    return bd;
}
static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
size_t size)
{
@ -110,7 +192,6 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
VIOsPAPRDevice *sdev = VIO_SPAPR_DEVICE(dev);
vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
vlan_bd_t bd;
int buf_ptr = dev->use_buf_ptr;
uint64_t handle;
uint8_t control;
@ -125,29 +206,16 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
return -1;
}
do {
buf_ptr += 8;
if (buf_ptr >= (VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF)) {
buf_ptr = VLAN_RX_BDS_OFF;
}
bd = vio_ldq(sdev, dev->buf_list + buf_ptr);
DPRINTF("use_buf_ptr=%d bd=0x%016llx\n",
buf_ptr, (unsigned long long)bd);
} while ((!(bd & VLAN_BD_VALID) || (VLAN_BD_LEN(bd) < (size + 8)))
&& (buf_ptr != dev->use_buf_ptr));
if (!(bd & VLAN_BD_VALID) || (VLAN_BD_LEN(bd) < (size + 8))) {
/* Failed to find a suitable buffer */
if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
bd = spapr_vlan_get_rx_bd_from_pool(dev, size);
} else {
bd = spapr_vlan_get_rx_bd_from_page(dev, size);
}
if (!bd) {
return -1;
}
/* Remove the buffer from the pool */
dev->rx_bufs--;
dev->use_buf_ptr = buf_ptr;
vio_stq(sdev, dev->buf_list + dev->use_buf_ptr, 0);
DPRINTF("Found buffer: ptr=%d num=%d\n", dev->use_buf_ptr, dev->rx_bufs);
/* Transfer the packet data */
if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
@ -195,13 +263,31 @@ static NetClientInfo net_spapr_vlan_info = {
.receive = spapr_vlan_receive,
};
/*
 * Empty one receive buffer pool and mark it as unused.
 */
static void spapr_vlan_reset_rx_pool(RxBufPool *rxp)
{
    /*
     * Use INT_MAX as bufsize so that unused buffers are moved to the end
     * of the list during the qsort in spapr_vlan_add_rxbuf_to_pool() later.
     */
    rxp->count = 0;
    rxp->bufsize = INT_MAX;
    memset(&rxp->bds[0], 0, sizeof(rxp->bds));
}
/*
 * Device reset: forget the guest-registered buffer list, close the
 * device and drop any buffers still queued in the RX pools.
 */
static void spapr_vlan_reset(VIOsPAPRDevice *sdev)
{
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    int i;

    dev->isopen = 0;
    dev->rx_bufs = 0;
    dev->buf_list = 0;

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
        }
    }
}
static void spapr_vlan_realize(VIOsPAPRDevice *sdev, Error **errp)
@ -218,10 +304,31 @@ static void spapr_vlan_realize(VIOsPAPRDevice *sdev, Error **errp)
static void spapr_vlan_instance_init(Object *obj)
{
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(obj);

    device_add_bootindex_property(obj, &dev->nicconf.bootindex,
                                  "bootindex", "",
                                  DEVICE(dev), NULL);

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        int i;

        /* Allocate all receive buffer pools up front, in reset state */
        for (i = 0; i < RX_MAX_POOLS; i++) {
            RxBufPool *rxp = g_new(RxBufPool, 1);

            spapr_vlan_reset_rx_pool(rxp);
            dev->rx_pool[i] = rxp;
        }
    }
}
static void spapr_vlan_instance_finalize(Object *obj)
{
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(obj);
    int i;

    /* Pools are only allocated when the compat flag is set */
    if (!(dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS)) {
        return;
    }

    for (i = 0; i < RX_MAX_POOLS; i++) {
        g_free(dev->rx_pool[i]);
        dev->rx_pool[i] = NULL;
    }
}
void spapr_vlan_create(VIOsPAPRBus *bus, NICInfo *nd)
@ -372,6 +479,113 @@ static target_ulong h_free_logical_lan(PowerPCCPU *cpu,
return H_SUCCESS;
}
/**
* Used for qsort, this function compares two RxBufPools by size.
*/
static int rx_pool_size_compare(const void *p1, const void *p2)
{
const RxBufPool *pool1 = *(RxBufPool **)p1;
const RxBufPool *pool2 = *(RxBufPool **)p2;
if (pool1->bufsize < pool2->bufsize) {
return -1;
}
return pool1->bufsize > pool2->bufsize;
}
/**
 * Search for a matching buffer pool with exact matching size,
 * or return -1 if no matching pool has been found.
 */
static int spapr_vlan_get_rx_pool_id(VIOsPAPRVLANDevice *dev, int size)
{
    int i = 0;

    while (i < RX_MAX_POOLS) {
        if (dev->rx_pool[i]->bufsize == size) {
            return i;
        }
        i++;
    }

    return -1;
}
/**
 * Enqueuing receive buffer by adding it to one of our receive buffer pools
 *
 * Returns 0 on success, or H_RESOURCE if no pool can take the buffer.
 */
static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
                                                target_ulong buf)
{
    int size = VLAN_BD_LEN(buf);
    int pool;

    pool = spapr_vlan_get_rx_pool_id(dev, size);
    if (pool < 0) {
        /*
         * No matching pool found? Try to use a new one. If the guest used all
         * pools before, but changed the size of one pool in between, we might
         * need to recycle that pool here (if it's empty already). Thus scan
         * all buffer pools now, starting with the last (likely empty) one.
         */
        for (pool = RX_MAX_POOLS - 1; pool >= 0 ; pool--) {
            if (dev->rx_pool[pool]->count == 0) {
                dev->rx_pool[pool]->bufsize = size;
                /*
                 * Sort pools by size so that spapr_vlan_receive()
                 * can later find the smallest buffer pool easily.
                 */
                qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
                      rx_pool_size_compare);
                /* qsort moved the pools around - look up our index again */
                pool = spapr_vlan_get_rx_pool_id(dev, size);
                DPRINTF("created RX pool %d for size %lld\n", pool,
                        VLAN_BD_LEN(buf));
                break;
            }
        }
    }
    /* Still no usable pool? Give up */
    if (pool < 0 || dev->rx_pool[pool]->count >= RX_POOL_MAX_BDS) {
        return H_RESOURCE;
    }

    DPRINTF("h_add_llan_buf(): Add buf using pool %i (size %lli, count=%i)\n",
            pool, VLAN_BD_LEN(buf), dev->rx_pool[pool]->count);

    /* Push the descriptor onto the selected pool */
    dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;

    return 0;
}
/**
 * This is the old way of enqueuing receive buffers: Add it to the rx queue
 * page that has been supplied by the guest (which is quite limited in size).
 *
 * Returns 0 on success, or H_RESOURCE when VLAN_MAX_BUFS buffers are
 * already in flight.
 */
static target_long spapr_vlan_add_rxbuf_to_page(VIOsPAPRVLANDevice *dev,
                                                target_ulong buf)
{
    vlan_bd_t bd;

    if (dev->rx_bufs >= VLAN_MAX_BUFS) {
        return H_RESOURCE;
    }

    /*
     * Scan for a free (non-valid) descriptor slot, starting right after
     * the last slot we filled and wrapping at the end of the descriptor
     * area.  NOTE(review): termination relies on the rx_bufs bound above
     * leaving at least one invalid slot on the page — verify that
     * VLAN_MAX_BUFS never exceeds the number of slots.
     */
    do {
        dev->add_buf_ptr += 8;
        if (dev->add_buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            dev->add_buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + dev->add_buf_ptr);
    } while (bd & VLAN_BD_VALID);

    /* Publish the new descriptor into the free slot in guest memory */
    vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf);

    DPRINTF("h_add_llan_buf(): Added buf ptr=%d rx_bufs=%d bd=0x%016llx\n",
            dev->add_buf_ptr, dev->rx_bufs, (unsigned long long)buf);

    return 0;
}
static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
sPAPRMachineState *spapr,
target_ulong opcode,
@ -381,7 +595,7 @@ static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
target_ulong buf = args[1];
VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
vlan_bd_t bd;
target_long ret;
DPRINTF("H_ADD_LOGICAL_LAN_BUFFER(0x" TARGET_FMT_lx
", 0x" TARGET_FMT_lx ")\n", reg, buf);
@ -397,29 +611,23 @@ static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
return H_PARAMETER;
}
if (!dev->isopen || dev->rx_bufs >= VLAN_MAX_BUFS) {
if (!dev->isopen) {
return H_RESOURCE;
}
do {
dev->add_buf_ptr += 8;
if (dev->add_buf_ptr >= (VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF)) {
dev->add_buf_ptr = VLAN_RX_BDS_OFF;
}
bd = vio_ldq(sdev, dev->buf_list + dev->add_buf_ptr);
} while (bd & VLAN_BD_VALID);
vio_stq(sdev, dev->buf_list + dev->add_buf_ptr, buf);
if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
ret = spapr_vlan_add_rxbuf_to_pool(dev, buf);
} else {
ret = spapr_vlan_add_rxbuf_to_page(dev, buf);
}
if (ret) {
return ret;
}
dev->rx_bufs++;
qemu_flush_queued_packets(qemu_get_queue(dev->nic));
DPRINTF("h_add_logical_lan_buffer(): Added buf ptr=%d rx_bufs=%d"
" bd=0x%016llx\n", dev->add_buf_ptr, dev->rx_bufs,
(unsigned long long)buf);
return H_SUCCESS;
}
@ -509,9 +717,44 @@ static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, sPAPRMachineState *spapr,
static Property spapr_vlan_properties[] = {
DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev),
DEFINE_NIC_PROPERTIES(VIOsPAPRVLANDevice, nicconf),
DEFINE_PROP_BIT("use-rx-buffer-pools", VIOsPAPRVLANDevice,
compat_flags, SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT, true),
DEFINE_PROP_END_OF_LIST(),
};
/*
 * Migration subsection predicate: only send the RX buffer pool state
 * when the buffer-pool compat flag is enabled on this device.
 */
static bool spapr_vlan_rx_buffer_pools_needed(void *opaque)
{
    VIOsPAPRVLANDevice *dev = opaque;
    bool pools_enabled = (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) != 0;

    return pools_enabled;
}
/* Migration state of a single receive buffer pool (one RxBufPool) */
static const VMStateDescription vmstate_rx_buffer_pool = {
    .name = "spapr_llan/rx_buffer_pool",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_vlan_rx_buffer_pools_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(bufsize, RxBufPool),
        VMSTATE_INT32(count, RxBufPool),
        VMSTATE_UINT64_ARRAY(bds, RxBufPool, RX_POOL_MAX_BDS),
        VMSTATE_END_OF_LIST()
    }
};
/* Migration subsection carrying all RX_MAX_POOLS buffer pools; only sent
 * when spapr_vlan_rx_buffer_pools_needed() returns true. */
static const VMStateDescription vmstate_rx_pools = {
    .name = "spapr_llan/rx_pools",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_vlan_rx_buffer_pools_needed,
    .fields = (VMStateField[]) {
        VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, VIOsPAPRVLANDevice,
                                           RX_MAX_POOLS, 1,
                                           vmstate_rx_buffer_pool, RxBufPool),
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_spapr_llan = {
.name = "spapr_llan",
.version_id = 1,
@ -528,6 +771,10 @@ static const VMStateDescription vmstate_spapr_llan = {
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * []) {
&vmstate_rx_pools,
NULL
}
};
static void spapr_vlan_class_init(ObjectClass *klass, void *data)
@ -554,6 +801,7 @@ static const TypeInfo spapr_vlan_info = {
.instance_size = sizeof(VIOsPAPRVLANDevice),
.class_init = spapr_vlan_class_init,
.instance_init = spapr_vlan_instance_init,
.instance_finalize = spapr_vlan_instance_finalize,
};
static void spapr_vlan_register_types(void)

View File

@ -497,10 +497,11 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
* Older KVM versions with older guest kernels were broken with the
* magic page, don't allow the guest to map it.
*/
kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
sizeof(hypercall));
_FDT((fdt_property(fdt, "hcall-instructions", hypercall,
sizeof(hypercall))));
if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
sizeof(hypercall))) {
_FDT((fdt_property(fdt, "hcall-instructions", hypercall,
sizeof(hypercall))));
}
}
_FDT((fdt_end_node(fdt)));
}
@ -1612,15 +1613,8 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
/* Set time-base frequency to 512 MHz */
cpu_ppc_tb_init(env, TIMEBASE_FREQ);
/* PAPR always has exception vectors in RAM not ROM. To ensure this,
* MSR[IP] should never be set.
*/
env->msr_mask &= ~(1 << 6);
/* Tell KVM that we're in PAPR mode */
if (kvm_enabled()) {
kvmppc_set_papr(cpu);
}
/* Enable PAPR mode in TCG or KVM */
cpu_ppc_set_papr(cpu);
if (cpu->max_compat) {
Error *local_err = NULL;
@ -2362,7 +2356,12 @@ DEFINE_SPAPR_MACHINE(2_6, "2.6", true);
* pseries-2.5
*/
#define SPAPR_COMPAT_2_5 \
HW_COMPAT_2_5
HW_COMPAT_2_5 \
{ \
.driver = "spapr-vlan", \
.property = "use-rx-buffer-pools", \
.value = "off", \
},
static void spapr_machine_2_5_instance_options(MachineState *machine)
{

View File

@ -474,9 +474,17 @@ struct ppc_slb_t {
#define MSR_RI 1 /* Recoverable interrupt 1 */
#define MSR_LE 0 /* Little-endian mode 1 hflags */
#define LPCR_ILE (1 << (63-38))
#define LPCR_AIL_SHIFT (63-40) /* Alternate interrupt location */
#define LPCR_AIL (3 << LPCR_AIL_SHIFT)
/* LPCR bits */
#define LPCR_VPM0 (1ull << (63 - 0))
#define LPCR_VPM1 (1ull << (63 - 1))
#define LPCR_ISL (1ull << (63 - 2))
#define LPCR_KBV (1ull << (63 - 3))
#define LPCR_ILE (1ull << (63 - 38))
#define LPCR_MER (1ull << (63 - 52))
#define LPCR_LPES0 (1ull << (63 - 60))
#define LPCR_LPES1 (1ull << (63 - 61))
#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */
#define LPCR_AIL (3ull << LPCR_AIL_SHIFT)
#define msr_sf ((env->msr >> MSR_SF) & 1)
#define msr_isf ((env->msr >> MSR_ISF) & 1)
@ -1260,6 +1268,7 @@ void store_booke_tcr (CPUPPCState *env, target_ulong val);
void store_booke_tsr (CPUPPCState *env, target_ulong val);
void ppc_tlb_invalidate_all (CPUPPCState *env);
void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr);
void cpu_ppc_set_papr(PowerPCCPU *cpu);
#endif
#endif
@ -1346,11 +1355,14 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
#define SPR_SRR1 (0x01B)
#define SPR_CFAR (0x01C)
#define SPR_AMR (0x01D)
#define SPR_ACOP (0x01F)
#define SPR_BOOKE_PID (0x030)
#define SPR_BOOKS_PID (0x030)
#define SPR_BOOKE_DECAR (0x036)
#define SPR_BOOKE_CSRR0 (0x03A)
#define SPR_BOOKE_CSRR1 (0x03B)
#define SPR_BOOKE_DEAR (0x03D)
#define SPR_IAMR (0x03D)
#define SPR_BOOKE_ESR (0x03E)
#define SPR_BOOKE_IVPR (0x03F)
#define SPR_MPC_EIE (0x050)
@ -1381,6 +1393,11 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
#define SPR_MPC_ICTRL (0x09E)
#define SPR_MPC_BAR (0x09F)
#define SPR_PSPB (0x09F)
#define SPR_DAWR (0x0B4)
#define SPR_RPR (0x0BA)
#define SPR_CIABR (0x0BB)
#define SPR_DAWRX (0x0BC)
#define SPR_HFSCR (0x0BE)
#define SPR_VRSAVE (0x100)
#define SPR_USPRG0 (0x100)
#define SPR_USPRG1 (0x101)
@ -1435,19 +1452,25 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
#define SPR_HSRR1 (0x13B)
#define SPR_BOOKE_IAC4 (0x13B)
#define SPR_BOOKE_DAC1 (0x13C)
#define SPR_LPIDR (0x13D)
#define SPR_MMCRH (0x13C)
#define SPR_DABR2 (0x13D)
#define SPR_BOOKE_DAC2 (0x13D)
#define SPR_TFMR (0x13D)
#define SPR_BOOKE_DVC1 (0x13E)
#define SPR_LPCR (0x13E)
#define SPR_BOOKE_DVC2 (0x13F)
#define SPR_LPIDR (0x13F)
#define SPR_BOOKE_TSR (0x150)
#define SPR_HMER (0x150)
#define SPR_HMEER (0x151)
#define SPR_PCR (0x152)
#define SPR_BOOKE_LPIDR (0x152)
#define SPR_BOOKE_TCR (0x154)
#define SPR_BOOKE_TLB0PS (0x158)
#define SPR_BOOKE_TLB1PS (0x159)
#define SPR_BOOKE_TLB2PS (0x15A)
#define SPR_BOOKE_TLB3PS (0x15B)
#define SPR_AMOR (0x15D)
#define SPR_BOOKE_MAS7_MAS3 (0x174)
#define SPR_BOOKE_IVOR0 (0x190)
#define SPR_BOOKE_IVOR1 (0x191)
@ -1666,7 +1689,9 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
#define SPR_MPC_MD_DBRAM1 (0x32A)
#define SPR_RCPU_L2U_RA3 (0x32B)
#define SPR_TAR (0x32F)
#define SPR_IC (0x350)
#define SPR_VTB (0x351)
#define SPR_MMCRC (0x353)
#define SPR_440_INV0 (0x370)
#define SPR_440_INV1 (0x371)
#define SPR_440_INV2 (0x372)
@ -1683,6 +1708,7 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
#define SPR_POWER_SPMC1 (0x37C)
#define SPR_POWER_SPMC2 (0x37D)
#define SPR_POWER_MMCRS (0x37E)
#define SPR_WORT (0x37F)
#define SPR_PPR (0x380)
#define SPR_750_GQR0 (0x390)
#define SPR_440_DNV0 (0x390)
@ -1705,6 +1731,7 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
#define SPR_440_DVLIM (0x398)
#define SPR_750_WPAR (0x399)
#define SPR_440_IVLIM (0x399)
#define SPR_TSCR (0x399)
#define SPR_750_DMAU (0x39A)
#define SPR_750_DMAL (0x39B)
#define SPR_440_RSTCFG (0x39B)
@ -1879,9 +1906,10 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
/* HID0 bits */
#define HID0_DEEPNAP (1 << 24)
#define HID0_DOZE (1 << 23)
#define HID0_NAP (1 << 22)
#define HID0_DEEPNAP (1 << 24) /* pre-2.06 */
#define HID0_DOZE (1 << 23) /* pre-2.06 */
#define HID0_NAP (1 << 22) /* pre-2.06 */
#define HID0_HILE (1ull << (63 - 19)) /* POWER8 */
/*****************************************************************************/
/* PowerPC Instructions types definitions */
@ -2230,6 +2258,25 @@ enum {
PCR_TM_DIS = 1ull << (63-2), /* Trans. memory disable (POWER8) */
};
/* HMER/HMEER */
enum {
HMER_MALFUNCTION_ALERT = 1ull << (63 - 0),
HMER_PROC_RECV_DONE = 1ull << (63 - 2),
HMER_PROC_RECV_ERROR_MASKED = 1ull << (63 - 3),
HMER_TFAC_ERROR = 1ull << (63 - 4),
HMER_TFMR_PARITY_ERROR = 1ull << (63 - 5),
HMER_XSCOM_FAIL = 1ull << (63 - 8),
HMER_XSCOM_DONE = 1ull << (63 - 9),
HMER_PROC_RECV_AGAIN = 1ull << (63 - 11),
HMER_WARN_RISE = 1ull << (63 - 14),
HMER_WARN_FALL = 1ull << (63 - 15),
HMER_SCOM_FIR_HMI = 1ull << (63 - 16),
HMER_TRIG_FIR_HMI = 1ull << (63 - 17),
HMER_HYP_RESOURCE_ERR = 1ull << (63 - 20),
HMER_XSCOM_STATUS_MASK = 7ull << (63 - 23),
HMER_XSCOM_STATUS_LSH = (63 - 23),
};
/*****************************************************************************/
static inline target_ulong cpu_read_xer(CPUPPCState *env)

View File

@ -2007,7 +2007,7 @@ int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
hc[2] = cpu_to_be32(0x48000008);
hc[3] = cpu_to_be32(bswap32(0x3860ffff));
return 0;
return 1;
}
static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)

View File

@ -4282,14 +4282,17 @@ static inline void gen_op_mfspr(DisasContext *ctx)
void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
uint32_t sprn = SPR(ctx->opcode);
#if !defined(CONFIG_USER_ONLY)
if (ctx->hv)
read_cb = ctx->spr_cb[sprn].hea_read;
else if (!ctx->pr)
read_cb = ctx->spr_cb[sprn].oea_read;
else
#endif
#if defined(CONFIG_USER_ONLY)
read_cb = ctx->spr_cb[sprn].uea_read;
#else
if (ctx->pr) {
read_cb = ctx->spr_cb[sprn].uea_read;
} else if (ctx->hv) {
read_cb = ctx->spr_cb[sprn].hea_read;
} else {
read_cb = ctx->spr_cb[sprn].oea_read;
}
#endif
if (likely(read_cb != NULL)) {
if (likely(read_cb != SPR_NOACCESS)) {
(*read_cb)(ctx, rD(ctx->opcode), sprn);
@ -4437,14 +4440,17 @@ static void gen_mtspr(DisasContext *ctx)
void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
uint32_t sprn = SPR(ctx->opcode);
#if !defined(CONFIG_USER_ONLY)
if (ctx->hv)
write_cb = ctx->spr_cb[sprn].hea_write;
else if (!ctx->pr)
write_cb = ctx->spr_cb[sprn].oea_write;
else
#endif
#if defined(CONFIG_USER_ONLY)
write_cb = ctx->spr_cb[sprn].uea_write;
#else
if (ctx->pr) {
write_cb = ctx->spr_cb[sprn].uea_write;
} else if (ctx->hv) {
write_cb = ctx->spr_cb[sprn].hea_write;
} else {
write_cb = ctx->spr_cb[sprn].oea_write;
}
#endif
if (likely(write_cb != NULL)) {
if (likely(write_cb != SPR_NOACCESS)) {
(*write_cb)(ctx, sprn, rS(ctx->opcode));

View File

@ -579,17 +579,33 @@ static inline void vscr_init (CPUPPCState *env, uint32_t val)
#define spr_register_kvm(env, num, name, uea_read, uea_write, \
oea_read, oea_write, one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, initial_value)
#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
oea_read, oea_write, hea_read, hea_write, \
one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, initial_value)
#else
#if !defined(CONFIG_KVM)
#define spr_register_kvm(env, num, name, uea_read, uea_write, \
oea_read, oea_write, one_reg_id, initial_value) \
oea_read, oea_write, one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, \
oea_read, oea_write, initial_value)
oea_read, oea_write, oea_read, oea_write, initial_value)
#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
oea_read, oea_write, hea_read, hea_write, \
one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, \
oea_read, oea_write, hea_read, hea_write, initial_value)
#else
#define spr_register_kvm(env, num, name, uea_read, uea_write, \
oea_read, oea_write, one_reg_id, initial_value) \
oea_read, oea_write, one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, \
oea_read, oea_write, one_reg_id, initial_value)
oea_read, oea_write, oea_read, oea_write, \
one_reg_id, initial_value)
#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
oea_read, oea_write, hea_read, hea_write, \
one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, \
oea_read, oea_write, hea_read, hea_write, \
one_reg_id, initial_value)
#endif
#endif
@ -598,6 +614,13 @@ static inline void vscr_init (CPUPPCState *env, uint32_t val)
spr_register_kvm(env, num, name, uea_read, uea_write, \
oea_read, oea_write, 0, initial_value)
#define spr_register_hv(env, num, name, uea_read, uea_write, \
oea_read, oea_write, hea_read, hea_write, \
initial_value) \
spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
oea_read, oea_write, hea_read, hea_write, \
0, initial_value)
static inline void _spr_register(CPUPPCState *env, int num,
const char *name,
void (*uea_read)(DisasContext *ctx, int gprn, int sprn),
@ -606,6 +629,8 @@ static inline void _spr_register(CPUPPCState *env, int num,
void (*oea_read)(DisasContext *ctx, int gprn, int sprn),
void (*oea_write)(DisasContext *ctx, int sprn, int gprn),
void (*hea_read)(DisasContext *opaque, int gprn, int sprn),
void (*hea_write)(DisasContext *opaque, int sprn, int gprn),
#endif
#if defined(CONFIG_KVM)
uint64_t one_reg_id,
@ -633,6 +658,8 @@ static inline void _spr_register(CPUPPCState *env, int num,
#if !defined(CONFIG_USER_ONLY)
spr->oea_read = oea_read;
spr->oea_write = oea_write;
spr->hea_read = hea_read;
spr->hea_write = hea_write;
#endif
#if defined(CONFIG_KVM)
spr->one_reg_id = one_reg_id,
@ -1036,30 +1063,102 @@ static void gen_spr_7xx (CPUPPCState *env)
#ifdef TARGET_PPC64
#ifndef CONFIG_USER_ONLY
/* UAMR is the userspace-accessible alias of AMR: reads forward to SPR_AMR */
static void spr_read_uamr (DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], SPR_AMR);
    spr_load_dump_spr(SPR_AMR);
}
/* Unmasked UAMR write (alias of AMR): stores straight into SPR_AMR */
static void spr_write_uamr (DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(SPR_AMR, cpu_gpr[gprn]);
    spr_store_dump_spr(SPR_AMR);
}
static void spr_write_uamr_pr (DisasContext *ctx, int sprn, int gprn)
/* Masked AMR write for non-hypervisor state: only bits allowed by UAMOR
 * (in problem state) or AMOR (in supervisor state) may be changed; all
 * other AMR bits keep their current value.
 */
static void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /* Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        gen_load_spr(t1, SPR_UAMOR);
    } else {
        gen_load_spr(t1, SPR_AMOR);
    }

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_AMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_AMR, t0);

    spr_store_dump_spr(SPR_AMR);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
/* Masked UAMOR write for supervisor state: only bits allowed by AMOR may
 * be changed; all other UAMOR bits keep their current value.
 */
static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /* Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load UAMOR and clear the to-be-replaced bits in t0 */
    gen_load_spr(t0, SPR_UAMOR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_UAMOR, t0);

    spr_store_dump_spr(SPR_UAMOR);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
/* Masked IAMR write for non-hypervisor state: only bits allowed by AMOR
 * may be changed; all other IAMR bits keep their current value.
 */
static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /* Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load IAMR and clear the to-be-replaced bits in t0 */
    gen_load_spr(t0, SPR_IAMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_IAMR, t0);

    spr_store_dump_spr(SPR_IAMR);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
#endif /* CONFIG_USER_ONLY */
static void gen_spr_amr (CPUPPCState *env)
static void gen_spr_amr(CPUPPCState *env, bool has_iamr)
{
#ifndef CONFIG_USER_ONLY
/* Virtual Page Class Key protection */
@ -1067,17 +1166,31 @@ static void gen_spr_amr (CPUPPCState *env)
* userspace accessible, 29 is privileged. So we only need to set
* the kvm ONE_REG id on one of them, we use 29 */
spr_register(env, SPR_UAMR, "UAMR",
&spr_read_uamr, &spr_write_uamr_pr,
&spr_read_uamr, &spr_write_uamr,
&spr_read_generic, &spr_write_amr,
&spr_read_generic, &spr_write_amr,
0);
spr_register_kvm(env, SPR_AMR, "AMR",
spr_register_kvm_hv(env, SPR_AMR, "AMR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_amr,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_AMR, 0);
spr_register_kvm(env, SPR_UAMOR, "UAMOR",
spr_register_kvm_hv(env, SPR_UAMOR, "UAMOR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_uamor,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_UAMOR, 0);
spr_register_hv(env, SPR_AMOR, "AMOR",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0);
if (has_iamr) {
spr_register_kvm_hv(env, SPR_IAMR, "IAMR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_iamr,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_IAMR, 0);
}
#endif /* !CONFIG_USER_ONLY */
}
#endif /* TARGET_PPC64 */
@ -7464,6 +7577,25 @@ static void gen_spr_book3s_dbg(CPUPPCState *env)
KVM_REG_PPC_DABRX, 0x00000000);
}
/* Register the 2.07 (POWER8-era) debug SPRs DAWR, DAWRX and CIABR.
 * All three are hypervisor-privileged here (SPR_NOACCESS for problem and
 * supervisor state) and are synced with KVM via their one-reg IDs.
 */
static void gen_spr_book3s_207_dbg(CPUPPCState *env)
{
    spr_register_kvm_hv(env, SPR_DAWR, "DAWR",
                        SPR_NOACCESS, SPR_NOACCESS,
                        SPR_NOACCESS, SPR_NOACCESS,
                        &spr_read_generic, &spr_write_generic,
                        KVM_REG_PPC_DAWR, 0x00000000);
    spr_register_kvm_hv(env, SPR_DAWRX, "DAWRX",
                        SPR_NOACCESS, SPR_NOACCESS,
                        SPR_NOACCESS, SPR_NOACCESS,
                        &spr_read_generic, &spr_write_generic,
                        KVM_REG_PPC_DAWRX, 0x00000000);
    spr_register_kvm_hv(env, SPR_CIABR, "CIABR",
                        SPR_NOACCESS, SPR_NOACCESS,
                        SPR_NOACCESS, SPR_NOACCESS,
                        &spr_read_generic, &spr_write_generic,
                        KVM_REG_PPC_CIABR, 0x00000000);
}
static void gen_spr_970_dbg(CPUPPCState *env)
{
/* Breakpoints */
@ -7878,6 +8010,36 @@ static void gen_spr_power8_pspb(CPUPPCState *env)
KVM_REG_PPC_PSPB, 0);
}
/* Register the POWER8 IC SPR as a plain generic register: read-only from
 * supervisor state, read/write from hypervisor state (no side effects
 * modelled).
 */
static void gen_spr_power8_ic(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
    spr_register_hv(env, SPR_IC, "IC",
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0);
#endif
}
/* Add a number of P8 book4 registers (ACOP, PID, WORT), each backed by a
 * plain generic read/write register and mapped to its KVM one-reg ID.
 */
static void gen_spr_power8_book4(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
    spr_register_kvm(env, SPR_ACOP, "ACOP",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_generic,
                     KVM_REG_PPC_ACOP, 0);
    spr_register_kvm(env, SPR_BOOKS_PID, "PID",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_generic,
                     KVM_REG_PPC_PID, 0);
    spr_register_kvm(env, SPR_WORT, "WORT",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_generic,
                     KVM_REG_PPC_WORT, 0);
#endif
}
static void init_proc_book3s_64(CPUPPCState *env, int version)
{
gen_spr_ne_601(env);
@ -7899,7 +8061,7 @@ static void init_proc_book3s_64(CPUPPCState *env, int version)
case BOOK3S_CPU_POWER7:
case BOOK3S_CPU_POWER8:
gen_spr_book3s_ids(env);
gen_spr_amr(env);
gen_spr_amr(env, version >= BOOK3S_CPU_POWER8);
gen_spr_book3s_purr(env);
env->ci_large_pages = true;
break;
@ -7930,9 +8092,13 @@ static void init_proc_book3s_64(CPUPPCState *env, int version)
gen_spr_power8_tm(env);
gen_spr_power8_pspb(env);
gen_spr_vtb(env);
gen_spr_power8_ic(env);
gen_spr_power8_book4(env);
}
if (version < BOOK3S_CPU_POWER8) {
gen_spr_book3s_dbg(env);
} else {
gen_spr_book3s_207_dbg(env);
}
#if !defined(CONFIG_USER_ONLY)
switch (version) {
@ -8332,8 +8498,33 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
}
#endif /* defined (TARGET_PPC64) */
#if !defined(CONFIG_USER_ONLY)
/*
 * Put a CPU into PAPR (pseries guest) mode.  Adjusts msr_mask and AMOR,
 * and informs KVM when it is enabled; safe to call under TCG as well.
 */
void cpu_ppc_set_papr(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    ppc_spr_t *amor = &env->spr_cb[SPR_AMOR];

    /* PAPR always has exception vectors in RAM not ROM. To ensure this,
     * MSR[IP] should never be set.
     *
     * We also disallow setting of MSR_HV
     */
    env->msr_mask &= ~((1ull << MSR_EP) | MSR_HVB);

    /* Set a full AMOR so guest can use the AMR as it sees fit */
    env->spr[SPR_AMOR] = amor->default_value = 0xffffffffffffffffull;

    /* Tell KVM that we're in PAPR mode */
    if (kvm_enabled()) {
        kvmppc_set_papr(cpu);
    }
}
#endif /* !defined(CONFIG_USER_ONLY) */
#endif /* defined (TARGET_PPC64) */
/*****************************************************************************/
/* Generic CPU instantiation routine */
@ -9703,7 +9894,7 @@ static void ppc_cpu_reset(CPUState *s)
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
env->msr |= (1ULL << MSR_SF);
msr |= (1ULL << MSR_SF);
}
#endif