Merge remote-tracking branch 'remotes/alistair/tags/pull-riscv-to-apply-20220303' into staging

Fifth RISC-V PR for QEMU 7.0

 * Fixup checks for ext_zb[abcs]
 * Add AIA support for virt machine
 * Increase maximum number of CPUs in virt machine
 * Fixup OpenTitan SPI address
 * Add support for zfinx, zdinx and zhinx{min} extensions

# gpg: Signature made Thu 03 Mar 2022 05:26:55 GMT
# gpg:                using RSA key F6C4AC46D4934868D3B8CE8F21E10D29DF977054
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [full]
# Primary key fingerprint: F6C4 AC46 D493 4868 D3B8  CE8F 21E1 0D29 DF97 7054

* remotes/alistair/tags/pull-riscv-to-apply-20220303:
  target/riscv: expose zfinx, zdinx, zhinx{min} properties
  target/riscv: add support for zhinx/zhinxmin
  target/riscv: add support for zdinx
  target/riscv: add support for zfinx
  target/riscv: hardwire mstatus.FS to zero when enable zfinx
  target/riscv: add cfg properties for zfinx, zdinx and zhinx{min}
  hw: riscv: opentitan: fixup SPI addresses
  hw/riscv: virt: Increase maximum number of allowed CPUs
  docs/system: riscv: Document AIA options for virt machine
  hw/riscv: virt: Add optional AIA IMSIC support to virt machine
  hw/intc: Add RISC-V AIA IMSIC device emulation
  hw/riscv: virt: Add optional AIA APLIC support to virt machine
  target/riscv: fix inverted checks for ext_zb[abcs]

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Branch: master
Committed by Peter Maydell, 2022-03-03 19:59:38 +00:00, commit 5959ef7d43
22 changed files with 2145 additions and 500 deletions


@ -63,6 +63,22 @@ The following machine-specific options are supported:
When this option is "on", ACLINT devices will be emulated instead of
SiFive CLINT. When not specified, this option is assumed to be "off".
- aia=[none|aplic|aplic-imsic]
This option allows selecting an interrupt controller defined by the AIA
(advanced interrupt architecture) specification. "aia=aplic" selects the
APLIC (advanced platform level interrupt controller) to handle wired
interrupts, whereas "aia=aplic-imsic" selects both the APLIC and the IMSIC
(incoming message signaled interrupt controller) to handle wired interrupts
as well as MSIs. When not specified, this option is assumed to be "none",
which selects the SiFive PLIC to handle wired interrupts.
- aia-guests=nnn
The number of per-HART VS-level AIA IMSIC pages to be emulated for a guest
having AIA IMSIC (i.e. "aia=aplic-imsic" selected). When not specified,
the default number of per-HART VS-level AIA IMSIC pages is 0.
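As an illustration (the exact command line is the editor's, not taken from the
patch text), "-machine virt,aia=aplic-imsic,aia-guests=2" selects the APLIC
plus IMSIC configuration with two VS-level IMSIC guest pages per HART; the
virt machine accepts values from 0 up to 7 for this option.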
Running Linux kernel
--------------------


@ -73,6 +73,9 @@ config RISCV_ACLINT
config RISCV_APLIC
bool
config RISCV_IMSIC
bool
config SIFIVE_PLIC
bool


@ -51,6 +51,7 @@ specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c'))
specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c'))
specific_ss.add(when: 'CONFIG_RISCV_APLIC', if_true: files('riscv_aplic.c'))
specific_ss.add(when: 'CONFIG_RISCV_IMSIC', if_true: files('riscv_imsic.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c', 'xive2.c'))
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'],

hw/intc/riscv_imsic.c (new file, 448 lines)

@ -0,0 +1,448 @@
/*
* RISC-V IMSIC (Incoming Message Signaled Interrupt Controller)
*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/bswap.h"
#include "exec/address-spaces.h"
#include "hw/sysbus.h"
#include "hw/pci/msi.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/intc/riscv_imsic.h"
#include "hw/irq.h"
#include "target/riscv/cpu.h"
#include "target/riscv/cpu_bits.h"
#include "sysemu/sysemu.h"
#include "migration/vmstate.h"
#define IMSIC_MMIO_PAGE_LE 0x00
#define IMSIC_MMIO_PAGE_BE 0x04
#define IMSIC_MIN_ID ((IMSIC_EIPx_BITS * 2) - 1)
#define IMSIC_MAX_ID (IMSIC_TOPEI_IID_MASK)
#define IMSIC_EISTATE_PENDING (1U << 0)
#define IMSIC_EISTATE_ENABLED (1U << 1)
#define IMSIC_EISTATE_ENPEND (IMSIC_EISTATE_ENABLED | \
IMSIC_EISTATE_PENDING)
static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page)
{
uint32_t i, max_irq, base;
base = page * imsic->num_irqs;
max_irq = (imsic->eithreshold[page] &&
(imsic->eithreshold[page] <= imsic->num_irqs)) ?
imsic->eithreshold[page] : imsic->num_irqs;
for (i = 1; i < max_irq; i++) {
if ((imsic->eistate[base + i] & IMSIC_EISTATE_ENPEND) ==
IMSIC_EISTATE_ENPEND) {
return (i << IMSIC_TOPEI_IID_SHIFT) | i;
}
}
return 0;
}
static void riscv_imsic_update(RISCVIMSICState *imsic, uint32_t page)
{
if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) {
qemu_irq_raise(imsic->external_irqs[page]);
} else {
qemu_irq_lower(imsic->external_irqs[page]);
}
}
static int riscv_imsic_eidelivery_rmw(RISCVIMSICState *imsic, uint32_t page,
target_ulong *val,
target_ulong new_val,
target_ulong wr_mask)
{
target_ulong old_val = imsic->eidelivery[page];
if (val) {
*val = old_val;
}
wr_mask &= 0x1;
imsic->eidelivery[page] = (old_val & ~wr_mask) | (new_val & wr_mask);
riscv_imsic_update(imsic, page);
return 0;
}
static int riscv_imsic_eithreshold_rmw(RISCVIMSICState *imsic, uint32_t page,
target_ulong *val,
target_ulong new_val,
target_ulong wr_mask)
{
target_ulong old_val = imsic->eithreshold[page];
if (val) {
*val = old_val;
}
wr_mask &= IMSIC_MAX_ID;
imsic->eithreshold[page] = (old_val & ~wr_mask) | (new_val & wr_mask);
riscv_imsic_update(imsic, page);
return 0;
}
static int riscv_imsic_topei_rmw(RISCVIMSICState *imsic, uint32_t page,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
{
uint32_t base, topei = riscv_imsic_topei(imsic, page);
/* Read pending and enabled interrupt with highest priority */
if (val) {
*val = topei;
}
/* Writes ignore value and clear top pending interrupt */
if (topei && wr_mask) {
topei >>= IMSIC_TOPEI_IID_SHIFT;
base = page * imsic->num_irqs;
if (topei) {
imsic->eistate[base + topei] &= ~IMSIC_EISTATE_PENDING;
}
riscv_imsic_update(imsic, page);
}
return 0;
}
static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic,
uint32_t xlen, uint32_t page,
uint32_t num, bool pend, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
uint32_t i, base;
target_ulong mask;
uint32_t state = (pend) ? IMSIC_EISTATE_PENDING : IMSIC_EISTATE_ENABLED;
if (xlen != 32) {
if (num & 0x1) {
return -EINVAL;
}
num >>= 1;
}
if (num >= (imsic->num_irqs / xlen)) {
return -EINVAL;
}
base = (page * imsic->num_irqs) + (num * xlen);
if (val) {
*val = 0;
for (i = 0; i < xlen; i++) {
mask = (target_ulong)1 << i;
*val |= (imsic->eistate[base + i] & state) ? mask : 0;
}
}
for (i = 0; i < xlen; i++) {
/* Bit0 of eip0 and eie0 are read-only zero */
if (!num && !i) {
continue;
}
mask = (target_ulong)1 << i;
if (wr_mask & mask) {
if (new_val & mask) {
imsic->eistate[base + i] |= state;
} else {
imsic->eistate[base + i] &= ~state;
}
}
}
riscv_imsic_update(imsic, page);
return 0;
}
static int riscv_imsic_rmw(void *arg, target_ulong reg, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
RISCVIMSICState *imsic = arg;
uint32_t isel, priv, virt, vgein, xlen, page;
priv = AIA_IREG_PRIV(reg);
virt = AIA_IREG_VIRT(reg);
isel = AIA_IREG_ISEL(reg);
vgein = AIA_IREG_VGEIN(reg);
xlen = AIA_IREG_XLEN(reg);
if (imsic->mmode) {
if (priv == PRV_M && !virt) {
page = 0;
} else {
goto err;
}
} else {
if (priv == PRV_S) {
if (virt) {
if (vgein && vgein < imsic->num_pages) {
page = vgein;
} else {
goto err;
}
} else {
page = 0;
}
} else {
goto err;
}
}
switch (isel) {
case ISELECT_IMSIC_EIDELIVERY:
return riscv_imsic_eidelivery_rmw(imsic, page, val,
new_val, wr_mask);
case ISELECT_IMSIC_EITHRESHOLD:
return riscv_imsic_eithreshold_rmw(imsic, page, val,
new_val, wr_mask);
case ISELECT_IMSIC_TOPEI:
return riscv_imsic_topei_rmw(imsic, page, val, new_val, wr_mask);
case ISELECT_IMSIC_EIP0 ... ISELECT_IMSIC_EIP63:
return riscv_imsic_eix_rmw(imsic, xlen, page,
isel - ISELECT_IMSIC_EIP0,
true, val, new_val, wr_mask);
case ISELECT_IMSIC_EIE0 ... ISELECT_IMSIC_EIE63:
return riscv_imsic_eix_rmw(imsic, xlen, page,
isel - ISELECT_IMSIC_EIE0,
false, val, new_val, wr_mask);
default:
break;
};
err:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Invalid register priv=%d virt=%d isel=%d vgein=%d\n",
__func__, priv, virt, isel, vgein);
return -EINVAL;
}
static uint64_t riscv_imsic_read(void *opaque, hwaddr addr, unsigned size)
{
RISCVIMSICState *imsic = opaque;
/* Reads must be 4 byte words */
if ((addr & 0x3) != 0) {
goto err;
}
/* Reads cannot be out of range */
if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
goto err;
}
return 0;
err:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Invalid register read 0x%" HWADDR_PRIx "\n",
__func__, addr);
return 0;
}
static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
unsigned size)
{
RISCVIMSICState *imsic = opaque;
uint32_t page;
/* Writes must be 4 byte words */
if ((addr & 0x3) != 0) {
goto err;
}
/* Writes cannot be out of range */
if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
goto err;
}
/* Writes only supported for MSI little-endian registers */
page = addr >> IMSIC_MMIO_PAGE_SHIFT;
if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
if (value && (value < imsic->num_irqs)) {
imsic->eistate[(page * imsic->num_irqs) + value] |=
IMSIC_EISTATE_PENDING;
}
}
/* Update CPU external interrupt status */
riscv_imsic_update(imsic, page);
return;
err:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Invalid register write 0x%" HWADDR_PRIx "\n",
__func__, addr);
}
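/*
 * Editorial note on the MSI flow modelled above (wording is the editor's,
 * not from the patch): a device or hypervisor delivers an MSI by doing a
 * 32-bit store of the interrupt identity to the little-endian doorbell at
 * offset 0 of the target interrupt file's page. The write handler marks
 * that identity pending in eistate[], and riscv_imsic_update() raises the
 * hart's external interrupt line once the identity is also enabled,
 * eidelivery is set, and it passes the eithreshold check in
 * riscv_imsic_topei().
 */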
static const MemoryRegionOps riscv_imsic_ops = {
.read = riscv_imsic_read,
.write = riscv_imsic_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4
}
};
static void riscv_imsic_realize(DeviceState *dev, Error **errp)
{
RISCVIMSICState *imsic = RISCV_IMSIC(dev);
RISCVCPU *rcpu = RISCV_CPU(qemu_get_cpu(imsic->hartid));
CPUState *cpu = qemu_get_cpu(imsic->hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
imsic, TYPE_RISCV_IMSIC,
IMSIC_MMIO_SIZE(imsic->num_pages));
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);
/* Claim the CPU interrupt to be triggered by this IMSIC */
if (riscv_cpu_claim_interrupts(rcpu,
(imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
error_setg(errp, "%s already claimed",
(imsic->mmode) ? "MEIP" : "SEIP");
return;
}
/* Create output IRQ lines */
imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
/* Force select AIA feature and setup CSR read-modify-write callback */
if (env) {
riscv_set_feature(env, RISCV_FEATURE_AIA);
if (!imsic->mmode) {
riscv_cpu_set_geilen(env, imsic->num_pages - 1);
}
riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
riscv_imsic_rmw, imsic);
}
msi_nonbroken = true;
}
static Property riscv_imsic_properties[] = {
DEFINE_PROP_BOOL("mmode", RISCVIMSICState, mmode, 0),
DEFINE_PROP_UINT32("hartid", RISCVIMSICState, hartid, 0),
DEFINE_PROP_UINT32("num-pages", RISCVIMSICState, num_pages, 0),
DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_riscv_imsic = {
.name = "riscv_imsic",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
num_pages, 0,
vmstate_info_uint32, uint32_t),
VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
num_pages, 0,
vmstate_info_uint32, uint32_t),
VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState,
num_eistate, 0,
vmstate_info_uint32, uint32_t),
VMSTATE_END_OF_LIST()
}
};
static void riscv_imsic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, riscv_imsic_properties);
dc->realize = riscv_imsic_realize;
dc->vmsd = &vmstate_riscv_imsic;
}
static const TypeInfo riscv_imsic_info = {
.name = TYPE_RISCV_IMSIC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(RISCVIMSICState),
.class_init = riscv_imsic_class_init,
};
static void riscv_imsic_register_types(void)
{
type_register_static(&riscv_imsic_info);
}
type_init(riscv_imsic_register_types)
/*
* Create IMSIC device.
*/
DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
uint32_t num_pages, uint32_t num_ids)
{
DeviceState *dev = qdev_new(TYPE_RISCV_IMSIC);
CPUState *cpu = qemu_get_cpu(hartid);
uint32_t i;
assert(!(addr & (IMSIC_MMIO_PAGE_SZ - 1)));
if (mmode) {
assert(num_pages == 1);
} else {
assert(num_pages >= 1 && num_pages <= (IRQ_LOCAL_GUEST_MAX + 1));
}
assert(IMSIC_MIN_ID <= num_ids);
assert(num_ids <= IMSIC_MAX_ID);
assert((num_ids & IMSIC_MIN_ID) == IMSIC_MIN_ID);
qdev_prop_set_bit(dev, "mmode", mmode);
qdev_prop_set_uint32(dev, "hartid", hartid);
qdev_prop_set_uint32(dev, "num-pages", num_pages);
qdev_prop_set_uint32(dev, "num-irqs", num_ids + 1);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
for (i = 0; i < num_pages; i++) {
if (!i) {
qdev_connect_gpio_out_named(dev, NULL, i,
qdev_get_gpio_in(DEVICE(cpu),
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
} else {
qdev_connect_gpio_out_named(dev, NULL, i,
qdev_get_gpio_in(DEVICE(cpu),
IRQ_LOCAL_MAX + i - 1));
}
}
return dev;
}


@ -42,6 +42,8 @@ config RISCV_VIRT
select PFLASH_CFI01
select SERIAL
select RISCV_ACLINT
select RISCV_APLIC
select RISCV_IMSIC
select SIFIVE_PLIC
select SIFIVE_TEST
select VIRTIO_MMIO


@ -34,13 +34,15 @@ static const MemMapEntry ibex_memmap[] = {
[IBEX_DEV_FLASH] = { 0x20000000, 0x80000 },
[IBEX_DEV_UART] = { 0x40000000, 0x1000 },
[IBEX_DEV_GPIO] = { 0x40040000, 0x1000 },
[IBEX_DEV_SPI] = { 0x40050000, 0x1000 },
[IBEX_DEV_SPI_DEVICE] = { 0x40050000, 0x1000 },
[IBEX_DEV_I2C] = { 0x40080000, 0x1000 },
[IBEX_DEV_PATTGEN] = { 0x400e0000, 0x1000 },
[IBEX_DEV_TIMER] = { 0x40100000, 0x1000 },
[IBEX_DEV_SENSOR_CTRL] = { 0x40110000, 0x1000 },
[IBEX_DEV_OTP_CTRL] = { 0x40130000, 0x4000 },
[IBEX_DEV_USBDEV] = { 0x40150000, 0x1000 },
[IBEX_DEV_SPI_HOST0] = { 0x40300000, 0x1000 },
[IBEX_DEV_SPI_HOST1] = { 0x40310000, 0x1000 },
[IBEX_DEV_PWRMGR] = { 0x40400000, 0x1000 },
[IBEX_DEV_RSTMGR] = { 0x40410000, 0x1000 },
[IBEX_DEV_CLKMGR] = { 0x40420000, 0x1000 },
@ -209,8 +211,12 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
create_unimplemented_device("riscv.lowrisc.ibex.gpio",
memmap[IBEX_DEV_GPIO].base, memmap[IBEX_DEV_GPIO].size);
create_unimplemented_device("riscv.lowrisc.ibex.spi",
memmap[IBEX_DEV_SPI].base, memmap[IBEX_DEV_SPI].size);
create_unimplemented_device("riscv.lowrisc.ibex.spi_device",
memmap[IBEX_DEV_SPI_DEVICE].base, memmap[IBEX_DEV_SPI_DEVICE].size);
create_unimplemented_device("riscv.lowrisc.ibex.spi_host0",
memmap[IBEX_DEV_SPI_HOST0].base, memmap[IBEX_DEV_SPI_HOST0].size);
create_unimplemented_device("riscv.lowrisc.ibex.spi_host1",
memmap[IBEX_DEV_SPI_HOST1].base, memmap[IBEX_DEV_SPI_HOST1].size);
create_unimplemented_device("riscv.lowrisc.ibex.i2c",
memmap[IBEX_DEV_I2C].base, memmap[IBEX_DEV_I2C].size);
create_unimplemented_device("riscv.lowrisc.ibex.pattgen",


@ -33,6 +33,8 @@
#include "hw/riscv/boot.h"
#include "hw/riscv/numa.h"
#include "hw/intc/riscv_aclint.h"
#include "hw/intc/riscv_aplic.h"
#include "hw/intc/riscv_imsic.h"
#include "hw/intc/sifive_plic.h"
#include "hw/misc/sifive_test.h"
#include "chardev/char.h"
@ -43,6 +45,28 @@
#include "hw/pci-host/gpex.h"
#include "hw/display/ramfb.h"
/*
* The virt machine physical address space used by some of the devices
* namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets,
* number of CPUs, and number of IMSIC guest files.
*
* Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
* and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization
* of virt machine physical address space.
*/
#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
#if VIRT_IMSIC_GROUP_MAX_SIZE < \
IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
#error "Can't accomodate single IMSIC group in address space"
#endif
#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
VIRT_IMSIC_GROUP_MAX_SIZE)
#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
#error "Can't accomodate all IMSIC groups in address space"
#endif
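/*
 * Worked example (editorial, not part of the patch): with
 * VIRT_CPUS_MAX_BITS = 9, VIRT_IRQCHIP_MAX_GUESTS_BITS = 3 and 4 KiB IMSIC
 * pages, IMSIC_GROUP_SIZE(9, 3) = 2^9 harts * 2^3 pages * 4 KiB = 16 MiB,
 * which exactly matches VIRT_IMSIC_GROUP_MAX_SIZE = 1 << 24. With
 * VIRT_SOCKETS_MAX = 4, VIRT_IMSIC_MAX_SIZE = 4 * 16 MiB = 64 MiB, exactly
 * filling the 0x4000000 window checked above.
 */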
static const MemMapEntry virt_memmap[] = {
[VIRT_DEBUG] = { 0x0, 0x100 },
[VIRT_MROM] = { 0x1000, 0xf000 },
@ -52,10 +76,14 @@ static const MemMapEntry virt_memmap[] = {
[VIRT_ACLINT_SSWI] = { 0x2F00000, 0x4000 },
[VIRT_PCIE_PIO] = { 0x3000000, 0x10000 },
[VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) },
[VIRT_APLIC_M] = { 0xc000000, APLIC_SIZE(VIRT_CPUS_MAX) },
[VIRT_APLIC_S] = { 0xd000000, APLIC_SIZE(VIRT_CPUS_MAX) },
[VIRT_UART0] = { 0x10000000, 0x100 },
[VIRT_VIRTIO] = { 0x10001000, 0x1000 },
[VIRT_FW_CFG] = { 0x10100000, 0x18 },
[VIRT_FLASH] = { 0x20000000, 0x4000000 },
[VIRT_IMSIC_M] = { 0x24000000, VIRT_IMSIC_MAX_SIZE },
[VIRT_IMSIC_S] = { 0x28000000, VIRT_IMSIC_MAX_SIZE },
[VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 },
[VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 },
[VIRT_DRAM] = { 0x80000000, 0x0 },
@ -133,12 +161,13 @@ static void virt_flash_map(RISCVVirtState *s,
sysmem);
}
static void create_pcie_irq_map(void *fdt, char *nodename,
uint32_t plic_phandle)
static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename,
uint32_t irqchip_phandle)
{
int pin, dev;
uint32_t
full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * FDT_INT_MAP_WIDTH] = {};
uint32_t irq_map_stride = 0;
uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS *
FDT_MAX_INT_MAP_WIDTH] = {};
uint32_t *irq_map = full_irq_map;
/* This code creates a standard swizzle of interrupts such that
@ -156,23 +185,31 @@ static void create_pcie_irq_map(void *fdt, char *nodename,
int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
int i = 0;
/* Fill PCI address cells */
irq_map[i] = cpu_to_be32(devfn << 8);
i += FDT_PCI_ADDR_CELLS;
/* Fill PCI Interrupt cells */
irq_map[i] = cpu_to_be32(pin + 1);
i += FDT_PCI_INT_CELLS;
irq_map[i++] = cpu_to_be32(plic_phandle);
i += FDT_PLIC_ADDR_CELLS;
irq_map[i] = cpu_to_be32(irq_nr);
/* Fill interrupt controller phandle and cells */
irq_map[i++] = cpu_to_be32(irqchip_phandle);
irq_map[i++] = cpu_to_be32(irq_nr);
if (s->aia_type != VIRT_AIA_TYPE_NONE) {
irq_map[i++] = cpu_to_be32(0x4);
}
irq_map += FDT_INT_MAP_WIDTH;
if (!irq_map_stride) {
irq_map_stride = i;
}
irq_map += irq_map_stride;
}
}
qemu_fdt_setprop(fdt, nodename, "interrupt-map",
full_irq_map, sizeof(full_irq_map));
qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map,
GPEX_NUM_IRQS * GPEX_NUM_IRQS *
irq_map_stride * sizeof(uint32_t));
qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask",
0x1800, 0, 0, 0x7);
@ -298,7 +335,7 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
{
int cpu;
char *name;
unsigned long addr;
unsigned long addr, size;
uint32_t aclint_cells_size;
uint32_t *aclint_mswi_cells;
uint32_t *aclint_sswi_cells;
@ -319,29 +356,38 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
}
aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2;
addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
name = g_strdup_printf("/soc/mswi@%lx", addr);
qemu_fdt_add_subnode(mc->fdt, name);
qemu_fdt_setprop_string(mc->fdt, name, "compatible", "riscv,aclint-mswi");
qemu_fdt_setprop_cells(mc->fdt, name, "reg",
0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE);
qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
aclint_mswi_cells, aclint_cells_size);
qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
g_free(name);
if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
name = g_strdup_printf("/soc/mswi@%lx", addr);
qemu_fdt_add_subnode(mc->fdt, name);
qemu_fdt_setprop_string(mc->fdt, name, "compatible",
"riscv,aclint-mswi");
qemu_fdt_setprop_cells(mc->fdt, name, "reg",
0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE);
qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
aclint_mswi_cells, aclint_cells_size);
qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
g_free(name);
}
addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
(memmap[VIRT_CLINT].size * socket);
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
addr = memmap[VIRT_CLINT].base +
(RISCV_ACLINT_DEFAULT_MTIMER_SIZE * socket);
size = RISCV_ACLINT_DEFAULT_MTIMER_SIZE;
} else {
addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
(memmap[VIRT_CLINT].size * socket);
size = memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE;
}
name = g_strdup_printf("/soc/mtimer@%lx", addr);
qemu_fdt_add_subnode(mc->fdt, name);
qemu_fdt_setprop_string(mc->fdt, name, "compatible",
"riscv,aclint-mtimer");
qemu_fdt_setprop_cells(mc->fdt, name, "reg",
0x0, addr + RISCV_ACLINT_DEFAULT_MTIME,
0x0, memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE -
RISCV_ACLINT_DEFAULT_MTIME,
0x0, size - RISCV_ACLINT_DEFAULT_MTIME,
0x0, addr + RISCV_ACLINT_DEFAULT_MTIMECMP,
0x0, RISCV_ACLINT_DEFAULT_MTIME);
qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
@ -349,19 +395,22 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
g_free(name);
addr = memmap[VIRT_ACLINT_SSWI].base +
(memmap[VIRT_ACLINT_SSWI].size * socket);
name = g_strdup_printf("/soc/sswi@%lx", addr);
qemu_fdt_add_subnode(mc->fdt, name);
qemu_fdt_setprop_string(mc->fdt, name, "compatible", "riscv,aclint-sswi");
qemu_fdt_setprop_cells(mc->fdt, name, "reg",
0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
aclint_sswi_cells, aclint_cells_size);
qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
g_free(name);
if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
addr = memmap[VIRT_ACLINT_SSWI].base +
(memmap[VIRT_ACLINT_SSWI].size * socket);
name = g_strdup_printf("/soc/sswi@%lx", addr);
qemu_fdt_add_subnode(mc->fdt, name);
qemu_fdt_setprop_string(mc->fdt, name, "compatible",
"riscv,aclint-sswi");
qemu_fdt_setprop_cells(mc->fdt, name, "reg",
0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
aclint_sswi_cells, aclint_cells_size);
qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
g_free(name);
}
g_free(aclint_mswi_cells);
g_free(aclint_mtimer_cells);
@ -404,8 +453,6 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
qemu_fdt_add_subnode(mc->fdt, plic_name);
qemu_fdt_setprop_cell(mc->fdt, plic_name,
"#address-cells", FDT_PLIC_ADDR_CELLS);
qemu_fdt_setprop_cell(mc->fdt, plic_name,
"#interrupt-cells", FDT_PLIC_INT_CELLS);
qemu_fdt_setprop_string_array(mc->fdt, plic_name, "compatible",
@ -425,17 +472,233 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
g_free(plic_cells);
}
static uint32_t imsic_num_bits(uint32_t count)
{
uint32_t ret = 0;
while (BIT(ret) < count) {
ret++;
}
return ret;
}
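/*
 * Editorial note: imsic_num_bits(count) above returns the number of bits
 * needed to index count values, i.e. ceil(log2(count)); e.g. it returns 3
 * for count == 8. It is used below to size the riscv,hart-index-bits and
 * riscv,group-index-bits device tree fields.
 */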
static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t *phandle, uint32_t *intc_phandles,
uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
{
int cpu, socket;
char *imsic_name;
MachineState *mc = MACHINE(s);
uint32_t imsic_max_hart_per_socket, imsic_guest_bits;
uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size;
*msi_m_phandle = (*phandle)++;
*msi_s_phandle = (*phandle)++;
imsic_cells = g_new0(uint32_t, mc->smp.cpus * 2);
imsic_regs = g_new0(uint32_t, riscv_socket_count(mc) * 4);
/* M-level IMSIC node */
for (cpu = 0; cpu < mc->smp.cpus; cpu++) {
imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
}
imsic_max_hart_per_socket = 0;
for (socket = 0; socket < riscv_socket_count(mc); socket++) {
imsic_addr = memmap[VIRT_IMSIC_M].base +
socket * VIRT_IMSIC_GROUP_MAX_SIZE;
imsic_size = IMSIC_HART_SIZE(0) * s->soc[socket].num_harts;
imsic_regs[socket * 4 + 0] = 0;
imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
imsic_regs[socket * 4 + 2] = 0;
imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
imsic_max_hart_per_socket = s->soc[socket].num_harts;
}
}
imsic_name = g_strdup_printf("/soc/imsics@%lx",
(unsigned long)memmap[VIRT_IMSIC_M].base);
qemu_fdt_add_subnode(mc->fdt, imsic_name);
qemu_fdt_setprop_string(mc->fdt, imsic_name, "compatible",
"riscv,imsics");
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "#interrupt-cells",
FDT_IMSIC_INT_CELLS);
qemu_fdt_setprop(mc->fdt, imsic_name, "interrupt-controller",
NULL, 0);
qemu_fdt_setprop(mc->fdt, imsic_name, "msi-controller",
NULL, 0);
qemu_fdt_setprop(mc->fdt, imsic_name, "interrupts-extended",
imsic_cells, mc->smp.cpus * sizeof(uint32_t) * 2);
qemu_fdt_setprop(mc->fdt, imsic_name, "reg", imsic_regs,
riscv_socket_count(mc) * sizeof(uint32_t) * 4);
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,num-ids",
VIRT_IRQCHIP_NUM_MSIS);
qemu_fdt_setprop_cells(mc->fdt, imsic_name, "riscv,ipi-id",
VIRT_IRQCHIP_IPI_MSI);
if (riscv_socket_count(mc) > 1) {
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,hart-index-bits",
imsic_num_bits(imsic_max_hart_per_socket));
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-bits",
imsic_num_bits(riscv_socket_count(mc)));
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-shift",
IMSIC_MMIO_GROUP_MIN_SHIFT);
}
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "phandle", *msi_m_phandle);
g_free(imsic_name);
/* S-level IMSIC node */
for (cpu = 0; cpu < mc->smp.cpus; cpu++) {
imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
}
imsic_guest_bits = imsic_num_bits(s->aia_guests + 1);
imsic_max_hart_per_socket = 0;
for (socket = 0; socket < riscv_socket_count(mc); socket++) {
imsic_addr = memmap[VIRT_IMSIC_S].base +
socket * VIRT_IMSIC_GROUP_MAX_SIZE;
imsic_size = IMSIC_HART_SIZE(imsic_guest_bits) *
s->soc[socket].num_harts;
imsic_regs[socket * 4 + 0] = 0;
imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
imsic_regs[socket * 4 + 2] = 0;
imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
imsic_max_hart_per_socket = s->soc[socket].num_harts;
}
}
imsic_name = g_strdup_printf("/soc/imsics@%lx",
(unsigned long)memmap[VIRT_IMSIC_S].base);
qemu_fdt_add_subnode(mc->fdt, imsic_name);
qemu_fdt_setprop_string(mc->fdt, imsic_name, "compatible",
"riscv,imsics");
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "#interrupt-cells",
FDT_IMSIC_INT_CELLS);
qemu_fdt_setprop(mc->fdt, imsic_name, "interrupt-controller",
NULL, 0);
qemu_fdt_setprop(mc->fdt, imsic_name, "msi-controller",
NULL, 0);
qemu_fdt_setprop(mc->fdt, imsic_name, "interrupts-extended",
imsic_cells, mc->smp.cpus * sizeof(uint32_t) * 2);
qemu_fdt_setprop(mc->fdt, imsic_name, "reg", imsic_regs,
riscv_socket_count(mc) * sizeof(uint32_t) * 4);
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,num-ids",
VIRT_IRQCHIP_NUM_MSIS);
qemu_fdt_setprop_cells(mc->fdt, imsic_name, "riscv,ipi-id",
VIRT_IRQCHIP_IPI_MSI);
if (imsic_guest_bits) {
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,guest-index-bits",
imsic_guest_bits);
}
if (riscv_socket_count(mc) > 1) {
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,hart-index-bits",
imsic_num_bits(imsic_max_hart_per_socket));
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-bits",
imsic_num_bits(riscv_socket_count(mc)));
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-shift",
IMSIC_MMIO_GROUP_MIN_SHIFT);
}
qemu_fdt_setprop_cell(mc->fdt, imsic_name, "phandle", *msi_s_phandle);
g_free(imsic_name);
g_free(imsic_regs);
g_free(imsic_cells);
}
static void create_fdt_socket_aplic(RISCVVirtState *s,
const MemMapEntry *memmap, int socket,
uint32_t msi_m_phandle,
uint32_t msi_s_phandle,
uint32_t *phandle,
uint32_t *intc_phandles,
uint32_t *aplic_phandles)
{
int cpu;
char *aplic_name;
uint32_t *aplic_cells;
unsigned long aplic_addr;
MachineState *mc = MACHINE(s);
uint32_t aplic_m_phandle, aplic_s_phandle;
aplic_m_phandle = (*phandle)++;
aplic_s_phandle = (*phandle)++;
aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
/* M-level APLIC node */
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
}
aplic_addr = memmap[VIRT_APLIC_M].base +
(memmap[VIRT_APLIC_M].size * socket);
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
qemu_fdt_add_subnode(mc->fdt, aplic_name);
qemu_fdt_setprop_string(mc->fdt, aplic_name, "compatible", "riscv,aplic");
qemu_fdt_setprop_cell(mc->fdt, aplic_name,
"#interrupt-cells", FDT_APLIC_INT_CELLS);
qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
} else {
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "msi-parent",
msi_m_phandle);
}
qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size);
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
VIRT_IRQCHIP_NUM_SOURCES);
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,children",
aplic_s_phandle);
qemu_fdt_setprop_cells(mc->fdt, aplic_name, "riscv,delegate",
aplic_s_phandle, 0x1, VIRT_IRQCHIP_NUM_SOURCES);
riscv_socket_fdt_write_id(mc, mc->fdt, aplic_name, socket);
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "phandle", aplic_m_phandle);
g_free(aplic_name);
/* S-level APLIC node */
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
}
aplic_addr = memmap[VIRT_APLIC_S].base +
(memmap[VIRT_APLIC_S].size * socket);
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
qemu_fdt_add_subnode(mc->fdt, aplic_name);
qemu_fdt_setprop_string(mc->fdt, aplic_name, "compatible", "riscv,aplic");
qemu_fdt_setprop_cell(mc->fdt, aplic_name,
"#interrupt-cells", FDT_APLIC_INT_CELLS);
qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
} else {
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "msi-parent",
msi_s_phandle);
}
qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size);
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
VIRT_IRQCHIP_NUM_SOURCES);
riscv_socket_fdt_write_id(mc, mc->fdt, aplic_name, socket);
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "phandle", aplic_s_phandle);
g_free(aplic_name);
g_free(aplic_cells);
aplic_phandles[socket] = aplic_s_phandle;
}
static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
bool is_32_bit, uint32_t *phandle,
uint32_t *irq_mmio_phandle,
uint32_t *irq_pcie_phandle,
uint32_t *irq_virtio_phandle)
uint32_t *irq_virtio_phandle,
uint32_t *msi_pcie_phandle)
{
int socket;
char *clust_name;
uint32_t *intc_phandles;
int socket, phandle_pos;
MachineState *mc = MACHINE(s);
uint32_t xplic_phandles[MAX_NODES];
uint32_t msi_m_phandle = 0, msi_s_phandle = 0;
uint32_t *intc_phandles, xplic_phandles[MAX_NODES];
qemu_fdt_add_subnode(mc->fdt, "/cpus");
qemu_fdt_setprop_cell(mc->fdt, "/cpus", "timebase-frequency",
@ -444,32 +707,55 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_setprop_cell(mc->fdt, "/cpus", "#address-cells", 0x1);
qemu_fdt_add_subnode(mc->fdt, "/cpus/cpu-map");
intc_phandles = g_new0(uint32_t, mc->smp.cpus);
phandle_pos = mc->smp.cpus;
for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) {
phandle_pos -= s->soc[socket].num_harts;
clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket);
qemu_fdt_add_subnode(mc->fdt, clust_name);
intc_phandles = g_new0(uint32_t, s->soc[socket].num_harts);
create_fdt_socket_cpus(s, socket, clust_name, phandle,
is_32_bit, intc_phandles);
is_32_bit, &intc_phandles[phandle_pos]);
create_fdt_socket_memory(s, memmap, socket);
g_free(clust_name);
if (!kvm_enabled()) {
if (s->have_aclint) {
create_fdt_socket_aclint(s, memmap, socket, intc_phandles);
create_fdt_socket_aclint(s, memmap, socket,
&intc_phandles[phandle_pos]);
} else {
create_fdt_socket_clint(s, memmap, socket, intc_phandles);
create_fdt_socket_clint(s, memmap, socket,
&intc_phandles[phandle_pos]);
}
}
create_fdt_socket_plic(s, memmap, socket, phandle,
intc_phandles, xplic_phandles);
g_free(intc_phandles);
g_free(clust_name);
}
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
create_fdt_imsic(s, memmap, phandle, intc_phandles,
&msi_m_phandle, &msi_s_phandle);
*msi_pcie_phandle = msi_s_phandle;
}
phandle_pos = mc->smp.cpus;
for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) {
phandle_pos -= s->soc[socket].num_harts;
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
create_fdt_socket_plic(s, memmap, socket, phandle,
&intc_phandles[phandle_pos], xplic_phandles);
} else {
create_fdt_socket_aplic(s, memmap, socket,
msi_m_phandle, msi_s_phandle, phandle,
&intc_phandles[phandle_pos], xplic_phandles);
}
}
g_free(intc_phandles);
for (socket = 0; socket < riscv_socket_count(mc); socket++) {
if (socket == 0) {
*irq_mmio_phandle = xplic_phandles[socket];
@ -505,13 +791,20 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
0x0, memmap[VIRT_VIRTIO].size);
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent",
irq_virtio_phandle);
qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", VIRTIO_IRQ + i);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
qemu_fdt_setprop_cell(mc->fdt, name, "interrupts",
VIRTIO_IRQ + i);
} else {
qemu_fdt_setprop_cells(mc->fdt, name, "interrupts",
VIRTIO_IRQ + i, 0x4);
}
g_free(name);
}
}
static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t irq_pcie_phandle)
uint32_t irq_pcie_phandle,
uint32_t msi_pcie_phandle)
{
char *name;
MachineState *mc = MACHINE(s);
@ -531,6 +824,9 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_setprop_cells(mc->fdt, name, "bus-range", 0,
memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
qemu_fdt_setprop(mc->fdt, name, "dma-coherent", NULL, 0);
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
qemu_fdt_setprop_cell(mc->fdt, name, "msi-parent", msi_pcie_phandle);
}
qemu_fdt_setprop_cells(mc->fdt, name, "reg", 0,
memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size);
qemu_fdt_setprop_sized_cells(mc->fdt, name, "ranges",
@ -543,7 +839,7 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
2, virt_high_pcie_memmap.base,
2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size);
create_pcie_irq_map(mc->fdt, name, irq_pcie_phandle);
create_pcie_irq_map(s, mc->fdt, name, irq_pcie_phandle);
g_free(name);
}
@ -602,7 +898,11 @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
0x0, memmap[VIRT_UART0].size);
qemu_fdt_setprop_cell(mc->fdt, name, "clock-frequency", 3686400);
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", irq_mmio_phandle);
qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", UART0_IRQ);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", UART0_IRQ);
} else {
qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", UART0_IRQ, 0x4);
}
qemu_fdt_add_subnode(mc->fdt, "/chosen");
qemu_fdt_setprop_string(mc->fdt, "/chosen", "stdout-path", name);
@ -623,7 +923,11 @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size);
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent",
irq_mmio_phandle);
qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", RTC_IRQ);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", RTC_IRQ);
} else {
qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", RTC_IRQ, 0x4);
}
g_free(name);
}
@ -648,7 +952,7 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
uint64_t mem_size, const char *cmdline, bool is_32_bit)
{
MachineState *mc = MACHINE(s);
uint32_t phandle = 1, irq_mmio_phandle = 1;
uint32_t phandle = 1, irq_mmio_phandle = 1, msi_pcie_phandle = 1;
uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1;
if (mc->dtb) {
@ -678,11 +982,12 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_setprop_cell(mc->fdt, "/soc", "#address-cells", 0x2);
create_fdt_sockets(s, memmap, is_32_bit, &phandle,
&irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle);
&irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle,
&msi_pcie_phandle);
create_fdt_virtio(s, memmap, irq_virtio_phandle);
create_fdt_pcie(s, memmap, irq_pcie_phandle);
create_fdt_pcie(s, memmap, irq_pcie_phandle, msi_pcie_phandle);
create_fdt_reset(s, memmap, &phandle);
@ -704,7 +1009,7 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
hwaddr high_mmio_base,
hwaddr high_mmio_size,
hwaddr pio_base,
DeviceState *plic)
DeviceState *irqchip)
{
DeviceState *dev;
MemoryRegion *ecam_alias, *ecam_reg;
@ -738,7 +1043,7 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base);
for (i = 0; i < GPEX_NUM_IRQS; i++) {
irq = qdev_get_gpio_in(plic, PCIE_IRQ + i);
irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
@ -769,18 +1074,100 @@ static FWCfgState *create_fw_cfg(const MachineState *mc)
return fw_cfg;
}
static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
int base_hartid, int hart_count)
{
DeviceState *ret;
char *plic_hart_config;
/* Per-socket PLIC hart topology configuration string */
plic_hart_config = riscv_plic_hart_config_string(hart_count);
/* Per-socket PLIC */
ret = sifive_plic_create(
memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size,
plic_hart_config, hart_count, base_hartid,
VIRT_IRQCHIP_NUM_SOURCES,
((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1),
VIRT_PLIC_PRIORITY_BASE,
VIRT_PLIC_PENDING_BASE,
VIRT_PLIC_ENABLE_BASE,
VIRT_PLIC_ENABLE_STRIDE,
VIRT_PLIC_CONTEXT_BASE,
VIRT_PLIC_CONTEXT_STRIDE,
memmap[VIRT_PLIC].size);
g_free(plic_hart_config);
return ret;
}
static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
const MemMapEntry *memmap, int socket,
int base_hartid, int hart_count)
{
int i;
hwaddr addr;
uint32_t guest_bits;
DeviceState *aplic_m;
bool msimode = (aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) ? true : false;
if (msimode) {
/* Per-socket M-level IMSICs */
addr = memmap[VIRT_IMSIC_M].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
for (i = 0; i < hart_count; i++) {
riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
base_hartid + i, true, 1,
VIRT_IRQCHIP_NUM_MSIS);
}
/* Per-socket S-level IMSICs */
guest_bits = imsic_num_bits(aia_guests + 1);
addr = memmap[VIRT_IMSIC_S].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
for (i = 0; i < hart_count; i++) {
riscv_imsic_create(addr + i * IMSIC_HART_SIZE(guest_bits),
base_hartid + i, false, 1 + aia_guests,
VIRT_IRQCHIP_NUM_MSIS);
}
}
/* Per-socket M-level APLIC */
aplic_m = riscv_aplic_create(
memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size,
memmap[VIRT_APLIC_M].size,
(msimode) ? 0 : base_hartid,
(msimode) ? 0 : hart_count,
VIRT_IRQCHIP_NUM_SOURCES,
VIRT_IRQCHIP_NUM_PRIO_BITS,
msimode, true, NULL);
if (aplic_m) {
/* Per-socket S-level APLIC */
riscv_aplic_create(
memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size,
memmap[VIRT_APLIC_S].size,
(msimode) ? 0 : base_hartid,
(msimode) ? 0 : hart_count,
VIRT_IRQCHIP_NUM_SOURCES,
VIRT_IRQCHIP_NUM_PRIO_BITS,
msimode, false, aplic_m);
}
return aplic_m;
}
static void virt_machine_init(MachineState *machine)
{
const MemMapEntry *memmap = virt_memmap;
RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
char *plic_hart_config, *soc_name;
char *soc_name;
target_ulong start_addr = memmap[VIRT_DRAM].base;
target_ulong firmware_end_addr, kernel_start_addr;
uint32_t fdt_load_addr;
uint64_t kernel_entry;
DeviceState *mmio_plic, *virtio_plic, *pcie_plic;
DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip;
int i, base_hartid, hart_count;
/* Check socket count limit */
@ -791,7 +1178,7 @@ static void virt_machine_init(MachineState *machine)
}
/* Initialize sockets */
mmio_plic = virtio_plic = pcie_plic = NULL;
mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL;
for (i = 0; i < riscv_socket_count(machine); i++) {
if (!riscv_socket_check_hartids(machine, i)) {
error_report("discontinuous hartids in socket%d", i);
@ -823,56 +1210,68 @@ static void virt_machine_init(MachineState *machine)
sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort);
if (!kvm_enabled()) {
/* Per-socket CLINT */
riscv_aclint_swi_create(
memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(
memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size +
RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
/* Per-socket ACLINT SSWI */
if (s->have_aclint) {
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* Per-socket ACLINT MTIMER */
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
} else {
/* Per-socket ACLINT MSWI, MTIMER, and SSWI */
riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size +
RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
i * memmap[VIRT_ACLINT_SSWI].size,
base_hartid, hart_count, true);
}
} else {
/* Per-socket SiFive CLINT */
riscv_aclint_swi_create(
memmap[VIRT_ACLINT_SSWI].base +
i * memmap[VIRT_ACLINT_SSWI].size,
base_hartid, hart_count, true);
memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
}
}
/* Per-socket PLIC hart topology configuration string */
plic_hart_config = riscv_plic_hart_config_string(hart_count);
/* Per-socket interrupt controller */
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
s->irqchip[i] = virt_create_plic(memmap, i,
base_hartid, hart_count);
} else {
s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
memmap, i, base_hartid,
hart_count);
}
/* Per-socket PLIC */
s->plic[i] = sifive_plic_create(
memmap[VIRT_PLIC].base + i * memmap[VIRT_PLIC].size,
plic_hart_config, hart_count, base_hartid,
VIRT_PLIC_NUM_SOURCES,
VIRT_PLIC_NUM_PRIORITIES,
VIRT_PLIC_PRIORITY_BASE,
VIRT_PLIC_PENDING_BASE,
VIRT_PLIC_ENABLE_BASE,
VIRT_PLIC_ENABLE_STRIDE,
VIRT_PLIC_CONTEXT_BASE,
VIRT_PLIC_CONTEXT_STRIDE,
memmap[VIRT_PLIC].size);
g_free(plic_hart_config);
/* Try to use different PLIC instance based device type */
/* Try to use different IRQCHIP instance based device type */
if (i == 0) {
mmio_plic = s->plic[i];
virtio_plic = s->plic[i];
pcie_plic = s->plic[i];
mmio_irqchip = s->irqchip[i];
virtio_irqchip = s->irqchip[i];
pcie_irqchip = s->irqchip[i];
}
if (i == 1) {
virtio_plic = s->plic[i];
pcie_plic = s->plic[i];
virtio_irqchip = s->irqchip[i];
pcie_irqchip = s->irqchip[i];
}
if (i == 2) {
pcie_plic = s->plic[i];
pcie_irqchip = s->irqchip[i];
}
}
@ -990,7 +1389,7 @@ static void virt_machine_init(MachineState *machine)
for (i = 0; i < VIRTIO_COUNT; i++) {
sysbus_create_simple("virtio-mmio",
memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
qdev_get_gpio_in(DEVICE(virtio_plic), VIRTIO_IRQ + i));
qdev_get_gpio_in(DEVICE(virtio_irqchip), VIRTIO_IRQ + i));
}
gpex_pcie_init(system_memory,
@ -1001,14 +1400,14 @@ static void virt_machine_init(MachineState *machine)
virt_high_pcie_memmap.base,
virt_high_pcie_memmap.size,
memmap[VIRT_PCIE_PIO].base,
DEVICE(pcie_plic));
DEVICE(pcie_irqchip));
serial_mm_init(system_memory, memmap[VIRT_UART0].base,
0, qdev_get_gpio_in(DEVICE(mmio_plic), UART0_IRQ), 399193,
0, qdev_get_gpio_in(DEVICE(mmio_irqchip), UART0_IRQ), 399193,
serial_hd(0), DEVICE_LITTLE_ENDIAN);
sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
qdev_get_gpio_in(DEVICE(mmio_plic), RTC_IRQ));
qdev_get_gpio_in(DEVICE(mmio_irqchip), RTC_IRQ));
virt_flash_create(s);
@ -1024,6 +1423,64 @@ static void virt_machine_instance_init(Object *obj)
{
}
static char *virt_get_aia_guests(Object *obj, Error **errp)
{
RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
char val[32];
sprintf(val, "%d", s->aia_guests);
return g_strdup(val);
}
static void virt_set_aia_guests(Object *obj, const char *val, Error **errp)
{
RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
s->aia_guests = atoi(val);
if (s->aia_guests < 0 || s->aia_guests > VIRT_IRQCHIP_MAX_GUESTS) {
error_setg(errp, "Invalid number of AIA IMSIC guests");
error_append_hint(errp, "Valid values be between 0 and %d.\n",
VIRT_IRQCHIP_MAX_GUESTS);
}
}
static char *virt_get_aia(Object *obj, Error **errp)
{
RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
const char *val;
switch (s->aia_type) {
case VIRT_AIA_TYPE_APLIC:
val = "aplic";
break;
case VIRT_AIA_TYPE_APLIC_IMSIC:
val = "aplic-imsic";
break;
default:
val = "none";
break;
};
return g_strdup(val);
}
static void virt_set_aia(Object *obj, const char *val, Error **errp)
{
RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
if (!strcmp(val, "none")) {
s->aia_type = VIRT_AIA_TYPE_NONE;
} else if (!strcmp(val, "aplic")) {
s->aia_type = VIRT_AIA_TYPE_APLIC;
} else if (!strcmp(val, "aplic-imsic")) {
s->aia_type = VIRT_AIA_TYPE_APLIC_IMSIC;
} else {
error_setg(errp, "Invalid AIA interrupt controller type");
error_append_hint(errp, "Valid values are none, aplic, and "
"aplic-imsic.\n");
}
}
static bool virt_get_aclint(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
@ -1042,6 +1499,7 @@ static void virt_set_aclint(Object *obj, bool value, Error **errp)
static void virt_machine_class_init(ObjectClass *oc, void *data)
{
char str[128];
MachineClass *mc = MACHINE_CLASS(oc);
mc->desc = "RISC-V VirtIO board";
@ -1062,6 +1520,20 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "aclint",
"Set on/off to enable/disable "
"emulating ACLINT devices");
object_class_property_add_str(oc, "aia", virt_get_aia,
virt_set_aia);
object_class_property_set_description(oc, "aia",
"Set type of AIA interrupt "
"conttoller. Valid values are "
"none, aplic, and aplic-imsic.");
object_class_property_add_str(oc, "aia-guests",
virt_get_aia_guests,
virt_set_aia_guests);
sprintf(str, "Set number of guest MMIO pages for AIA IMSIC. Valid value "
"should be between 0 and %d.", VIRT_IRQCHIP_MAX_GUESTS);
object_class_property_set_description(oc, "aia-guests", str);
}
static const TypeInfo virt_machine_typeinfo = {


@ -0,0 +1,68 @@
/*
* RISC-V IMSIC (Incoming Message Signal Interrupt Controller) interface
*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef HW_RISCV_IMSIC_H
#define HW_RISCV_IMSIC_H
#include "hw/sysbus.h"
#include "qom/object.h"
#define TYPE_RISCV_IMSIC "riscv.imsic"
typedef struct RISCVIMSICState RISCVIMSICState;
DECLARE_INSTANCE_CHECKER(RISCVIMSICState, RISCV_IMSIC, TYPE_RISCV_IMSIC)
#define IMSIC_MMIO_PAGE_SHIFT 12
#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
#define IMSIC_MMIO_SIZE(__num_pages) ((__num_pages) * IMSIC_MMIO_PAGE_SZ)
#define IMSIC_MMIO_HART_GUEST_MAX_BTIS 6
#define IMSIC_MMIO_GROUP_MIN_SHIFT 24
#define IMSIC_HART_NUM_GUESTS(__guest_bits) \
(1U << (__guest_bits))
#define IMSIC_HART_SIZE(__guest_bits) \
(IMSIC_HART_NUM_GUESTS(__guest_bits) * IMSIC_MMIO_PAGE_SZ)
#define IMSIC_GROUP_NUM_HARTS(__hart_bits) \
(1U << (__hart_bits))
#define IMSIC_GROUP_SIZE(__hart_bits, __guest_bits) \
(IMSIC_GROUP_NUM_HARTS(__hart_bits) * IMSIC_HART_SIZE(__guest_bits))
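/*
 * Editorial example of the layout implied by these macros:
 * IMSIC_HART_SIZE(0) = 1 page = 4 KiB (a single M-mode interrupt file per
 * hart), while IMSIC_HART_SIZE(3) = 8 pages = 32 KiB (one S-mode file plus
 * up to 7 guest/VS-level files).
 */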
struct RISCVIMSICState {
/*< private >*/
SysBusDevice parent_obj;
qemu_irq *external_irqs;
/*< public >*/
MemoryRegion mmio;
uint32_t num_eistate;
uint32_t *eidelivery;
uint32_t *eithreshold;
uint32_t *eistate;
/* config */
bool mmode;
uint32_t hartid;
uint32_t num_pages;
uint32_t num_irqs;
};
DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
uint32_t num_pages, uint32_t num_ids);
#endif


@ -57,8 +57,10 @@ enum {
IBEX_DEV_FLASH,
IBEX_DEV_FLASH_VIRTUAL,
IBEX_DEV_UART,
IBEX_DEV_SPI_DEVICE,
IBEX_DEV_SPI_HOST0,
IBEX_DEV_SPI_HOST1,
IBEX_DEV_GPIO,
IBEX_DEV_SPI,
IBEX_DEV_I2C,
IBEX_DEV_PATTGEN,
IBEX_DEV_TIMER,


@ -24,26 +24,36 @@
#include "hw/block/flash.h"
#include "qom/object.h"
#define VIRT_CPUS_MAX 32
#define VIRT_SOCKETS_MAX 8
#define VIRT_CPUS_MAX_BITS 9
#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
#define VIRT_SOCKETS_MAX_BITS 2
#define VIRT_SOCKETS_MAX (1 << VIRT_SOCKETS_MAX_BITS)
#define TYPE_RISCV_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
typedef struct RISCVVirtState RISCVVirtState;
DECLARE_INSTANCE_CHECKER(RISCVVirtState, RISCV_VIRT_MACHINE,
TYPE_RISCV_VIRT_MACHINE)
typedef enum RISCVVirtAIAType {
VIRT_AIA_TYPE_NONE = 0,
VIRT_AIA_TYPE_APLIC,
VIRT_AIA_TYPE_APLIC_IMSIC,
} RISCVVirtAIAType;
struct RISCVVirtState {
/*< private >*/
MachineState parent;
/*< public >*/
RISCVHartArrayState soc[VIRT_SOCKETS_MAX];
DeviceState *plic[VIRT_SOCKETS_MAX];
DeviceState *irqchip[VIRT_SOCKETS_MAX];
PFlashCFI01 *flash[2];
FWCfgState *fw_cfg;
int fdt_size;
bool have_aclint;
RISCVVirtAIAType aia_type;
int aia_guests;
};
enum {
@ -54,9 +64,13 @@ enum {
VIRT_CLINT,
VIRT_ACLINT_SSWI,
VIRT_PLIC,
VIRT_APLIC_M,
VIRT_APLIC_S,
VIRT_UART0,
VIRT_VIRTIO,
VIRT_FW_CFG,
VIRT_IMSIC_M,
VIRT_IMSIC_S,
VIRT_FLASH,
VIRT_DRAM,
VIRT_PCIE_MMIO,
@ -73,8 +87,13 @@ enum {
VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */
};
#define VIRT_PLIC_NUM_SOURCES 127
#define VIRT_PLIC_NUM_PRIORITIES 7
#define VIRT_IRQCHIP_IPI_MSI 1
#define VIRT_IRQCHIP_NUM_MSIS 255
#define VIRT_IRQCHIP_NUM_SOURCES VIRTIO_NDEV
#define VIRT_IRQCHIP_NUM_PRIO_BITS 3
#define VIRT_IRQCHIP_MAX_GUESTS_BITS 3
#define VIRT_IRQCHIP_MAX_GUESTS ((1U << VIRT_IRQCHIP_MAX_GUESTS_BITS) - 1U)
#define VIRT_PLIC_PRIORITY_BASE 0x04
#define VIRT_PLIC_PENDING_BASE 0x1000
#define VIRT_PLIC_ENABLE_BASE 0x2000
@ -86,9 +105,15 @@ enum {
#define FDT_PCI_ADDR_CELLS 3
#define FDT_PCI_INT_CELLS 1
#define FDT_PLIC_ADDR_CELLS 0
#define FDT_PLIC_INT_CELLS 1
#define FDT_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + 1 + \
FDT_PLIC_ADDR_CELLS + FDT_PLIC_INT_CELLS)
#define FDT_APLIC_INT_CELLS 2
#define FDT_IMSIC_INT_CELLS 0
#define FDT_MAX_INT_CELLS 2
#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
1 + FDT_MAX_INT_CELLS)
#define FDT_PLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
1 + FDT_PLIC_INT_CELLS)
#define FDT_APLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
1 + FDT_APLIC_INT_CELLS)
#endif


@ -587,6 +587,11 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
cpu->cfg.ext_d = true;
}
if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx ||
cpu->cfg.ext_zhinxmin) {
cpu->cfg.ext_zfinx = true;
}
/* Set the ISA extensions, checks should have happened above */
if (cpu->cfg.ext_i) {
ext |= RVI;
@ -665,6 +670,13 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
if (cpu->cfg.ext_j) {
ext |= RVJ;
}
if (cpu->cfg.ext_zfinx && ((ext & (RVF | RVD)) || cpu->cfg.ext_zfh ||
cpu->cfg.ext_zfhmin)) {
error_setg(errp,
"'Zfinx' cannot be supported together with 'F', 'D', 'Zfh',"
" 'Zfhmin'");
return;
}
set_misa(env, env->misa_mxl, ext);
}
@ -783,6 +795,11 @@ static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
/* Vendor-specific custom extensions */
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),


@ -362,8 +362,12 @@ struct RISCVCPUConfig {
bool ext_svinval;
bool ext_svnapot;
bool ext_svpbmt;
bool ext_zdinx;
bool ext_zfh;
bool ext_zfhmin;
bool ext_zfinx;
bool ext_zhinx;
bool ext_zhinxmin;
bool ext_zve32f;
bool ext_zve64f;


@ -466,9 +466,13 @@ bool riscv_cpu_vector_enabled(CPURISCVState *env)
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
MSTATUS64_UXL | MSTATUS_VS;
if (riscv_has_ext(env, RVF)) {
mstatus_mask |= MSTATUS_FS;
}
bool current_virt = riscv_cpu_virt_enabled(env);
g_assert(riscv_has_ext(env, RVH));


@ -39,7 +39,8 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
!RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
return RISCV_EXCP_ILLEGAL_INST;
}
#endif
@ -302,7 +303,9 @@ static RISCVException write_fflags(CPURISCVState *env, int csrno,
target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_FS;
if (riscv_has_ext(env, RVF)) {
env->mstatus |= MSTATUS_FS;
}
#endif
riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
return RISCV_EXCP_NONE;
@ -319,7 +322,9 @@ static RISCVException write_frm(CPURISCVState *env, int csrno,
target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_FS;
if (riscv_has_ext(env, RVF)) {
env->mstatus |= MSTATUS_FS;
}
#endif
env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
return RISCV_EXCP_NONE;
@ -337,7 +342,9 @@ static RISCVException write_fcsr(CPURISCVState *env, int csrno,
target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_FS;
if (riscv_has_ext(env, RVF)) {
env->mstatus |= MSTATUS_FS;
}
#endif
env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
@ -653,10 +660,14 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
tlb_flush(env_cpu(env));
}
mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
MSTATUS_TW | MSTATUS_VS;
if (riscv_has_ext(env, RVF)) {
mask |= MSTATUS_FS;
}
if (xl != MXL_RV32 || env->debugger) {
/*
* RV32: MPV and GVA are not in mstatus. The current plan is to
@ -788,6 +799,10 @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
if (!(val & RVF)) {
env->mstatus &= ~MSTATUS_FS;
}
/* flush translation cache */
tb_flush(env_cpu(env));
env->misa_ext = val;

View File

@ -89,19 +89,21 @@ void helper_set_rod_rounding_mode(CPURISCVState *env)
static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
uint64_t rs3, int flags)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
float16 frs3 = check_nanbox_h(rs3);
return nanbox_h(float16_muladd(frs1, frs2, frs3, flags, &env->fp_status));
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
float16 frs3 = check_nanbox_h(env, rs3);
return nanbox_h(env, float16_muladd(frs1, frs2, frs3, flags,
&env->fp_status));
}
static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
uint64_t rs3, int flags)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
float32 frs3 = check_nanbox_s(rs3);
return nanbox_s(float32_muladd(frs1, frs2, frs3, flags, &env->fp_status));
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
float32 frs3 = check_nanbox_s(env, rs3);
return nanbox_s(env, float32_muladd(frs1, frs2, frs3, flags,
&env->fp_status));
}
uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
@ -183,124 +185,124 @@ uint64_t helper_fnmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t helper_fadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
return nanbox_s(float32_add(frs1, frs2, &env->fp_status));
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return nanbox_s(env, float32_add(frs1, frs2, &env->fp_status));
}
uint64_t helper_fsub_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
return nanbox_s(float32_sub(frs1, frs2, &env->fp_status));
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return nanbox_s(env, float32_sub(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmul_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
return nanbox_s(float32_mul(frs1, frs2, &env->fp_status));
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return nanbox_s(env, float32_mul(frs1, frs2, &env->fp_status));
}
uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
return nanbox_s(float32_div(frs1, frs2, &env->fp_status));
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return nanbox_s(env, float32_div(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ?
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
float32_minnum(frs1, frs2, &env->fp_status) :
float32_minimum_number(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ?
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
float32_maxnum(frs1, frs2, &env->fp_status) :
float32_maximum_number(frs1, frs2, &env->fp_status));
}
uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
return nanbox_s(float32_sqrt(frs1, &env->fp_status));
float32 frs1 = check_nanbox_s(env, rs1);
return nanbox_s(env, float32_sqrt(frs1, &env->fp_status));
}
target_ulong helper_fle_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return float32_le(frs1, frs2, &env->fp_status);
}
target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return float32_lt(frs1, frs2, &env->fp_status);
}
target_ulong helper_feq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs2 = check_nanbox_s(rs2);
float32 frs1 = check_nanbox_s(env, rs1);
float32 frs2 = check_nanbox_s(env, rs2);
return float32_eq_quiet(frs1, frs2, &env->fp_status);
}
target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs1 = check_nanbox_s(env, rs1);
return float32_to_int32(frs1, &env->fp_status);
}
target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs1 = check_nanbox_s(env, rs1);
return (int32_t)float32_to_uint32(frs1, &env->fp_status);
}
target_ulong helper_fcvt_l_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs1 = check_nanbox_s(env, rs1);
return float32_to_int64(frs1, &env->fp_status);
}
target_ulong helper_fcvt_lu_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs1 = check_nanbox_s(env, rs1);
return float32_to_uint64(frs1, &env->fp_status);
}
uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1)
{
return nanbox_s(int32_to_float32((int32_t)rs1, &env->fp_status));
return nanbox_s(env, int32_to_float32((int32_t)rs1, &env->fp_status));
}
uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1)
{
return nanbox_s(uint32_to_float32((uint32_t)rs1, &env->fp_status));
return nanbox_s(env, uint32_to_float32((uint32_t)rs1, &env->fp_status));
}
uint64_t helper_fcvt_s_l(CPURISCVState *env, target_ulong rs1)
{
return nanbox_s(int64_to_float32(rs1, &env->fp_status));
return nanbox_s(env, int64_to_float32(rs1, &env->fp_status));
}
uint64_t helper_fcvt_s_lu(CPURISCVState *env, target_ulong rs1)
{
return nanbox_s(uint64_to_float32(rs1, &env->fp_status));
return nanbox_s(env, uint64_to_float32(rs1, &env->fp_status));
}
target_ulong helper_fclass_s(uint64_t rs1)
target_ulong helper_fclass_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs1 = check_nanbox_s(env, rs1);
return fclass_s(frs1);
}
@ -340,12 +342,12 @@ uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
{
return nanbox_s(float64_to_float32(rs1, &env->fp_status));
return nanbox_s(env, float64_to_float32(rs1, &env->fp_status));
}
uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
float32 frs1 = check_nanbox_s(env, rs1);
return float32_to_float64(frs1, &env->fp_status);
}
@ -416,146 +418,146 @@ target_ulong helper_fclass_d(uint64_t frs1)
uint64_t helper_fadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(float16_add(frs1, frs2, &env->fp_status));
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return nanbox_h(env, float16_add(frs1, frs2, &env->fp_status));
}
uint64_t helper_fsub_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(float16_sub(frs1, frs2, &env->fp_status));
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return nanbox_h(env, float16_sub(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmul_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(float16_mul(frs1, frs2, &env->fp_status));
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return nanbox_h(env, float16_mul(frs1, frs2, &env->fp_status));
}
uint64_t helper_fdiv_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(float16_div(frs1, frs2, &env->fp_status));
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return nanbox_h(env, float16_div(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmin_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(env->priv_ver < PRIV_VERSION_1_11_0 ?
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
float16_minnum(frs1, frs2, &env->fp_status) :
float16_minimum_number(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(env->priv_ver < PRIV_VERSION_1_11_0 ?
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
float16_maxnum(frs1, frs2, &env->fp_status) :
float16_maximum_number(frs1, frs2, &env->fp_status));
}
uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return nanbox_h(float16_sqrt(frs1, &env->fp_status));
float16 frs1 = check_nanbox_h(env, rs1);
return nanbox_h(env, float16_sqrt(frs1, &env->fp_status));
}
target_ulong helper_fle_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return float16_le(frs1, frs2, &env->fp_status);
}
target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return float16_lt(frs1, frs2, &env->fp_status);
}
target_ulong helper_feq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
float16 frs1 = check_nanbox_h(env, rs1);
float16 frs2 = check_nanbox_h(env, rs2);
return float16_eq_quiet(frs1, frs2, &env->fp_status);
}
target_ulong helper_fclass_h(uint64_t rs1)
target_ulong helper_fclass_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs1 = check_nanbox_h(env, rs1);
return fclass_h(frs1);
}
target_ulong helper_fcvt_w_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs1 = check_nanbox_h(env, rs1);
return float16_to_int32(frs1, &env->fp_status);
}
target_ulong helper_fcvt_wu_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs1 = check_nanbox_h(env, rs1);
return (int32_t)float16_to_uint32(frs1, &env->fp_status);
}
target_ulong helper_fcvt_l_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs1 = check_nanbox_h(env, rs1);
return float16_to_int64(frs1, &env->fp_status);
}
target_ulong helper_fcvt_lu_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs1 = check_nanbox_h(env, rs1);
return float16_to_uint64(frs1, &env->fp_status);
}
uint64_t helper_fcvt_h_w(CPURISCVState *env, target_ulong rs1)
{
return nanbox_h(int32_to_float16((int32_t)rs1, &env->fp_status));
return nanbox_h(env, int32_to_float16((int32_t)rs1, &env->fp_status));
}
uint64_t helper_fcvt_h_wu(CPURISCVState *env, target_ulong rs1)
{
return nanbox_h(uint32_to_float16((uint32_t)rs1, &env->fp_status));
return nanbox_h(env, uint32_to_float16((uint32_t)rs1, &env->fp_status));
}
uint64_t helper_fcvt_h_l(CPURISCVState *env, target_ulong rs1)
{
return nanbox_h(int64_to_float16(rs1, &env->fp_status));
return nanbox_h(env, int64_to_float16(rs1, &env->fp_status));
}
uint64_t helper_fcvt_h_lu(CPURISCVState *env, target_ulong rs1)
{
return nanbox_h(uint64_to_float16(rs1, &env->fp_status));
return nanbox_h(env, uint64_to_float16(rs1, &env->fp_status));
}
uint64_t helper_fcvt_h_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
return nanbox_h(float32_to_float16(frs1, true, &env->fp_status));
float32 frs1 = check_nanbox_s(env, rs1);
return nanbox_h(env, float32_to_float16(frs1, true, &env->fp_status));
}
uint64_t helper_fcvt_s_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return nanbox_s(float16_to_float32(frs1, true, &env->fp_status));
float16 frs1 = check_nanbox_h(env, rs1);
return nanbox_s(env, float16_to_float32(frs1, true, &env->fp_status));
}
uint64_t helper_fcvt_h_d(CPURISCVState *env, uint64_t rs1)
{
return nanbox_h(float64_to_float16(rs1, true, &env->fp_status));
return nanbox_h(env, float64_to_float16(rs1, true, &env->fp_status));
}
uint64_t helper_fcvt_d_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs1 = check_nanbox_h(env, rs1);
return float16_to_float64(frs1, true, &env->fp_status);
}

View File

@ -38,7 +38,7 @@ DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64)
DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, tl, env, i64)
/* Floating Point - Double Precision */
DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
@ -90,7 +90,7 @@ DEF_HELPER_FLAGS_2(fcvt_h_w, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_wu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_1(fclass_h, TCG_CALL_NO_RWG_SE, tl, i64)
DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64)
/* Special functions */
DEF_HELPER_2(csrr, tl, env, int)

View File

@ -19,25 +19,25 @@
*/
#define REQUIRE_ZBA(ctx) do { \
if (ctx->cfg_ptr->ext_zba) { \
if (!ctx->cfg_ptr->ext_zba) { \
return false; \
} \
} while (0)
#define REQUIRE_ZBB(ctx) do { \
if (ctx->cfg_ptr->ext_zbb) { \
if (!ctx->cfg_ptr->ext_zbb) { \
return false; \
} \
} while (0)
#define REQUIRE_ZBC(ctx) do { \
if (ctx->cfg_ptr->ext_zbc) { \
if (!ctx->cfg_ptr->ext_zbc) { \
return false; \
} \
} while (0)
#define REQUIRE_ZBS(ctx) do { \
if (ctx->cfg_ptr->ext_zbs) { \
if (!ctx->cfg_ptr->ext_zbs) { \
return false; \
} \
} while (0)

View File

@ -18,6 +18,19 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define REQUIRE_ZDINX_OR_D(ctx) do { \
if (!ctx->cfg_ptr->ext_zdinx) { \
REQUIRE_EXT(ctx, RVD); \
} \
} while (0)
#define REQUIRE_EVEN(ctx, reg) do { \
if (ctx->cfg_ptr->ext_zdinx && (get_xl(ctx) == MXL_RV32) && \
((reg) & 0x1)) { \
return false; \
} \
} while (0)
static bool trans_fld(DisasContext *ctx, arg_fld *a)
{
TCGv addr;
@ -47,10 +60,17 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fmadd_d(dest, cpu_env, src1, src2, src3);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -58,10 +78,17 @@ static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fmsub_d(dest, cpu_env, src1, src2, src3);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -69,10 +96,17 @@ static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fnmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fnmsub_d(dest, cpu_env, src1, src2, src3);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -80,10 +114,17 @@ static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fnmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fnmadd_d(dest, cpu_env, src1, src2, src3);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -91,12 +132,16 @@ static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fadd_d(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fadd_d(dest, cpu_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -104,12 +149,16 @@ static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fsub_d(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fsub_d(dest, cpu_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -117,12 +166,16 @@ static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fmul_d(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fmul_d(dest, cpu_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -130,12 +183,16 @@ static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fdiv_d(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fdiv_d(dest, cpu_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -143,23 +200,34 @@ static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fsqrt_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fsqrt_d(dest, cpu_env, src1);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
{
REQUIRE_FPU;
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
if (a->rs1 == a->rs2) { /* FMOV */
tcg_gen_mov_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
dest = get_fpr_d(ctx, a->rs1);
} else {
tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rs2],
cpu_fpr[a->rs1], 0, 63);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
tcg_gen_deposit_i64(dest, src2, src1, 0, 63);
}
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -167,15 +235,22 @@ static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
if (a->rs1 == a->rs2) { /* FNEG */
tcg_gen_xori_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], INT64_MIN);
tcg_gen_xori_i64(dest, src1, INT64_MIN);
} else {
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
TCGv_i64 t0 = tcg_temp_new_i64();
tcg_gen_not_i64(t0, cpu_fpr[a->rs2]);
tcg_gen_deposit_i64(cpu_fpr[a->rd], t0, cpu_fpr[a->rs1], 0, 63);
tcg_gen_not_i64(t0, src2);
tcg_gen_deposit_i64(dest, t0, src1, 0, 63);
tcg_temp_free_i64(t0);
}
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -183,15 +258,22 @@ static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
if (a->rs1 == a->rs2) { /* FABS */
tcg_gen_andi_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], ~INT64_MIN);
tcg_gen_andi_i64(dest, src1, ~INT64_MIN);
} else {
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
TCGv_i64 t0 = tcg_temp_new_i64();
tcg_gen_andi_i64(t0, cpu_fpr[a->rs2], INT64_MIN);
tcg_gen_xor_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], t0);
tcg_gen_andi_i64(t0, src2, INT64_MIN);
tcg_gen_xor_i64(dest, src1, t0);
tcg_temp_free_i64(t0);
}
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -199,11 +281,15 @@ static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
gen_helper_fmin_d(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_helper_fmin_d(dest, cpu_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -211,11 +297,15 @@ static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
gen_helper_fmax_d(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_helper_fmax_d(dest, cpu_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -223,11 +313,15 @@ static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_s_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_s_d(dest, cpu_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -235,11 +329,15 @@ static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_d_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_d_s(dest, cpu_env, src1);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -247,11 +345,14 @@ static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_helper_feq_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_feq_d(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -259,11 +360,14 @@ static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_helper_flt_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_flt_d(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -271,11 +375,14 @@ static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_helper_fle_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fle_d(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -283,11 +390,13 @@ static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_helper_fclass_d(dest, cpu_fpr[a->rs1]);
gen_helper_fclass_d(dest, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -295,12 +404,14 @@ static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_w_d(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_w_d(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -308,12 +419,14 @@ static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_wu_d(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_wu_d(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -321,12 +434,15 @@ static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, src);
gen_helper_fcvt_d_w(dest, cpu_env, src);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@ -335,12 +451,15 @@ static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, src);
gen_helper_fcvt_d_wu(dest, cpu_env, src);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@ -350,12 +469,14 @@ static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_l_d(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_l_d(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -364,12 +485,14 @@ static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rs1);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_lu_d(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_lu_d(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -392,12 +515,15 @@ static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, src);
gen_helper_fcvt_d_l(dest, cpu_env, src);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@ -407,12 +533,15 @@ static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZDINX_OR_D(ctx);
REQUIRE_EVEN(ctx, a->rd);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, src);
gen_helper_fcvt_d_lu(dest, cpu_env, src);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;

View File

@ -20,7 +20,14 @@
#define REQUIRE_FPU do {\
if (ctx->mstatus_fs == 0) \
return false; \
if (!ctx->cfg_ptr->ext_zfinx) \
return false; \
} while (0)
#define REQUIRE_ZFINX_OR_F(ctx) do {\
if (!ctx->cfg_ptr->ext_zfinx) { \
REQUIRE_EXT(ctx, RVF); \
} \
} while (0)
static bool trans_flw(DisasContext *ctx, arg_flw *a)
@ -55,10 +62,16 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fmadd_s(dest, cpu_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -66,10 +79,16 @@ static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fmsub_s(dest, cpu_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -77,10 +96,16 @@ static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fnmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fnmsub_s(dest, cpu_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -88,10 +113,16 @@ static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fnmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fnmadd_s(dest, cpu_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -99,11 +130,15 @@ static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fadd_s(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fadd_s(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -111,11 +146,15 @@ static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fsub_s(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fsub_s(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -123,11 +162,15 @@ static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fmul_s(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fmul_s(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -135,11 +178,15 @@ static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fdiv_s(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fdiv_s(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -147,10 +194,14 @@ static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fsqrt_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fsqrt_s(dest, cpu_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -158,22 +209,37 @@ static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
if (a->rs1 == a->rs2) { /* FMOV */
gen_check_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_s(dest, src1);
} else {
tcg_gen_ext32s_i64(dest, src1);
}
} else { /* FSGNJ */
TCGv_i64 rs1 = tcg_temp_new_i64();
TCGv_i64 rs2 = tcg_temp_new_i64();
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
if (!ctx->cfg_ptr->ext_zfinx) {
TCGv_i64 rs1 = tcg_temp_new_i64();
TCGv_i64 rs2 = tcg_temp_new_i64();
gen_check_nanbox_s(rs1, src1);
gen_check_nanbox_s(rs2, src2);
/* This formulation retains the nanboxing of rs2. */
tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 31);
tcg_temp_free_i64(rs1);
tcg_temp_free_i64(rs2);
/* This formulation retains the nanboxing of rs2 in normal 'F'. */
tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31);
tcg_temp_free_i64(rs1);
tcg_temp_free_i64(rs2);
} else {
tcg_gen_deposit_i64(dest, src2, src1, 0, 31);
tcg_gen_ext32s_i64(dest, dest);
}
}
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -183,16 +249,27 @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
TCGv_i64 rs1, rs2, mask;
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
rs1 = tcg_temp_new_i64();
gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
if (a->rs1 == a->rs2) { /* FNEG */
tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(31, 1));
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_s(rs1, src1);
} else {
tcg_gen_mov_i64(rs1, src1);
}
if (a->rs1 == a->rs2) { /* FNEG */
tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(31, 1));
} else {
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
rs2 = tcg_temp_new_i64();
gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_s(rs2, src2);
} else {
tcg_gen_mov_i64(rs2, src2);
}
/*
* Replace bit 31 in rs1 with inverse in rs2.
@ -200,13 +277,17 @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
*/
mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
tcg_gen_nor_i64(rs2, rs2, mask);
tcg_gen_and_i64(rs1, mask, rs1);
tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
tcg_gen_and_i64(dest, mask, rs1);
tcg_gen_or_i64(dest, dest, rs2);
tcg_temp_free_i64(rs2);
}
    /* sign-extend instead of nanboxing the result when zfinx is enabled */
if (ctx->cfg_ptr->ext_zfinx) {
tcg_gen_ext32s_i64(dest, dest);
}
gen_set_fpr_hs(ctx, a->rd, dest);
tcg_temp_free_i64(rs1);
mark_fs_dirty(ctx);
return true;
}
@ -216,28 +297,45 @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
TCGv_i64 rs1, rs2;
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
rs1 = tcg_temp_new_i64();
gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_s(rs1, src1);
} else {
tcg_gen_mov_i64(rs1, src1);
}
if (a->rs1 == a->rs2) { /* FABS */
tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(31, 1));
tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(31, 1));
} else {
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
rs2 = tcg_temp_new_i64();
gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_s(rs2, src2);
} else {
tcg_gen_mov_i64(rs2, src2);
}
/*
* Xor bit 31 in rs1 with that in rs2.
* This formulation retains the nanboxing of rs1.
*/
tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(31, 1));
tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1));
tcg_gen_xor_i64(dest, rs1, dest);
tcg_temp_free_i64(rs2);
}
    /* sign-extend instead of nanboxing the result when zfinx is enabled */
if (ctx->cfg_ptr->ext_zfinx) {
tcg_gen_ext32s_i64(dest, dest);
}
tcg_temp_free_i64(rs1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -245,10 +343,14 @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
gen_helper_fmin_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2]);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_fmin_s(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -256,10 +358,14 @@ static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
gen_helper_fmax_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2]);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_fmax_s(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -267,12 +373,13 @@ static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_w_s(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_w_s(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -280,12 +387,13 @@ static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_wu_s(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_wu_s(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -294,14 +402,14 @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
{
/* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
#if defined(TARGET_RISCV64)
tcg_gen_ext32s_tl(dest, cpu_fpr[a->rs1]);
tcg_gen_ext32s_tl(dest, src1);
#else
tcg_gen_extrl_i64_i32(dest, cpu_fpr[a->rs1]);
tcg_gen_extrl_i64_i32(dest, src1);
#endif
gen_set_gpr(ctx, a->rd, dest);
@ -311,11 +419,13 @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_feq_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_feq_s(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -323,11 +433,13 @@ static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_flt_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_flt_s(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -335,11 +447,13 @@ static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_fle_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fle_s(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -347,11 +461,12 @@ static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_helper_fclass_s(dest, cpu_fpr[a->rs1]);
gen_helper_fclass_s(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -359,13 +474,14 @@ static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, src);
gen_helper_fcvt_s_w(dest, cpu_env, src);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -373,13 +489,14 @@ static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
{
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, src);
gen_helper_fcvt_s_wu(dest, cpu_env, src);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -388,13 +505,14 @@ static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
{
/* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
tcg_gen_extu_tl_i64(cpu_fpr[a->rd], src);
gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
tcg_gen_extu_tl_i64(dest, src);
gen_nanbox_s(dest, dest);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -403,12 +521,13 @@ static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_l_s(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_l_s(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -417,12 +536,13 @@ static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_lu_s(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_lu_s(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -431,13 +551,14 @@ static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, src);
gen_helper_fcvt_s_l(dest, cpu_env, src);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -446,13 +567,14 @@ static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
REQUIRE_ZFINX_OR_F(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, src);
gen_helper_fcvt_s_lu(dest, cpu_env, src);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}

View File

@ -22,12 +22,25 @@
} \
} while (0)
#define REQUIRE_ZHINX_OR_ZFH(ctx) do { \
if (!ctx->cfg_ptr->ext_zhinx && !ctx->cfg_ptr->ext_zfh) { \
return false; \
} \
} while (0)
#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \
if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \
return false; \
} \
} while (0)
#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \
if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \
ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \
return false; \
} \
} while (0)
static bool trans_flh(DisasContext *ctx, arg_flh *a)
{
TCGv_i64 dest;
@ -73,11 +86,16 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fmadd_h(dest, cpu_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -85,11 +103,16 @@ static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a)
static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fmsub_h(dest, cpu_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -97,11 +120,16 @@ static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a)
static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fnmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fnmsub_h(dest, cpu_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -109,11 +137,16 @@ static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a)
static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
gen_helper_fnmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
gen_helper_fnmadd_h(dest, cpu_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -121,11 +154,15 @@ static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a)
static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fadd_h(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fadd_h(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -133,11 +170,15 @@ static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a)
static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fsub_h(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fsub_h(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -145,11 +186,15 @@ static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a)
static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fmul_h(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fmul_h(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -157,11 +202,15 @@ static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a)
static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
gen_helper_fdiv_h(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fdiv_h(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -169,10 +218,14 @@ static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a)
static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fsqrt_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fsqrt_h(dest, cpu_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -180,23 +233,37 @@ static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a)
static bool trans_fsgnj_h(DisasContext *ctx, arg_fsgnj_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
if (a->rs1 == a->rs2) { /* FMOV */
gen_check_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_h(dest, src1);
} else {
tcg_gen_ext16s_i64(dest, src1);
}
} else {
TCGv_i64 rs1 = tcg_temp_new_i64();
TCGv_i64 rs2 = tcg_temp_new_i64();
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
if (!ctx->cfg_ptr->ext_zfinx) {
TCGv_i64 rs1 = tcg_temp_new_i64();
TCGv_i64 rs2 = tcg_temp_new_i64();
gen_check_nanbox_h(rs1, src1);
gen_check_nanbox_h(rs2, src2);
/* This formulation retains the nanboxing of rs2. */
tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 15);
tcg_temp_free_i64(rs1);
tcg_temp_free_i64(rs2);
/* This formulation retains the nanboxing of rs2 in normal 'Zfh'. */
tcg_gen_deposit_i64(dest, rs2, rs1, 0, 15);
tcg_temp_free_i64(rs1);
tcg_temp_free_i64(rs2);
} else {
tcg_gen_deposit_i64(dest, src2, src1, 0, 15);
tcg_gen_ext16s_i64(dest, dest);
}
}
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -206,16 +273,29 @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
TCGv_i64 rs1, rs2, mask;
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
rs1 = tcg_temp_new_i64();
gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_h(rs1, src1);
} else {
tcg_gen_mov_i64(rs1, src1);
}
if (a->rs1 == a->rs2) { /* FNEG */
tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(15, 1));
tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(15, 1));
} else {
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
rs2 = tcg_temp_new_i64();
gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_h(rs2, src2);
} else {
tcg_gen_mov_i64(rs2, src2);
}
/*
* Replace bit 15 in rs1 with inverse in rs2.
@ -224,12 +304,17 @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
mask = tcg_const_i64(~MAKE_64BIT_MASK(15, 1));
tcg_gen_not_i64(rs2, rs2);
tcg_gen_andc_i64(rs2, rs2, mask);
tcg_gen_and_i64(rs1, mask, rs1);
tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
tcg_gen_and_i64(dest, mask, rs1);
tcg_gen_or_i64(dest, dest, rs2);
tcg_temp_free_i64(mask);
tcg_temp_free_i64(rs2);
}
    /* sign-extend instead of nanboxing the result when zfinx is enabled */
if (ctx->cfg_ptr->ext_zfinx) {
tcg_gen_ext16s_i64(dest, dest);
}
tcg_temp_free_i64(rs1);
mark_fs_dirty(ctx);
return true;
}
@ -239,27 +324,44 @@ static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a)
TCGv_i64 rs1, rs2;
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
rs1 = tcg_temp_new_i64();
gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_h(rs1, src1);
} else {
tcg_gen_mov_i64(rs1, src1);
}
if (a->rs1 == a->rs2) { /* FABS */
tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(15, 1));
tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(15, 1));
} else {
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
rs2 = tcg_temp_new_i64();
gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
if (!ctx->cfg_ptr->ext_zfinx) {
gen_check_nanbox_h(rs2, src2);
} else {
tcg_gen_mov_i64(rs2, src2);
}
/*
* Xor bit 15 in rs1 with that in rs2.
* This formulation retains the nanboxing of rs1.
*/
tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(15, 1));
tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(15, 1));
tcg_gen_xor_i64(dest, rs1, dest);
tcg_temp_free_i64(rs2);
}
/* sign-extend the result instead of nanboxing it when zfinx is enabled */
if (ctx->cfg_ptr->ext_zfinx) {
tcg_gen_ext16s_i64(dest, dest);
}
tcg_temp_free_i64(rs1);
mark_fs_dirty(ctx);
return true;
}
@ -267,10 +369,14 @@ static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a)
static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
gen_helper_fmin_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2]);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_fmin_h(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -278,10 +384,14 @@ static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a)
static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
gen_helper_fmax_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2]);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_fmax_h(dest, cpu_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
@ -289,10 +399,14 @@ static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_s_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_s_h(dest, cpu_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@ -302,26 +416,32 @@ static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
REQUIRE_ZDINX_OR_D(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_d_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_d_h(dest, cpu_env, src1);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_h_s(dest, cpu_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@ -330,12 +450,15 @@ static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
REQUIRE_EXT(ctx, RVD);
REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
REQUIRE_ZDINX_OR_D(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_h_d(dest, cpu_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@ -344,11 +467,13 @@ static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_feq_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_feq_h(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -356,11 +481,13 @@ static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a)
static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_flt_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_flt_h(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
@ -369,11 +496,13 @@ static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a)
static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_helper_fle_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_helper_fle_h(dest, cpu_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -381,11 +510,12 @@ static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a)
static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_helper_fclass_h(dest, cpu_fpr[a->rs1]);
gen_helper_fclass_h(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -393,12 +523,13 @@ static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a)
static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_w_h(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_w_h(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -406,12 +537,13 @@ static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a)
static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_wu_h(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_wu_h(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -419,12 +551,14 @@ static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a)
static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_w(cpu_fpr[a->rd], cpu_env, t0);
gen_helper_fcvt_h_w(dest, cpu_env, t0);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@ -433,12 +567,14 @@ static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a)
static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_wu(cpu_fpr[a->rd], cpu_env, t0);
gen_helper_fcvt_h_wu(dest, cpu_env, t0);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@ -482,12 +618,13 @@ static bool trans_fcvt_l_h(DisasContext *ctx, arg_fcvt_l_h *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_l_h(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_l_h(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -496,12 +633,13 @@ static bool trans_fcvt_lu_h(DisasContext *ctx, arg_fcvt_lu_h *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_lu_h(dest, cpu_env, cpu_fpr[a->rs1]);
gen_helper_fcvt_lu_h(dest, cpu_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@ -510,12 +648,14 @@ static bool trans_fcvt_h_l(DisasContext *ctx, arg_fcvt_h_l *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_l(cpu_fpr[a->rd], cpu_env, t0);
gen_helper_fcvt_h_l(dest, cpu_env, t0);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@ -525,12 +665,14 @@ static bool trans_fcvt_h_lu(DisasContext *ctx, arg_fcvt_h_lu *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
REQUIRE_ZHINX_OR_ZFH(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_lu(cpu_fpr[a->rd], cpu_env, t0);
gen_helper_fcvt_h_lu(dest, cpu_env, t0);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;


@ -46,13 +46,23 @@ enum {
RISCV_FRM_ROD = 8, /* Round to Odd */
};
static inline uint64_t nanbox_s(float32 f)
static inline uint64_t nanbox_s(CPURISCVState *env, float32 f)
{
return f | MAKE_64BIT_MASK(32, 32);
/* the value is sign-extended instead of NaN-boxed for zfinx */
if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
return (int32_t)f;
} else {
return f | MAKE_64BIT_MASK(32, 32);
}
}
static inline float32 check_nanbox_s(uint64_t f)
static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
{
/* Disable the NaN-boxing check when zfinx is enabled */
if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
return (uint32_t)f;
}
uint64_t mask = MAKE_64BIT_MASK(32, 32);
if (likely((f & mask) == mask)) {
@ -62,13 +72,23 @@ static inline float32 check_nanbox_s(uint64_t f)
}
}
static inline uint64_t nanbox_h(float16 f)
static inline uint64_t nanbox_h(CPURISCVState *env, float16 f)
{
return f | MAKE_64BIT_MASK(16, 48);
/* the value is sign-extended instead of NaN-boxed for zfinx */
if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
return (int16_t)f;
} else {
return f | MAKE_64BIT_MASK(16, 48);
}
}
static inline float16 check_nanbox_h(uint64_t f)
static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
{
/* Disable the NaN-boxing check when zfinx is enabled */
if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
return (uint16_t)f;
}
uint64_t mask = MAKE_64BIT_MASK(16, 48);
if (likely((f & mask) == mask)) {

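Before the next file: the nanbox_h()/check_nanbox_h() change above captures the core data-representation difference. With F/Zfh a half-precision value is NaN-boxed (every bit above bit 15 set), while with Zfinx/Zhinx it is kept sign-extended in an integer register. A small standalone illustration of the two encodings; this is an editorial sketch, not code from the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* NaN-boxing as in nanbox_h(): set every bit above the 16-bit payload. */
static uint64_t nanbox_h16(uint16_t f)
{
    return (uint64_t)f | 0xffffffffffff0000ULL;
}

/* Zhinx representation: the payload is simply sign-extended. */
static uint64_t sext_h16(uint16_t f)
{
    return (uint64_t)(int64_t)(int16_t)f;
}

int main(void)
{
    uint16_t one = 0x3c00; /* 1.0 in IEEE half precision */
    printf("NaN-boxed:     0x%016" PRIx64 "\n", nanbox_h16(one)); /* 0xffffffffffff3c00 */
    printf("sign-extended: 0x%016" PRIx64 "\n", sext_h16(one));   /* 0x0000000000003c00 */
    return 0;
}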

@ -101,6 +101,9 @@ typedef struct DisasContext {
TCGv zero;
/* Space for 3 operands plus 1 extra for address computation. */
TCGv temp[4];
/* Space for 4 operands (1 dest and <= 3 src) for floating-point computation */
TCGv_i64 ftemp[4];
uint8_t nftemp;
/* PointerMasking extension */
bool pm_mask_enabled;
bool pm_base_enabled;
@ -380,6 +383,138 @@ static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh)
}
}
static TCGv_i64 ftemp_new(DisasContext *ctx)
{
assert(ctx->nftemp < ARRAY_SIZE(ctx->ftemp));
return ctx->ftemp[ctx->nftemp++] = tcg_temp_new_i64();
}
static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num)
{
if (!ctx->cfg_ptr->ext_zfinx) {
return cpu_fpr[reg_num];
}
if (reg_num == 0) {
return tcg_constant_i64(0);
}
switch (get_xl(ctx)) {
case MXL_RV32:
#ifdef TARGET_RISCV32
{
TCGv_i64 t = ftemp_new(ctx);
tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]);
return t;
}
#else
/* fall through */
case MXL_RV64:
return cpu_gpr[reg_num];
#endif
default:
g_assert_not_reached();
}
}
static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num)
{
if (!ctx->cfg_ptr->ext_zfinx) {
return cpu_fpr[reg_num];
}
if (reg_num == 0) {
return tcg_constant_i64(0);
}
switch (get_xl(ctx)) {
case MXL_RV32:
{
TCGv_i64 t = ftemp_new(ctx);
tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]);
return t;
}
#ifdef TARGET_RISCV64
case MXL_RV64:
return cpu_gpr[reg_num];
#endif
default:
g_assert_not_reached();
}
}
static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num)
{
if (!ctx->cfg_ptr->ext_zfinx) {
return cpu_fpr[reg_num];
}
if (reg_num == 0) {
return ftemp_new(ctx);
}
switch (get_xl(ctx)) {
case MXL_RV32:
return ftemp_new(ctx);
#ifdef TARGET_RISCV64
case MXL_RV64:
return cpu_gpr[reg_num];
#endif
default:
g_assert_not_reached();
}
}
/* assume t is NaN-boxed (for normal FP) or sign-extended (for zfinx) */
static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t)
{
if (!ctx->cfg_ptr->ext_zfinx) {
tcg_gen_mov_i64(cpu_fpr[reg_num], t);
return;
}
if (reg_num != 0) {
switch (get_xl(ctx)) {
case MXL_RV32:
#ifdef TARGET_RISCV32
tcg_gen_extrl_i64_i32(cpu_gpr[reg_num], t);
break;
#else
/* fall through */
case MXL_RV64:
tcg_gen_mov_i64(cpu_gpr[reg_num], t);
break;
#endif
default:
g_assert_not_reached();
}
}
}
static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
{
if (!ctx->cfg_ptr->ext_zfinx) {
tcg_gen_mov_i64(cpu_fpr[reg_num], t);
return;
}
if (reg_num != 0) {
switch (get_xl(ctx)) {
case MXL_RV32:
#ifdef TARGET_RISCV32
tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t);
break;
#else
tcg_gen_ext32s_i64(cpu_gpr[reg_num], t);
tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32);
break;
case MXL_RV64:
tcg_gen_mov_i64(cpu_gpr[reg_num], t);
break;
#endif
default:
g_assert_not_reached();
}
}
}
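A note on the RV32 case handled by get_fpr_d()/gen_set_fpr_d() above: under Zdinx a 64-bit double occupies an even/odd GPR pair, with the low 32 bits in x[reg_num] and the high 32 bits in x[reg_num + 1], and writes to x0 are discarded. A minimal sketch of that split over a plain array standing in for the register file (hypothetical names, not part of the patch):

/* Editorial sketch of the RV32 Zdinx register-pair split. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void set_fpr_d_rv32(uint32_t xreg[32], int reg_num, uint64_t val)
{
    if (reg_num == 0) {
        return; /* x0 is hard-wired to zero; the write is dropped */
    }
    xreg[reg_num]     = (uint32_t)val;         /* low half  */
    xreg[reg_num + 1] = (uint32_t)(val >> 32); /* high half */
}

static uint64_t get_fpr_d_rv32(const uint32_t xreg[32], int reg_num)
{
    if (reg_num == 0) {
        return 0;
    }
    return (uint64_t)xreg[reg_num] | ((uint64_t)xreg[reg_num + 1] << 32);
}

int main(void)
{
    uint32_t xreg[32] = { 0 };
    set_fpr_d_rv32(xreg, 10, 0x3ff0000000000000ULL); /* 1.0 as an IEEE double */
    printf("x10 = 0x%08" PRIx32 ", x11 = 0x%08" PRIx32 "\n", xreg[10], xreg[11]);
    printf("read back: 0x%016" PRIx64 "\n", get_fpr_d_rv32(xreg, 10));
    return 0;
}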
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
target_ulong next_pc;
@ -426,6 +561,10 @@ static void mark_fs_dirty(DisasContext *ctx)
{
TCGv tmp;
if (!has_ext(ctx, RVF)) {
return;
}
if (ctx->mstatus_fs != MSTATUS_FS) {
/* Remember the state change for the rest of the TB. */
ctx->mstatus_fs = MSTATUS_FS;
@ -951,6 +1090,8 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->cs = cs;
ctx->ntemp = 0;
memset(ctx->temp, 0, sizeof(ctx->temp));
ctx->nftemp = 0;
memset(ctx->ftemp, 0, sizeof(ctx->ftemp));
ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
ctx->zero = tcg_constant_tl(0);
@ -972,16 +1113,22 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPURISCVState *env = cpu->env_ptr;
uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);
int i;
ctx->ol = ctx->xl;
decode_opc(env, ctx, opcode16);
ctx->base.pc_next = ctx->pc_succ_insn;
for (int i = ctx->ntemp - 1; i >= 0; --i) {
for (i = ctx->ntemp - 1; i >= 0; --i) {
tcg_temp_free(ctx->temp[i]);
ctx->temp[i] = NULL;
}
ctx->ntemp = 0;
for (i = ctx->nftemp - 1; i >= 0; --i) {
tcg_temp_free_i64(ctx->ftemp[i]);
ctx->ftemp[i] = NULL;
}
ctx->nftemp = 0;
if (ctx->base.is_jmp == DISAS_NEXT) {
target_ulong page_start;