target-arm queue:

* New CPU type: cortex-a710
  * Implement new architectural features:
     - FEAT_PACQARMA3
     - FEAT_EPAC
     - FEAT_Pauth2
     - FEAT_FPAC
     - FEAT_FPACCOMBINE
     - FEAT_TIDCP1
  * Xilinx Versal: Model the CFU/CFI
  * Implement RMR_ELx registers
  * Implement handling of HCR_EL2.TIDCP trap bit
  * arm/kvm: Enable support for KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
  * hw/intc/arm_gicv3_its: Avoid maybe-uninitialized error in get_vte()
  * target/arm: Do not use gen_mte_checkN in trans_STGP
  * arm64: Restore trapless ptimer access
 -----BEGIN PGP SIGNATURE-----
 
 iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmT7VEkZHHBldGVyLm1h
 eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3v7BEACENUKCxsFHRQSLmQkoBCT9
 Lc4SJrGCbVUC6b+4s5ligZSWIoFzp/kY6NPpeRYqFa0DCxozd2T5D81/j7TpSo0C
 wUFkZfUq1nGFJ4K5arYcDwhdTtJvvc07YrSbUqufBp6uNGqhR4YmDWPECqBfOlaj
 7bgJM6axsg7FkJJh5zp4cQ4WEfp14MHWRPQWpVTI+9cxNmNymokSVRBhVFkM0Wen
 WD4C/nYud8bOxpDfR8GkIqJ+UnUMhUNEhp28QmHdwywgg0zLWOE4ysIxo55cM0+0
 FL3q45PL2e4S24UUx9dkxDBWnKEZ5qpQpPn9F6EhWzfm3n2dqr4uUnfWAEOg6NAi
 vnGS9MlL7nZo69OM3h8g7yKDfTKYm2vl9HVZ0ytFA6PLoSnaQyQwli58qnLtiid3
 17MWPoNQlq6G8tHUTPkrJjdA8XLz0iNPXe5G2kwhuM/S0Lv7ORzDc2pq4qBYLvIw
 9nV0oUWqzyE7zH6bRKxbbPw2sMI7c8qQr9QRyZeLHL7HdcY5ExvX9FH+qii5JDR/
 fZohi1pBoNNwYYTeSRnxgHiQ7OizYq0xQJhrdqcFF9voytZj1yZEZ0mp6Tq0/CIj
 YkC/vEyLYBqgrJ2JeUjbV3h1RIzQcVaXxnxwGsyMyceACd6MNMmdbjR7bZk0lNIu
 kh+aFEdKajPp56UseJiKBQ==
 =5Shq
 -----END PGP SIGNATURE-----

Merge tag 'pull-target-arm-20230908' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * New CPU type: cortex-a710
 * Implement new architectural features:
    - FEAT_PACQARMA3
    - FEAT_EPAC
    - FEAT_Pauth2
    - FEAT_FPAC
    - FEAT_FPACCOMBINE
    - FEAT_TIDCP1
 * Xilinx Versal: Model the CFU/CFI
 * Implement RMR_ELx registers
 * Implement handling of HCR_EL2.TIDCP trap bit
 * arm/kvm: Enable support for KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
 * hw/intc/arm_gicv3_its: Avoid maybe-uninitialized error in get_vte()
 * target/arm: Do not use gen_mte_checkN in trans_STGP
 * arm64: Restore trapless ptimer access

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmT7VEkZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3v7BEACENUKCxsFHRQSLmQkoBCT9
# Lc4SJrGCbVUC6b+4s5ligZSWIoFzp/kY6NPpeRYqFa0DCxozd2T5D81/j7TpSo0C
# wUFkZfUq1nGFJ4K5arYcDwhdTtJvvc07YrSbUqufBp6uNGqhR4YmDWPECqBfOlaj
# 7bgJM6axsg7FkJJh5zp4cQ4WEfp14MHWRPQWpVTI+9cxNmNymokSVRBhVFkM0Wen
# WD4C/nYud8bOxpDfR8GkIqJ+UnUMhUNEhp28QmHdwywgg0zLWOE4ysIxo55cM0+0
# FL3q45PL2e4S24UUx9dkxDBWnKEZ5qpQpPn9F6EhWzfm3n2dqr4uUnfWAEOg6NAi
# vnGS9MlL7nZo69OM3h8g7yKDfTKYm2vl9HVZ0ytFA6PLoSnaQyQwli58qnLtiid3
# 17MWPoNQlq6G8tHUTPkrJjdA8XLz0iNPXe5G2kwhuM/S0Lv7ORzDc2pq4qBYLvIw
# 9nV0oUWqzyE7zH6bRKxbbPw2sMI7c8qQr9QRyZeLHL7HdcY5ExvX9FH+qii5JDR/
# fZohi1pBoNNwYYTeSRnxgHiQ7OizYq0xQJhrdqcFF9voytZj1yZEZ0mp6Tq0/CIj
# YkC/vEyLYBqgrJ2JeUjbV3h1RIzQcVaXxnxwGsyMyceACd6MNMmdbjR7bZk0lNIu
# kh+aFEdKajPp56UseJiKBQ==
# =5Shq
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 08 Sep 2023 13:05:13 EDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20230908' of https://git.linaro.org/people/pmaydell/qemu-arm: (26 commits)
  arm/kvm: Enable support for KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
  target/arm: Enable SCTLR_EL1.TIDCP for user-only
  target/arm: Implement FEAT_TIDCP1
  target/arm: Implement HCR_EL2.TIDCP
  target/arm: Implement cortex-a710
  target/arm: Implement RMR_ELx
  arm64: Restore trapless ptimer access
  target/arm: Do not use gen_mte_checkN in trans_STGP
  hw/arm/versal: Connect the CFRAME_REG and CFRAME_BCAST_REG
  hw/arm/xlnx-versal: Connect the CFU_APB, CFU_FDRO and CFU_SFR
  hw/misc: Introduce a model of Xilinx Versal's CFRAME_BCAST_REG
  hw/misc: Introduce a model of Xilinx Versal's CFRAME_REG
  hw/misc/xlnx-versal-cfu: Introduce a model of Xilinx Versal's CFU_SFR
  hw/misc/xlnx-versal-cfu: Introduce a model of Xilinx Versal CFU_FDRO
  hw/misc: Introduce a model of Xilinx Versal's CFU_APB
  hw/misc: Introduce the Xilinx CFI interface
  hw/intc/arm_gicv3_its: Avoid maybe-uninitialized error in get_vte()
  target/arm: Implement FEAT_FPAC and FEAT_FPACCOMBINE
  target/arm: Inform helpers whether a PAC instruction is 'combined'
  target/arm: Implement FEAT_Pauth2
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
master
Stefan Hajnoczi 2023-09-11 09:10:36 -04:00
commit a7e8e30e7c
40 changed files with 3184 additions and 157 deletions

View File

@ -1026,6 +1026,16 @@ S: Maintained
F: hw/ssi/xlnx-versal-ospi.c
F: include/hw/ssi/xlnx-versal-ospi.h
Xilinx Versal CFI
M: Francisco Iglesias <francisco.iglesias@amd.com>
S: Maintained
F: hw/misc/xlnx-cfi-if.c
F: include/hw/misc/xlnx-cfi-if.h
F: hw/misc/xlnx-versal-cfu.c
F: include/hw/misc/xlnx-versal-cfu.h
F: hw/misc/xlnx-versal-cframe-reg.c
F: include/hw/misc/xlnx-versal-cframe-reg.h
STM32F100
M: Alexandre Iooss <erdnaxe@crans.org>
L: qemu-arm@nongnu.org

View File

@ -3763,6 +3763,7 @@ static void kvm_accel_instance_init(Object *obj)
/* KVM dirty ring is by default off */
s->kvm_dirty_ring_size = 0;
s->kvm_dirty_ring_with_bitmap = false;
s->kvm_eager_split_size = 0;
s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
s->notify_window = 0;
s->xen_version = 0;

View File

@ -210,15 +210,20 @@ TCG VCPU Features
TCG VCPU features are CPU features that are specific to TCG.
Below is the list of TCG VCPU features and their descriptions.
``pauth-impdef``
When ``FEAT_Pauth`` is enabled, either the *impdef* (Implementation
Defined) algorithm is enabled or the *architected* QARMA algorithm
is enabled. By default the impdef algorithm is disabled, and QARMA
is enabled.
``pauth``
Enable or disable ``FEAT_Pauth`` entirely.
The architected QARMA algorithm has good cryptographic properties,
but can be quite slow to emulate. The impdef algorithm used by QEMU
is non-cryptographic but significantly faster.
``pauth-impdef``
When ``pauth`` is enabled, select the QEMU implementation defined algorithm.
``pauth-qarma3``
When ``pauth`` is enabled, select the architected QARMA3 algorithm.
Without either ``pauth-impdef`` or ``pauth-qarma3`` enabled,
the architected QARMA5 algorithm is used. The architected QARMA5
and QARMA3 algorithms have good cryptographic properties, but can
be quite slow to emulate. The impdef algorithm used by QEMU is
non-cryptographic but significantly faster.
SVE CPU Properties
==================

View File

@ -28,12 +28,15 @@ the following architecture extensions:
- FEAT_DotProd (Advanced SIMD dot product instructions)
- FEAT_DoubleFault (Double Fault Extension)
- FEAT_E0PD (Preventing EL0 access to halves of address maps)
- FEAT_EPAC (Enhanced pointer authentication)
- FEAT_ETS (Enhanced Translation Synchronization)
- FEAT_EVT (Enhanced Virtualization Traps)
- FEAT_FCMA (Floating-point complex number instructions)
- FEAT_FGT (Fine-Grained Traps)
- FEAT_FHM (Floating-point half-precision multiplication instructions)
- FEAT_FP16 (Half-precision floating-point data processing)
- FEAT_FPAC (Faulting on AUT* instructions)
- FEAT_FPACCOMBINE (Faulting on combined pointer authentication instructions)
- FEAT_FRINTTS (Floating-point to integer instructions)
- FEAT_FlagM (Flag manipulation instructions v2)
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
@ -57,10 +60,14 @@ the following architecture extensions:
- FEAT_MTE (Memory Tagging Extension)
- FEAT_MTE2 (Memory Tagging Extension)
- FEAT_MTE3 (MTE Asymmetric Fault Handling)
- FEAT_PACIMP (Pointer authentication - IMPLEMENTATION DEFINED algorithm)
- FEAT_PACQARMA3 (Pointer authentication - QARMA3 algorithm)
- FEAT_PACQARMA5 (Pointer authentication - QARMA5 algorithm)
- FEAT_PAN (Privileged access never)
- FEAT_PAN2 (AT S1E1R and AT S1E1W instruction variants affected by PSTATE.PAN)
- FEAT_PAN3 (Support for SCTLR_ELx.EPAN)
- FEAT_PAuth (Pointer authentication)
- FEAT_PAuth2 (Enhancements to pointer authentication)
- FEAT_PMULL (PMULL, PMULL2 instructions)
- FEAT_PMUv3p1 (PMU Extensions v3.1)
- FEAT_PMUv3p4 (PMU Extensions v3.4)
@ -85,6 +92,7 @@ the following architecture extensions:
- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
- FEAT_SPECRES (Speculation restriction instructions)
- FEAT_SSBS (Speculative Store Bypass Safe)
- FEAT_TIDCP1 (EL0 use of IMPLEMENTATION DEFINED functionality)
- FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
- FEAT_TLBIRANGE (TLB invalidate range instructions)
- FEAT_TTCNP (Translation table Common not private translations)

View File

@ -58,6 +58,7 @@ Supported guest CPU types:
- ``cortex-a57`` (64-bit)
- ``cortex-a72`` (64-bit)
- ``cortex-a76`` (64-bit)
- ``cortex-a710`` (64-bit)
- ``a64fx`` (64-bit)
- ``host`` (with KVM only)
- ``neoverse-n1`` (64-bit)

View File

@ -211,6 +211,7 @@ static const char *valid_cpus[] = {
ARM_CPU_TYPE_NAME("cortex-a55"),
ARM_CPU_TYPE_NAME("cortex-a72"),
ARM_CPU_TYPE_NAME("cortex-a76"),
ARM_CPU_TYPE_NAME("cortex-a710"),
ARM_CPU_TYPE_NAME("a64fx"),
ARM_CPU_TYPE_NAME("neoverse-n1"),
ARM_CPU_TYPE_NAME("neoverse-v1"),

View File

@ -27,7 +27,7 @@
#define XLNX_VERSAL_RCPU_TYPE ARM_CPU_TYPE_NAME("cortex-r5f")
#define GEM_REVISION 0x40070106
#define VERSAL_NUM_PMC_APB_IRQS 3
#define VERSAL_NUM_PMC_APB_IRQS 18
#define NUM_OSPI_IRQ_LINES 3
static void versal_create_apu_cpus(Versal *s)
@ -341,6 +341,7 @@ static void versal_create_pmc_apb_irq_orgate(Versal *s, qemu_irq *pic)
* - RTC
* - BBRAM
* - PMC SLCR
* - CFRAME regs (input 3 - 17 to the orgate)
*/
object_initialize_child(OBJECT(s), "pmc-apb-irq-orgate",
&s->pmc.apb_irq_orgate, TYPE_OR_IRQ);
@ -570,6 +571,157 @@ static void versal_create_ospi(Versal *s, qemu_irq *pic)
qdev_connect_gpio_out(orgate, 0, pic[VERSAL_OSPI_IRQ]);
}
/*
 * Create and wire up the Versal CFU (Configuration Frame Unit) complex:
 *  - the CFU_FDRO readback FIFO,
 *  - one CFRAME_REG block (registers + FDRI keyhole) per cframe row,
 *  - the CFRAME broadcast register block,
 *  - the CFU_APB front-end with its two keyhole stream apertures,
 *  - the CFU_SFR.
 * CFRAME row IRQs drive inputs 3..17 of the PMC APB IRQ orgate; the
 * CFU_APB IRQ is wired directly to VERSAL_CFU_IRQ_0.
 */
static void versal_create_cfu(Versal *s, qemu_irq *pic)
{
    SysBusDevice *sbd;
    DeviceState *dev;
    int i;
    /* Per-row base addresses of the CFRAME register block and FDRI keyhole. */
    const struct {
        uint64_t reg_base;
        uint64_t fdri_base;
    } cframe_addr[] = {
        { MM_PMC_CFRAME0_REG, MM_PMC_CFRAME0_FDRI },
        { MM_PMC_CFRAME1_REG, MM_PMC_CFRAME1_FDRI },
        { MM_PMC_CFRAME2_REG, MM_PMC_CFRAME2_FDRI },
        { MM_PMC_CFRAME3_REG, MM_PMC_CFRAME3_FDRI },
        { MM_PMC_CFRAME4_REG, MM_PMC_CFRAME4_FDRI },
        { MM_PMC_CFRAME5_REG, MM_PMC_CFRAME5_FDRI },
        { MM_PMC_CFRAME6_REG, MM_PMC_CFRAME6_FDRI },
        { MM_PMC_CFRAME7_REG, MM_PMC_CFRAME7_FDRI },
        { MM_PMC_CFRAME8_REG, MM_PMC_CFRAME8_FDRI },
        { MM_PMC_CFRAME9_REG, MM_PMC_CFRAME9_FDRI },
        { MM_PMC_CFRAME10_REG, MM_PMC_CFRAME10_FDRI },
        { MM_PMC_CFRAME11_REG, MM_PMC_CFRAME11_FDRI },
        { MM_PMC_CFRAME12_REG, MM_PMC_CFRAME12_FDRI },
        { MM_PMC_CFRAME13_REG, MM_PMC_CFRAME13_FDRI },
        { MM_PMC_CFRAME14_REG, MM_PMC_CFRAME14_FDRI },
    };
    /*
     * Frame counts per block type for the first rows; rows without an
     * entry here are left at the device's default property values.
     */
    const struct {
        uint32_t blktype0_frames;
        uint32_t blktype1_frames;
        uint32_t blktype2_frames;
        uint32_t blktype3_frames;
        uint32_t blktype4_frames;
        uint32_t blktype5_frames;
        uint32_t blktype6_frames;
    } cframe_cfg[] = {
        [0] = { 34111, 3528, 12800, 11, 5, 1, 1 },
        [1] = { 38498, 3841, 15361, 13, 7, 3, 1 },
        [2] = { 38498, 3841, 15361, 13, 7, 3, 1 },
        [3] = { 38498, 3841, 15361, 13, 7, 3, 1 },
    };

    /* CFU FDRO */
    object_initialize_child(OBJECT(s), "cfu-fdro", &s->pmc.cfu_fdro,
                            TYPE_XLNX_VERSAL_CFU_FDRO);
    sbd = SYS_BUS_DEVICE(&s->pmc.cfu_fdro);

    sysbus_realize(sbd, &error_fatal);
    memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_FDRO,
                                sysbus_mmio_get_region(sbd, 0));

    /* CFRAME REG */
    for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) {
        g_autofree char *name = g_strdup_printf("cframe%d", i);

        object_initialize_child(OBJECT(s), name, &s->pmc.cframe[i],
                                TYPE_XLNX_VERSAL_CFRAME_REG);

        sbd = SYS_BUS_DEVICE(&s->pmc.cframe[i]);
        dev = DEVICE(&s->pmc.cframe[i]);

        /* Only rows covered by cframe_cfg get explicit frame counts. */
        if (i < ARRAY_SIZE(cframe_cfg)) {
            object_property_set_int(OBJECT(dev), "blktype0-frames",
                                    cframe_cfg[i].blktype0_frames,
                                    &error_abort);
            object_property_set_int(OBJECT(dev), "blktype1-frames",
                                    cframe_cfg[i].blktype1_frames,
                                    &error_abort);
            object_property_set_int(OBJECT(dev), "blktype2-frames",
                                    cframe_cfg[i].blktype2_frames,
                                    &error_abort);
            object_property_set_int(OBJECT(dev), "blktype3-frames",
                                    cframe_cfg[i].blktype3_frames,
                                    &error_abort);
            object_property_set_int(OBJECT(dev), "blktype4-frames",
                                    cframe_cfg[i].blktype4_frames,
                                    &error_abort);
            object_property_set_int(OBJECT(dev), "blktype5-frames",
                                    cframe_cfg[i].blktype5_frames,
                                    &error_abort);
            object_property_set_int(OBJECT(dev), "blktype6-frames",
                                    cframe_cfg[i].blktype6_frames,
                                    &error_abort);
        }
        object_property_set_link(OBJECT(dev), "cfu-fdro",
                                 OBJECT(&s->pmc.cfu_fdro), &error_fatal);

        sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal);

        memory_region_add_subregion(&s->mr_ps, cframe_addr[i].reg_base,
                                    sysbus_mmio_get_region(sbd, 0));
        memory_region_add_subregion(&s->mr_ps, cframe_addr[i].fdri_base,
                                    sysbus_mmio_get_region(sbd, 1));
        /* Orgate inputs 0..2 are taken by RTC, BBRAM and PMC SLCR. */
        sysbus_connect_irq(sbd, 0,
                           qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate),
                                            3 + i));
    }

    /* CFRAME BCAST */
    object_initialize_child(OBJECT(s), "cframe_bcast", &s->pmc.cframe_bcast,
                            TYPE_XLNX_VERSAL_CFRAME_BCAST_REG);

    sbd = SYS_BUS_DEVICE(&s->pmc.cframe_bcast);
    dev = DEVICE(&s->pmc.cframe_bcast);

    /* The broadcaster needs a link to every CFRAME row it fans out to. */
    for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) {
        g_autofree char *propname = g_strdup_printf("cframe%d", i);
        object_property_set_link(OBJECT(dev), propname,
                                 OBJECT(&s->pmc.cframe[i]), &error_fatal);
    }

    sysbus_realize(sbd, &error_fatal);

    memory_region_add_subregion(&s->mr_ps, MM_PMC_CFRAME_BCAST_REG,
                                sysbus_mmio_get_region(sbd, 0));
    memory_region_add_subregion(&s->mr_ps, MM_PMC_CFRAME_BCAST_FDRI,
                                sysbus_mmio_get_region(sbd, 1));

    /* CFU APB */
    object_initialize_child(OBJECT(s), "cfu-apb", &s->pmc.cfu_apb,
                            TYPE_XLNX_VERSAL_CFU_APB);
    sbd = SYS_BUS_DEVICE(&s->pmc.cfu_apb);
    dev = DEVICE(&s->pmc.cfu_apb);

    /* CFU_APB also links to every CFRAME row. */
    for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) {
        g_autofree char *propname = g_strdup_printf("cframe%d", i);
        object_property_set_link(OBJECT(dev), propname,
                                 OBJECT(&s->pmc.cframe[i]), &error_fatal);
    }

    sysbus_realize(sbd, &error_fatal);
    /* Region 0: APB registers; regions 1 and 2: keyhole stream apertures. */
    memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_APB,
                                sysbus_mmio_get_region(sbd, 0));
    memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_STREAM,
                                sysbus_mmio_get_region(sbd, 1));
    memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_STREAM_2,
                                sysbus_mmio_get_region(sbd, 2));
    sysbus_connect_irq(sbd, 0, pic[VERSAL_CFU_IRQ_0]);

    /* CFU SFR */
    object_initialize_child(OBJECT(s), "cfu-sfr", &s->pmc.cfu_sfr,
                            TYPE_XLNX_VERSAL_CFU_SFR);

    sbd = SYS_BUS_DEVICE(&s->pmc.cfu_sfr);

    object_property_set_link(OBJECT(&s->pmc.cfu_sfr),
                             "cfu", OBJECT(&s->pmc.cfu_apb), &error_abort);

    sysbus_realize(sbd, &error_fatal);
    memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_SFR,
                                sysbus_mmio_get_region(sbd, 0));
}
static void versal_create_crl(Versal *s, qemu_irq *pic)
{
SysBusDevice *sbd;
@ -763,6 +915,7 @@ static void versal_realize(DeviceState *dev, Error **errp)
versal_create_pmc_iou_slcr(s, pic);
versal_create_ospi(s, pic);
versal_create_crl(s, pic);
versal_create_cfu(s, pic);
versal_map_ddr(s);
versal_unimp(s);

View File

@ -330,23 +330,20 @@ static MemTxResult get_vte(GICv3ITSState *s, uint32_t vpeid, VTEntry *vte)
if (entry_addr == -1) {
/* No L2 table entry, i.e. no valid VTE, or a memory error */
vte->valid = false;
goto out;
trace_gicv3_its_vte_read_fault(vpeid);
return MEMTX_OK;
}
vteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
if (res != MEMTX_OK) {
goto out;
trace_gicv3_its_vte_read_fault(vpeid);
return res;
}
vte->valid = FIELD_EX64(vteval, VTE, VALID);
vte->vptsize = FIELD_EX64(vteval, VTE, VPTSIZE);
vte->vptaddr = FIELD_EX64(vteval, VTE, VPTADDR);
vte->rdbase = FIELD_EX64(vteval, VTE, RDBASE);
out:
if (res != MEMTX_OK) {
trace_gicv3_its_vte_read_fault(vpeid);
} else {
trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize,
vte->vptaddr, vte->rdbase);
}
trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize,
vte->vptaddr, vte->rdbase);
return res;
}

View File

@ -98,6 +98,9 @@ specific_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-crl.c'))
system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files(
'xlnx-versal-xramc.c',
'xlnx-versal-pmc-iou-slcr.c',
'xlnx-versal-cfu.c',
'xlnx-cfi-if.c',
'xlnx-versal-cframe-reg.c',
))
system_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c'))
system_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c'))

34
hw/misc/xlnx-cfi-if.c Normal file
View File

@ -0,0 +1,34 @@
/*
* Xilinx CFI interface
*
* Copyright (C) 2023, Advanced Micro Devices, Inc.
*
* Written by Francisco Iglesias <francisco.iglesias@amd.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "hw/misc/xlnx-cfi-if.h"
/*
 * Forward a CFI packet to @cfi_if's class handler.  Implementations that
 * do not install a cfi_transfer_packet handler silently drop the packet.
 */
void xlnx_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt)
{
    XlnxCfiIfClass *xcic = XLNX_CFI_IF_GET_CLASS(cfi_if);

    if (!xcic->cfi_transfer_packet) {
        return;
    }
    xcic->cfi_transfer_packet(cfi_if, pkt);
}
/* QOM metadata for the Xilinx CFI interface type. */
static const TypeInfo xlnx_cfi_if_info = {
    .name = TYPE_XLNX_CFI_IF,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XlnxCfiIfClass),
};

/* Register the interface with QOM at startup. */
static void xlnx_cfi_if_register_types(void)
{
    type_register_static(&xlnx_cfi_if_info);
}

type_init(xlnx_cfi_if_register_types)

View File

@ -0,0 +1,858 @@
/*
* QEMU model of the Configuration Frame Control module
*
* Copyright (C) 2023, Advanced Micro Devices, Inc.
*
* Written by Francisco Iglesias <francisco.iglesias@amd.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/register.h"
#include "hw/registerfields.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/misc/xlnx-versal-cframe-reg.h"
#ifndef XLNX_VERSAL_CFRAME_REG_ERR_DEBUG
#define XLNX_VERSAL_CFRAME_REG_ERR_DEBUG 0
#endif
#define KEYHOLE_STREAM_4K (4 * KiB)
#define N_WORDS_128BIT 4
#define MAX_BLOCKTYPE 6
#define MAX_BLOCKTYPE_FRAMES 0xFFFFF
enum {
CFRAME_CMD_WCFG = 1,
CFRAME_CMD_ROWON = 2,
CFRAME_CMD_ROWOFF = 3,
CFRAME_CMD_RCFG = 4,
CFRAME_CMD_DLPARK = 5,
};
/*
 * GCompareDataFunc for GTrees keyed by guint values packed into pointers.
 * Returns negative/zero/positive like strcmp, without subtraction overflow.
 */
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    guint key_a = GPOINTER_TO_UINT(a);
    guint key_b = GPOINTER_TO_UINT(b);

    if (key_a < key_b) {
        return -1;
    }
    return key_a > key_b ? 1 : 0;
}
/*
 * Drive the CFRM IRQ line: asserted while any CFRM_ISR0 status bit is
 * pending and not masked by the corresponding CFRM_IMR0 bit.
 */
static void cfrm_imr_update_irq(XlnxVersalCFrameReg *s)
{
    bool pending = s->regs[R_CFRM_ISR0] & ~s->regs[R_CFRM_IMR0];
    qemu_set_irq(s->irq_cfrm_imr, pending);
}
/* Post-write hook for the ISR bank: re-evaluate the IRQ after W1C updates. */
static void cfrm_isr_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
    cfrm_imr_update_irq(s);
}
/*
 * Pre-write hook for the interrupt-enable bank: bits latched in CFRM_IER0
 * clear the corresponding mask bits (unmasking those interrupts), then
 * IER is cleared so it always reads back as 0.  Returns 0 as the value
 * actually stored.
 */
static uint64_t cfrm_ier_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);

    s->regs[R_CFRM_IMR0] &= ~s->regs[R_CFRM_IER0];
    s->regs[R_CFRM_IER0] = 0;
    cfrm_imr_update_irq(s);
    return 0;
}
/*
 * Pre-write hook for the interrupt-disable bank: bits latched in
 * CFRM_IDR0 set the corresponding mask bits (masking those interrupts),
 * then IDR is cleared.  Returns 0 as the value actually stored.
 */
static uint64_t cfrm_idr_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);

    s->regs[R_CFRM_IMR0] |= s->regs[R_CFRM_IDR0];
    s->regs[R_CFRM_IDR0] = 0;
    cfrm_imr_update_irq(s);
    return 0;
}
/*
 * Pre-write hook for the interrupt-trigger bank: bits latched in
 * CFRM_ITR0 raise the corresponding ISR status bits (software-triggered
 * interrupts), then ITR is cleared.  Returns 0 as the value stored.
 */
static uint64_t cfrm_itr_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);

    s->regs[R_CFRM_ISR0] |= s->regs[R_CFRM_ITR0];
    s->regs[R_CFRM_ITR0] = 0;
    cfrm_imr_update_irq(s);
    return 0;
}
/*
 * Auto-increment FAR0's frame address by one; once the address passes the
 * configured frame count for the current block type, wrap to frame 0 of
 * the next block type.
 */
static void cframe_incr_far(XlnxVersalCFrameReg *s)
{
    uint32_t faddr = ARRAY_FIELD_EX32(s->regs, FAR0, FRAME_ADDR);
    uint32_t blktype = ARRAY_FIELD_EX32(s->regs, FAR0, BLOCKTYPE);

    assert(blktype <= MAX_BLOCKTYPE);

    faddr++;
    if (faddr > s->cfg.blktype_num_frames[blktype]) {
        /* Restart from 0 and increment block type */
        faddr = 0;
        blktype++;

        /* Guest must not run the address past the last block type. */
        assert(blktype <= MAX_BLOCKTYPE);

        ARRAY_FIELD_DP32(s->regs, FAR0, BLOCKTYPE, blktype);
    }
    ARRAY_FIELD_DP32(s->regs, FAR0, FRAME_ADDR, faddr);
}
/*
 * Post-write hook for FDRI3 (last word of the 128-bit FDRI bank).  In
 * write-config mode, append the four FDRI words to the frame-assembly
 * FIFO; when a full frame has accumulated, store it in the cframes tree
 * at the frame address from FAR0 and auto-increment FAR.
 */
static void cfrm_fdri_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);

    /* Writes only take effect once the row is on and in WCFG mode. */
    if (s->row_configured && s->rowon && s->wcfg) {

        if (fifo32_num_free(&s->new_f_data) >= N_WORDS_128BIT) {
            fifo32_push(&s->new_f_data, s->regs[R_FDRI0]);
            fifo32_push(&s->new_f_data, s->regs[R_FDRI1]);
            fifo32_push(&s->new_f_data, s->regs[R_FDRI2]);
            fifo32_push(&s->new_f_data, s->regs[R_FDRI3]);
        }

        if (fifo32_is_full(&s->new_f_data)) {
            /* Frame address = low 23 bits of FAR0 (frame addr + blocktype). */
            uint32_t addr = extract32(s->regs[R_FAR0], 0, 23);
            XlnxCFrame *f = g_new(XlnxCFrame, 1);

            for (int i = 0; i < FRAME_NUM_WORDS; i++) {
                f->data[i] = fifo32_pop(&s->new_f_data);
            }

            g_tree_replace(s->cframes, GUINT_TO_POINTER(addr), f);

            cframe_incr_far(s);

            fifo32_reset(&s->new_f_data);
        }
    }
}
/*
 * Stream stored cframes in [start_addr, end_addr) out through the CFU
 * FDRO interface, 4 words (128 bits) per CFI packet.  Addresses with no
 * stored frame are silently skipped.
 */
static void cfrm_readout_frames(XlnxVersalCFrameReg *s, uint32_t start_addr,
                                uint32_t end_addr)
{
    /*
     * NB: when our minimum glib version is at least 2.68 we can improve the
     * performance of the cframe traversal by using g_tree_lookup_node and
     * g_tree_node_next (instead of calling g_tree_lookup for finding each
     * cframe).
     */
    for (uint32_t addr = start_addr; addr < end_addr; addr++) {
        XlnxCFrame *f = g_tree_lookup(s->cframes, GUINT_TO_POINTER(addr));

        /* Transmit the data if a frame was found */
        if (f) {
            for (int i = 0; i < FRAME_NUM_WORDS; i += 4) {
                XlnxCfiPacket pkt = {};

                pkt.data[0] = f->data[i];
                pkt.data[1] = f->data[i + 1];
                pkt.data[2] = f->data[i + 2];
                pkt.data[3] = f->data[i + 3];

                /* Drop the data if no FDRO sink is connected. */
                if (s->cfg.cfu_fdro) {
                    xlnx_cfi_transfer_packet(s->cfg.cfu_fdro, &pkt);
                }
            }
        }
    }
}
/*
 * Post-write hook for FRCNT3: in read-config mode, read back
 * FRCNT0 / FRAME_NUM_QWORDS frames starting at the address in FAR0.
 */
static void cfrm_frcnt_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);

    if (s->row_configured && s->rowon && s->rcfg) {
        uint32_t start_addr = extract32(s->regs[R_FAR0], 0, 23);
        uint32_t end_addr = start_addr + s->regs[R_FRCNT0] / FRAME_NUM_QWORDS;

        cfrm_readout_frames(s, start_addr, end_addr);
    }
}
/*
 * Post-write hook for CMD3: decode the command in CMD0 and update the
 * row state (row on/off, write-config and read-config modes).  Commands
 * are ignored until the row has been configured.
 */
static void cfrm_cmd_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);

    if (s->row_configured) {
        uint8_t cmd = ARRAY_FIELD_EX32(s->regs, CMD0, CMD);

        switch (cmd) {
        case CFRAME_CMD_WCFG:
            s->wcfg = true;
            break;
        case CFRAME_CMD_ROWON:
            s->rowon = true;
            break;
        case CFRAME_CMD_ROWOFF:
            s->rowon = false;
            break;
        case CFRAME_CMD_RCFG:
            s->rcfg = true;
            break;
        case CFRAME_CMD_DLPARK:
            s->wcfg = false;
            s->rcfg = false;
            break;
        default:
            break;
        }   /* stray ';' after the switch removed (empty statement) */
    }
}
/*
 * Post-read hook for the read-only LAST_FRAME_BOT bank.  Values are
 * synthesised from the configured per-block-type frame counts: block
 * type 1's count is split across BOT0 (LSBs) and BOT1 (MSBs, >> 12),
 * block type 3's across BOT1 (LSBs) and BOT2 (MSBs, >> 4).
 */
static uint64_t cfrm_last_frame_bot_post_read(RegisterInfo *reg,
                                              uint64_t val64)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
    uint64_t val = 0;

    switch (reg->access->addr) {
    case A_LAST_FRAME_BOT0:
        val = FIELD_DP32(val, LAST_FRAME_BOT0, BLOCKTYPE1_LAST_FRAME_LSB,
                         s->cfg.blktype_num_frames[1]);
        val = FIELD_DP32(val, LAST_FRAME_BOT0, BLOCKTYPE0_LAST_FRAME,
                         s->cfg.blktype_num_frames[0]);
        break;
    case A_LAST_FRAME_BOT1:
        val = FIELD_DP32(val, LAST_FRAME_BOT1, BLOCKTYPE3_LAST_FRAME_LSB,
                         s->cfg.blktype_num_frames[3]);
        val = FIELD_DP32(val, LAST_FRAME_BOT1, BLOCKTYPE2_LAST_FRAME,
                         s->cfg.blktype_num_frames[2]);
        val = FIELD_DP32(val, LAST_FRAME_BOT1, BLOCKTYPE1_LAST_FRAME_MSB,
                         (s->cfg.blktype_num_frames[1] >> 12));
        break;
    case A_LAST_FRAME_BOT2:
        val = FIELD_DP32(val, LAST_FRAME_BOT2, BLOCKTYPE3_LAST_FRAME_MSB,
                         (s->cfg.blktype_num_frames[3] >> 4));
        break;
    case A_LAST_FRAME_BOT3:
    default:
        /* BOT3 reads as zero. */
        break;
    }
    return val;
}
/*
 * Post-read hook for the read-only LAST_FRAME_TOP bank, synthesised from
 * the configured frame counts for block types 4..6; block type 5's count
 * is split across TOP0 (LSBs) and TOP1 (MSBs, >> 12).
 */
static uint64_t cfrm_last_frame_top_post_read(RegisterInfo *reg,
                                              uint64_t val64)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
    uint64_t val = 0;

    switch (reg->access->addr) {
    case A_LAST_FRAME_TOP0:
        val = FIELD_DP32(val, LAST_FRAME_TOP0, BLOCKTYPE5_LAST_FRAME_LSB,
                         s->cfg.blktype_num_frames[5]);
        val = FIELD_DP32(val, LAST_FRAME_TOP0, BLOCKTYPE4_LAST_FRAME,
                         s->cfg.blktype_num_frames[4]);
        break;
    case A_LAST_FRAME_TOP1:
        val = FIELD_DP32(val, LAST_FRAME_TOP1, BLOCKTYPE6_LAST_FRAME,
                         s->cfg.blktype_num_frames[6]);
        val = FIELD_DP32(val, LAST_FRAME_TOP1, BLOCKTYPE5_LAST_FRAME_MSB,
                         (s->cfg.blktype_num_frames[5] >> 12));
        break;
    case A_LAST_FRAME_TOP2:
    case A_LAST_FRAME_TOP3: /* was A_LAST_FRAME_BOT3 (copy-paste slip) */
    default:
        /* TOP2 and TOP3 read as zero. */
        break;
    }
    return val;
}
/*
 * Post-write hook for FAR_SFR3: in read-config mode, read back exactly
 * one frame at the address in FAR_SFR0.
 */
static void cfrm_far_sfr_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);

    if (s->row_configured && s->rowon && s->rcfg) {
        uint32_t start_addr = extract32(s->regs[R_FAR_SFR0], 0, 23);

        /* Readback 1 frame */
        cfrm_readout_frames(s, start_addr, start_addr + 1);
    }
}
static const RegisterAccessInfo cframe_reg_regs_info[] = {
{ .name = "CRC0", .addr = A_CRC0,
.rsvd = 0x00000000,
},{ .name = "CRC1", .addr = A_CRC0,
.rsvd = 0xffffffff,
},{ .name = "CRC2", .addr = A_CRC0,
.rsvd = 0xffffffff,
},{ .name = "CRC3", .addr = A_CRC0,
.rsvd = 0xffffffff,
},{ .name = "FAR0", .addr = A_FAR0,
.rsvd = 0xfe000000,
},{ .name = "FAR1", .addr = A_FAR1,
.rsvd = 0xffffffff,
},{ .name = "FAR2", .addr = A_FAR2,
.rsvd = 0xffffffff,
},{ .name = "FAR3", .addr = A_FAR3,
.rsvd = 0xffffffff,
},{ .name = "FAR_SFR0", .addr = A_FAR_SFR0,
.rsvd = 0xff800000,
},{ .name = "FAR_SFR1", .addr = A_FAR_SFR1,
.rsvd = 0xffffffff,
},{ .name = "FAR_SFR2", .addr = A_FAR_SFR2,
.rsvd = 0xffffffff,
},{ .name = "FAR_SFR3", .addr = A_FAR_SFR3,
.rsvd = 0xffffffff,
.post_write = cfrm_far_sfr_post_write,
},{ .name = "FDRI0", .addr = A_FDRI0,
},{ .name = "FDRI1", .addr = A_FDRI1,
},{ .name = "FDRI2", .addr = A_FDRI2,
},{ .name = "FDRI3", .addr = A_FDRI3,
.post_write = cfrm_fdri_post_write,
},{ .name = "FRCNT0", .addr = A_FRCNT0,
.rsvd = 0x00000000,
},{ .name = "FRCNT1", .addr = A_FRCNT1,
.rsvd = 0xffffffff,
},{ .name = "FRCNT2", .addr = A_FRCNT2,
.rsvd = 0xffffffff,
},{ .name = "FRCNT3", .addr = A_FRCNT3,
.rsvd = 0xffffffff,
.post_write = cfrm_frcnt_post_write
},{ .name = "CMD0", .addr = A_CMD0,
.rsvd = 0xffffffe0,
},{ .name = "CMD1", .addr = A_CMD1,
.rsvd = 0xffffffff,
},{ .name = "CMD2", .addr = A_CMD2,
.rsvd = 0xffffffff,
},{ .name = "CMD3", .addr = A_CMD3,
.rsvd = 0xffffffff,
.post_write = cfrm_cmd_post_write
},{ .name = "CR_MASK0", .addr = A_CR_MASK0,
.rsvd = 0x00000000,
},{ .name = "CR_MASK1", .addr = A_CR_MASK1,
.rsvd = 0x00000000,
},{ .name = "CR_MASK2", .addr = A_CR_MASK2,
.rsvd = 0x00000000,
},{ .name = "CR_MASK3", .addr = A_CR_MASK3,
.rsvd = 0xffffffff,
},{ .name = "CTL0", .addr = A_CTL0,
.rsvd = 0xfffffff8,
},{ .name = "CTL1", .addr = A_CTL1,
.rsvd = 0xffffffff,
},{ .name = "CTL2", .addr = A_CTL2,
.rsvd = 0xffffffff,
},{ .name = "CTL3", .addr = A_CTL3,
.rsvd = 0xffffffff,
},{ .name = "CFRM_ISR0", .addr = A_CFRM_ISR0,
.rsvd = 0xffc04000,
.w1c = 0x3bfff,
},{ .name = "CFRM_ISR1", .addr = A_CFRM_ISR1,
.rsvd = 0xffffffff,
},{ .name = "CFRM_ISR2", .addr = A_CFRM_ISR2,
.rsvd = 0xffffffff,
},{ .name = "CFRM_ISR3", .addr = A_CFRM_ISR3,
.rsvd = 0xffffffff,
.post_write = cfrm_isr_postw,
},{ .name = "CFRM_IMR0", .addr = A_CFRM_IMR0,
.rsvd = 0xffc04000,
.ro = 0xfffff,
.reset = 0x3bfff,
},{ .name = "CFRM_IMR1", .addr = A_CFRM_IMR1,
.rsvd = 0xffffffff,
},{ .name = "CFRM_IMR2", .addr = A_CFRM_IMR2,
.rsvd = 0xffffffff,
},{ .name = "CFRM_IMR3", .addr = A_CFRM_IMR3,
.rsvd = 0xffffffff,
},{ .name = "CFRM_IER0", .addr = A_CFRM_IER0,
.rsvd = 0xffc04000,
},{ .name = "CFRM_IER1", .addr = A_CFRM_IER1,
.rsvd = 0xffffffff,
},{ .name = "CFRM_IER2", .addr = A_CFRM_IER2,
.rsvd = 0xffffffff,
},{ .name = "CFRM_IER3", .addr = A_CFRM_IER3,
.rsvd = 0xffffffff,
.pre_write = cfrm_ier_prew,
},{ .name = "CFRM_IDR0", .addr = A_CFRM_IDR0,
.rsvd = 0xffc04000,
},{ .name = "CFRM_IDR1", .addr = A_CFRM_IDR1,
.rsvd = 0xffffffff,
},{ .name = "CFRM_IDR2", .addr = A_CFRM_IDR2,
.rsvd = 0xffffffff,
},{ .name = "CFRM_IDR3", .addr = A_CFRM_IDR3,
.rsvd = 0xffffffff,
.pre_write = cfrm_idr_prew,
},{ .name = "CFRM_ITR0", .addr = A_CFRM_ITR0,
.rsvd = 0xffc04000,
},{ .name = "CFRM_ITR1", .addr = A_CFRM_ITR1,
.rsvd = 0xffffffff,
},{ .name = "CFRM_ITR2", .addr = A_CFRM_ITR2,
.rsvd = 0xffffffff,
},{ .name = "CFRM_ITR3", .addr = A_CFRM_ITR3,
.rsvd = 0xffffffff,
.pre_write = cfrm_itr_prew,
},{ .name = "SEU_SYNDRM00", .addr = A_SEU_SYNDRM00,
},{ .name = "SEU_SYNDRM01", .addr = A_SEU_SYNDRM01,
},{ .name = "SEU_SYNDRM02", .addr = A_SEU_SYNDRM02,
},{ .name = "SEU_SYNDRM03", .addr = A_SEU_SYNDRM03,
},{ .name = "SEU_SYNDRM10", .addr = A_SEU_SYNDRM10,
},{ .name = "SEU_SYNDRM11", .addr = A_SEU_SYNDRM11,
},{ .name = "SEU_SYNDRM12", .addr = A_SEU_SYNDRM12,
},{ .name = "SEU_SYNDRM13", .addr = A_SEU_SYNDRM13,
},{ .name = "SEU_SYNDRM20", .addr = A_SEU_SYNDRM20,
},{ .name = "SEU_SYNDRM21", .addr = A_SEU_SYNDRM21,
},{ .name = "SEU_SYNDRM22", .addr = A_SEU_SYNDRM22,
},{ .name = "SEU_SYNDRM23", .addr = A_SEU_SYNDRM23,
},{ .name = "SEU_SYNDRM30", .addr = A_SEU_SYNDRM30,
},{ .name = "SEU_SYNDRM31", .addr = A_SEU_SYNDRM31,
},{ .name = "SEU_SYNDRM32", .addr = A_SEU_SYNDRM32,
},{ .name = "SEU_SYNDRM33", .addr = A_SEU_SYNDRM33,
},{ .name = "SEU_VIRTUAL_SYNDRM0", .addr = A_SEU_VIRTUAL_SYNDRM0,
},{ .name = "SEU_VIRTUAL_SYNDRM1", .addr = A_SEU_VIRTUAL_SYNDRM1,
},{ .name = "SEU_VIRTUAL_SYNDRM2", .addr = A_SEU_VIRTUAL_SYNDRM2,
},{ .name = "SEU_VIRTUAL_SYNDRM3", .addr = A_SEU_VIRTUAL_SYNDRM3,
},{ .name = "SEU_CRC0", .addr = A_SEU_CRC0,
},{ .name = "SEU_CRC1", .addr = A_SEU_CRC1,
},{ .name = "SEU_CRC2", .addr = A_SEU_CRC2,
},{ .name = "SEU_CRC3", .addr = A_SEU_CRC3,
},{ .name = "CFRAME_FAR_BOT0", .addr = A_CFRAME_FAR_BOT0,
},{ .name = "CFRAME_FAR_BOT1", .addr = A_CFRAME_FAR_BOT1,
},{ .name = "CFRAME_FAR_BOT2", .addr = A_CFRAME_FAR_BOT2,
},{ .name = "CFRAME_FAR_BOT3", .addr = A_CFRAME_FAR_BOT3,
},{ .name = "CFRAME_FAR_TOP0", .addr = A_CFRAME_FAR_TOP0,
},{ .name = "CFRAME_FAR_TOP1", .addr = A_CFRAME_FAR_TOP1,
},{ .name = "CFRAME_FAR_TOP2", .addr = A_CFRAME_FAR_TOP2,
},{ .name = "CFRAME_FAR_TOP3", .addr = A_CFRAME_FAR_TOP3,
},{ .name = "LAST_FRAME_BOT0", .addr = A_LAST_FRAME_BOT0,
.ro = 0xffffffff,
.post_read = cfrm_last_frame_bot_post_read,
},{ .name = "LAST_FRAME_BOT1", .addr = A_LAST_FRAME_BOT1,
.ro = 0xffffffff,
.post_read = cfrm_last_frame_bot_post_read,
},{ .name = "LAST_FRAME_BOT2", .addr = A_LAST_FRAME_BOT2,
.ro = 0xffffffff,
.post_read = cfrm_last_frame_bot_post_read,
},{ .name = "LAST_FRAME_BOT3", .addr = A_LAST_FRAME_BOT3,
.ro = 0xffffffff,
.post_read = cfrm_last_frame_bot_post_read,
},{ .name = "LAST_FRAME_TOP0", .addr = A_LAST_FRAME_TOP0,
.ro = 0xffffffff,
.post_read = cfrm_last_frame_top_post_read,
},{ .name = "LAST_FRAME_TOP1", .addr = A_LAST_FRAME_TOP1,
.ro = 0xffffffff,
.post_read = cfrm_last_frame_top_post_read,
},{ .name = "LAST_FRAME_TOP2", .addr = A_LAST_FRAME_TOP2,
.ro = 0xffffffff,
.post_read = cfrm_last_frame_top_post_read,
},{ .name = "LAST_FRAME_TOP3", .addr = A_LAST_FRAME_TOP3,
.ro = 0xffffffff,
.post_read = cfrm_last_frame_top_post_read,
}
};
/*
 * CFI packet sink for a CFRAME row: translate an incoming packet into the
 * equivalent 128-bit register write.  Only the word that carries the
 * bank's side effects (the *3 word) goes through register_write(); the
 * remaining words are stored directly.  Packets are dropped until the row
 * has been configured.
 */
static void cframe_reg_cfi_transfer_packet(XlnxCfiIf *cfi_if,
                                           XlnxCfiPacket *pkt)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(cfi_if);
    uint64_t we = MAKE_64BIT_MASK(0, 4 * 8); /* enable all 32 data bits */

    if (!s->row_configured) {
        return;
    }

    switch (pkt->reg_addr) {
    case CFRAME_FAR:
        s->regs[R_FAR0] = pkt->data[0];
        break;
    case CFRAME_SFR:
        s->regs[R_FAR_SFR0] = pkt->data[0];
        register_write(&s->regs_info[R_FAR_SFR3], 0,
                       we, object_get_typename(OBJECT(s)),
                       XLNX_VERSAL_CFRAME_REG_ERR_DEBUG);
        break;
    case CFRAME_FDRI:
        s->regs[R_FDRI0] = pkt->data[0];
        s->regs[R_FDRI1] = pkt->data[1];
        s->regs[R_FDRI2] = pkt->data[2];
        register_write(&s->regs_info[R_FDRI3], pkt->data[3],
                       we, object_get_typename(OBJECT(s)),
                       XLNX_VERSAL_CFRAME_REG_ERR_DEBUG);
        break;
    case CFRAME_CMD:
        ARRAY_FIELD_DP32(s->regs, CMD0, CMD, pkt->data[0]);

        register_write(&s->regs_info[R_CMD3], 0,
                       we, object_get_typename(OBJECT(s)),
                       XLNX_VERSAL_CFRAME_REG_ERR_DEBUG);
        break;
    default:
        /* Unknown CFI register addresses are ignored. */
        break;
    }
}
/* The FDRI keyhole aperture is write-only: reads are a guest error, RAZ. */
static uint64_t cframe_reg_fdri_read(void *opaque, hwaddr addr, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: Unsupported read from addr=%"
                  HWADDR_PRIx "\n", __func__, addr);
    return 0;
}
/*
 * MMIO write handler for the FDRI keyhole aperture: collect 32-bit words
 * into the 128-bit write FIFO; once the fourth word lands, latch all four
 * into FDRI0..FDRI3, routing FDRI3 through register_write() so the
 * frame-commit side effects run.
 */
static void cframe_reg_fdri_write(void *opaque, hwaddr addr, uint64_t value,
                                  unsigned size)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(opaque);
    uint32_t wfifo[WFIFO_SZ];

    if (update_wfifo(addr, value, s->wfifo, wfifo)) {
        uint64_t we = MAKE_64BIT_MASK(0, 4 * 8);

        s->regs[R_FDRI0] = wfifo[0];
        s->regs[R_FDRI1] = wfifo[1];
        s->regs[R_FDRI2] = wfifo[2];
        register_write(&s->regs_info[R_FDRI3], wfifo[3],
                       we, object_get_typename(OBJECT(s)),
                       XLNX_VERSAL_CFRAME_REG_ERR_DEBUG);
    }
}
/* Enter phase of reset: registers, keyhole fifo, frame fifo, frame store. */
static void cframe_reg_reset_enter(Object *obj, ResetType type)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(obj);
    size_t n;

    for (n = 0; n < ARRAY_SIZE(s->regs_info); n++) {
        register_reset(&s->regs_info[n]);
    }
    memset(s->wfifo, 0, WFIFO_SZ * sizeof(uint32_t));
    fifo32_reset(&s->new_f_data);

    if (g_tree_nnodes(s->cframes) != 0) {
        /*
         * g_tree_destroy() drops one reference, so grabbing an extra
         * reference first empties the tree's contents while keeping the
         * GTree object itself alive. Once our minimum glib is >= 2.70
         * this can become g_tree_remove_all().
         */
        g_tree_ref(s->cframes);
        g_tree_destroy(s->cframes);
    }
}
/* Hold phase of reset: re-evaluate the IMR interrupt output. */
static void cframe_reg_reset_hold(Object *obj)
{
    cfrm_imr_update_irq(XLNX_VERSAL_CFRAME_REG(obj));
}
/* Register block accessors: 32-bit only, little-endian, via hw/register. */
static const MemoryRegionOps cframe_reg_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/* FDRI keyhole window: write-only in practice (reads log and return 0). */
static const MemoryRegionOps cframe_reg_fdri_ops = {
    .read = cframe_reg_fdri_read,
    .write = cframe_reg_fdri_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/* Broadcast register window is write-only: reads are a guest error. */
static uint64_t cframes_bcast_reg_read(void *opaque, hwaddr addr, unsigned size)
{
    (void)opaque;
    (void)size;

    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Unsupported read from addr=%" HWADDR_PRIx "\n",
                  __func__, addr);
    return 0;
}
/*
 * Fan one assembled CFI packet out to every connected CFRAME row.
 * Unpopulated cframe links are skipped.
 */
static void cframes_bcast_write(XlnxVersalCFrameBcastReg *s, uint8_t reg_addr,
                                uint32_t *wfifo)
{
    size_t row;
    XlnxCfiPacket pkt = {
        .reg_addr = reg_addr,
        .data = { wfifo[0], wfifo[1], wfifo[2], wfifo[3] },
    };

    for (row = 0; row < ARRAY_SIZE(s->cfg.cframe); row++) {
        if (s->cfg.cframe[row] != NULL) {
            xlnx_cfi_transfer_packet(s->cfg.cframe[row], &pkt);
        }
    }
}
/*
 * Broadcast register keyhole: once a 128-bit beat is complete, the target
 * CFRAME register is selected by offset bits [9:4] and the beat is
 * broadcast to every row.
 */
static void cframes_bcast_reg_write(void *opaque, hwaddr addr, uint64_t value,
                                    unsigned size)
{
    XlnxVersalCFrameBcastReg *s = XLNX_VERSAL_CFRAME_BCAST_REG(opaque);
    uint32_t words[WFIFO_SZ];

    if (!update_wfifo(addr, value, s->wfifo, words)) {
        return;
    }

    cframes_bcast_write(s, extract32(addr, 4, 6), words);
}
/* Broadcast FDRI window is write-only: reads are a guest error. */
static uint64_t cframes_bcast_fdri_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    (void)opaque;
    (void)size;

    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Unsupported read from addr=%" HWADDR_PRIx "\n",
                  __func__, addr);
    return 0;
}
/* Broadcast FDRI keyhole: complete beats go to every row as frame data. */
static void cframes_bcast_fdri_write(void *opaque, hwaddr addr, uint64_t value,
                                     unsigned size)
{
    XlnxVersalCFrameBcastReg *s = XLNX_VERSAL_CFRAME_BCAST_REG(opaque);
    uint32_t words[WFIFO_SZ];

    if (!update_wfifo(addr, value, s->wfifo, words)) {
        return;
    }

    cframes_bcast_write(s, CFRAME_FDRI, words);
}
/* Broadcast register keyhole: 32-bit, little-endian, write-only. */
static const MemoryRegionOps cframes_bcast_reg_reg_ops = {
    .read = cframes_bcast_reg_read,
    .write = cframes_bcast_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/* Broadcast FDRI keyhole: 32-bit, little-endian, write-only. */
static const MemoryRegionOps cframes_bcast_reg_fdri_ops = {
    .read = cframes_bcast_fdri_read,
    .write = cframes_bcast_fdri_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/*
 * Realize: validate the per-blocktype frame counts and flag the row as
 * configured when at least one blocktype has frames.
 *
 * Each "blktype<N>-frames" property must fit the 20-bit frame count
 * (<= MAX_BLOCKTYPE_FRAMES); otherwise realization fails.
 */
static void cframe_reg_realize(DeviceState *dev, Error **errp)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(dev);

    for (int i = 0; i < ARRAY_SIZE(s->cfg.blktype_num_frames); i++) {
        if (s->cfg.blktype_num_frames[i] > MAX_BLOCKTYPE_FRAMES) {
            /*
             * Name the offending property exactly as it is declared
             * ("blktype<N>-frames"); the previous message printed
             * "blktype-frames<N>", which matches no property.
             */
            error_setg(errp,
                       "blktype%d-frames > 0xFFFFF (max frame per block)",
                       i);
            return;
        }
        if (s->cfg.blktype_num_frames[i]) {
            s->row_configured = true;
        }
    }
}
/*
 * Instance init: container region holding the 32-bit register block
 * (sysbus MMIO 0), a separate FDRI keyhole window (MMIO 1), the IMR
 * interrupt line, the frame store and the frame-assembly fifo.
 */
static void cframe_reg_init(Object *obj)
{
    XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    RegisterInfoArray *reg_array;

    memory_region_init(&s->iomem, obj, TYPE_XLNX_VERSAL_CFRAME_REG,
                       CFRAME_REG_R_MAX * 4);
    reg_array =
        register_init_block32(DEVICE(obj), cframe_reg_regs_info,
                              ARRAY_SIZE(cframe_reg_regs_info),
                              s->regs_info, s->regs,
                              &cframe_reg_ops,
                              XLNX_VERSAL_CFRAME_REG_ERR_DEBUG,
                              CFRAME_REG_R_MAX * 4);
    memory_region_add_subregion(&s->iomem,
                                0x0,
                                &reg_array->mem);
    sysbus_init_mmio(sbd, &s->iomem);
    memory_region_init_io(&s->iomem_fdri, obj, &cframe_reg_fdri_ops, s,
                          TYPE_XLNX_VERSAL_CFRAME_REG "-fdri",
                          KEYHOLE_STREAM_4K);
    sysbus_init_mmio(sbd, &s->iomem_fdri);
    sysbus_init_irq(sbd, &s->irq_cfrm_imr);

    /*
     * Frame store: integer-keyed tree, values freed with g_free on
     * removal. Presumably keyed by frame address -> XlnxCFrame; confirm
     * against the (not visible here) insertion sites.
     */
    s->cframes = g_tree_new_full((GCompareDataFunc)int_cmp, NULL,
                                 NULL, (GDestroyNotify)g_free);
    fifo32_create(&s->new_f_data, FRAME_NUM_WORDS);
}
/* Migration description of one stored configuration frame. */
static const VMStateDescription vmstate_cframe = {
    .name = "cframe",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(data, XlnxCFrame, FRAME_NUM_WORDS),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * Device migration state: keyhole fifo, raw registers, row flags, the
 * frame store (as a direct-keyed GTree of vmstate_cframe) and the
 * frame-assembly fifo.
 */
static const VMStateDescription vmstate_cframe_reg = {
    .name = TYPE_XLNX_VERSAL_CFRAME_REG,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFrameReg, 4),
        VMSTATE_UINT32_ARRAY(regs, XlnxVersalCFrameReg, CFRAME_REG_R_MAX),
        VMSTATE_BOOL(rowon, XlnxVersalCFrameReg),
        VMSTATE_BOOL(wcfg, XlnxVersalCFrameReg),
        VMSTATE_BOOL(rcfg, XlnxVersalCFrameReg),
        VMSTATE_GTREE_DIRECT_KEY_V(cframes, XlnxVersalCFrameReg, 1,
                                   &vmstate_cframe, XlnxCFrame),
        VMSTATE_FIFO32(new_f_data, XlnxVersalCFrameReg),
        VMSTATE_END_OF_LIST(),
    }
};
/*
 * Properties: link to the CFU FDRO (readback sink) and per-blocktype
 * frame counts validated in cframe_reg_realize().
 */
static Property cframe_regs_props[] = {
    DEFINE_PROP_LINK("cfu-fdro", XlnxVersalCFrameReg, cfg.cfu_fdro,
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_UINT32("blktype0-frames", XlnxVersalCFrameReg,
                       cfg.blktype_num_frames[0], 0),
    DEFINE_PROP_UINT32("blktype1-frames", XlnxVersalCFrameReg,
                       cfg.blktype_num_frames[1], 0),
    DEFINE_PROP_UINT32("blktype2-frames", XlnxVersalCFrameReg,
                       cfg.blktype_num_frames[2], 0),
    DEFINE_PROP_UINT32("blktype3-frames", XlnxVersalCFrameReg,
                       cfg.blktype_num_frames[3], 0),
    DEFINE_PROP_UINT32("blktype4-frames", XlnxVersalCFrameReg,
                       cfg.blktype_num_frames[4], 0),
    DEFINE_PROP_UINT32("blktype5-frames", XlnxVersalCFrameReg,
                       cfg.blktype_num_frames[5], 0),
    DEFINE_PROP_UINT32("blktype6-frames", XlnxVersalCFrameReg,
                       cfg.blktype_num_frames[6], 0),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Instance init for the broadcast frontend: two keyhole windows, the
 * register window (sysbus MMIO 0) and the FDRI window (MMIO 1).
 */
static void cframe_bcast_reg_init(Object *obj)
{
    XlnxVersalCFrameBcastReg *s = XLNX_VERSAL_CFRAME_BCAST_REG(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem_reg, obj, &cframes_bcast_reg_reg_ops, s,
                          TYPE_XLNX_VERSAL_CFRAME_BCAST_REG, KEYHOLE_STREAM_4K);
    memory_region_init_io(&s->iomem_fdri, obj, &cframes_bcast_reg_fdri_ops, s,
                          TYPE_XLNX_VERSAL_CFRAME_BCAST_REG "-fdri",
                          KEYHOLE_STREAM_4K);
    sysbus_init_mmio(sbd, &s->iomem_reg);
    sysbus_init_mmio(sbd, &s->iomem_fdri);
}
/* Enter phase of reset: drop any partially accumulated keyhole beat. */
static void cframe_bcast_reg_reset_enter(Object *obj, ResetType type)
{
    XlnxVersalCFrameBcastReg *s = XLNX_VERSAL_CFRAME_BCAST_REG(obj);
    unsigned int i;

    for (i = 0; i < WFIFO_SZ; i++) {
        s->wfifo[i] = 0;
    }
}
/* Only the partial keyhole beat needs to migrate for the broadcaster. */
static const VMStateDescription vmstate_cframe_bcast_reg = {
    .name = TYPE_XLNX_VERSAL_CFRAME_BCAST_REG,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFrameBcastReg, 4),
        VMSTATE_END_OF_LIST(),
    }
};

/* Links to the 15 CFRAME rows the broadcaster fans packets out to. */
static Property cframe_bcast_regs_props[] = {
    DEFINE_PROP_LINK("cframe0", XlnxVersalCFrameBcastReg, cfg.cframe[0],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe1", XlnxVersalCFrameBcastReg, cfg.cframe[1],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe2", XlnxVersalCFrameBcastReg, cfg.cframe[2],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe3", XlnxVersalCFrameBcastReg, cfg.cframe[3],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe4", XlnxVersalCFrameBcastReg, cfg.cframe[4],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe5", XlnxVersalCFrameBcastReg, cfg.cframe[5],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe6", XlnxVersalCFrameBcastReg, cfg.cframe[6],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe7", XlnxVersalCFrameBcastReg, cfg.cframe[7],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe8", XlnxVersalCFrameBcastReg, cfg.cframe[8],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe9", XlnxVersalCFrameBcastReg, cfg.cframe[9],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe10", XlnxVersalCFrameBcastReg, cfg.cframe[10],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe11", XlnxVersalCFrameBcastReg, cfg.cframe[11],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe12", XlnxVersalCFrameBcastReg, cfg.cframe[12],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe13", XlnxVersalCFrameBcastReg, cfg.cframe[13],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe14", XlnxVersalCFrameBcastReg, cfg.cframe[14],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_END_OF_LIST(),
};
/* Class setup: reset phases, migration, properties, CFI sink method. */
static void cframe_reg_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    XlnxCfiIfClass *xcic = XLNX_CFI_IF_CLASS(klass);

    device_class_set_props(dc, cframe_regs_props);
    dc->realize = cframe_reg_realize;
    dc->vmsd = &vmstate_cframe_reg;
    rc->phases.enter = cframe_reg_reset_enter;
    rc->phases.hold = cframe_reg_reset_hold;
    xcic->cfi_transfer_packet = cframe_reg_cfi_transfer_packet;
}
/* Class setup for the broadcaster: migration, properties, reset phase. */
static void cframe_bcast_reg_class_init(ObjectClass *klass, void *data)
{
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, cframe_bcast_regs_props);
    rc->phases.enter = cframe_bcast_reg_reset_enter;
    dc->vmsd = &vmstate_cframe_bcast_reg;
}
/* Per-row CFRAME_REG device; implements the CFI packet-sink interface. */
static const TypeInfo cframe_reg_info = {
    .name = TYPE_XLNX_VERSAL_CFRAME_REG,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxVersalCFrameReg),
    .class_init = cframe_reg_class_init,
    .instance_init = cframe_reg_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XLNX_CFI_IF },
        { }
    }
};

/* Broadcast frontend: plain sysbus device, no CFI interface. */
static const TypeInfo cframe_bcast_reg_info = {
    .name = TYPE_XLNX_VERSAL_CFRAME_BCAST_REG,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxVersalCFrameBcastReg),
    .class_init = cframe_bcast_reg_class_init,
    .instance_init = cframe_bcast_reg_init,
};

static void cframe_reg_register_types(void)
{
    type_register_static(&cframe_reg_info);
    type_register_static(&cframe_bcast_reg_info);
}

type_init(cframe_reg_register_types)
/* ==== scrape artifact: diff viewer chrome; marks the start of new file
   hw/misc/xlnx-versal-cfu.c (563 lines added, hunk @ -0,0 +1,563 @) ==== */
/*
* QEMU model of the CFU Configuration Unit.
*
* Copyright (C) 2023, Advanced Micro Devices, Inc.
*
* Written by Edgar E. Iglesias <edgar.iglesias@gmail.com>,
* Sai Pavan Boddu <sai.pavan.boddu@amd.com>,
* Francisco Iglesias <francisco.iglesias@amd.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/register.h"
#include "hw/irq.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/misc/xlnx-versal-cfu.h"
#ifndef XLNX_VERSAL_CFU_APB_ERR_DEBUG
#define XLNX_VERSAL_CFU_APB_ERR_DEBUG 0
#endif
#define KEYHOLE_STREAM_4K (4 * KiB)
#define KEYHOLE_STREAM_256K (256 * KiB)
#define CFRAME_BROADCAST_ROW 0x1F
/*
 * Accumulate one 32-bit beat of a 128-bit keyhole write.
 *
 * The slot is taken from address bits [3:2]. When the fourth slot is
 * written, the completed beat is copied to wfifo_ret, the accumulator is
 * cleared and true is returned; otherwise false.
 */
bool update_wfifo(hwaddr addr, uint64_t value,
                  uint32_t *wfifo, uint32_t *wfifo_ret)
{
    unsigned int slot = extract32(addr, 2, 2);

    wfifo[slot] = value;
    if (slot != 3) {
        return false;
    }

    memcpy(wfifo_ret, wfifo, WFIFO_SZ * sizeof(uint32_t));
    memset(wfifo, 0, WFIFO_SZ * sizeof(uint32_t));
    return true;
}
/* Drive the CFU IRQ: asserted when any ISR bit is not masked in IMR. */
static void cfu_imr_update_irq(XlnxVersalCFUAPB *s)
{
    uint32_t pending = s->regs[R_CFU_ISR] & ~s->regs[R_CFU_IMR];

    qemu_set_irq(s->irq_cfu_imr, pending != 0);
}
/* ISR post-write (W1C handled by the register core): refresh the IRQ. */
static void cfu_isr_postw(RegisterInfo *reg, uint64_t val64)
{
    cfu_imr_update_irq(XLNX_VERSAL_CFU_APB(reg->opaque));
}
/* IER: writing 1s clears the corresponding IMR mask bits. Reads as 0. */
static uint64_t cfu_ier_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);

    s->regs[R_CFU_IMR] &= ~(uint32_t)val64;
    cfu_imr_update_irq(s);
    return 0;
}
/* IDR: writing 1s sets the corresponding IMR mask bits. Reads as 0. */
static uint64_t cfu_idr_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);

    s->regs[R_CFU_IMR] |= (uint32_t)val64;
    cfu_imr_update_irq(s);
    return 0;
}
/* ITR: writing 1s raises the corresponding ISR bits. Reads as 0. */
static uint64_t cfu_itr_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);

    s->regs[R_CFU_ISR] |= (uint32_t)val64;
    cfu_imr_update_irq(s);
    return 0;
}
/* FGCR post-write: scan-clear is modelled as instant and always passing. */
static void cfu_fgcr_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);

    if (FIELD_EX32((uint32_t)val64, CFU_FGCR, SC_HBC_TRIGGER)) {
        ARRAY_FIELD_DP32(s->regs, CFU_STATUS, SCAN_CLEAR_PASS, 1);
        ARRAY_FIELD_DP32(s->regs, CFU_STATUS, SCAN_CLEAR_DONE, 1);
    }
}
/*
 * CFU APB register map: interrupt block (ISR/IMR/IER/IDR/ITR), control
 * (PROTECT/FGCR/CTL/CRAM_RW), and mostly read-only status/counter
 * registers. rsvd masks mark reserved bits, ro masks read-only bits.
 */
static const RegisterAccessInfo cfu_apb_regs_info[] = {
    {   .name = "CFU_ISR",  .addr = A_CFU_ISR,
        .rsvd = 0xfffffc00,
        .w1c = 0x3ff,                   /* write-1-to-clear */
        .post_write = cfu_isr_postw,
    },{ .name = "CFU_IMR",  .addr = A_CFU_IMR,
        .reset = 0x3ff,                 /* all sources masked at reset */
        .rsvd = 0xfffffc00,
        .ro = 0x3ff,                    /* modified only via IER/IDR */
    },{ .name = "CFU_IER",  .addr = A_CFU_IER,
        .rsvd = 0xfffffc00,
        .pre_write = cfu_ier_prew,
    },{ .name = "CFU_IDR",  .addr = A_CFU_IDR,
        .rsvd = 0xfffffc00,
        .pre_write = cfu_idr_prew,
    },{ .name = "CFU_ITR",  .addr = A_CFU_ITR,
        .rsvd = 0xfffffc00,
        .pre_write = cfu_itr_prew,
    },{ .name = "CFU_PROTECT",  .addr = A_CFU_PROTECT,
        .reset = 0x1,
    },{ .name = "CFU_FGCR",  .addr = A_CFU_FGCR,
        .rsvd = 0xffff8000,
        .post_write = cfu_fgcr_postw,
    },{ .name = "CFU_CTL",  .addr = A_CFU_CTL,
        .rsvd = 0xffff0000,
    },{ .name = "CFU_CRAM_RW",  .addr = A_CFU_CRAM_RW,
        .reset = 0x401f7d9,
        .rsvd = 0xf8000000,
    },{ .name = "CFU_MASK",  .addr = A_CFU_MASK,
    },{ .name = "CFU_CRC_EXPECT",  .addr = A_CFU_CRC_EXPECT,
    },{ .name = "CFU_CFRAME_LEFT_T0",  .addr = A_CFU_CFRAME_LEFT_T0,
        .rsvd = 0xfff00000,
    },{ .name = "CFU_CFRAME_LEFT_T1",  .addr = A_CFU_CFRAME_LEFT_T1,
        .rsvd = 0xfff00000,
    },{ .name = "CFU_CFRAME_LEFT_T2",  .addr = A_CFU_CFRAME_LEFT_T2,
        .rsvd = 0xfff00000,
    },{ .name = "CFU_ROW_RANGE",  .addr = A_CFU_ROW_RANGE,
        .rsvd = 0xffffffc0,
        .ro = 0x3f,
    },{ .name = "CFU_STATUS",  .addr = A_CFU_STATUS,
        .rsvd = 0x80000000,
        .ro = 0x7fffffff,
    },{ .name = "CFU_INTERNAL_STATUS",  .addr = A_CFU_INTERNAL_STATUS,
        .rsvd = 0xff800000,
        .ro = 0x7fffff,
    },{ .name = "CFU_QWORD_CNT",  .addr = A_CFU_QWORD_CNT,
        .ro = 0xffffffff,
    },{ .name = "CFU_CRC_LIVE",  .addr = A_CFU_CRC_LIVE,
        .ro = 0xffffffff,
    },{ .name = "CFU_PENDING_READ_CNT",  .addr = A_CFU_PENDING_READ_CNT,
        .rsvd = 0xfe000000,
        .ro = 0x1ffffff,
    },{ .name = "CFU_FDRI_CNT",  .addr = A_CFU_FDRI_CNT,
        .ro = 0xffffffff,
    },{ .name = "CFU_ECO1",  .addr = A_CFU_ECO1,
    },{ .name = "CFU_ECO2",  .addr = A_CFU_ECO2,
    }
};
/* Device reset: registers, keyhole fifo, status flags, IRQ output. */
static void cfu_apb_reset(DeviceState *dev)
{
    XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(dev);
    size_t n;

    for (n = 0; n < ARRAY_SIZE(s->regs_info); n++) {
        register_reset(&s->regs_info[n]);
    }
    memset(s->wfifo, 0, WFIFO_SZ * sizeof(uint32_t));

    /* Housekeeping is modelled as already complete after reset. */
    s->regs[R_CFU_STATUS] |= R_CFU_STATUS_HC_COMPLETE_MASK;
    cfu_imr_update_irq(s);
}
/* APB register block: 32-bit only, little-endian, via hw/register. */
static const MemoryRegionOps cfu_apb_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/*
 * Route a CFI packet to a CFRAME row.
 *
 * row_addr == CFRAME_BROADCAST_ROW (0x1f) fans the packet out to every
 * connected row; otherwise a single row is targeted. row_addr is decoded
 * from guest-written stream words (5 bits, so 0..31) while only 15 rows
 * are modelled; out-of-range rows are a guest error and the packet is
 * dropped. The previous assert() here was reachable from guest writes
 * (row_addr 15..30) and would abort QEMU.
 */
static void cfu_transfer_cfi_packet(XlnxVersalCFUAPB *s, uint8_t row_addr,
                                    XlnxCfiPacket *pkt)
{
    if (row_addr == CFRAME_BROADCAST_ROW) {
        for (int i = 0; i < ARRAY_SIZE(s->cfg.cframe); i++) {
            if (s->cfg.cframe[i]) {
                xlnx_cfi_transfer_packet(s->cfg.cframe[i], pkt);
            }
        }
        return;
    }

    if (row_addr >= ARRAY_SIZE(s->cfg.cframe)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid cframe row_addr %u\n",
                      __func__, row_addr);
        return;
    }

    if (s->cfg.cframe[row_addr]) {
        xlnx_cfi_transfer_packet(s->cfg.cframe[row_addr], pkt);
    }
}
/* Bitstream keyhole windows are write-only: reads are a guest error. */
static uint64_t cfu_stream_read(void *opaque, hwaddr addr, unsigned size)
{
    (void)opaque;
    (void)size;

    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Unsupported read from addr=%" HWADDR_PRIx "\n",
                  __func__, addr);
    return 0;
}
/*
 * Keyhole write path for the CFU bitstream streams.
 *
 * 32-bit beats are collected into a 128-bit beat by update_wfifo().
 * Word 0 of a complete beat is decoded as:
 *   [31:24] packet type, [20:16] row address, [13:8] register address.
 *
 * While R_CFU_FDRI_CNT is non-zero, each complete beat is frame data and
 * is forwarded verbatim (all four words) as a CFRAME_FDRI packet to the
 * row latched in fdri_row_addr. A CFU-type packet addressed to
 * CFRAME_FDRI (re)loads that counter from word 1 and latches the target
 * row. Any other CFRAME-type packet is forwarded with words 1..3 as
 * payload.
 */
static void cfu_stream_write(void *opaque, hwaddr addr, uint64_t value,
                             unsigned size)
{
    XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(opaque);
    uint32_t wfifo[WFIFO_SZ];

    if (update_wfifo(addr, value, s->wfifo, wfifo)) {
        uint8_t packet_type, row_addr, reg_addr;

        packet_type = extract32(wfifo[0], 24, 8);
        row_addr = extract32(wfifo[0], 16, 5);
        reg_addr = extract32(wfifo[0], 8, 6);

        /* Compressed bitstreams are not supported yet. */
        if (ARRAY_FIELD_EX32(s->regs, CFU_CTL, DECOMPRESS) == 0) {
            if (s->regs[R_CFU_FDRI_CNT]) {
                /* Frame-data beat: all four words go out unchanged. */
                XlnxCfiPacket pkt = {
                    .reg_addr = CFRAME_FDRI,
                    .data[0] = wfifo[0],
                    .data[1] = wfifo[1],
                    .data[2] = wfifo[2],
                    .data[3] = wfifo[3]
                };

                cfu_transfer_cfi_packet(s, s->fdri_row_addr, &pkt);

                s->regs[R_CFU_FDRI_CNT]--;
            } else if (packet_type == PACKET_TYPE_CFU &&
                       reg_addr == CFRAME_FDRI) {
                /* Load R_CFU_FDRI_CNT, must be multiple of 25 */
                s->regs[R_CFU_FDRI_CNT] = wfifo[1];

                /* Store target row_addr */
                s->fdri_row_addr = row_addr;

                if (wfifo[1] % 25 != 0) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "CFU FDRI_CNT is not loaded with "
                                  "a multiple of 25 value\n");
                }
            } else if (packet_type == PACKET_TYPE_CFRAME) {
                /* Control packet: words 1..3 become the payload. */
                XlnxCfiPacket pkt = {
                    .reg_addr = reg_addr,
                    .data[0] = wfifo[1],
                    .data[1] = wfifo[2],
                    .data[2] = wfifo[3],
                };

                cfu_transfer_cfi_packet(s, row_addr, &pkt);
            }
        }
    }
}
/* The SFR window is write-only: reads are a guest error and return 0. */
static uint64_t cfu_sfr_read(void *opaque, hwaddr addr, unsigned size)
{
    (void)opaque;
    (void)size;

    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Unsupported read from addr=%" HWADDR_PRIx "\n",
                  __func__, addr);
    return 0;
}
/*
 * SFR keyhole: a complete beat carries a row address in word0[27:23] and
 * a frame address in word0[22:0], forwarded to the CFU as a CFRAME_SFR
 * packet.
 */
static void cfu_sfr_write(void *opaque, hwaddr addr, uint64_t value,
                          unsigned size)
{
    XlnxVersalCFUSFR *s = XLNX_VERSAL_CFU_SFR(opaque);
    uint32_t words[WFIFO_SZ];

    if (!update_wfifo(addr, value, s->wfifo, words)) {
        return;
    }

    if (s->cfg.cfu) {
        XlnxCfiPacket pkt = {
            .reg_addr = CFRAME_SFR,
            .data[0] = extract32(words[0], 0, 23),
        };

        cfu_transfer_cfi_packet(s->cfg.cfu, extract32(words[0], 23, 5), &pkt);
    }
}
/* Pop one word of readback data; an empty FIFO reads as zero. */
static uint64_t cfu_fdro_read(void *opaque, hwaddr addr, unsigned size)
{
    XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(opaque);

    return fifo32_is_empty(&s->fdro_data) ? 0 : fifo32_pop(&s->fdro_data);
}
/*
 * The FDRO window is read-only; writes are a guest error.
 * (Message fixed: it previously said "write from addr", copy-pasted from
 * the read handler.)
 */
static void cfu_fdro_write(void *opaque, hwaddr addr, uint64_t value,
                           unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: Unsupported write to addr=%"
                  HWADDR_PRIx "\n", __func__, addr);
}
/*
 * Bitstream keyhole streams: note these accept up to 64-bit accesses,
 * unlike the other 32-bit-only windows.
 */
static const MemoryRegionOps cfu_stream_ops = {
    .read = cfu_stream_read,
    .write = cfu_stream_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

/* SFR keyhole: 32-bit, little-endian, write-only. */
static const MemoryRegionOps cfu_sfr_ops = {
    .read = cfu_sfr_read,
    .write = cfu_sfr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/* FDRO readback window: 32-bit, little-endian, read-only. */
static const MemoryRegionOps cfu_fdro_ops = {
    .read = cfu_fdro_read,
    .write = cfu_fdro_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/*
 * Instance init: register block (sysbus MMIO 0) followed by NUM_STREAM
 * keyhole stream windows (stream 0 is 4K, the others 256K), plus the
 * IMR interrupt line.
 */
static void cfu_apb_init(Object *obj)
{
    XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    RegisterInfoArray *reg_array;
    unsigned int i;
    char *name;

    memory_region_init(&s->iomem, obj, TYPE_XLNX_VERSAL_CFU_APB, R_MAX * 4);
    reg_array =
        register_init_block32(DEVICE(obj), cfu_apb_regs_info,
                              ARRAY_SIZE(cfu_apb_regs_info),
                              s->regs_info, s->regs,
                              &cfu_apb_ops,
                              XLNX_VERSAL_CFU_APB_ERR_DEBUG,
                              R_MAX * 4);
    memory_region_add_subregion(&s->iomem,
                                0x0,
                                &reg_array->mem);
    sysbus_init_mmio(sbd, &s->iomem);
    for (i = 0; i < NUM_STREAM; i++) {
        name = g_strdup_printf(TYPE_XLNX_VERSAL_CFU_APB "-stream%d", i);
        memory_region_init_io(&s->iomem_stream[i], obj, &cfu_stream_ops, s,
                              name, i == 0 ? KEYHOLE_STREAM_4K :
                                             KEYHOLE_STREAM_256K);
        sysbus_init_mmio(sbd, &s->iomem_stream[i]);
        g_free(name);
    }
    sysbus_init_irq(sbd, &s->irq_cfu_imr);
}
/* Instance init for the SFR frontend: a single 4K keyhole window. */
static void cfu_sfr_init(Object *obj)
{
    XlnxVersalCFUSFR *s = XLNX_VERSAL_CFU_SFR(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem_sfr, obj, &cfu_sfr_ops, s,
                          TYPE_XLNX_VERSAL_CFU_SFR, KEYHOLE_STREAM_4K);
    sysbus_init_mmio(sbd, &s->iomem_sfr);
}
/* Enter phase of reset: drop any partially accumulated keyhole beat. */
static void cfu_sfr_reset_enter(Object *obj, ResetType type)
{
    XlnxVersalCFUSFR *s = XLNX_VERSAL_CFU_SFR(obj);
    unsigned int i;

    for (i = 0; i < WFIFO_SZ; i++) {
        s->wfifo[i] = 0;
    }
}
/*
 * Instance init for the FDRO readback window: one 4K MMIO window backed
 * by an 8 KiB (2K-word) readback FIFO.
 */
static void cfu_fdro_init(Object *obj)
{
    XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem_fdro, obj, &cfu_fdro_ops, s,
                          TYPE_XLNX_VERSAL_CFU_FDRO, KEYHOLE_STREAM_4K);
    sysbus_init_mmio(sbd, &s->iomem_fdro);
    fifo32_create(&s->fdro_data, 8 * KiB / sizeof(uint32_t));
}
/* Enter phase of reset: discard any queued readback data. */
static void cfu_fdro_reset_enter(Object *obj, ResetType type)
{
    XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj);

    fifo32_reset(&s->fdro_data);
}
/*
 * CFI sink for readback: queue the packet's four data words into the
 * FDRO fifo, or drop them (with a log) if there is no room.
 */
static void cfu_fdro_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt)
{
    XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(cfi_if);
    size_t i;

    if (fifo32_num_free(&s->fdro_data) < ARRAY_SIZE(pkt->data)) {
        /* It is a programming error to fill the fifo. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "CFU_FDRO: CFI data dropped due to full read fifo\n");
        return;
    }

    for (i = 0; i < ARRAY_SIZE(pkt->data); i++) {
        fifo32_push(&s->fdro_data, pkt->data[i]);
    }
}
/* Links from the CFU core to the 15 CFRAME rows it drives. */
static Property cfu_props[] = {
    DEFINE_PROP_LINK("cframe0", XlnxVersalCFUAPB, cfg.cframe[0],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe1", XlnxVersalCFUAPB, cfg.cframe[1],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe2", XlnxVersalCFUAPB, cfg.cframe[2],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe3", XlnxVersalCFUAPB, cfg.cframe[3],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe4", XlnxVersalCFUAPB, cfg.cframe[4],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe5", XlnxVersalCFUAPB, cfg.cframe[5],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe6", XlnxVersalCFUAPB, cfg.cframe[6],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe7", XlnxVersalCFUAPB, cfg.cframe[7],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe8", XlnxVersalCFUAPB, cfg.cframe[8],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe9", XlnxVersalCFUAPB, cfg.cframe[9],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe10", XlnxVersalCFUAPB, cfg.cframe[10],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe11", XlnxVersalCFUAPB, cfg.cframe[11],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe12", XlnxVersalCFUAPB, cfg.cframe[12],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe13", XlnxVersalCFUAPB, cfg.cframe[13],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_LINK("cframe14", XlnxVersalCFUAPB, cfg.cframe[14],
                     TYPE_XLNX_CFI_IF, XlnxCfiIf *),
    DEFINE_PROP_END_OF_LIST(),
};

/* The SFR frontend forwards its packets through the CFU core. */
static Property cfu_sfr_props[] = {
    DEFINE_PROP_LINK("cfu", XlnxVersalCFUSFR, cfg.cfu,
                     TYPE_XLNX_VERSAL_CFU_APB, XlnxVersalCFUAPB *),
    DEFINE_PROP_END_OF_LIST(),
};
/* CFU core migration: keyhole fifo, registers, latched FDRI target row. */
static const VMStateDescription vmstate_cfu_apb = {
    .name = TYPE_XLNX_VERSAL_CFU_APB,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFUAPB, 4),
        VMSTATE_UINT32_ARRAY(regs, XlnxVersalCFUAPB, R_MAX),
        VMSTATE_UINT8(fdri_row_addr, XlnxVersalCFUAPB),
        VMSTATE_END_OF_LIST(),
    }
};

/* FDRO migration: just the pending readback FIFO. */
static const VMStateDescription vmstate_cfu_fdro = {
    .name = TYPE_XLNX_VERSAL_CFU_FDRO,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_FIFO32(fdro_data, XlnxVersalCFUFDRO),
        VMSTATE_END_OF_LIST(),
    }
};

/* SFR migration: just the partial keyhole beat. */
static const VMStateDescription vmstate_cfu_sfr = {
    .name = TYPE_XLNX_VERSAL_CFU_SFR,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFUSFR, 4),
        VMSTATE_END_OF_LIST(),
    }
};
/* Class setup for the CFU core: reset, migration state, properties. */
static void cfu_apb_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, cfu_props);
    dc->vmsd = &vmstate_cfu_apb;
    dc->reset = cfu_apb_reset;
}

/* Class setup for the FDRO: CFI sink method plus enter-phase reset. */
static void cfu_fdro_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    XlnxCfiIfClass *xcic = XLNX_CFI_IF_CLASS(klass);

    rc->phases.enter = cfu_fdro_reset_enter;
    xcic->cfi_transfer_packet = cfu_fdro_cfi_transfer_packet;
    dc->vmsd = &vmstate_cfu_fdro;
}

/* Class setup for the SFR frontend. */
static void cfu_sfr_class_init(ObjectClass *klass, void *data)
{
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    rc->phases.enter = cfu_sfr_reset_enter;
    dc->vmsd = &vmstate_cfu_sfr;
    device_class_set_props(dc, cfu_sfr_props);
}
/* CFU core device; declares the CFI interface on its QOM type. */
static const TypeInfo cfu_apb_info = {
    .name = TYPE_XLNX_VERSAL_CFU_APB,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxVersalCFUAPB),
    .class_init = cfu_apb_class_init,
    .instance_init = cfu_apb_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XLNX_CFI_IF },
        { }
    }
};

/* FDRO readback device; implements the CFI packet-sink interface. */
static const TypeInfo cfu_fdro_info = {
    .name = TYPE_XLNX_VERSAL_CFU_FDRO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxVersalCFUFDRO),
    .class_init = cfu_fdro_class_init,
    .instance_init = cfu_fdro_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XLNX_CFI_IF },
        { }
    }
};

/* SFR frontend: plain sysbus device. */
static const TypeInfo cfu_sfr_info = {
    .name = TYPE_XLNX_VERSAL_CFU_SFR,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxVersalCFUSFR),
    .class_init = cfu_sfr_class_init,
    .instance_init = cfu_sfr_init,
};

static void cfu_apb_register_types(void)
{
    type_register_static(&cfu_apb_info);
    type_register_static(&cfu_fdro_info);
    type_register_static(&cfu_sfr_info);
}

type_init(cfu_apb_register_types)
/* ==== scrape artifact: diff viewer chrome; next section is a hunk
   (@ -32,6 +32,8 @) from an existing header -- context lines between
   hunks are elided, so the definitions below are incomplete ==== */
#include "hw/misc/xlnx-versal-crl.h"
#include "hw/misc/xlnx-versal-pmc-iou-slcr.h"
#include "hw/net/xlnx-versal-canfd.h"
#include "hw/misc/xlnx-versal-cfu.h"
#include "hw/misc/xlnx-versal-cframe-reg.h"
#define TYPE_XLNX_VERSAL "xlnx-versal"
OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
/* ==== hunk @ -46,6 +48,7 @ (context: OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)) ==== */
#define XLNX_VERSAL_NR_IRQS 192
#define XLNX_VERSAL_NR_CANFD 2
#define XLNX_VERSAL_CANFD_REF_CLK (24 * 1000 * 1000)
#define XLNX_VERSAL_NR_CFRAME 15
struct Versal {
/*< private >*/
/* ==== hunk @ -117,6 +120,11 @ (context: inside struct Versal; surrounding fields elided) ==== */
XlnxEFuse efuse;
XlnxVersalEFuseCtrl efuse_ctrl;
XlnxVersalEFuseCache efuse_cache;
XlnxVersalCFUAPB cfu_apb;
XlnxVersalCFUFDRO cfu_fdro;
XlnxVersalCFUSFR cfu_sfr;
XlnxVersalCFrameReg cframe[XLNX_VERSAL_NR_CFRAME];
XlnxVersalCFrameBcastReg cframe_bcast;
OrIRQState apb_irq_orgate;
} pmc;
/* ==== hunk @ -147,6 +155,7 @ (context: struct Versal / IRQ number defines) ==== */
#define VERSAL_GEM1_WAKE_IRQ_0 59
#define VERSAL_ADMA_IRQ_0 60
#define VERSAL_XRAM_IRQ_0 79
#define VERSAL_CFU_IRQ_0 120
#define VERSAL_PMC_APB_IRQ 121
#define VERSAL_OSPI_IRQ 124
#define VERSAL_SD0_IRQ_0 126
/* ==== hunk @ -240,6 +249,82 @ (context: PMC memory-map address defines) ==== */
#define MM_PMC_EFUSE_CACHE 0xf1250000
#define MM_PMC_EFUSE_CACHE_SIZE 0x00C00
#define MM_PMC_CFU_APB 0xf12b0000
#define MM_PMC_CFU_APB_SIZE 0x10000
#define MM_PMC_CFU_STREAM 0xf12c0000
#define MM_PMC_CFU_STREAM_SIZE 0x1000
#define MM_PMC_CFU_SFR 0xf12c1000
#define MM_PMC_CFU_SFR_SIZE 0x1000
#define MM_PMC_CFU_FDRO 0xf12c2000
#define MM_PMC_CFU_FDRO_SIZE 0x1000
#define MM_PMC_CFU_STREAM_2 0xf1f80000
#define MM_PMC_CFU_STREAM_2_SIZE 0x40000
#define MM_PMC_CFRAME0_REG 0xf12d0000
#define MM_PMC_CFRAME0_REG_SIZE 0x1000
#define MM_PMC_CFRAME0_FDRI 0xf12d1000
#define MM_PMC_CFRAME0_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME1_REG 0xf12d2000
#define MM_PMC_CFRAME1_REG_SIZE 0x1000
#define MM_PMC_CFRAME1_FDRI 0xf12d3000
#define MM_PMC_CFRAME1_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME2_REG 0xf12d4000
#define MM_PMC_CFRAME2_REG_SIZE 0x1000
#define MM_PMC_CFRAME2_FDRI 0xf12d5000
#define MM_PMC_CFRAME2_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME3_REG 0xf12d6000
#define MM_PMC_CFRAME3_REG_SIZE 0x1000
#define MM_PMC_CFRAME3_FDRI 0xf12d7000
#define MM_PMC_CFRAME3_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME4_REG 0xf12d8000
#define MM_PMC_CFRAME4_REG_SIZE 0x1000
#define MM_PMC_CFRAME4_FDRI 0xf12d9000
#define MM_PMC_CFRAME4_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME5_REG 0xf12da000
#define MM_PMC_CFRAME5_REG_SIZE 0x1000
#define MM_PMC_CFRAME5_FDRI 0xf12db000
#define MM_PMC_CFRAME5_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME6_REG 0xf12dc000
#define MM_PMC_CFRAME6_REG_SIZE 0x1000
#define MM_PMC_CFRAME6_FDRI 0xf12dd000
#define MM_PMC_CFRAME6_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME7_REG 0xf12de000
#define MM_PMC_CFRAME7_REG_SIZE 0x1000
#define MM_PMC_CFRAME7_FDRI 0xf12df000
#define MM_PMC_CFRAME7_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME8_REG 0xf12e0000
#define MM_PMC_CFRAME8_REG_SIZE 0x1000
#define MM_PMC_CFRAME8_FDRI 0xf12e1000
#define MM_PMC_CFRAME8_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME9_REG 0xf12e2000
#define MM_PMC_CFRAME9_REG_SIZE 0x1000
#define MM_PMC_CFRAME9_FDRI 0xf12e3000
#define MM_PMC_CFRAME9_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME10_REG 0xf12e4000
#define MM_PMC_CFRAME10_REG_SIZE 0x1000
#define MM_PMC_CFRAME10_FDRI 0xf12e5000
#define MM_PMC_CFRAME10_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME11_REG 0xf12e6000
#define MM_PMC_CFRAME11_REG_SIZE 0x1000
#define MM_PMC_CFRAME11_FDRI 0xf12e7000
#define MM_PMC_CFRAME11_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME12_REG 0xf12e8000
#define MM_PMC_CFRAME12_REG_SIZE 0x1000
#define MM_PMC_CFRAME12_FDRI 0xf12e9000
#define MM_PMC_CFRAME12_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME13_REG 0xf12ea000
#define MM_PMC_CFRAME13_REG_SIZE 0x1000
#define MM_PMC_CFRAME13_FDRI 0xf12eb000
#define MM_PMC_CFRAME13_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME14_REG 0xf12ec000
#define MM_PMC_CFRAME14_REG_SIZE 0x1000
#define MM_PMC_CFRAME14_FDRI 0xf12ed000
#define MM_PMC_CFRAME14_FDRI_SIZE 0x1000
#define MM_PMC_CFRAME_BCAST_REG 0xf12ee000
#define MM_PMC_CFRAME_BCAST_REG_SIZE 0x1000
#define MM_PMC_CFRAME_BCAST_FDRI 0xf12ef000
#define MM_PMC_CFRAME_BCAST_FDRI_SIZE 0x1000
#define MM_PMC_CRP 0xf1260000U
#define MM_PMC_CRP_SIZE 0x10000
#define MM_PMC_RTC 0xf12a0000
/* ==== scrape artifact: diff viewer chrome; start of a new 59-line header
   (hunk @ -0,0 +1,59 @; guard below shows it is xlnx-cfi-if.h) ==== */
/*
* Xilinx CFI interface
*
* Copyright (C) 2023, Advanced Micro Devices, Inc.
*
* Written by Francisco Iglesias <francisco.iglesias@amd.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef XLNX_CFI_IF_H
#define XLNX_CFI_IF_H 1
#include "qemu/help-texts.h"
#include "hw/hw.h"
#include "qom/object.h"
#define TYPE_XLNX_CFI_IF "xlnx-cfi-if"
typedef struct XlnxCfiIfClass XlnxCfiIfClass;
DECLARE_CLASS_CHECKERS(XlnxCfiIfClass, XLNX_CFI_IF, TYPE_XLNX_CFI_IF)
#define XLNX_CFI_IF(obj) \
INTERFACE_CHECK(XlnxCfiIf, (obj), TYPE_XLNX_CFI_IF)
/* CFI packet-type discriminator (carried in stream word 0, bits [31:24]). */
typedef enum {
    PACKET_TYPE_CFU = 0x52,
    PACKET_TYPE_CFRAME = 0xA1,
} xlnx_cfi_packet_type;

/* Target register selector inside a CFRAME row. */
typedef enum {
    CFRAME_FAR = 1,
    CFRAME_SFR = 2,
    CFRAME_FDRI = 4,
    CFRAME_CMD = 6,
} xlnx_cfi_reg_addr;

/* One CFI transfer: a register selector and four 32-bit data words. */
typedef struct XlnxCfiPacket {
    uint8_t reg_addr;
    uint32_t data[4];
} XlnxCfiPacket;

/* Instance side of the CFI interface (carries no per-instance state). */
typedef struct XlnxCfiIf {
    Object Parent;
} XlnxCfiIf;

/* Class side: implementors provide a single packet-sink method. */
typedef struct XlnxCfiIfClass {
    InterfaceClass parent;

    void (*cfi_transfer_packet)(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt);
} XlnxCfiIfClass;

/**
 * Transfer a XlnxCfiPacket.
 *
 * @cfi_if: the object implementing this interface
 * @pkt: a pointer to the XlnxCfiPacket to transfer
 */
void xlnx_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt);
#endif /* XLNX_CFI_IF_H */
/* ==== scrape artifact: diff viewer chrome; start of a new 303-line header
   (hunk @ -0,0 +1,303 @; guard HW_MISC_XLNX_VERSAL_CFRAME_REG_H) -- the
   register list below is truncated at the end of this excerpt ==== */
/*
* QEMU model of the Configuration Frame Control module
*
* Copyright (C) 2023, Advanced Micro Devices, Inc.
*
* Written by Francisco Iglesias <francisco.iglesias@amd.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*
* References:
* [1] Versal ACAP Technical Reference Manual,
* https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
*
* [2] Versal ACAP Register Reference,
* https://www.xilinx.com/htmldocs/registers/am012/am012-versal-register-reference.html
*/
#ifndef HW_MISC_XLNX_VERSAL_CFRAME_REG_H
#define HW_MISC_XLNX_VERSAL_CFRAME_REG_H
#include "hw/sysbus.h"
#include "hw/register.h"
#include "hw/misc/xlnx-cfi-if.h"
#include "hw/misc/xlnx-versal-cfu.h"
#include "qemu/fifo32.h"
#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx,cframe-reg"
OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameReg, XLNX_VERSAL_CFRAME_REG)
#define TYPE_XLNX_VERSAL_CFRAME_BCAST_REG "xlnx.cframe-bcast-reg"
OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameBcastReg,
XLNX_VERSAL_CFRAME_BCAST_REG)
/*
* The registers in this module are 128 bits wide but it is ok to write
* and read them through 4 sequential 32 bit accesses (address[3:2] = 0,
* 1, 2, 3).
*/
REG32(CRC0, 0x0)
FIELD(CRC, CRC, 0, 32)
REG32(CRC1, 0x4)
REG32(CRC2, 0x8)
REG32(CRC3, 0xc)
REG32(FAR0, 0x10)
FIELD(FAR0, SEGMENT, 23, 2)
FIELD(FAR0, BLOCKTYPE, 20, 3)
FIELD(FAR0, FRAME_ADDR, 0, 20)
REG32(FAR1, 0x14)
REG32(FAR2, 0x18)
REG32(FAR3, 0x1c)
REG32(FAR_SFR0, 0x20)
FIELD(FAR_SFR0, BLOCKTYPE, 20, 3)
FIELD(FAR_SFR0, FRAME_ADDR, 0, 20)
REG32(FAR_SFR1, 0x24)
REG32(FAR_SFR2, 0x28)
REG32(FAR_SFR3, 0x2c)
REG32(FDRI0, 0x40)
REG32(FDRI1, 0x44)
REG32(FDRI2, 0x48)
REG32(FDRI3, 0x4c)
REG32(FRCNT0, 0x50)
FIELD(FRCNT0, FRCNT, 0, 32)
REG32(FRCNT1, 0x54)
REG32(FRCNT2, 0x58)
REG32(FRCNT3, 0x5c)
REG32(CMD0, 0x60)
FIELD(CMD0, CMD, 0, 5)
REG32(CMD1, 0x64)
REG32(CMD2, 0x68)
REG32(CMD3, 0x6c)
REG32(CR_MASK0, 0x70)
REG32(CR_MASK1, 0x74)
REG32(CR_MASK2, 0x78)
REG32(CR_MASK3, 0x7c)
REG32(CTL0, 0x80)
FIELD(CTL, PER_FRAME_CRC, 0, 1)
REG32(CTL1, 0x84)
REG32(CTL2, 0x88)
REG32(CTL3, 0x8c)
REG32(CFRM_ISR0, 0x150)
FIELD(CFRM_ISR0, READ_BROADCAST_ERROR, 21, 1)
FIELD(CFRM_ISR0, CMD_MISSING_ERROR, 20, 1)
FIELD(CFRM_ISR0, RW_ROWOFF_ERROR, 19, 1)
FIELD(CFRM_ISR0, READ_REG_ADDR_ERROR, 18, 1)
FIELD(CFRM_ISR0, READ_BLK_TYPE_ERROR, 17, 1)
FIELD(CFRM_ISR0, READ_FRAME_ADDR_ERROR, 16, 1)
FIELD(CFRM_ISR0, WRITE_REG_ADDR_ERROR, 15, 1)
FIELD(CFRM_ISR0, WRITE_BLK_TYPE_ERROR, 13, 1)
FIELD(CFRM_ISR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
FIELD(CFRM_ISR0, MFW_OVERRUN_ERROR, 11, 1)
FIELD(CFRM_ISR0, FAR_FIFO_UNDERFLOW, 10, 1)
FIELD(CFRM_ISR0, FAR_FIFO_OVERFLOW, 9, 1)
FIELD(CFRM_ISR0, PER_FRAME_SEQ_ERROR, 8, 1)
FIELD(CFRM_ISR0, CRC_ERROR, 7, 1)
FIELD(CFRM_ISR0, WRITE_OVERRUN_ERROR, 6, 1)
FIELD(CFRM_ISR0, READ_OVERRUN_ERROR, 5, 1)
FIELD(CFRM_ISR0, CMD_INTERRUPT_ERROR, 4, 1)
FIELD(CFRM_ISR0, WRITE_INTERRUPT_ERROR, 3, 1)
FIELD(CFRM_ISR0, READ_INTERRUPT_ERROR, 2, 1)
FIELD(CFRM_ISR0, SEU_CRC_ERROR, 1, 1)
FIELD(CFRM_ISR0, SEU_ECC_ERROR, 0, 1)
REG32(CFRM_ISR1, 0x154)
REG32(CFRM_ISR2, 0x158)
REG32(CFRM_ISR3, 0x15c)
REG32(CFRM_IMR0, 0x160)
FIELD(CFRM_IMR0, READ_BROADCAST_ERROR, 21, 1)
FIELD(CFRM_IMR0, CMD_MISSING_ERROR, 20, 1)
FIELD(CFRM_IMR0, RW_ROWOFF_ERROR, 19, 1)
FIELD(CFRM_IMR0, READ_REG_ADDR_ERROR, 18, 1)
FIELD(CFRM_IMR0, READ_BLK_TYPE_ERROR, 17, 1)
FIELD(CFRM_IMR0, READ_FRAME_ADDR_ERROR, 16, 1)
FIELD(CFRM_IMR0, WRITE_REG_ADDR_ERROR, 15, 1)
FIELD(CFRM_IMR0, WRITE_BLK_TYPE_ERROR, 13, 1)
FIELD(CFRM_IMR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
FIELD(CFRM_IMR0, MFW_OVERRUN_ERROR, 11, 1)
FIELD(CFRM_IMR0, FAR_FIFO_UNDERFLOW, 10, 1)
FIELD(CFRM_IMR0, FAR_FIFO_OVERFLOW, 9, 1)
FIELD(CFRM_IMR0, PER_FRAME_SEQ_ERROR, 8, 1)
FIELD(CFRM_IMR0, CRC_ERROR, 7, 1)
FIELD(CFRM_IMR0, WRITE_OVERRUN_ERROR, 6, 1)
FIELD(CFRM_IMR0, READ_OVERRUN_ERROR, 5, 1)
FIELD(CFRM_IMR0, CMD_INTERRUPT_ERROR, 4, 1)
FIELD(CFRM_IMR0, WRITE_INTERRUPT_ERROR, 3, 1)
FIELD(CFRM_IMR0, READ_INTERRUPT_ERROR, 2, 1)
FIELD(CFRM_IMR0, SEU_CRC_ERROR, 1, 1)
FIELD(CFRM_IMR0, SEU_ECC_ERROR, 0, 1)
REG32(CFRM_IMR1, 0x164)
REG32(CFRM_IMR2, 0x168)
REG32(CFRM_IMR3, 0x16c)
REG32(CFRM_IER0, 0x170)
FIELD(CFRM_IER0, READ_BROADCAST_ERROR, 21, 1)
FIELD(CFRM_IER0, CMD_MISSING_ERROR, 20, 1)
FIELD(CFRM_IER0, RW_ROWOFF_ERROR, 19, 1)
FIELD(CFRM_IER0, READ_REG_ADDR_ERROR, 18, 1)
FIELD(CFRM_IER0, READ_BLK_TYPE_ERROR, 17, 1)
FIELD(CFRM_IER0, READ_FRAME_ADDR_ERROR, 16, 1)
FIELD(CFRM_IER0, WRITE_REG_ADDR_ERROR, 15, 1)
FIELD(CFRM_IER0, WRITE_BLK_TYPE_ERROR, 13, 1)
FIELD(CFRM_IER0, WRITE_FRAME_ADDR_ERROR, 12, 1)
FIELD(CFRM_IER0, MFW_OVERRUN_ERROR, 11, 1)
FIELD(CFRM_IER0, FAR_FIFO_UNDERFLOW, 10, 1)
FIELD(CFRM_IER0, FAR_FIFO_OVERFLOW, 9, 1)
FIELD(CFRM_IER0, PER_FRAME_SEQ_ERROR, 8, 1)
FIELD(CFRM_IER0, CRC_ERROR, 7, 1)
FIELD(CFRM_IER0, WRITE_OVERRUN_ERROR, 6, 1)
FIELD(CFRM_IER0, READ_OVERRUN_ERROR, 5, 1)
FIELD(CFRM_IER0, CMD_INTERRUPT_ERROR, 4, 1)
FIELD(CFRM_IER0, WRITE_INTERRUPT_ERROR, 3, 1)
FIELD(CFRM_IER0, READ_INTERRUPT_ERROR, 2, 1)
FIELD(CFRM_IER0, SEU_CRC_ERROR, 1, 1)
FIELD(CFRM_IER0, SEU_ECC_ERROR, 0, 1)
REG32(CFRM_IER1, 0x174)
REG32(CFRM_IER2, 0x178)
REG32(CFRM_IER3, 0x17c)
REG32(CFRM_IDR0, 0x180)
FIELD(CFRM_IDR0, READ_BROADCAST_ERROR, 21, 1)
FIELD(CFRM_IDR0, CMD_MISSING_ERROR, 20, 1)
FIELD(CFRM_IDR0, RW_ROWOFF_ERROR, 19, 1)
FIELD(CFRM_IDR0, READ_REG_ADDR_ERROR, 18, 1)
FIELD(CFRM_IDR0, READ_BLK_TYPE_ERROR, 17, 1)
FIELD(CFRM_IDR0, READ_FRAME_ADDR_ERROR, 16, 1)
FIELD(CFRM_IDR0, WRITE_REG_ADDR_ERROR, 15, 1)
FIELD(CFRM_IDR0, WRITE_BLK_TYPE_ERROR, 13, 1)
FIELD(CFRM_IDR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
FIELD(CFRM_IDR0, MFW_OVERRUN_ERROR, 11, 1)
FIELD(CFRM_IDR0, FAR_FIFO_UNDERFLOW, 10, 1)
FIELD(CFRM_IDR0, FAR_FIFO_OVERFLOW, 9, 1)
FIELD(CFRM_IDR0, PER_FRAME_SEQ_ERROR, 8, 1)
FIELD(CFRM_IDR0, CRC_ERROR, 7, 1)
FIELD(CFRM_IDR0, WRITE_OVERRUN_ERROR, 6, 1)
FIELD(CFRM_IDR0, READ_OVERRUN_ERROR, 5, 1)
FIELD(CFRM_IDR0, CMD_INTERRUPT_ERROR, 4, 1)
FIELD(CFRM_IDR0, WRITE_INTERRUPT_ERROR, 3, 1)
FIELD(CFRM_IDR0, READ_INTERRUPT_ERROR, 2, 1)
FIELD(CFRM_IDR0, SEU_CRC_ERROR, 1, 1)
FIELD(CFRM_IDR0, SEU_ECC_ERROR, 0, 1)
REG32(CFRM_IDR1, 0x184)
REG32(CFRM_IDR2, 0x188)
REG32(CFRM_IDR3, 0x18c)
REG32(CFRM_ITR0, 0x190)
FIELD(CFRM_ITR0, READ_BROADCAST_ERROR, 21, 1)
FIELD(CFRM_ITR0, CMD_MISSING_ERROR, 20, 1)
FIELD(CFRM_ITR0, RW_ROWOFF_ERROR, 19, 1)
FIELD(CFRM_ITR0, READ_REG_ADDR_ERROR, 18, 1)
FIELD(CFRM_ITR0, READ_BLK_TYPE_ERROR, 17, 1)
FIELD(CFRM_ITR0, READ_FRAME_ADDR_ERROR, 16, 1)
FIELD(CFRM_ITR0, WRITE_REG_ADDR_ERROR, 15, 1)
FIELD(CFRM_ITR0, WRITE_BLK_TYPE_ERROR, 13, 1)
FIELD(CFRM_ITR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
FIELD(CFRM_ITR0, MFW_OVERRUN_ERROR, 11, 1)
FIELD(CFRM_ITR0, FAR_FIFO_UNDERFLOW, 10, 1)
FIELD(CFRM_ITR0, FAR_FIFO_OVERFLOW, 9, 1)
FIELD(CFRM_ITR0, PER_FRAME_SEQ_ERROR, 8, 1)
FIELD(CFRM_ITR0, CRC_ERROR, 7, 1)
FIELD(CFRM_ITR0, WRITE_OVERRUN_ERROR, 6, 1)
FIELD(CFRM_ITR0, READ_OVERRUN_ERROR, 5, 1)
FIELD(CFRM_ITR0, CMD_INTERRUPT_ERROR, 4, 1)
FIELD(CFRM_ITR0, WRITE_INTERRUPT_ERROR, 3, 1)
FIELD(CFRM_ITR0, READ_INTERRUPT_ERROR, 2, 1)
FIELD(CFRM_ITR0, SEU_CRC_ERROR, 1, 1)
FIELD(CFRM_ITR0, SEU_ECC_ERROR, 0, 1)
REG32(CFRM_ITR1, 0x194)
REG32(CFRM_ITR2, 0x198)
REG32(CFRM_ITR3, 0x19c)
REG32(SEU_SYNDRM00, 0x1a0)
REG32(SEU_SYNDRM01, 0x1a4)
REG32(SEU_SYNDRM02, 0x1a8)
REG32(SEU_SYNDRM03, 0x1ac)
REG32(SEU_SYNDRM10, 0x1b0)
REG32(SEU_SYNDRM11, 0x1b4)
REG32(SEU_SYNDRM12, 0x1b8)
REG32(SEU_SYNDRM13, 0x1bc)
REG32(SEU_SYNDRM20, 0x1c0)
REG32(SEU_SYNDRM21, 0x1c4)
REG32(SEU_SYNDRM22, 0x1c8)
REG32(SEU_SYNDRM23, 0x1cc)
REG32(SEU_SYNDRM30, 0x1d0)
REG32(SEU_SYNDRM31, 0x1d4)
REG32(SEU_SYNDRM32, 0x1d8)
REG32(SEU_SYNDRM33, 0x1dc)
REG32(SEU_VIRTUAL_SYNDRM0, 0x1e0)
REG32(SEU_VIRTUAL_SYNDRM1, 0x1e4)
REG32(SEU_VIRTUAL_SYNDRM2, 0x1e8)
REG32(SEU_VIRTUAL_SYNDRM3, 0x1ec)
REG32(SEU_CRC0, 0x1f0)
REG32(SEU_CRC1, 0x1f4)
REG32(SEU_CRC2, 0x1f8)
REG32(SEU_CRC3, 0x1fc)
REG32(CFRAME_FAR_BOT0, 0x200)
REG32(CFRAME_FAR_BOT1, 0x204)
REG32(CFRAME_FAR_BOT2, 0x208)
REG32(CFRAME_FAR_BOT3, 0x20c)
REG32(CFRAME_FAR_TOP0, 0x210)
REG32(CFRAME_FAR_TOP1, 0x214)
REG32(CFRAME_FAR_TOP2, 0x218)
REG32(CFRAME_FAR_TOP3, 0x21c)
REG32(LAST_FRAME_BOT0, 0x220)
FIELD(LAST_FRAME_BOT0, BLOCKTYPE1_LAST_FRAME_LSB, 20, 12)
FIELD(LAST_FRAME_BOT0, BLOCKTYPE0_LAST_FRAME, 0, 20)
REG32(LAST_FRAME_BOT1, 0x224)
FIELD(LAST_FRAME_BOT1, BLOCKTYPE3_LAST_FRAME_LSB, 28, 4)
FIELD(LAST_FRAME_BOT1, BLOCKTYPE2_LAST_FRAME, 8, 20)
FIELD(LAST_FRAME_BOT1, BLOCKTYPE1_LAST_FRAME_MSB, 0, 8)
REG32(LAST_FRAME_BOT2, 0x228)
FIELD(LAST_FRAME_BOT2, BLOCKTYPE3_LAST_FRAME_MSB, 0, 16)
REG32(LAST_FRAME_BOT3, 0x22c)
REG32(LAST_FRAME_TOP0, 0x230)
FIELD(LAST_FRAME_TOP0, BLOCKTYPE5_LAST_FRAME_LSB, 20, 12)
FIELD(LAST_FRAME_TOP0, BLOCKTYPE4_LAST_FRAME, 0, 20)
REG32(LAST_FRAME_TOP1, 0x234)
FIELD(LAST_FRAME_TOP1, BLOCKTYPE6_LAST_FRAME, 8, 20)
FIELD(LAST_FRAME_TOP1, BLOCKTYPE5_LAST_FRAME_MSB, 0, 8)
REG32(LAST_FRAME_TOP2, 0x238)
REG32(LAST_FRAME_TOP3, 0x23c)
#define CFRAME_REG_R_MAX (R_LAST_FRAME_TOP3 + 1)
#define FRAME_NUM_QWORDS 25
#define FRAME_NUM_WORDS (FRAME_NUM_QWORDS * 4) /* 25 * 128 bits */
/* One configuration frame: FRAME_NUM_QWORDS x 128 bits of frame data. */
typedef struct XlnxCFrame {
    uint32_t data[FRAME_NUM_WORDS];
} XlnxCFrame;
/* Instance state of one CFRAME register module (one per device row). */
struct XlnxVersalCFrameReg {
    SysBusDevice parent_obj;
    MemoryRegion iomem;          /* register access region */
    MemoryRegion iomem_fdri;     /* FDRI (frame data input) region */
    qemu_irq irq_cfrm_imr;       /* interrupt output (CFRM_ISR & ~CFRM_IMR) */

    /* 128-bit wfifo. */
    uint32_t wfifo[WFIFO_SZ];

    uint32_t regs[CFRAME_REG_R_MAX];
    RegisterInfo regs_info[CFRAME_REG_R_MAX];

    bool rowon;    /* row is switched on — presumably set via CFU commands */
    bool wcfg;     /* write-configuration mode active */
    bool rcfg;     /* read-configuration mode active */

    GTree *cframes;        /* stored configuration frames */
    Fifo32 new_f_data;     /* data accumulated for the frame being written */

    struct {
        /* Link to the CFU FDRO, used for configuration readback. */
        XlnxCfiIf *cfu_fdro;
        /* Number of frames per blocktype (cf. LAST_FRAME_BOT/TOP regs). */
        uint32_t blktype_num_frames[7];
    } cfg;
    bool row_configured;
};
/*
 * Instance state of the broadcast CFRAME register module: writes are
 * fanned out to the connected per-row CFRAME modules.
 */
struct XlnxVersalCFrameBcastReg {
    SysBusDevice parent_obj;
    MemoryRegion iomem_reg;      /* register access region */
    MemoryRegion iomem_fdri;     /* FDRI (frame data input) region */

    /* 128-bit wfifo. */
    uint32_t wfifo[WFIFO_SZ];

    struct {
        /* Links to the per-row CFRAME modules to broadcast to. */
        XlnxCfiIf *cframe[15];
    } cfg;
};
#endif

View File

@ -0,0 +1,258 @@
/*
* QEMU model of the CFU Configuration Unit.
*
* Copyright (C) 2023, Advanced Micro Devices, Inc.
*
* Written by Francisco Iglesias <francisco.iglesias@amd.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*
* References:
* [1] Versal ACAP Technical Reference Manual,
* https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
*
* [2] Versal ACAP Register Reference,
* https://www.xilinx.com/htmldocs/registers/am012/am012-versal-register-reference.html
*/
#ifndef HW_MISC_XLNX_VERSAL_CFU_APB_H
#define HW_MISC_XLNX_VERSAL_CFU_APB_H
#include "hw/sysbus.h"
#include "hw/register.h"
#include "hw/misc/xlnx-cfi-if.h"
#include "qemu/fifo32.h"
#define TYPE_XLNX_VERSAL_CFU_APB "xlnx,versal-cfu-apb"
OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUAPB, XLNX_VERSAL_CFU_APB)
#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx,versal-cfu-fdro"
OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUFDRO, XLNX_VERSAL_CFU_FDRO)
#define TYPE_XLNX_VERSAL_CFU_SFR "xlnx,versal-cfu-sfr"
OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUSFR, XLNX_VERSAL_CFU_SFR)
REG32(CFU_ISR, 0x0)
FIELD(CFU_ISR, USR_GTS_EVENT, 9, 1)
FIELD(CFU_ISR, USR_GSR_EVENT, 8, 1)
FIELD(CFU_ISR, SLVERR, 7, 1)
FIELD(CFU_ISR, DECOMP_ERROR, 6, 1)
FIELD(CFU_ISR, BAD_CFI_PACKET, 5, 1)
FIELD(CFU_ISR, AXI_ALIGN_ERROR, 4, 1)
FIELD(CFU_ISR, CFI_ROW_ERROR, 3, 1)
FIELD(CFU_ISR, CRC32_ERROR, 2, 1)
FIELD(CFU_ISR, CRC8_ERROR, 1, 1)
FIELD(CFU_ISR, SEU_ENDOFCALIB, 0, 1)
REG32(CFU_IMR, 0x4)
FIELD(CFU_IMR, USR_GTS_EVENT, 9, 1)
FIELD(CFU_IMR, USR_GSR_EVENT, 8, 1)
FIELD(CFU_IMR, SLVERR, 7, 1)
FIELD(CFU_IMR, DECOMP_ERROR, 6, 1)
FIELD(CFU_IMR, BAD_CFI_PACKET, 5, 1)
FIELD(CFU_IMR, AXI_ALIGN_ERROR, 4, 1)
FIELD(CFU_IMR, CFI_ROW_ERROR, 3, 1)
FIELD(CFU_IMR, CRC32_ERROR, 2, 1)
FIELD(CFU_IMR, CRC8_ERROR, 1, 1)
FIELD(CFU_IMR, SEU_ENDOFCALIB, 0, 1)
REG32(CFU_IER, 0x8)
FIELD(CFU_IER, USR_GTS_EVENT, 9, 1)
FIELD(CFU_IER, USR_GSR_EVENT, 8, 1)
FIELD(CFU_IER, SLVERR, 7, 1)
FIELD(CFU_IER, DECOMP_ERROR, 6, 1)
FIELD(CFU_IER, BAD_CFI_PACKET, 5, 1)
FIELD(CFU_IER, AXI_ALIGN_ERROR, 4, 1)
FIELD(CFU_IER, CFI_ROW_ERROR, 3, 1)
FIELD(CFU_IER, CRC32_ERROR, 2, 1)
FIELD(CFU_IER, CRC8_ERROR, 1, 1)
FIELD(CFU_IER, SEU_ENDOFCALIB, 0, 1)
REG32(CFU_IDR, 0xc)
FIELD(CFU_IDR, USR_GTS_EVENT, 9, 1)
FIELD(CFU_IDR, USR_GSR_EVENT, 8, 1)
FIELD(CFU_IDR, SLVERR, 7, 1)
FIELD(CFU_IDR, DECOMP_ERROR, 6, 1)
FIELD(CFU_IDR, BAD_CFI_PACKET, 5, 1)
FIELD(CFU_IDR, AXI_ALIGN_ERROR, 4, 1)
FIELD(CFU_IDR, CFI_ROW_ERROR, 3, 1)
FIELD(CFU_IDR, CRC32_ERROR, 2, 1)
FIELD(CFU_IDR, CRC8_ERROR, 1, 1)
FIELD(CFU_IDR, SEU_ENDOFCALIB, 0, 1)
REG32(CFU_ITR, 0x10)
FIELD(CFU_ITR, USR_GTS_EVENT, 9, 1)
FIELD(CFU_ITR, USR_GSR_EVENT, 8, 1)
FIELD(CFU_ITR, SLVERR, 7, 1)
FIELD(CFU_ITR, DECOMP_ERROR, 6, 1)
FIELD(CFU_ITR, BAD_CFI_PACKET, 5, 1)
FIELD(CFU_ITR, AXI_ALIGN_ERROR, 4, 1)
FIELD(CFU_ITR, CFI_ROW_ERROR, 3, 1)
FIELD(CFU_ITR, CRC32_ERROR, 2, 1)
FIELD(CFU_ITR, CRC8_ERROR, 1, 1)
FIELD(CFU_ITR, SEU_ENDOFCALIB, 0, 1)
REG32(CFU_PROTECT, 0x14)
FIELD(CFU_PROTECT, ACTIVE, 0, 1)
REG32(CFU_FGCR, 0x18)
FIELD(CFU_FGCR, GCLK_CAL, 14, 1)
FIELD(CFU_FGCR, SC_HBC_TRIGGER, 13, 1)
FIELD(CFU_FGCR, GLOW, 12, 1)
FIELD(CFU_FGCR, GPWRDWN, 11, 1)
FIELD(CFU_FGCR, GCAP, 10, 1)
FIELD(CFU_FGCR, GSCWE, 9, 1)
FIELD(CFU_FGCR, GHIGH_B, 8, 1)
FIELD(CFU_FGCR, GMC_B, 7, 1)
FIELD(CFU_FGCR, GWE, 6, 1)
FIELD(CFU_FGCR, GRESTORE, 5, 1)
FIELD(CFU_FGCR, GTS_CFG_B, 4, 1)
FIELD(CFU_FGCR, GLUTMASK, 3, 1)
FIELD(CFU_FGCR, EN_GLOBS_B, 2, 1)
FIELD(CFU_FGCR, EOS, 1, 1)
FIELD(CFU_FGCR, INIT_COMPLETE, 0, 1)
REG32(CFU_CTL, 0x1c)
FIELD(CFU_CTL, GSR_GSC, 15, 1)
FIELD(CFU_CTL, SLVERR_EN, 14, 1)
FIELD(CFU_CTL, CRC32_RESET, 13, 1)
FIELD(CFU_CTL, AXI_ERROR_EN, 12, 1)
FIELD(CFU_CTL, FLUSH_AXI, 11, 1)
FIELD(CFU_CTL, SSI_PER_SLR_PR, 10, 1)
FIELD(CFU_CTL, GCAP_CLK_EN, 9, 1)
FIELD(CFU_CTL, STATUS_SYNC_DISABLE, 8, 1)
FIELD(CFU_CTL, IGNORE_CFI_ERROR, 7, 1)
FIELD(CFU_CTL, CFRAME_DISABLE, 6, 1)
FIELD(CFU_CTL, QWORD_CNT_RESET, 5, 1)
FIELD(CFU_CTL, CRC8_DISABLE, 4, 1)
FIELD(CFU_CTL, CRC32_CHECK, 3, 1)
FIELD(CFU_CTL, DECOMPRESS, 2, 1)
FIELD(CFU_CTL, SEU_GO, 1, 1)
FIELD(CFU_CTL, CFI_LOCAL_RESET, 0, 1)
REG32(CFU_CRAM_RW, 0x20)
FIELD(CFU_CRAM_RW, RFIFO_AFULL_DEPTH, 18, 9)
FIELD(CFU_CRAM_RW, RD_WAVE_CNT_LEFT, 12, 6)
FIELD(CFU_CRAM_RW, RD_WAVE_CNT, 6, 6)
FIELD(CFU_CRAM_RW, WR_WAVE_CNT, 0, 6)
REG32(CFU_MASK, 0x28)
REG32(CFU_CRC_EXPECT, 0x2c)
REG32(CFU_CFRAME_LEFT_T0, 0x60)
FIELD(CFU_CFRAME_LEFT_T0, NUM, 0, 20)
REG32(CFU_CFRAME_LEFT_T1, 0x64)
FIELD(CFU_CFRAME_LEFT_T1, NUM, 0, 20)
REG32(CFU_CFRAME_LEFT_T2, 0x68)
FIELD(CFU_CFRAME_LEFT_T2, NUM, 0, 20)
REG32(CFU_ROW_RANGE, 0x6c)
FIELD(CFU_ROW_RANGE, HALF_FSR, 5, 1)
FIELD(CFU_ROW_RANGE, NUM, 0, 5)
REG32(CFU_STATUS, 0x100)
FIELD(CFU_STATUS, SEU_WRITE_ERROR, 30, 1)
FIELD(CFU_STATUS, FRCNT_ERROR, 29, 1)
FIELD(CFU_STATUS, RSVD_ERROR, 28, 1)
FIELD(CFU_STATUS, FDRO_ERROR, 27, 1)
FIELD(CFU_STATUS, FDRI_ERROR, 26, 1)
FIELD(CFU_STATUS, FDRI_READ_ERROR, 25, 1)
FIELD(CFU_STATUS, READ_FDRI_ERROR, 24, 1)
FIELD(CFU_STATUS, READ_SFR_ERROR, 23, 1)
FIELD(CFU_STATUS, READ_STREAM_ERROR, 22, 1)
FIELD(CFU_STATUS, UNKNOWN_STREAM_PKT, 21, 1)
FIELD(CFU_STATUS, USR_GTS, 20, 1)
FIELD(CFU_STATUS, USR_GSR, 19, 1)
FIELD(CFU_STATUS, AXI_BAD_WSTRB, 18, 1)
FIELD(CFU_STATUS, AXI_BAD_AR_SIZE, 17, 1)
FIELD(CFU_STATUS, AXI_BAD_AW_SIZE, 16, 1)
FIELD(CFU_STATUS, AXI_BAD_ARADDR, 15, 1)
FIELD(CFU_STATUS, AXI_BAD_AWADDR, 14, 1)
FIELD(CFU_STATUS, SCAN_CLEAR_PASS, 13, 1)
FIELD(CFU_STATUS, HC_SEC_ERROR, 12, 1)
FIELD(CFU_STATUS, GHIGH_B_ISHIGH, 11, 1)
FIELD(CFU_STATUS, GHIGH_B_ISLOW, 10, 1)
FIELD(CFU_STATUS, GMC_B_ISHIGH, 9, 1)
FIELD(CFU_STATUS, GMC_B_ISLOW, 8, 1)
FIELD(CFU_STATUS, GPWRDWN_B_ISHIGH, 7, 1)
FIELD(CFU_STATUS, CFI_SEU_CRC_ERROR, 6, 1)
FIELD(CFU_STATUS, CFI_SEU_ECC_ERROR, 5, 1)
FIELD(CFU_STATUS, CFI_SEU_HEARTBEAT, 4, 1)
FIELD(CFU_STATUS, SCAN_CLEAR_DONE, 3, 1)
FIELD(CFU_STATUS, HC_COMPLETE, 2, 1)
FIELD(CFU_STATUS, CFI_CFRAME_BUSY, 1, 1)
FIELD(CFU_STATUS, CFU_STREAM_BUSY, 0, 1)
REG32(CFU_INTERNAL_STATUS, 0x104)
FIELD(CFU_INTERNAL_STATUS, SSI_EOS, 22, 1)
FIELD(CFU_INTERNAL_STATUS, SSI_GWE, 21, 1)
FIELD(CFU_INTERNAL_STATUS, RFIFO_EMPTY, 20, 1)
FIELD(CFU_INTERNAL_STATUS, RFIFO_FULL, 19, 1)
FIELD(CFU_INTERNAL_STATUS, SEL_SFR, 18, 1)
FIELD(CFU_INTERNAL_STATUS, STREAM_CFRAME, 17, 1)
FIELD(CFU_INTERNAL_STATUS, FDRI_PHASE, 16, 1)
FIELD(CFU_INTERNAL_STATUS, CFI_PIPE_EN, 15, 1)
FIELD(CFU_INTERNAL_STATUS, AWFIFO_DCNT, 10, 5)
FIELD(CFU_INTERNAL_STATUS, WFIFO_DCNT, 5, 5)
FIELD(CFU_INTERNAL_STATUS, REPAIR_BUSY, 4, 1)
FIELD(CFU_INTERNAL_STATUS, TRIMU_BUSY, 3, 1)
FIELD(CFU_INTERNAL_STATUS, TRIMB_BUSY, 2, 1)
FIELD(CFU_INTERNAL_STATUS, HCLEANR_BUSY, 1, 1)
FIELD(CFU_INTERNAL_STATUS, HCLEAN_BUSY, 0, 1)
REG32(CFU_QWORD_CNT, 0x108)
REG32(CFU_CRC_LIVE, 0x10c)
REG32(CFU_PENDING_READ_CNT, 0x110)
FIELD(CFU_PENDING_READ_CNT, NUM, 0, 25)
REG32(CFU_FDRI_CNT, 0x114)
REG32(CFU_ECO1, 0x118)
REG32(CFU_ECO2, 0x11c)
#define R_MAX (R_CFU_ECO2 + 1)
#define NUM_STREAM 2
#define WFIFO_SZ 4
/* Instance state of the CFU APB register frontend. */
struct XlnxVersalCFUAPB {
    SysBusDevice parent_obj;
    MemoryRegion iomem;                        /* register access region */
    MemoryRegion iomem_stream[NUM_STREAM];     /* CFU stream regions */
    qemu_irq irq_cfu_imr;                      /* interrupt output */

    /* 128-bit wfifo. */
    uint32_t wfifo[WFIFO_SZ];

    uint32_t regs[R_MAX];
    RegisterInfo regs_info[R_MAX];

    /* Row address of the CFRAME targeted by FDRI writes. */
    uint8_t fdri_row_addr;

    struct {
        /* Links to the CFRAME modules packets are forwarded to. */
        XlnxCfiIf *cframe[15];
    } cfg;
};
/* Instance state of the CFU FDRO (frame data read-out) port. */
struct XlnxVersalCFUFDRO {
    SysBusDevice parent_obj;
    MemoryRegion iomem_fdro;   /* FDRO access region */

    Fifo32 fdro_data;          /* readback data queued for the guest */
};
/* Instance state of the CFU SFR port. */
struct XlnxVersalCFUSFR {
    SysBusDevice parent_obj;
    MemoryRegion iomem_sfr;    /* SFR access region */

    /* 128-bit wfifo. */
    uint32_t wfifo[WFIFO_SZ];

    struct {
        /* Link to the CFU APB frontend that writes are forwarded to. */
        XlnxVersalCFUAPB *cfu;
    } cfg;
};
/**
 * This is a helper function for updating a CFI data write fifo, an array of 4
 * uint32_t and 128 bits of data that are allowed to be written through 4
 * sequential 32 bit accesses. After the last index has been written into the
 * write fifo (wfifo), the data is copied to and returned in a secondary fifo
 * provided to the function (wfifo_ret), and the write fifo is cleared
 * (zeroized).
 *
 * @addr: the address used when calculating the wfifo array index to update
 * @value: the value to write into the wfifo array
 * @wfifo: the wfifo to update
 * @wfifo_ret: will return the wfifo data when all 128 bits have been written
 *
 * @return: true if all 128 bits have been updated.
 */
bool update_wfifo(hwaddr addr, uint64_t value,
uint32_t *wfifo, uint32_t *wfifo_ret);
#endif

View File

@ -116,6 +116,7 @@ struct KVMState
uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
bool kvm_dirty_ring_with_bitmap;
uint64_t kvm_eager_split_size; /* Eager Page Splitting chunk size */
struct KVMDirtyRingReaper reaper;
NotifyVmexitOption notify_vmexit;
uint32_t notify_window;

View File

@ -186,6 +186,7 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel,
" split-wx=on|off (enable TCG split w^x mapping)\n"
" tb-size=n (TCG translation block cache size)\n"
" dirty-ring-size=n (KVM dirty ring GFN count, default 0)\n"
" eager-split-size=n (KVM Eager Page Split chunk size, default 0, disabled. ARM only)\n"
" notify-vmexit=run|internal-error|disable,notify-window=n (enable notify VM exit and set notify window, x86 only)\n"
" thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
SRST
@ -244,6 +245,20 @@ SRST
is disabled (dirty-ring-size=0). When enabled, KVM will instead
record dirty pages in a bitmap.
``eager-split-size=n``
KVM implements dirty page logging at the PAGE_SIZE granularity and
enabling dirty-logging on a huge-page requires breaking it into
PAGE_SIZE pages in the first place. KVM on ARM does this splitting
lazily by default. There are performance benefits in doing huge-page
split eagerly, especially in situations where TLBI costs associated
with break-before-make sequences are considerable and also if guest
workloads are read intensive. The size here specifies how many pages
to break at a time and needs to be a valid block size which is
1GB/2MB/4KB, 32MB/16KB and 512MB/64KB for 4KB/16KB/64KB PAGE_SIZE
respectively. Be wary of specifying a larger size, as it will increase
memory consumption. By default, this feature is disabled
(eager-split-size=0).
``notify-vmexit=run|internal-error|disable,notify-window=n``
Enables or disables notify VM exit support on x86 host and specify
the corresponding notify window to trigger the VM exit if enabled.

View File

@ -95,7 +95,7 @@ static const char *cpu_model_advertised_features[] = {
"sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
"sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
"kvm-no-adjvtime", "kvm-steal-time",
"pauth", "pauth-impdef",
"pauth", "pauth-impdef", "pauth-qarma3",
NULL
};

View File

@ -243,6 +243,10 @@ static void arm_cpu_reset_hold(Object *obj)
SCTLR_EnDA | SCTLR_EnDB);
/* Trap on btype=3 for PACIxSP. */
env->cp15.sctlr_el[1] |= SCTLR_BT0;
/* Trap on implementation defined registers. */
if (cpu_isar_feature(aa64_tidcp1, cpu)) {
env->cp15.sctlr_el[1] |= SCTLR_TIDCP;
}
/* and to the FP/Neon instructions */
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
CPACR_EL1, FPEN, 3);

View File

@ -1033,6 +1033,7 @@ struct ArchCPU {
uint32_t dbgdevid1;
uint64_t id_aa64isar0;
uint64_t id_aa64isar1;
uint64_t id_aa64isar2;
uint64_t id_aa64pfr0;
uint64_t id_aa64pfr1;
uint64_t id_aa64mmfr0;
@ -1071,6 +1072,7 @@ struct ArchCPU {
*/
bool prop_pauth;
bool prop_pauth_impdef;
bool prop_pauth_qarma3;
bool prop_lpa2;
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
@ -3795,28 +3797,59 @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
}
/*
 * These are the values from APA/API/APA3.
 * In general these must be compared '>=', per the normal Arm ARM
 * treatment of fields in ID registers.
 */
typedef enum {
    PauthFeat_None = 0,           /* no pointer authentication */
    PauthFeat_1 = 1,              /* FEAT_PAuth */
    PauthFeat_EPAC = 2,           /* FEAT_EPAC */
    PauthFeat_2 = 3,              /* FEAT_PAuth2 */
    PauthFeat_FPAC = 4,           /* FEAT_FPAC */
    PauthFeat_FPACCOMBINED = 5,   /* FEAT_FPACCOMBINE */
} ARMPauthFeature;
/* Return the pauth feature level from whichever of APA/API/APA3 is active. */
static inline ARMPauthFeature
isar_feature_pauth_feature(const ARMISARegisters *id)
{
    /*
     * Architecturally, only one of {APA,API,APA3} may be active (non-zero)
     * and the other two must be zero. Thus we may avoid conditionals.
     */
    return (FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) |
            FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, API) |
            FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3));
}
static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
{
    /*
     * Return true if any form of pauth is enabled, as this
     * predicate controls migration of the 128-bit keys.
     *
     * The superseded open-coded mask of APA/API/GPA/GPI has been dropped:
     * it was left above the new return as unreachable dead code (stripped
     * diff residue), and the feature-level helper covers the same check.
     */
    return isar_feature_pauth_feature(id) != PauthFeat_None;
}
static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
static inline bool isar_feature_aa64_pauth_qarma5(const ARMISARegisters *id)
{
/*
* Return true if pauth is enabled with the architected QARMA algorithm.
* QEMU will always set APA+GPA to the same value.
* Return true if pauth is enabled with the architected QARMA5 algorithm.
* QEMU will always enable or disable both APA and GPA.
*/
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
}
/* FEAT_PACQARMA3 predicate. */
static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id)
{
    /*
     * Return true if pauth is enabled with the architected QARMA3 algorithm.
     * QEMU will always enable or disable both APA3 and GPA3.
     */
    return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3) != 0;
}
static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
@ -3939,6 +3972,11 @@ static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0;
}
static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR1, TIDCP1) != 0;
}
static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;

View File

@ -473,43 +473,80 @@ void aarch64_add_sme_properties(Object *obj)
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
int arch_val = 0, impdef_val = 0;
uint64_t t;
ARMPauthFeature features = cpu_isar_feature(pauth_feature, cpu);
uint64_t isar1, isar2;
/* Exit early if PAuth is enabled, and fall through to disable it */
if ((kvm_enabled() || hvf_enabled()) && cpu->prop_pauth) {
if (!cpu_isar_feature(aa64_pauth, cpu)) {
error_setg(errp, "'pauth' feature not supported by %s on this host",
kvm_enabled() ? "KVM" : "hvf");
/*
* These properties enable or disable Pauth as a whole, or change
* the pauth algorithm, but do not change the set of features that
* are present. We have saved a copy of those features above and
* will now place it into the field that chooses the algorithm.
*
* Begin by disabling all fields.
*/
isar1 = cpu->isar.id_aa64isar1;
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 0);
isar2 = cpu->isar.id_aa64isar2;
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, 0);
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 0);
if (kvm_enabled() || hvf_enabled()) {
/*
* Exit early if PAuth is enabled and fall through to disable it.
* The algorithm selection properties are not present.
*/
if (cpu->prop_pauth) {
if (features == 0) {
error_setg(errp, "'pauth' feature not supported by "
"%s on this host", current_accel_name());
}
return;
}
} else {
/* Pauth properties are only present when the model supports it. */
if (features == 0) {
assert(!cpu->prop_pauth);
return;
}
return;
}
if (cpu->prop_pauth) {
if (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) {
error_setg(errp,
"cannot enable both pauth-impdef and pauth-qarma3");
return;
}
/* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
if (cpu->prop_pauth) {
if (cpu->prop_pauth_impdef) {
impdef_val = 1;
} else {
arch_val = 1;
if (cpu->prop_pauth_impdef) {
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
} else if (cpu->prop_pauth_qarma3) {
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, features);
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 1);
} else {
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
}
} else if (cpu->prop_pauth_impdef || cpu->prop_pauth_qarma3) {
error_setg(errp, "cannot enable pauth-impdef or "
"pauth-qarma3 without pauth");
error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
}
} else if (cpu->prop_pauth_impdef) {
error_setg(errp, "cannot enable pauth-impdef without pauth");
error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
}
t = cpu->isar.id_aa64isar1;
t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
cpu->isar.id_aa64isar1 = t;
cpu->isar.id_aa64isar1 = isar1;
cpu->isar.id_aa64isar2 = isar2;
}
static Property arm_cpu_pauth_property =
DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
static Property arm_cpu_pauth_impdef_property =
DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
static Property arm_cpu_pauth_qarma3_property =
DEFINE_PROP_BOOL("pauth-qarma3", ARMCPU, prop_pauth_qarma3, false);
void aarch64_add_pauth_properties(Object *obj)
{
@ -529,6 +566,7 @@ void aarch64_add_pauth_properties(Object *obj)
cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu);
} else {
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma3_property);
}
}

View File

@ -8435,11 +8435,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.id_aa64isar1 },
{ .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
{ .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
.resetvalue = 0 },
.resetvalue = cpu->isar.id_aa64isar2 },
{ .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@ -8682,16 +8682,25 @@ void register_cp_regs_for_features(ARMCPU *cpu)
};
modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
/* RVBAR_EL1 is only implemented if EL1 is the highest EL */
/*
* RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
* TODO: For RMR, a write with bit 1 set should do something with
* cpu_reset(). In the meantime, "the bit is strictly a request",
* so we are in spec just ignoring writes.
*/
if (!arm_feature(env, ARM_FEATURE_EL3) &&
!arm_feature(env, ARM_FEATURE_EL2)) {
ARMCPRegInfo rvbar = {
.name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
.access = PL1_R,
.fieldoffset = offsetof(CPUARMState, cp15.rvbar),
ARMCPRegInfo el1_reset_regs[] = {
{ .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
.access = PL1_R,
.fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
{ .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
.access = PL1_RW, .type = ARM_CP_CONST,
.resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
};
define_one_arm_cp_reg(cpu, &rvbar);
define_arm_cp_regs(cpu, el1_reset_regs);
}
define_arm_cp_regs(cpu, v8_idregs);
define_arm_cp_regs(cpu, v8_cp_reginfo);
@ -8775,22 +8784,25 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_sel2, cpu)) {
define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
}
/* RVBAR_EL2 is only implemented if EL2 is the highest EL */
/*
* RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
* See commentary near RMR_EL1.
*/
if (!arm_feature(env, ARM_FEATURE_EL3)) {
ARMCPRegInfo rvbar[] = {
{
.name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
.access = PL2_R,
.fieldoffset = offsetof(CPUARMState, cp15.rvbar),
},
{ .name = "RVBAR", .type = ARM_CP_ALIAS,
.cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
.access = PL2_R,
.fieldoffset = offsetof(CPUARMState, cp15.rvbar),
},
static const ARMCPRegInfo el2_reset_regs[] = {
{ .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
.access = PL2_R,
.fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
{ .name = "RVBAR", .type = ARM_CP_ALIAS,
.cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
.access = PL2_R,
.fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
{ .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
};
define_arm_cp_regs(cpu, rvbar);
define_arm_cp_regs(cpu, el2_reset_regs);
}
}
@ -8801,8 +8813,14 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
.access = PL3_R,
.fieldoffset = offsetof(CPUARMState, cp15.rvbar),
},
.fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
{ .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
.access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
{ .name = "RMR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
.access = PL3_RW, .type = ARM_CP_CONST,
.resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
{ .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
.access = PL3_RW,

View File

@ -81,6 +81,8 @@ DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
DEF_HELPER_2(get_cp_reg, i32, env, cptr)
DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)

View File

@ -847,6 +847,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{ HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
{ HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
{ HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
/* Add ID_AA64ISAR2_EL1 here when HVF supports it */
{ HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
{ HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
{ HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },

View File

@ -30,6 +30,7 @@
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qemu/log.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
@ -287,6 +288,26 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
}
if (s->kvm_eager_split_size) {
uint32_t sizes;
sizes = kvm_vm_check_extension(s, KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES);
if (!sizes) {
s->kvm_eager_split_size = 0;
warn_report("Eager Page Split support not available");
} else if (!(s->kvm_eager_split_size & sizes)) {
error_report("Eager Page Split requested chunk size not valid");
ret = -EINVAL;
} else {
ret = kvm_vm_enable_cap(s, KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE, 0,
s->kvm_eager_split_size);
if (ret < 0) {
error_report("Enabling of Eager Page Split failed: %s",
strerror(-ret));
}
}
}
kvm_arm_init_debug(s);
return ret;
@ -1069,6 +1090,46 @@ bool kvm_arch_cpu_check_are_resettable(void)
return true;
}
/*
 * QOM property getter for "eager-split-size": report the configured
 * KVM eager page-split chunk size back through the QAPI visitor.
 */
static void kvm_arch_get_eager_split_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint64_t value = s->kvm_eager_split_size;

    visit_type_size(v, name, &value, errp);
}
/*
 * QOM property setter for "eager-split-size".  Rejects the write once
 * KVM has been initialized (the capability is enabled during startup),
 * and requires the chunk size to be zero (disabled) or a power of two.
 *
 * Fix: the error messages previously said "early-split-size", which is
 * not the name of the property registered in kvm_arch_accel_class_init
 * ("eager-split-size"); use the real property name so users can find
 * the offending option.
 */
static void kvm_arch_set_eager_split_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint64_t value;

    if (s->fd != -1) {
        /* Too late: the VM already exists, the cap can no longer be set. */
        error_setg(errp, "Unable to set eager-split-size after KVM has been initialized");
        return;
    }

    if (!visit_type_size(v, name, &value, errp)) {
        return;
    }

    if (value && !is_power_of_2(value)) {
        error_setg(errp, "eager-split-size must be a power of two");
        return;
    }

    s->kvm_eager_split_size = value;
}
/*
 * Register arm/KVM accelerator properties.  "eager-split-size" selects
 * the chunk size used with KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE; the
 * default of 0 leaves eager page splitting disabled.
 */
void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add(oc, "eager-split-size", "size",
                              kvm_arch_get_eager_split_size,
                              kvm_arch_set_eager_split_size, NULL, NULL);

    object_class_property_set_description(oc, "eager-split-size",
        "Eager Page Split chunk size for hugepages. (default: 0, disabled)");
}

View File

@ -304,6 +304,8 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
ARM64_SYS_REG(3, 0, 0, 6, 0));
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
ARM64_SYS_REG(3, 0, 0, 6, 1));
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
ARM64_SYS_REG(3, 0, 0, 6, 2));
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
ARM64_SYS_REG(3, 0, 0, 7, 0));
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
@ -672,6 +674,7 @@ typedef struct CPRegStateLevel {
*/
static const CPRegStateLevel non_runtime_cpregs[] = {
{ KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
{ KVM_REG_ARM_PTIMER_CNT, KVM_PUT_FULL_STATE },
};
int kvm_arm_cpreg_level(uint64_t regidx)

View File

@ -49,6 +49,7 @@ enum arm_exception_class {
EC_SYSTEMREGISTERTRAP = 0x18,
EC_SVEACCESSTRAP = 0x19,
EC_ERETTRAP = 0x1a,
EC_PACFAIL = 0x1c,
EC_SMETRAP = 0x1d,
EC_GPC = 0x1e,
EC_INSNABORT = 0x20,
@ -232,6 +233,12 @@ static inline uint32_t syn_smetrap(SMEExceptionType etype, bool is_16bit)
| (is_16bit ? 0 : ARM_EL_IL) | etype;
}
/*
 * Build the ESR syndrome for a FEAT_FPAC pointer-authentication
 * failure (EC_PACFAIL).  Error code bit 1 is set for the data keys
 * (DA/DB) and clear for the instruction keys; bit 0 carries the key
 * number (0 for the A key, 1 for the B key).
 */
static inline uint32_t syn_pacfail(bool data, int keynumber)
{
    int error_code = (data << 1) | keynumber;
    return (EC_PACFAIL << ARM_EL_EC_SHIFT) | ARM_EL_IL | error_code;
}
static inline uint32_t syn_pactrap(void)
{
return EC_PACTRAP << ARM_EL_EC_SHIFT;

View File

@ -745,6 +745,217 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
aarch64_add_sve_properties(obj);
}
/*
 * IMPLEMENTATION DEFINED (crn == 15) system registers of the
 * Cortex-A710, all modelled as constant-zero stubs since QEMU does
 * not emulate the underlying micro-architectural controls.  The EL1
 * registers route writes through access_actlr_w (defined elsewhere
 * in this file) for their access checks.
 */
static const ARMCPRegInfo cortex_a710_cp_reginfo[] = {
    { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "CPUACTLR4_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "CPUECTLR2_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 5,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 4,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR5_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "CPUACTLR6_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "CPUACTLR7_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = access_actlr_w },
    { .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AVTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPPMCR2_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPPMCR4_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 4,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPPMCR5_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 5,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPPMCR6_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 6,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 4, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ATCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 7, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPSELR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPOR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 2,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPMR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 3,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPOR2_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 4,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPMR2_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 5,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPFR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 6,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * Stub RAMINDEX, as we don't actually implement caches, BTB,
     * or anything else with cpu internal memory.
     * "Read" zeros into the IDATA* and DDATA* output registers.
     */
    { .name = "RAMINDEX_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "IDATA0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 0,
      .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "IDATA1_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 1,
      .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "IDATA2_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 2,
      .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DDATA0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 0,
      .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DDATA1_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 1,
      .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DDATA2_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 2,
      .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
};
/*
 * QOM instance init for the Cortex-A710 CPU model.  The ID register
 * values below are taken from the Cortex-A710 TRM; the "Section"
 * comments refer to that document.
 */
static void aarch64_a710_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a710";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);

    /* Ordered by Section B.4: AArch64 registers */
    cpu->midr = 0x412FD471; /* r2p1 */
    cpu->revidr = 0;
    cpu->isar.id_pfr0 = 0x21110131;
    cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
    cpu->isar.id_dfr0 = 0x16011099;
    cpu->id_afr0 = 0;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02122211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00010142;
    cpu->isar.id_isar5 = 0x11011121; /* with Crypto */
    cpu->isar.id_mmfr4 = 0x21021110;
    cpu->isar.id_isar6 = 0x01111111;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x13211111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->isar.id_pfr2 = 0x00000011;
    cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */
    cpu->isar.id_aa64pfr1 = 0x0000000000000221ull;
    cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */
    cpu->isar.id_aa64dfr0 = 0x000011f010305611ull;
    cpu->isar.id_aa64dfr1 = 0;
    cpu->id_aa64afr0 = 0;
    cpu->id_aa64afr1 = 0;
    cpu->isar.id_aa64isar0 = 0x0221111110212120ull; /* with Crypto */
    cpu->isar.id_aa64isar1 = 0x0010111101211032ull;
    cpu->isar.id_aa64mmfr0 = 0x0000022200101122ull;
    cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
    cpu->isar.id_aa64mmfr2 = 0x1221011110101011ull;
    cpu->clidr = 0x0000001482000023ull;
    cpu->gm_blocksize = 4;
    cpu->ctr = 0x000000049444c004ull;
    cpu->dcz_blocksize = 4;
    /* TODO FEAT_MPAM: mpamidr_el1 = 0x0000_0001_0006_003f */

    /* Section B.5.2: PMCR_EL0 */
    cpu->isar.reset_pmcr_el0 = 0xa000; /* with 20 counters */

    /* Section B.6.7: ICH_VTR_EL2 */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    /* Section 14: Scalable Vector Extensions support */
    cpu->sve_vq.supported = 1 << 0; /* 128bit */

    /*
     * The cortex-a710 TRM does not list CCSIDR values. The layout of
     * the caches are in text in Table 7-1, Table 8-1, and Table 9-1.
     *
     * L1: 4-way set associative 64-byte line size, total either 32K or 64K.
     * L2: 8-way set associative 64 byte line size, total either 256K or 512K.
     */
    cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */
    cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */
    cpu->ccsidr[2] = make_ccsidr64(8, 64, 512 * KiB); /* L2 cache */

    /* FIXME: Not documented -- copied from neoverse-v1 */
    cpu->reset_sctlr = 0x30c50838;

    define_arm_cp_regs(cpu, cortex_a710_cp_reginfo);

    aarch64_add_pauth_properties(obj);
    aarch64_add_sve_properties(obj);
}
/*
* -cpu max: a CPU with as many features enabled as our emulation supports.
* The version of '-cpu max' for qemu-system-arm is defined in cpu32.c;
@ -803,6 +1014,8 @@ void aarch64_max_tcg_initfn(Object *obj)
t = cpu->isar.id_aa64isar1;
t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_FPACCOMBINED);
t = FIELD_DP64(t, ID_AA64ISAR1, API, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */
t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* FEAT_LRCPC2 */
@ -858,6 +1071,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 1); /* FEAT_ETS */
t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */
t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1); /* FEAT_TIDCP1 */
cpu->isar.id_aa64mmfr1 = t;
t = cpu->isar.id_aa64mmfr2;
@ -934,6 +1148,7 @@ static const ARMCPUInfo aarch64_cpus[] = {
{ .name = "cortex-a55", .initfn = aarch64_a55_initfn },
{ .name = "cortex-a72", .initfn = aarch64_a72_initfn },
{ .name = "cortex-a76", .initfn = aarch64_a76_initfn },
{ .name = "cortex-a710", .initfn = aarch64_a710_initfn },
{ .name = "a64fx", .initfn = aarch64_a64fx_initfn },
{ .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn },
{ .name = "neoverse-v1", .initfn = aarch64_neoverse_v1_initfn },

View File

@ -90,9 +90,13 @@ DEF_HELPER_FLAGS_3(pacda, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(pacdb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(pacga, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autia, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autia_combined, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autib, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autib_combined, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autda_combined, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autdb_combined, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)

View File

@ -764,6 +764,39 @@ const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
return ri;
}
/*
 * Test for HCR_EL2.TIDCP at EL1.
 * Since implementation defined registers are rare, and within QEMU
 * most of them are no-op, do not waste HFLAGS space for this and
 * always use a helper.
 *
 * Raises an UNDEF exception routed to EL2 when the hypervisor traps
 * IMPDEF system register accesses; otherwise does nothing.
 */
void HELPER(tidcp_el1)(CPUARMState *env, uint32_t syndrome)
{
    if (arm_hcr_el2_eff(env) & HCR_TIDCP) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, 2, GETPC());
    }
}
/*
 * Similarly, for FEAT_TIDCP1 at EL0.
 * We have already checked for the presence of the feature.
 *
 * The trap targets EL2 when EL0 runs under the EL2&0 regime
 * (ARMMMUIdx_E20_0), otherwise EL1, controlled by that EL's
 * SCTLR.TIDCP bit.
 */
void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
{
    /* See arm_sctlr(), but we also need the sctlr el. */
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
    int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;

    /*
     * The bit is not valid unless the target el is aa64, but since the
     * bit test is simpler perform that first and check validity after.
     */
    if ((env->cp15.sctlr_el[target_el] & SCTLR_TIDCP)
        && arm_el_is_aa64(env, target_el)) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, target_el, GETPC());
    }
}
void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
{
const ARMCPRegInfo *ri = rip;

View File

@ -96,6 +96,21 @@ static uint64_t pac_sub(uint64_t i)
return o;
}
/*
 * Apply the QARMA3 substitution box (sigma1) to each of the sixteen
 * 4-bit cells of the input in parallel.  The table is an involution:
 * applying it twice returns the original value, so the same routine
 * serves for both the forward and inverse substitution layers.
 */
static uint64_t pac_sub1(uint64_t i)
{
    static const uint8_t sub1[16] = {
        0xa, 0xd, 0xe, 0x6, 0xf, 0x7, 0x3, 0x5,
        0x9, 0x8, 0x0, 0xc, 0xb, 0x1, 0x2, 0x4,
    };
    uint64_t result = 0;

    /* Substitute one nibble at a time, from the top cell down. */
    for (int shift = 60; shift >= 0; shift -= 4) {
        result |= (uint64_t)sub1[(i >> shift) & 0xf] << shift;
    }
    return result;
}
static uint64_t pac_inv_sub(uint64_t i)
{
static const uint8_t inv_sub[16] = {
@ -209,7 +224,7 @@ static uint64_t tweak_inv_shuffle(uint64_t i)
}
static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
ARMPACKey key)
ARMPACKey key, bool isqarma3)
{
static const uint64_t RC[5] = {
0x0000000000000000ull,
@ -219,6 +234,7 @@ static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
0x452821E638D01377ull,
};
const uint64_t alpha = 0xC0AC29B7C97C50DDull;
int iterations = isqarma3 ? 2 : 4;
/*
* Note that in the ARM pseudocode, key0 contains bits <127:64>
* and key1 contains bits <63:0> of the 128-bit key.
@ -231,7 +247,7 @@ static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
runningmod = modifier;
workingval = data ^ key0;
for (i = 0; i <= 4; ++i) {
for (i = 0; i <= iterations; ++i) {
roundkey = key1 ^ runningmod;
workingval ^= roundkey;
workingval ^= RC[i];
@ -239,32 +255,48 @@ static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
workingval = pac_cell_shuffle(workingval);
workingval = pac_mult(workingval);
}
workingval = pac_sub(workingval);
if (isqarma3) {
workingval = pac_sub1(workingval);
} else {
workingval = pac_sub(workingval);
}
runningmod = tweak_shuffle(runningmod);
}
roundkey = modk0 ^ runningmod;
workingval ^= roundkey;
workingval = pac_cell_shuffle(workingval);
workingval = pac_mult(workingval);
workingval = pac_sub(workingval);
if (isqarma3) {
workingval = pac_sub1(workingval);
} else {
workingval = pac_sub(workingval);
}
workingval = pac_cell_shuffle(workingval);
workingval = pac_mult(workingval);
workingval ^= key1;
workingval = pac_cell_inv_shuffle(workingval);
workingval = pac_inv_sub(workingval);
if (isqarma3) {
workingval = pac_sub1(workingval);
} else {
workingval = pac_inv_sub(workingval);
}
workingval = pac_mult(workingval);
workingval = pac_cell_inv_shuffle(workingval);
workingval ^= key0;
workingval ^= runningmod;
for (i = 0; i <= 4; ++i) {
workingval = pac_inv_sub(workingval);
if (i < 4) {
for (i = 0; i <= iterations; ++i) {
if (isqarma3) {
workingval = pac_sub1(workingval);
} else {
workingval = pac_inv_sub(workingval);
}
if (i < iterations) {
workingval = pac_mult(workingval);
workingval = pac_cell_inv_shuffle(workingval);
}
runningmod = tweak_inv_shuffle(runningmod);
roundkey = key1 ^ runningmod;
workingval ^= RC[4 - i];
workingval ^= RC[iterations - i];
workingval ^= roundkey;
workingval ^= alpha;
}
@ -282,8 +314,10 @@ static uint64_t pauth_computepac_impdef(uint64_t data, uint64_t modifier,
static uint64_t pauth_computepac(CPUARMState *env, uint64_t data,
uint64_t modifier, ARMPACKey key)
{
if (cpu_isar_feature(aa64_pauth_arch, env_archcpu(env))) {
return pauth_computepac_architected(data, modifier, key);
if (cpu_isar_feature(aa64_pauth_qarma5, env_archcpu(env))) {
return pauth_computepac_architected(data, modifier, key, false);
} else if (cpu_isar_feature(aa64_pauth_qarma3, env_archcpu(env))) {
return pauth_computepac_architected(data, modifier, key, true);
} else {
return pauth_computepac_impdef(data, modifier, key);
}
@ -292,8 +326,10 @@ static uint64_t pauth_computepac(CPUARMState *env, uint64_t data,
static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
ARMPACKey *key, bool data)
{
ARMCPU *cpu = env_archcpu(env);
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data, false);
ARMPauthFeature pauth_feature = cpu_isar_feature(pauth_feature, cpu);
uint64_t pac, ext_ptr, ext, test;
int bot_bit, top_bit;
@ -317,17 +353,26 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
*/
test = sextract64(ptr, bot_bit, top_bit - bot_bit);
if (test != 0 && test != -1) {
/*
* Note that our top_bit is one greater than the pseudocode's
* version, hence "- 2" here.
*/
pac ^= MAKE_64BIT_MASK(top_bit - 2, 1);
if (pauth_feature >= PauthFeat_2) {
/* No action required */
} else if (pauth_feature == PauthFeat_EPAC) {
pac = 0;
} else {
/*
* Note that our top_bit is one greater than the pseudocode's
* version, hence "- 2" here.
*/
pac ^= MAKE_64BIT_MASK(top_bit - 2, 1);
}
}
/*
* Preserve the determination between upper and lower at bit 55,
* and insert pointer authentication code.
*/
if (pauth_feature >= PauthFeat_2) {
pac ^= ptr;
}
if (param.tbi) {
ptr &= ~MAKE_64BIT_MASK(bot_bit, 55 - bot_bit + 1);
pac &= MAKE_64BIT_MASK(bot_bit, 54 - bot_bit + 1);
@ -351,21 +396,46 @@ static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
}
}
static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
ARMPACKey *key, bool data, int keynumber)
/*
 * Raise the FEAT_FPAC authentication-failure exception (syndrome
 * built by syn_pacfail) at the current exception target EL.
 * Never returns; ra is the host return address for unwinding.
 */
static G_NORETURN
void pauth_fail_exception(CPUARMState *env, bool data,
                          int keynumber, uintptr_t ra)
{
    raise_exception_ra(env, EXCP_UDEF, syn_pacfail(data, keynumber),
                       exception_target_el(env), ra);
}
static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
ARMPACKey *key, bool data, int keynumber,
uintptr_t ra, bool is_combined)
{
ARMCPU *cpu = env_archcpu(env);
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data, false);
ARMPauthFeature pauth_feature = cpu_isar_feature(pauth_feature, cpu);
int bot_bit, top_bit;
uint64_t pac, orig_ptr, test;
uint64_t pac, orig_ptr, cmp_mask;
orig_ptr = pauth_original_ptr(ptr, param);
pac = pauth_computepac(env, orig_ptr, modifier, *key);
bot_bit = 64 - param.tsz;
top_bit = 64 - 8 * param.tbi;
test = (pac ^ ptr) & ~MAKE_64BIT_MASK(55, 1);
if (unlikely(extract64(test, bot_bit, top_bit - bot_bit))) {
cmp_mask = MAKE_64BIT_MASK(bot_bit, top_bit - bot_bit);
cmp_mask &= ~MAKE_64BIT_MASK(55, 1);
if (pauth_feature >= PauthFeat_2) {
ARMPauthFeature fault_feature =
is_combined ? PauthFeat_FPACCOMBINED : PauthFeat_FPAC;
uint64_t result = ptr ^ (pac & cmp_mask);
if (pauth_feature >= fault_feature
&& ((result ^ sextract64(result, 55, 1)) & cmp_mask)) {
pauth_fail_exception(env, data, keynumber, ra);
}
return result;
}
if ((pac ^ ptr) & cmp_mask) {
int error_code = (keynumber << 1) | (keynumber ^ 1);
if (param.tbi) {
return deposit64(orig_ptr, 53, 2, error_code);
@ -466,44 +536,88 @@ uint64_t HELPER(pacga)(CPUARMState *env, uint64_t x, uint64_t y)
return pac & 0xffffffff00000000ull;
}
uint64_t HELPER(autia)(CPUARMState *env, uint64_t x, uint64_t y)
/*
 * Authenticate pointer x with the IA key and modifier y.  A no-op
 * (returns x unchanged) when the key is disabled via SCTLR.EnIA.
 * is_combined is true when the auth is part of a combined
 * branch/load instruction, which selects FEAT_FPACCOMBINE rather
 * than FEAT_FPAC for fault reporting in pauth_auth.
 */
static uint64_t pauth_autia(CPUARMState *env, uint64_t x, uint64_t y,
                            uintptr_t ra, bool is_combined)
{
    int el = arm_current_el(env);
    if (!pauth_key_enabled(env, el, SCTLR_EnIA)) {
        return x;
    }
    pauth_check_trap(env, el, ra);
    return pauth_auth(env, x, y, &env->keys.apia, false, 0, ra, is_combined);
}
}
uint64_t HELPER(autib)(CPUARMState *env, uint64_t x, uint64_t y)
/* AUTIA as a standalone instruction. */
uint64_t HELPER(autia)(CPUARMState *env, uint64_t x, uint64_t y)
{
    return pauth_autia(env, x, y, GETPC(), false);
}
/* AUTIA performed as part of a combined branch/load instruction. */
uint64_t HELPER(autia_combined)(CPUARMState *env, uint64_t x, uint64_t y)
{
    return pauth_autia(env, x, y, GETPC(), true);
}
/*
 * Authenticate pointer x with the IB key and modifier y; no-op when
 * disabled via SCTLR.EnIB.  See pauth_autia for is_combined.
 */
static uint64_t pauth_autib(CPUARMState *env, uint64_t x, uint64_t y,
                            uintptr_t ra, bool is_combined)
{
    int el = arm_current_el(env);
    if (!pauth_key_enabled(env, el, SCTLR_EnIB)) {
        return x;
    }
    pauth_check_trap(env, el, ra);
    return pauth_auth(env, x, y, &env->keys.apib, false, 1, ra, is_combined);
}
}
uint64_t HELPER(autda)(CPUARMState *env, uint64_t x, uint64_t y)
/* AUTIB as a standalone instruction. */
uint64_t HELPER(autib)(CPUARMState *env, uint64_t x, uint64_t y)
{
    return pauth_autib(env, x, y, GETPC(), false);
}
/* AUTIB performed as part of a combined branch/load instruction. */
uint64_t HELPER(autib_combined)(CPUARMState *env, uint64_t x, uint64_t y)
{
    return pauth_autib(env, x, y, GETPC(), true);
}
/*
 * Authenticate pointer x with the DA (data) key and modifier y;
 * no-op when disabled via SCTLR.EnDA.  See pauth_autia for is_combined.
 */
static uint64_t pauth_autda(CPUARMState *env, uint64_t x, uint64_t y,
                            uintptr_t ra, bool is_combined)
{
    int el = arm_current_el(env);
    if (!pauth_key_enabled(env, el, SCTLR_EnDA)) {
        return x;
    }
    pauth_check_trap(env, el, ra);
    return pauth_auth(env, x, y, &env->keys.apda, true, 0, ra, is_combined);
}
}
uint64_t HELPER(autdb)(CPUARMState *env, uint64_t x, uint64_t y)
/* AUTDA as a standalone instruction. */
uint64_t HELPER(autda)(CPUARMState *env, uint64_t x, uint64_t y)
{
    return pauth_autda(env, x, y, GETPC(), false);
}
/* AUTDA performed as part of a combined load instruction (LDRAA). */
uint64_t HELPER(autda_combined)(CPUARMState *env, uint64_t x, uint64_t y)
{
    return pauth_autda(env, x, y, GETPC(), true);
}
/*
 * Authenticate pointer x with the DB (data) key and modifier y;
 * no-op when disabled via SCTLR.EnDB.  See pauth_autia for is_combined.
 */
static uint64_t pauth_autdb(CPUARMState *env, uint64_t x, uint64_t y,
                            uintptr_t ra, bool is_combined)
{
    int el = arm_current_el(env);
    if (!pauth_key_enabled(env, el, SCTLR_EnDB)) {
        return x;
    }
    pauth_check_trap(env, el, ra);
    return pauth_auth(env, x, y, &env->keys.apdb, true, 1, ra, is_combined);
}
}
/* AUTDB as a standalone instruction. */
uint64_t HELPER(autdb)(CPUARMState *env, uint64_t x, uint64_t y)
{
    return pauth_autdb(env, x, y, GETPC(), false);
}
/* AUTDB performed as part of a combined load instruction (LDRAB). */
uint64_t HELPER(autdb_combined)(CPUARMState *env, uint64_t x, uint64_t y)
{
    return pauth_autdb(env, x, y, GETPC(), true);
}
uint64_t HELPER(xpaci)(CPUARMState *env, uint64_t a)

View File

@ -1530,9 +1530,9 @@ static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
truedst = tcg_temp_new_i64();
if (use_key_a) {
gen_helper_autia(truedst, cpu_env, dst, modifier);
gen_helper_autia_combined(truedst, cpu_env, dst, modifier);
} else {
gen_helper_autib(truedst, cpu_env, dst, modifier);
gen_helper_autib_combined(truedst, cpu_env, dst, modifier);
}
return truedst;
}
@ -2154,6 +2154,25 @@ static void handle_sys(DisasContext *s, bool isread,
bool need_exit_tb = false;
TCGv_ptr tcg_ri = NULL;
TCGv_i64 tcg_rt;
uint32_t syndrome;
if (crn == 11 || crn == 15) {
/*
* Check for TIDCP trap, which must take precedence over
* the UNDEF for "no such register" etc.
*/
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
switch (s->current_el) {
case 0:
if (dc_isar_feature(aa64_tidcp1, s)) {
gen_helper_tidcp_el0(cpu_env, tcg_constant_i32(syndrome));
}
break;
case 1:
gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
break;
}
}
if (!ri) {
/* Unknown register; this might be a guest error or a QEMU
@ -2176,8 +2195,6 @@ static void handle_sys(DisasContext *s, bool isread,
/* Emit code to perform further access permissions checks at
* runtime; this may result in an exception.
*/
uint32_t syndrome;
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
gen_a64_update_pc(s, 0);
tcg_ri = tcg_temp_new_ptr();
@ -3020,37 +3037,17 @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
}
if (!s->ata) {
/*
* TODO: We could rely on the stores below, at least for
* system mode, if we arrange to add MO_ALIGN_16.
*/
gen_helper_stg_stub(cpu_env, dirty_addr);
} else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
} else {
gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
}
mop = finalize_memop(s, MO_64);
clean_addr = gen_mte_checkN(s, dirty_addr, true, false, 2 << MO_64, mop);
clean_addr = clean_data_tbi(s, dirty_addr);
tcg_rt = cpu_reg(s, a->rt);
tcg_rt2 = cpu_reg(s, a->rt2);
/*
* STGP is defined as two 8-byte memory operations and one tag operation.
* We implement it as one single 16-byte memory operation for convenience.
* Rebuild mop as for STP.
* TODO: The atomicity with LSE2 is stronger than required.
* Need a form of MO_ATOM_WITHIN16_PAIR that never requires
* 16-byte atomicity.
* STGP is defined as two 8-byte memory operations, aligned to TAG_GRANULE,
* and one tag operation. We implement it as one single aligned 16-byte
* memory operation for convenience. Note that the alignment ensures
* MO_ATOM_IFALIGN_PAIR produces 8-byte atomicity for the memory store.
*/
mop = MO_128;
if (s->align_mem) {
mop |= MO_ALIGN_8;
}
mop = finalize_memop_pair(s, mop);
mop = finalize_memop_atom(s, MO_128 | MO_ALIGN, MO_ATOM_IFALIGN_PAIR);
tmp = tcg_temp_new_i128();
if (s->be_data == MO_LE) {
@ -3060,6 +3057,15 @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
}
tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
/* Perform the tag store, if tag access enabled. */
if (s->ata) {
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
} else {
gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
}
}
op_addr_ldstpair_post(s, a, dirty_addr, offset);
return true;
}
@ -3352,11 +3358,11 @@ static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
if (s->pauth_active) {
if (!a->m) {
gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
tcg_constant_i64(0));
gen_helper_autda_combined(dirty_addr, cpu_env, dirty_addr,
tcg_constant_i64(0));
} else {
gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
tcg_constant_i64(0));
gen_helper_autdb_combined(dirty_addr, cpu_env, dirty_addr,
tcg_constant_i64(0));
}
}

View File

@ -4538,6 +4538,20 @@ void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
/*
 * Return true if the AArch32 cp15 encoding (crn, crm) lies in the
 * IMPLEMENTATION DEFINED space: crn 9-11, with only the crm subsets
 * listed below reserved for cores.
 */
static bool aa32_cpreg_encoding_in_impdef_space(uint8_t crn, uint8_t crm)
{
    /*
     * Bitmaps of the reserved crm values, indexed by crn - 9:
     *   crn ==  9: crm in {c0-c2, c5-c8}
     *   crn == 10: crm in {c0, c1, c4, c8}
     *   crn == 11: crm in {c0-c8, c15}
     */
    static const uint16_t impdef_crm_mask[3] = {
        0x01e7, /* crn == 9 */
        0x0113, /* crn == 10 */
        0x81ff, /* crn == 11 */
    };

    if (crn < 9 || crn > 11) {
        return false;
    }
    return (impdef_crm_mask[crn - 9] >> crm) & 1;
}
static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
int opc1, int crn, int crm, int opc2,
bool isread, int rt, int rt2)
@ -4619,6 +4633,25 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
}
}
if (cpnum == 15 && aa32_cpreg_encoding_in_impdef_space(crn, crm)) {
/*
* Check for TIDCP trap, which must take precedence over the UNDEF
* for "no such register" etc. It shares precedence with HSTR,
* but raises the same exception, so order doesn't matter.
*/
switch (s->current_el) {
case 0:
if (arm_dc_feature(s, ARM_FEATURE_AARCH64)
&& dc_isar_feature(aa64_tidcp1, s)) {
gen_helper_tidcp_el0(cpu_env, tcg_constant_i32(syndrome));
}
break;
case 1:
gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
break;
}
}
if (!ri) {
/*
* Unknown register; this might be a guest error or a QEMU

View File

@ -417,12 +417,22 @@ static void pauth_tests_default(QTestState *qts, const char *cpu_type)
{
assert_has_feature_enabled(qts, cpu_type, "pauth");
assert_has_feature_disabled(qts, cpu_type, "pauth-impdef");
assert_has_feature_disabled(qts, cpu_type, "pauth-qarma3");
assert_set_feature(qts, cpu_type, "pauth", false);
assert_set_feature(qts, cpu_type, "pauth", true);
assert_set_feature(qts, cpu_type, "pauth-impdef", true);
assert_set_feature(qts, cpu_type, "pauth-impdef", false);
assert_error(qts, cpu_type, "cannot enable pauth-impdef without pauth",
assert_set_feature(qts, cpu_type, "pauth-qarma3", true);
assert_set_feature(qts, cpu_type, "pauth-qarma3", false);
assert_error(qts, cpu_type,
"cannot enable pauth-impdef or pauth-qarma3 without pauth",
"{ 'pauth': false, 'pauth-impdef': true }");
assert_error(qts, cpu_type,
"cannot enable pauth-impdef or pauth-qarma3 without pauth",
"{ 'pauth': false, 'pauth-qarma3': true }");
assert_error(qts, cpu_type,
"cannot enable both pauth-impdef and pauth-qarma3",
"{ 'pauth': true, 'pauth-impdef': true, 'pauth-qarma3': true }");
}
static void test_query_cpu_model_expansion(const void *data)

View File

@ -42,7 +42,11 @@ endif
ifneq ($(CROSS_CC_HAS_ARMV8_3),)
AARCH64_TESTS += pauth-1 pauth-2 pauth-4 pauth-5
pauth-%: CFLAGS += -march=armv8.3-a
run-pauth-%: QEMU_OPTS += -cpu max
run-pauth-1: QEMU_OPTS += -cpu max
run-pauth-2: QEMU_OPTS += -cpu max
# Choose a cpu with FEAT_Pauth but without FEAT_FPAC for pauth-[45].
run-pauth-4: QEMU_OPTS += -cpu neoverse-v1
run-pauth-5: QEMU_OPTS += -cpu neoverse-v1
endif
# BTI Tests

View File

@@ -1,5 +1,22 @@
#include <stdint.h>
#include <signal.h>
#include <stdlib.h>
#include <assert.h>
#include "pauth.h"
/*
 * SIGILL handler for the FEAT_FPAC case: with FPAC, a failed AUTDA
 * raises SIGILL instead of returning a corrupted pointer (see the
 * comment in do_test).  Confirm the fault came from the one expected
 * instruction, then treat it as test success.
 */
static void sigill(int sig, siginfo_t *info, void *vuc)
{
    ucontext_t *ctx = vuc;
    uint64_t expected_pc;

    /* Only the instruction at the auth2_insn label may legitimately fault. */
    asm volatile("adr %0, auth2_insn" : "=r"(expected_pc));
    assert(ctx->uc_mcontext.pc == expected_pc);
    exit(0);
}
static int pac_feature;
void do_test(uint64_t value)
{
@@ -27,31 +44,52 @@ void do_test(uint64_t value)
* An invalid salt usually fails authorization, but again there
* is a chance of choosing another salt that works.
* Iterate until we find another salt which does fail.
*
* With FEAT_FPAC, this will SIGILL instead of producing a result.
*/
for (salt2 = salt1 + 1; ; salt2++) {
asm volatile("autda %0, %2" : "=r"(decode) : "0"(encode), "r"(salt2));
asm volatile("auth2_insn: autda %0, %2"
: "=r"(decode) : "0"(encode), "r"(salt2));
if (decode != value) {
break;
}
}
assert(pac_feature < 4); /* No FEAT_FPAC */
/* The VA bits, bit 55, and the TBI bits, should be unchanged. */
assert(((decode ^ value) & 0xff80ffffffffffffull) == 0);
/*
* Bits [54:53] are an error indicator based on the key used;
* the DA key above is keynumber 0, so error == 0b01. Otherwise
* bit 55 of the original is sign-extended into the rest of the auth.
* Without FEAT_Pauth2, bits [54:53] are an error indicator based on
* the key used; the DA key above is keynumber 0, so error == 0b01.
* Otherwise, bit 55 of the original is sign-extended into the rest
* of the auth.
*/
if ((value >> 55) & 1) {
assert(((decode >> 48) & 0xff) == 0b10111111);
} else {
assert(((decode >> 48) & 0xff) == 0b00100000);
if (pac_feature < 3) {
if ((value >> 55) & 1) {
assert(((decode >> 48) & 0xff) == 0b10111111);
} else {
assert(((decode >> 48) & 0xff) == 0b00100000);
}
}
}
int main()
{
static const struct sigaction sa = {
.sa_sigaction = sigill,
.sa_flags = SA_SIGINFO
};
pac_feature = get_pac_feature();
assert(pac_feature != 0);
if (pac_feature >= 4) {
/* FEAT_FPAC */
sigaction(SIGILL, &sa, NULL);
}
do_test(0);
do_test(0xda004acedeadbeefull);
return 0;

View File

@@ -2,14 +2,24 @@
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "pauth.h"
#define TESTS 1000
int main()
{
char base[TESTS];
int i, count = 0;
float perc;
void *base = malloc(TESTS);
int pac_feature = get_pac_feature();
/*
* Exit if no PAuth or FEAT_FPAC, which will SIGILL on AUTIA failure
* rather than return an error for us to check below.
*/
if (pac_feature == 0 || pac_feature >= 4) {
return 0;
}
for (i = 0; i < TESTS; i++) {
uintptr_t in, x, y;
@@ -17,7 +27,7 @@ int main()
in = i + (uintptr_t) base;
asm("mov %0, %[in]\n\t"
"pacia %0, sp\n\t" /* sigill if pauth not supported */
"pacia %0, sp\n\t"
"eor %0, %0, #4\n\t" /* corrupt single bit */
"mov %1, %0\n\t"
"autia %1, sp\n\t" /* validate corrupted pointer */
@@ -36,10 +46,10 @@ int main()
if (x != y) {
count++;
}
}
perc = (float) count / (float) TESTS;
printf("Checks Passed: %0.2f%%", perc * 100.0);
printf("Checks Passed: %0.2f%%\n", perc * 100.0);
assert(perc > 0.95);
return 0;
}

View File

@@ -1,4 +1,5 @@
#include <assert.h>
#include "pauth.h"
static int x;
@@ -6,6 +7,15 @@ int main()
{
int *p0 = &x, *p1, *p2, *p3;
unsigned long salt = 0;
int pac_feature = get_pac_feature();
/*
* Exit if no PAuth or FEAT_FPAC, which will SIGILL on AUTDA failure
* rather than return an error for us to check below.
*/
if (pac_feature == 0 || pac_feature >= 4) {
return 0;
}
/*
* With TBI enabled and a 48-bit VA, there are 7 bits of auth, and so

23
tests/tcg/aarch64/pauth.h Normal file
View File

@@ -0,0 +1,23 @@
/*
* Helper for pauth test case
*
* Copyright (c) 2023 Linaro Ltd
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <assert.h>
#include <sys/auxv.h>
static int get_pac_feature(void)
{
unsigned long isar1, isar2;
assert(getauxval(AT_HWCAP) & HWCAP_CPUID);
asm("mrs %0, id_aa64isar1_el1" : "=r"(isar1));
asm("mrs %0, S3_0_C0_C6_2" : "=r"(isar2)); /* id_aa64isar2_el1 */
return ((isar1 >> 4) & 0xf) /* APA */
| ((isar1 >> 8) & 0xf) /* API */
| ((isar2 >> 12) & 0xf); /* APA3 */
}