Merge tag 'pull-target-arm-20230530-1' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * fsl-imx6: Add SNVS support for i.MX6 boards
 * smmuv3: Add support for stage 2 translations
 * hw/dma/xilinx_axidma: Check DMASR.HALTED to prevent infinite loop
 * hw/arm/xlnx-zynqmp: fix unsigned error when checking the RPUs number
 * cleanups for recent Kconfig changes
 * target/arm: Explicitly select short-format FSR for M-profile
 * tests/qtest: Run arm-specific tests only if the required machine is available
 * hw/arm/sbsa-ref: add GIC node into DT
 * docs: sbsa: correct graphics card name
 * Update copyright dates to 2023

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmR2DYsZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3ubED/40MFaRWfJuVhD3NzWltzhD
# 5Y2/kxd3Bm51ki56hiBWXBXeovR3Exve9rP8OOGJ5RUK0SoEb4xdIjwMAGRt1Ksi
# Ln4MUqjv0tqUNv1hBDKgnGJ4dW34bhmJAnU/Jdzt8yrpGuSmN+LCWoPC+vTNCWYm
# sNFm8VLB+nmVq/sjTKwQc/Uo+7l9JZ+aY6poyHfN7kKpITUHtoCPgwz34btRrXEk
# 4+eNYQV1UvofRhLRVsIrvA89bd7Rcn5iHbhY+xYHaJDEaoYy7iBfUJeDlUtEgW8k
# 0fXt5Z5bXUXpz7jmzXdbq//68p8HcqinarIFH4r0Nbu+u2UgkZDJZRns+p5i8Wmv
# qE+hLGOgEg8s9n2e6chGuvw6wX49T3Xtr7tNpKQfi5ou5VT7qZIwl50m/JefuoPI
# eHu4uPj7pS0z/s8KDk0mNtbfcHkzmT5KpZkbvS2JOzg9o2t1fwGCbKPlcgJPxcIV
# Ro7R3rNvd6XSSQBlmcYNXWE7P7zuJyfjfSN7D7b0MdFP/hBTpLGKI2LBggZEdcce
# 21fiEkEE6d1L2oN+Eiq3q8xQNoVjYSGaE5LJ34+997z7W1JRB/dyJhZM0AkabSMl
# mkgyi9kBKxU4S9pxtZ//Uh9B/5blpMQAI4U8S/svuGqzwfI6luY/Qxue+YzRUD0H
# XsDSBnq1x2LW2Fhu7YVW3Q==
# =/OdY
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 30 May 2023 07:51:55 AM PDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]

* tag 'pull-target-arm-20230530-1' of https://git.linaro.org/people/pmaydell/qemu-arm: (21 commits)
  docs: sbsa: correct graphics card name
  hw/arm/sbsa-ref: add GIC node into DT
  Update copyright dates to 2023
  arm/Kconfig: Make TCG dependence explicit
  arm/Kconfig: Keep Kconfig default entries in default.mak as documentation
  target/arm: Explain why we need to select ARM_V7M
  target/arm: Explicitly select short-format FSR for M-profile
  tests/qtest: Run arm-specific tests only if the required machine is available
  hw/arm/xlnx-zynqmp: fix unsigned error when checking the RPUs number
  hw/dma/xilinx_axidma: Check DMASR.HALTED to prevent infinite loop.
  hw/arm/smmuv3: Add knob to choose translation stage and enable stage-2
  hw/arm/smmuv3: Add stage-2 support in iova notifier
  hw/arm/smmuv3: Add CMDs related to stage-2
  hw/arm/smmuv3: Add VMID to TLB tagging
  hw/arm/smmuv3: Make TLB lookup work for stage-2
  hw/arm/smmuv3: Parse STE config for stage-2
  hw/arm/smmuv3: Add page table walk for stage-2
  hw/arm/smmuv3: Refactor stage-1 PTW
  hw/arm/smmuv3: Update translation config to hold stage-2
  hw/arm/smmuv3: Add missing fields for IDR0
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson <richard.henderson@linaro.org>, 2023-05-30 08:02:05 -07:00 (branch: master)
commit 7fe6cb6811
21 changed files with 774 additions and 145 deletions


@ -2,3 +2,9 @@
# We support all the 32 bit boards so need all their config
include ../arm-softmmu/default.mak
# These are selected by default when TCG is enabled, uncomment them to
# keep out of the build.
# CONFIG_XLNX_ZYNQMP_ARM=n
# CONFIG_XLNX_VERSAL=n
# CONFIG_SBSA_REF=n


@ -4,3 +4,43 @@
# CONFIG_TEST_DEVICES=n
CONFIG_ARM_VIRT=y
# These are selected by default when TCG is enabled, uncomment them to
# keep out of the build.
# CONFIG_CUBIEBOARD=n
# CONFIG_EXYNOS4=n
# CONFIG_HIGHBANK=n
# CONFIG_INTEGRATOR=n
# CONFIG_FSL_IMX31=n
# CONFIG_MUSICPAL=n
# CONFIG_MUSCA=n
# CONFIG_CHEETAH=n
# CONFIG_SX1=n
# CONFIG_NSERIES=n
# CONFIG_STELLARIS=n
# CONFIG_STM32VLDISCOVERY=n
# CONFIG_REALVIEW=n
# CONFIG_VERSATILE=n
# CONFIG_VEXPRESS=n
# CONFIG_ZYNQ=n
# CONFIG_MAINSTONE=n
# CONFIG_GUMSTIX=n
# CONFIG_SPITZ=n
# CONFIG_TOSA=n
# CONFIG_Z2=n
# CONFIG_NPCM7XX=n
# CONFIG_COLLIE=n
# CONFIG_ASPEED_SOC=n
# CONFIG_NETDUINO2=n
# CONFIG_NETDUINOPLUS2=n
# CONFIG_OLIMEX_STM32_H405=n
# CONFIG_MPS2=n
# CONFIG_RASPI=n
# CONFIG_DIGIC=n
# CONFIG_SABRELITE=n
# CONFIG_EMCRAFT_SF2=n
# CONFIG_MICROBIT=n
# CONFIG_FSL_IMX25=n
# CONFIG_FSL_IMX7=n
# CONFIG_FSL_IMX6UL=n
# CONFIG_ALLWINNER_H3=n


@ -89,7 +89,7 @@ default_role = 'any'
# General information about the project.
project = u'QEMU'
copyright = u'2022, The QEMU Project Developers'
copyright = u'2023, The QEMU Project Developers'
author = u'The QEMU Project Developers'
# The version info for the project you're documenting, acts as replacement for


@ -27,6 +27,6 @@ The sbsa-ref board supports:
- System bus EHCI controller
- CDROM and hard disc on AHCI bus
- E1000E ethernet card on PCIe bus
- VGA display adaptor on PCIe bus
- Bochs display adapter on PCIe bus
- A generic SBSA watchdog device


@ -35,24 +35,28 @@ config ARM_VIRT
config CHEETAH
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select OMAP
select TSC210X
config CUBIEBOARD
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select ALLWINNER_A10
config DIGIC
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select PTIMER
select PFLASH_CFI02
config EXYNOS4
bool
default y if TCG && ARM
default y
depends on TCG && ARM
imply I2C_DEVICES
select A9MPCORE
select I2C
@ -65,7 +69,8 @@ config EXYNOS4
config HIGHBANK
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select A9MPCORE
select A15MPCORE
select AHCI
@ -80,7 +85,8 @@ config HIGHBANK
config INTEGRATOR
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select ARM_TIMER
select INTEGRATOR_DEBUG
select PL011 # UART
@ -93,14 +99,16 @@ config INTEGRATOR
config MAINSTONE
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select PXA2XX
select PFLASH_CFI01
select SMC91C111
config MUSCA
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select ARMSSE
select PL011
select PL031
@ -112,7 +120,8 @@ config MARVELL_88W8618
config MUSICPAL
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select OR_IRQ
select BITBANG_I2C
select MARVELL_88W8618
@ -123,22 +132,26 @@ config MUSICPAL
config NETDUINO2
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select STM32F205_SOC
config NETDUINOPLUS2
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select STM32F405_SOC
config OLIMEX_STM32_H405
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select STM32F405_SOC
config NSERIES
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select OMAP
select TMP105 # temperature sensor
select BLIZZARD # LCD/TV controller
@ -171,14 +184,16 @@ config PXA2XX
config GUMSTIX
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select PFLASH_CFI01
select SMC91C111
select PXA2XX
config TOSA
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select ZAURUS # scoop
select MICRODRIVE
select PXA2XX
@ -186,7 +201,8 @@ config TOSA
config SPITZ
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select ADS7846 # touch-screen controller
select MAX111X # A/D converter
select WM8750 # audio codec
@ -199,7 +215,8 @@ config SPITZ
config Z2
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select PFLASH_CFI01
select WM8750
select PL011 # UART
@ -207,7 +224,8 @@ config Z2
config REALVIEW
bool
default y if TCG && ARM
default y
depends on TCG && ARM
imply PCI_DEVICES
imply PCI_TESTDEV
imply I2C_DEVICES
@ -236,7 +254,8 @@ config REALVIEW
config SBSA_REF
bool
default y if TCG && AARCH64
default y
depends on TCG && AARCH64
imply PCI_DEVICES
select AHCI
select ARM_SMMUV3
@ -252,13 +271,15 @@ config SBSA_REF
config SABRELITE
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select FSL_IMX6
select SSI_M25P80
config STELLARIS
bool
default y if TCG && ARM
default y
depends on TCG && ARM
imply I2C_DEVICES
select ARM_V7M
select CMSDK_APB_WATCHDOG
@ -276,7 +297,8 @@ config STELLARIS
config STM32VLDISCOVERY
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select STM32F100_SOC
config STRONGARM
@ -285,19 +307,22 @@ config STRONGARM
config COLLIE
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select PFLASH_CFI01
select ZAURUS # scoop
select STRONGARM
config SX1
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select OMAP
config VERSATILE
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select ARM_TIMER # sp804
select PFLASH_CFI01
select LSI_SCSI_PCI
@ -309,7 +334,8 @@ config VERSATILE
config VEXPRESS
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select A9MPCORE
select A15MPCORE
select ARM_MPTIMER
@ -325,7 +351,8 @@ config VEXPRESS
config ZYNQ
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select A9MPCORE
select CADENCE # UART
select PFLASH_CFI02
@ -342,7 +369,8 @@ config ZYNQ
config ARM_V7M
bool
# currently v7M must be included in a TCG build due to translate.c
default y if TCG && ARM
default y
depends on TCG && ARM
select PTIMER
config ALLWINNER_A10
@ -361,7 +389,8 @@ config ALLWINNER_A10
config ALLWINNER_H3
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select ALLWINNER_A10_PIT
select ALLWINNER_SUN8I_EMAC
select ALLWINNER_I2C
@ -376,7 +405,8 @@ config ALLWINNER_H3
config RASPI
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select FRAMEBUFFER
select PL011 # UART
select SDHCI
@ -407,7 +437,8 @@ config STM32F405_SOC
config XLNX_ZYNQMP_ARM
bool
default y if TCG && AARCH64
default y
depends on TCG && AARCH64
select AHCI
select ARM_GIC
select CADENCE
@ -425,7 +456,8 @@ config XLNX_ZYNQMP_ARM
config XLNX_VERSAL
bool
default y if TCG && AARCH64
default y
depends on TCG && AARCH64
select ARM_GIC
select PL011
select CADENCE
@ -440,7 +472,8 @@ config XLNX_VERSAL
config NPCM7XX
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select A9MPCORE
select ADM1272
select ARM_GIC
@ -457,7 +490,8 @@ config NPCM7XX
config FSL_IMX25
bool
default y if TCG && ARM
default y
depends on TCG && ARM
imply I2C_DEVICES
select IMX
select IMX_FEC
@ -467,7 +501,8 @@ config FSL_IMX25
config FSL_IMX31
bool
default y if TCG && ARM
default y
depends on TCG && ARM
imply I2C_DEVICES
select SERIAL
select IMX
@ -488,7 +523,8 @@ config FSL_IMX6
config ASPEED_SOC
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select DS1338
select FTGMAC100
select I2C
@ -509,7 +545,8 @@ config ASPEED_SOC
config MPS2
bool
default y if TCG && ARM
default y
depends on TCG && ARM
imply I2C_DEVICES
select ARMSSE
select LAN9118
@ -525,7 +562,8 @@ config MPS2
config FSL_IMX7
bool
default y if TCG && ARM
default y
depends on TCG && ARM
imply PCI_DEVICES
imply TEST_DEVICES
imply I2C_DEVICES
@ -544,7 +582,8 @@ config ARM_SMMUV3
config FSL_IMX6UL
bool
default y if TCG && ARM
default y
depends on TCG && ARM
imply I2C_DEVICES
select A15MPCORE
select IMX
@ -556,7 +595,8 @@ config FSL_IMX6UL
config MICROBIT
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select NRF51_SOC
config NRF51_SOC
@ -568,7 +608,8 @@ config NRF51_SOC
config EMCRAFT_SF2
bool
default y if TCG && ARM
default y
depends on TCG && ARM
select MSF2
select SSI_M25P80


@ -53,6 +53,8 @@ static void fsl_imx6_init(Object *obj)
object_initialize_child(obj, "src", &s->src, TYPE_IMX6_SRC);
object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS);
for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) {
snprintf(name, NAME_SIZE, "uart%d", i + 1);
object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL);
@ -390,6 +392,12 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
qdev_get_gpio_in(DEVICE(&s->a9mpcore),
FSL_IMX6_ENET_MAC_1588_IRQ));
/*
* SNVS
*/
sysbus_realize(SYS_BUS_DEVICE(&s->snvs), &error_abort);
sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX6_SNVSHP_ADDR);
/*
* Watchdog
*/

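The SNVS block reused from the i.MX7 model is what gives i.MX6 guests a software power-off path. As an illustration only (not part of this series), here is a minimal bare-metal sketch of the guest-side write the new device responds to; the 0x020cc000 base address, the 0x38 LPCR offset and the TOP/DP_EN bit positions are assumptions drawn from the i.MX6/i.MX7 SNVS programming model, not from this diff.

/*
 * Hypothetical guest-side sketch: request power-off through the SNVS
 * low-power control register. Base address, register offset and bit
 * positions are assumptions, see the note above.
 */
#include <stdint.h>

#define SNVS_BASE   0x020cc000u          /* assumed FSL_IMX6_SNVSHP_ADDR */
#define SNVS_LPCR   0x38u                /* LP control register          */
#define LPCR_DP_EN  (1u << 5)            /* dumb PMIC enable             */
#define LPCR_TOP    (1u << 6)            /* turn off system power        */

static inline void mmio_write32(uintptr_t addr, uint32_t val)
{
    *(volatile uint32_t *)addr = val;    /* device register write */
}

void imx6_snvs_poweroff(void)
{
    mmio_write32(SNVS_BASE + SNVS_LPCR, LPCR_TOP | LPCR_DP_EN);
    for (;;) {
        /* wait for the SoC model to shut the machine down */
    }
}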

@ -29,6 +29,7 @@
#include "exec/hwaddr.h"
#include "kvm_arm.h"
#include "hw/arm/boot.h"
#include "hw/arm/fdt.h"
#include "hw/arm/smmuv3.h"
#include "hw/block/flash.h"
#include "hw/boards.h"
@ -168,6 +169,20 @@ static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
return arm_cpu_mp_affinity(idx, clustersz);
}
static void sbsa_fdt_add_gic_node(SBSAMachineState *sms)
{
char *nodename;
nodename = g_strdup_printf("/intc");
qemu_fdt_add_subnode(sms->fdt, nodename);
qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg",
2, sbsa_ref_memmap[SBSA_GIC_DIST].base,
2, sbsa_ref_memmap[SBSA_GIC_DIST].size,
2, sbsa_ref_memmap[SBSA_GIC_REDIST].base,
2, sbsa_ref_memmap[SBSA_GIC_REDIST].size);
g_free(nodename);
}
/*
* Firmware on this machine only uses ACPI table to load OS, these limited
* device tree nodes are just to let firmware know the info which varies from
@ -204,7 +219,7 @@ static void create_fdt(SBSAMachineState *sms)
* fw compatibility.
*/
qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0);
qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 0);
qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 1);
if (ms->numa_state->have_numa_distance) {
int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
@ -260,6 +275,8 @@ static void create_fdt(SBSAMachineState *sms)
g_free(nodename);
}
sbsa_fdt_add_gic_node(sms);
}
#define SBSA_FLASH_SECTOR_SIZE (256 * KiB)

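Since the comment above notes that firmware relies on these few device-tree nodes for information that varies between machine revisions, a consumer-side sketch may help. This is not part of the patch; it assumes standard libfdt and follows the property layout written by qemu_fdt_setprop_sized_cells() above (two address/size pairs of two cells each).

/*
 * Hypothetical firmware-side sketch: read the GIC distributor and
 * redistributor regions from the new "/intc" node. Not part of the patch.
 */
#include <libfdt.h>
#include <stdint.h>
#include <stdio.h>

int read_gic_bases(const void *fdt)
{
    int node = fdt_path_offset(fdt, "/intc");
    const fdt64_t *reg;
    int len;

    if (node < 0) {
        return node;                       /* node absent: older machine rev */
    }
    reg = fdt_getprop(fdt, node, "reg", &len);
    if (!reg || len < (int)(4 * sizeof(fdt64_t))) {
        return -FDT_ERR_NOTFOUND;
    }
    printf("GICD 0x%llx/0x%llx, GICR 0x%llx/0x%llx\n",
           (unsigned long long)fdt64_to_cpu(reg[0]),
           (unsigned long long)fdt64_to_cpu(reg[1]),
           (unsigned long long)fdt64_to_cpu(reg[2]),
           (unsigned long long)fdt64_to_cpu(reg[3]));
    return 0;
}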

@ -38,7 +38,7 @@ static guint smmu_iotlb_key_hash(gconstpointer v)
/* Jenkins hash */
a = b = c = JHASH_INITVAL + sizeof(*key);
a += key->asid + key->level + key->tg;
a += key->asid + key->vmid + key->level + key->tg;
b += extract64(key->iova, 0, 32);
c += extract64(key->iova, 32, 32);
@ -53,13 +53,15 @@ static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2;
return (k1->asid == k2->asid) && (k1->iova == k2->iova) &&
(k1->level == k2->level) && (k1->tg == k2->tg);
(k1->level == k2->level) && (k1->tg == k2->tg) &&
(k1->vmid == k2->vmid);
}
SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova,
SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova,
uint8_t tg, uint8_t level)
{
SMMUIOTLBKey key = {.asid = asid, .iova = iova, .tg = tg, .level = level};
SMMUIOTLBKey key = {.asid = asid, .vmid = vmid, .iova = iova,
.tg = tg, .level = level};
return key;
}
@ -78,7 +80,8 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
uint64_t mask = subpage_size - 1;
SMMUIOTLBKey key;
key = smmu_get_iotlb_key(cfg->asid, iova & ~mask, tg, level);
key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid,
iova & ~mask, tg, level);
entry = g_hash_table_lookup(bs->iotlb, &key);
if (entry) {
break;
@ -88,13 +91,13 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
if (entry) {
cfg->iotlb_hits++;
trace_smmu_iotlb_lookup_hit(cfg->asid, iova,
trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova,
cfg->iotlb_hits, cfg->iotlb_misses,
100 * cfg->iotlb_hits /
(cfg->iotlb_hits + cfg->iotlb_misses));
} else {
cfg->iotlb_misses++;
trace_smmu_iotlb_lookup_miss(cfg->asid, iova,
trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova,
cfg->iotlb_hits, cfg->iotlb_misses,
100 * cfg->iotlb_hits /
(cfg->iotlb_hits + cfg->iotlb_misses));
@ -111,8 +114,10 @@ void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
smmu_iotlb_inv_all(bs);
}
*key = smmu_get_iotlb_key(cfg->asid, new->entry.iova, tg, new->level);
trace_smmu_iotlb_insert(cfg->asid, new->entry.iova, tg, new->level);
*key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
tg, new->level);
trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
tg, new->level);
g_hash_table_insert(bs->iotlb, key, new);
}
@ -131,7 +136,16 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
return SMMU_IOTLB_ASID(*iotlb_key) == asid;
}
static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value,
static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value,
gpointer user_data)
{
uint16_t vmid = *(uint16_t *)user_data;
SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;
return SMMU_IOTLB_VMID(*iotlb_key) == vmid;
}
static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value,
gpointer user_data)
{
SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
@ -142,18 +156,21 @@ static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value,
if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) {
return false;
}
if (info->vmid >= 0 && info->vmid != SMMU_IOTLB_VMID(iotlb_key)) {
return false;
}
return ((info->iova & ~entry->addr_mask) == entry->iova) ||
((entry->iova & ~info->mask) == info->iova);
}
void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
uint8_t tg, uint64_t num_pages, uint8_t ttl)
{
/* if tg is not set we use 4KB range invalidation */
uint8_t granule = tg ? tg * 2 + 10 : 12;
if (ttl && (num_pages == 1) && (asid >= 0)) {
SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl);
SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, iova, tg, ttl);
if (g_hash_table_remove(s->iotlb, &key)) {
return;
@ -166,10 +183,11 @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
SMMUIOTLBPageInvInfo info = {
.asid = asid, .iova = iova,
.vmid = vmid,
.mask = (num_pages * 1 << granule) - 1};
g_hash_table_foreach_remove(s->iotlb,
smmu_hash_remove_by_asid_iova,
smmu_hash_remove_by_asid_vmid_iova,
&info);
}
@ -179,6 +197,12 @@ void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid);
}
inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid)
{
trace_smmu_iotlb_inv_vmid(vmid);
g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid);
}
/* VMSAv8-64 Translation */
/**
@ -264,7 +288,7 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
}
/**
* smmu_ptw_64 - VMSAv8-64 Walk of the page tables for a given IOVA
* smmu_ptw_64_s1 - VMSAv8-64 Walk of the page tables for a given IOVA
* @cfg: translation config
* @iova: iova to translate
* @perm: access type
@ -276,9 +300,9 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
* Upon success, @tlbe is filled with translated_addr and entry
* permission rights.
*/
static int smmu_ptw_64(SMMUTransCfg *cfg,
dma_addr_t iova, IOMMUAccessFlags perm,
SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
static int smmu_ptw_64_s1(SMMUTransCfg *cfg,
dma_addr_t iova, IOMMUAccessFlags perm,
SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
dma_addr_t baseaddr, indexmask;
int stage = cfg->stage;
@ -291,14 +315,14 @@ static int smmu_ptw_64(SMMUTransCfg *cfg,
}
granule_sz = tt->granule_sz;
stride = granule_sz - 3;
stride = VMSA_STRIDE(granule_sz);
inputsize = 64 - tt->tsz;
level = 4 - (inputsize - 4) / stride;
indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
indexmask = VMSA_IDXMSK(inputsize, stride, level);
baseaddr = extract64(tt->ttb, 0, 48);
baseaddr &= ~indexmask;
while (level <= 3) {
while (level < VMSA_LEVELS) {
uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
uint64_t mask = subpage_size - 1;
uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
@ -309,7 +333,7 @@ static int smmu_ptw_64(SMMUTransCfg *cfg,
if (get_pte(baseaddr, offset, &pte, info)) {
goto error;
}
trace_smmu_ptw_level(level, iova, subpage_size,
trace_smmu_ptw_level(stage, level, iova, subpage_size,
baseaddr, offset, pte);
if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
@ -358,6 +382,128 @@ static int smmu_ptw_64(SMMUTransCfg *cfg,
info->type = SMMU_PTW_ERR_TRANSLATION;
error:
info->stage = 1;
tlbe->entry.perm = IOMMU_NONE;
return -EINVAL;
}
/**
* smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given ipa
* for stage-2.
* @cfg: translation config
* @ipa: ipa to translate
* @perm: access type
* @tlbe: SMMUTLBEntry (out)
* @info: handle to an error info
*
* Return 0 on success, < 0 on error. In case of error, @info is filled
* and tlbe->perm is set to IOMMU_NONE.
* Upon success, @tlbe is filled with translated_addr and entry
* permission rights.
*/
static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
dma_addr_t ipa, IOMMUAccessFlags perm,
SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
const int stage = 2;
int granule_sz = cfg->s2cfg.granule_sz;
/* ARM DDI0487I.a: Table D8-7. */
int inputsize = 64 - cfg->s2cfg.tsz;
int level = get_start_level(cfg->s2cfg.sl0, granule_sz);
int stride = VMSA_STRIDE(granule_sz);
int idx = pgd_concat_idx(level, granule_sz, ipa);
/*
* Get the ttb from concatenated structure.
* The offset is the idx * size of each ttb(number of ptes * (sizeof(pte))
*/
uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, 48) + (1 << stride) *
idx * sizeof(uint64_t);
dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level);
baseaddr &= ~indexmask;
/*
* On input, a stage 2 Translation fault occurs if the IPA is outside the
* range configured by the relevant S2T0SZ field of the STE.
*/
if (ipa >= (1ULL << inputsize)) {
info->type = SMMU_PTW_ERR_TRANSLATION;
goto error;
}
while (level < VMSA_LEVELS) {
uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
uint64_t mask = subpage_size - 1;
uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz);
uint64_t pte, gpa;
dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
uint8_t s2ap;
if (get_pte(baseaddr, offset, &pte, info)) {
goto error;
}
trace_smmu_ptw_level(stage, level, ipa, subpage_size,
baseaddr, offset, pte);
if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
pte_addr, offset, pte);
break;
}
if (is_table_pte(pte, level)) {
baseaddr = get_table_pte_address(pte, granule_sz);
level++;
continue;
} else if (is_page_pte(pte, level)) {
gpa = get_page_pte_address(pte, granule_sz);
trace_smmu_ptw_page_pte(stage, level, ipa,
baseaddr, pte_addr, pte, gpa);
} else {
uint64_t block_size;
gpa = get_block_pte_address(pte, level, granule_sz,
&block_size);
trace_smmu_ptw_block_pte(stage, level, baseaddr,
pte_addr, pte, ipa, gpa,
block_size >> 20);
}
/*
* If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry)
* An Access fault takes priority over a Permission fault.
*/
if (!PTE_AF(pte) && !cfg->s2cfg.affd) {
info->type = SMMU_PTW_ERR_ACCESS;
goto error;
}
s2ap = PTE_AP(pte);
if (is_permission_fault_s2(s2ap, perm)) {
info->type = SMMU_PTW_ERR_PERMISSION;
goto error;
}
/*
* The address output from the translation causes a stage 2 Address
* Size fault if it exceeds the effective PA output range.
*/
if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) {
info->type = SMMU_PTW_ERR_ADDR_SIZE;
goto error;
}
tlbe->entry.translated_addr = gpa;
tlbe->entry.iova = ipa & ~mask;
tlbe->entry.addr_mask = mask;
tlbe->entry.perm = s2ap;
tlbe->level = level;
tlbe->granule = granule_sz;
return 0;
}
info->type = SMMU_PTW_ERR_TRANSLATION;
error:
info->stage = 2;
tlbe->entry.perm = IOMMU_NONE;
return -EINVAL;
}
@ -376,15 +522,26 @@ error:
int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
if (!cfg->aa64) {
if (cfg->stage == 1) {
return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info);
} else if (cfg->stage == 2) {
/*
* This code path is not entered as we check this while decoding
* the configuration data in the derived SMMU model.
* If bypassing stage 1(or unimplemented), the input address is passed
* directly to stage 2 as IPA. If the input address of a transaction
* exceeds the size of the IAS, a stage 1 Address Size fault occurs.
* For AA64, IAS = OAS according to (IHI 0070.E.a) "3.4 Address sizes"
*/
g_assert_not_reached();
if (iova >= (1ULL << cfg->oas)) {
info->type = SMMU_PTW_ERR_ADDR_SIZE;
info->stage = 1;
tlbe->entry.perm = IOMMU_NONE;
return -EINVAL;
}
return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info);
}
return smmu_ptw_64(cfg, iova, perm, tlbe, info);
g_assert_not_reached();
}
/**


@ -66,6 +66,8 @@
#define PTE_APTABLE(pte) \
(extract64(pte, 61, 2))
#define PTE_AF(pte) \
(extract64(pte, 10, 1))
/*
* TODO: At the moment all transactions are considered as privileged (EL1)
* as IOMMU translation callback does not pass user/priv attributes.
@ -73,6 +75,9 @@
#define is_permission_fault(ap, perm) \
(((perm) & IOMMU_WO) && ((ap) & 0x2))
#define is_permission_fault_s2(s2ap, perm) \
(!(((s2ap) & (perm)) == (perm)))
#define PTE_AP_TO_PERM(ap) \
(IOMMU_ACCESS_FLAG(true, !((ap) & 0x2)))
@ -96,10 +101,42 @@ uint64_t iova_level_offset(uint64_t iova, int inputsize,
MAKE_64BIT_MASK(0, gsz - 3);
}
/* FEAT_LPA2 and FEAT_TTST are not implemented. */
static inline int get_start_level(int sl0 , int granule_sz)
{
/* ARM DDI0487I.a: Table D8-12. */
if (granule_sz == 12) {
return 2 - sl0;
}
/* ARM DDI0487I.a: Table D8-22 and Table D8-31. */
return 3 - sl0;
}
/*
* Index in a concatenated first level stage-2 page table.
* ARM DDI0487I.a: D8.2.2 Concatenated translation tables.
*/
static inline int pgd_concat_idx(int start_level, int granule_sz,
dma_addr_t ipa)
{
uint64_t ret;
/*
* Get the number of bits handled by next levels, then any extra bits in
* the address should index the concatenated tables. This relation can be
* deduced from tables in ARM DDI0487I.a: D8.2.7-9
*/
int shift = level_shift(start_level - 1, granule_sz);
ret = ipa >> shift;
return ret;
}
#define SMMU_IOTLB_ASID(key) ((key).asid)
#define SMMU_IOTLB_VMID(key) ((key).vmid)
typedef struct SMMUIOTLBPageInvInfo {
int asid;
int vmid;
uint64_t iova;
uint64_t mask;
} SMMUIOTLBPageInvInfo;

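get_start_level() and pgd_concat_idx() above implement the concatenated first-level stage-2 tables from ARM DDI0487I.a D8.2.2. A standalone worked example (not part of the patch) makes the arithmetic concrete; level_shift() below mirrors the helper already present in this header, and the numbers are purely illustrative.

/* Worked example: 4KB granule, S2SL0=1, S2T0SZ=24 (40-bit IPA space). */
#include <stdint.h>
#include <stdio.h>

static int level_shift(int level, int granule_sz)
{
    return granule_sz + (3 - level) * (granule_sz - 3);
}

static int get_start_level(int sl0, int granule_sz)
{
    return (granule_sz == 12) ? 2 - sl0 : 3 - sl0;
}

static int pgd_concat_idx(int start_level, int granule_sz, uint64_t ipa)
{
    return ipa >> level_shift(start_level - 1, granule_sz);
}

int main(void)
{
    int granule_sz = 12;                              /* 4KB granule  */
    int sl0 = 1;                                      /* STE.S2SL0    */
    int t0sz = 24;                                    /* 40-bit IPA   */
    int level = get_start_level(sl0, granule_sz);     /* -> level 1   */
    uint64_t max_ipa = (1ULL << (64 - t0sz)) - 1;
    int nr_concat = pgd_concat_idx(level, granule_sz, max_ipa) + 1;

    /* prints: start level 1, 2 concatenated tables (<= VMSA_MAX_S2_CONCAT) */
    printf("start level %d, %d concatenated tables\n", level, nr_concat);
    return 0;
}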

@ -34,10 +34,12 @@ typedef enum SMMUTranslationStatus {
/* MMIO Registers */
REG32(IDR0, 0x0)
FIELD(IDR0, S2P, 0 , 1)
FIELD(IDR0, S1P, 1 , 1)
FIELD(IDR0, TTF, 2 , 2)
FIELD(IDR0, COHACC, 4 , 1)
FIELD(IDR0, ASID16, 12, 1)
FIELD(IDR0, VMID16, 18, 1)
FIELD(IDR0, TTENDIAN, 21, 2)
FIELD(IDR0, STALL_MODEL, 24, 2)
FIELD(IDR0, TERM_MODEL, 26, 1)
@ -524,9 +526,13 @@ typedef struct CD {
#define STE_S2TG(x) extract32((x)->word[5], 14, 2)
#define STE_S2PS(x) extract32((x)->word[5], 16, 3)
#define STE_S2AA64(x) extract32((x)->word[5], 19, 1)
#define STE_S2HD(x) extract32((x)->word[5], 24, 1)
#define STE_S2HA(x) extract32((x)->word[5], 25, 1)
#define STE_S2S(x) extract32((x)->word[5], 26, 1)
#define STE_S2ENDI(x) extract32((x)->word[5], 20, 1)
#define STE_S2AFFD(x) extract32((x)->word[5], 21, 1)
#define STE_S2HD(x) extract32((x)->word[5], 23, 1)
#define STE_S2HA(x) extract32((x)->word[5], 24, 1)
#define STE_S2S(x) extract32((x)->word[5], 25, 1)
#define STE_S2R(x) extract32((x)->word[5], 26, 1)
#define STE_CTXPTR(x) \
({ \
unsigned long addr; \


@ -21,6 +21,7 @@
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
@ -33,6 +34,9 @@
#include "smmuv3-internal.h"
#include "smmu-internal.h"
#define PTW_RECORD_FAULT(cfg) (((cfg)->stage == 1) ? (cfg)->record_faults : \
(cfg)->s2cfg.record_faults)
/**
* smmuv3_trigger_irq - pulse @irq if enabled and update
* GERROR register in case of GERROR interrupt
@ -238,14 +242,17 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
static void smmuv3_init_regs(SMMUv3State *s)
{
/**
* IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
* multi-level stream table
*/
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
/* Based on sys property, the stages supported in smmu will be advertised.*/
if (s->stage && !strcmp("2", s->stage)) {
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
} else {
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
}
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
/* terminated transaction will always be aborted/error returned */
@ -329,11 +336,138 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
return 0;
}
/*
* Max valid value is 39 when SMMU_IDR3.STT == 0.
* In architectures after SMMUv3.0:
* - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this
* field is MAX(16, 64-IAS)
* - If STE.S2TG selects a 64KB granule, the minimum valid value for this field
* is (64-IAS).
* As we only support AA64, IAS = OAS.
*/
static bool s2t0sz_valid(SMMUTransCfg *cfg)
{
if (cfg->s2cfg.tsz > 39) {
return false;
}
if (cfg->s2cfg.granule_sz == 16) {
return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS));
}
return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16));
}
/*
* Return true if s2 page table config is valid.
* This checks with the configured start level, ias_bits and granularity we can
* have a valid page table as described in ARM ARM D8.2 Translation process.
* The idea here is to see for the highest possible number of IPA bits, how
* many concatenated tables we would need, if it is more than 16, then this is
* not possible.
*/
static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
{
int level = get_start_level(sl0, gran);
uint64_t ipa_bits = 64 - t0sz;
uint64_t max_ipa = (1ULL << ipa_bits) - 1;
int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;
return nr_concat <= VMSA_MAX_S2_CONCAT;
}
static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste)
{
cfg->stage = 2;
if (STE_S2AA64(ste) == 0x0) {
qemu_log_mask(LOG_UNIMP,
"SMMUv3 AArch32 tables not supported\n");
g_assert_not_reached();
}
switch (STE_S2TG(ste)) {
case 0x0: /* 4KB */
cfg->s2cfg.granule_sz = 12;
break;
case 0x1: /* 64KB */
cfg->s2cfg.granule_sz = 16;
break;
case 0x2: /* 16KB */
cfg->s2cfg.granule_sz = 14;
break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
goto bad_ste;
}
cfg->s2cfg.vttb = STE_S2TTB(ste);
cfg->s2cfg.sl0 = STE_S2SL0(ste);
/* FEAT_TTST not supported. */
if (cfg->s2cfg.sl0 == 0x3) {
qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
goto bad_ste;
}
/* For AA64, The effective S2PS size is capped to the OAS. */
cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS));
/*
* It is ILLEGAL for the address in S2TTB to be outside the range
* described by the effective S2PS value.
*/
if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
qemu_log_mask(LOG_GUEST_ERROR,
"SMMUv3 S2TTB too large 0x%" PRIx64
", effective PS %d bits\n",
cfg->s2cfg.vttb, cfg->s2cfg.eff_ps);
goto bad_ste;
}
cfg->s2cfg.tsz = STE_S2T0SZ(ste);
if (!s2t0sz_valid(cfg)) {
qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
cfg->s2cfg.tsz);
goto bad_ste;
}
if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
cfg->s2cfg.granule_sz)) {
qemu_log_mask(LOG_GUEST_ERROR,
"SMMUv3 STE stage 2 config not valid!\n");
goto bad_ste;
}
/* Only LE supported(IDR0.TTENDIAN). */
if (STE_S2ENDI(ste)) {
qemu_log_mask(LOG_GUEST_ERROR,
"SMMUv3 STE_S2ENDI only supports LE!\n");
goto bad_ste;
}
cfg->s2cfg.affd = STE_S2AFFD(ste);
cfg->s2cfg.record_faults = STE_S2R(ste);
/* As stall is not supported. */
if (STE_S2S(ste)) {
qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
goto bad_ste;
}
return 0;
bad_ste:
return -EINVAL;
}
/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
STE *ste, SMMUEventInfo *event)
{
uint32_t config;
int ret;
if (!STE_VALID(ste)) {
if (!event->inval_ste_allowed) {
@ -354,10 +488,38 @@ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
return 0;
}
if (STE_CFG_S2_ENABLED(config)) {
qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
/*
* If a stage is enabled in SW while not advertised, throw bad ste
* according to user manual(IHI0070E) "5.2 Stream Table Entry".
*/
if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
goto bad_ste;
}
if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
goto bad_ste;
}
if (STAGE2_SUPPORTED(s)) {
/* VMID is considered even if s2 is disabled. */
cfg->s2cfg.vmid = STE_S2VMID(ste);
} else {
/* Default to -1 */
cfg->s2cfg.vmid = -1;
}
if (STE_CFG_S2_ENABLED(config)) {
/*
* Stage-1 OAS defaults to OAS even if not enabled as it would be used
* in input address check for stage-2.
*/
cfg->oas = oas2bits(SMMU_IDR5_OAS);
ret = decode_ste_s2_cfg(cfg, ste);
if (ret) {
goto bad_ste;
}
}
if (STE_S1CDMAX(ste) != 0) {
qemu_log_mask(LOG_UNIMP,
@ -559,6 +721,9 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
STE ste;
CD cd;
/* ASID defaults to -1 (if s1 is not supported). */
cfg->asid = -1;
ret = smmu_find_ste(s, sid, &ste, event);
if (ret) {
return ret;
@ -569,7 +734,7 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
return ret;
}
if (cfg->aborted || cfg->bypassed) {
if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) {
return 0;
}
@ -656,6 +821,11 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
.addr_mask = ~(hwaddr)0,
.perm = IOMMU_NONE,
};
/*
* Combined attributes used for TLB lookup, as only one stage is supported,
* it will hold attributes based on the enabled stage.
*/
SMMUTransTableInfo tt_combined;
qemu_mutex_lock(&s->mutex);
@ -684,25 +854,45 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
goto epilogue;
}
tt = select_tt(cfg, addr);
if (!tt) {
if (cfg->record_faults) {
event.type = SMMU_EVT_F_TRANSLATION;
event.u.f_translation.addr = addr;
event.u.f_translation.rnw = flag & 0x1;
if (cfg->stage == 1) {
/* Select stage1 translation table. */
tt = select_tt(cfg, addr);
if (!tt) {
if (cfg->record_faults) {
event.type = SMMU_EVT_F_TRANSLATION;
event.u.f_translation.addr = addr;
event.u.f_translation.rnw = flag & 0x1;
}
status = SMMU_TRANS_ERROR;
goto epilogue;
}
status = SMMU_TRANS_ERROR;
goto epilogue;
}
tt_combined.granule_sz = tt->granule_sz;
tt_combined.tsz = tt->tsz;
page_mask = (1ULL << (tt->granule_sz)) - 1;
} else {
/* Stage2. */
tt_combined.granule_sz = cfg->s2cfg.granule_sz;
tt_combined.tsz = cfg->s2cfg.tsz;
}
/*
* TLB lookup looks for granule and input size for a translation stage,
* as only one stage is supported right now, choose the right values
* from the configuration.
*/
page_mask = (1ULL << tt_combined.granule_sz) - 1;
aligned_addr = addr & ~page_mask;
cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr);
if (cached_entry) {
if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
status = SMMU_TRANS_ERROR;
if (cfg->record_faults) {
/*
* We know that the TLB only contains either stage-1 or stage-2 as
* nesting is not supported. So it is sufficient to check the
* translation stage to know the TLB stage for now.
*/
event.u.f_walk_eabt.s2 = (cfg->stage == 2);
if (PTW_RECORD_FAULT(cfg)) {
event.type = SMMU_EVT_F_PERMISSION;
event.u.f_permission.addr = addr;
event.u.f_permission.rnw = flag & 0x1;
@ -716,6 +906,8 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
cached_entry = g_new0(SMMUTLBEntry, 1);
if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
/* All faults from PTW has S2 field. */
event.u.f_walk_eabt.s2 = (ptw_info.stage == 2);
g_free(cached_entry);
switch (ptw_info.type) {
case SMMU_PTW_ERR_WALK_EABT:
@ -726,28 +918,28 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
event.u.f_walk_eabt.addr2 = ptw_info.addr;
break;
case SMMU_PTW_ERR_TRANSLATION:
if (cfg->record_faults) {
if (PTW_RECORD_FAULT(cfg)) {
event.type = SMMU_EVT_F_TRANSLATION;
event.u.f_translation.addr = addr;
event.u.f_translation.rnw = flag & 0x1;
}
break;
case SMMU_PTW_ERR_ADDR_SIZE:
if (cfg->record_faults) {
if (PTW_RECORD_FAULT(cfg)) {
event.type = SMMU_EVT_F_ADDR_SIZE;
event.u.f_addr_size.addr = addr;
event.u.f_addr_size.rnw = flag & 0x1;
}
break;
case SMMU_PTW_ERR_ACCESS:
if (cfg->record_faults) {
if (PTW_RECORD_FAULT(cfg)) {
event.type = SMMU_EVT_F_ACCESS;
event.u.f_access.addr = addr;
event.u.f_access.rnw = flag & 0x1;
}
break;
case SMMU_PTW_ERR_PERMISSION:
if (cfg->record_faults) {
if (PTW_RECORD_FAULT(cfg)) {
event.type = SMMU_EVT_F_PERMISSION;
event.u.f_permission.addr = addr;
event.u.f_permission.rnw = flag & 0x1;
@ -808,18 +1000,21 @@ epilogue:
* @mr: IOMMU mr region handle
* @n: notifier to be called
* @asid: address space ID or negative value if we don't care
* @vmid: virtual machine ID or negative value if we don't care
* @iova: iova
* @tg: translation granule (if communicated through range invalidation)
* @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
*/
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
IOMMUNotifier *n,
int asid, dma_addr_t iova,
uint8_t tg, uint64_t num_pages)
int asid, int vmid,
dma_addr_t iova, uint8_t tg,
uint64_t num_pages)
{
SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
IOMMUTLBEvent event;
uint8_t granule;
SMMUv3State *s = sdev->smmu;
if (!tg) {
SMMUEventInfo event = {.inval_ste_allowed = true};
@ -834,11 +1029,20 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
return;
}
tt = select_tt(cfg, iova);
if (!tt) {
if (vmid >= 0 && cfg->s2cfg.vmid != vmid) {
return;
}
granule = tt->granule_sz;
if (STAGE1_SUPPORTED(s)) {
tt = select_tt(cfg, iova);
if (!tt) {
return;
}
granule = tt->granule_sz;
} else {
granule = cfg->s2cfg.granule_sz;
}
} else {
granule = tg * 2 + 10;
}
@ -852,9 +1056,10 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
memory_region_notify_iommu_one(n, &event);
}
/* invalidate an asid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
uint8_t tg, uint64_t num_pages)
/* invalidate an asid/vmid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
dma_addr_t iova, uint8_t tg,
uint64_t num_pages)
{
SMMUDevice *sdev;
@ -862,20 +1067,20 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
IOMMUMemoryRegion *mr = &sdev->iommu;
IOMMUNotifier *n;
trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
tg, num_pages);
trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
iova, tg, num_pages);
IOMMU_NOTIFIER_FOREACH(n, mr) {
smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages);
}
}
}
static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
{
dma_addr_t end, addr = CMD_ADDR(cmd);
uint8_t type = CMD_TYPE(cmd);
uint16_t vmid = CMD_VMID(cmd);
int vmid = -1;
uint8_t scale = CMD_SCALE(cmd);
uint8_t num = CMD_NUM(cmd);
uint8_t ttl = CMD_TTL(cmd);
@ -884,15 +1089,21 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
uint64_t num_pages;
uint8_t granule;
int asid = -1;
SMMUv3State *smmuv3 = ARM_SMMUV3(s);
/* Only consider VMID if stage-2 is supported. */
if (STAGE2_SUPPORTED(smmuv3)) {
vmid = CMD_VMID(cmd);
}
if (type == SMMU_CMD_TLBI_NH_VA) {
asid = CMD_ASID(cmd);
}
if (!tg) {
trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1);
smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
return;
}
@ -908,9 +1119,9 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
num_pages = (mask + 1) >> granule;
trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages);
smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
addr += mask + 1;
}
}
@ -1042,12 +1253,22 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
{
uint16_t asid = CMD_ASID(&cmd);
if (!STAGE1_SUPPORTED(s)) {
cmd_error = SMMU_CERROR_ILL;
break;
}
trace_smmuv3_cmdq_tlbi_nh_asid(asid);
smmu_inv_notifiers_all(&s->smmu_state);
smmu_iotlb_inv_asid(bs, asid);
break;
}
case SMMU_CMD_TLBI_NH_ALL:
if (!STAGE1_SUPPORTED(s)) {
cmd_error = SMMU_CERROR_ILL;
break;
}
QEMU_FALLTHROUGH;
case SMMU_CMD_TLBI_NSNH_ALL:
trace_smmuv3_cmdq_tlbi_nh();
smmu_inv_notifiers_all(&s->smmu_state);
@ -1055,7 +1276,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
break;
case SMMU_CMD_TLBI_NH_VAA:
case SMMU_CMD_TLBI_NH_VA:
smmuv3_s1_range_inval(bs, &cmd);
if (!STAGE1_SUPPORTED(s)) {
cmd_error = SMMU_CERROR_ILL;
break;
}
smmuv3_range_inval(bs, &cmd);
break;
case SMMU_CMD_TLBI_S12_VMALL:
{
uint16_t vmid = CMD_VMID(&cmd);
if (!STAGE2_SUPPORTED(s)) {
cmd_error = SMMU_CERROR_ILL;
break;
}
trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
smmu_inv_notifiers_all(&s->smmu_state);
smmu_iotlb_inv_vmid(bs, vmid);
break;
}
case SMMU_CMD_TLBI_S2_IPA:
if (!STAGE2_SUPPORTED(s)) {
cmd_error = SMMU_CERROR_ILL;
break;
}
/*
* As currently only either s1 or s2 are supported
* we can reuse same function for s2.
*/
smmuv3_range_inval(bs, &cmd);
break;
case SMMU_CMD_TLBI_EL3_ALL:
case SMMU_CMD_TLBI_EL3_VA:
@ -1063,8 +1313,6 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
case SMMU_CMD_TLBI_EL2_ASID:
case SMMU_CMD_TLBI_EL2_VA:
case SMMU_CMD_TLBI_EL2_VAA:
case SMMU_CMD_TLBI_S12_VMALL:
case SMMU_CMD_TLBI_S2_IPA:
case SMMU_CMD_ATC_INV:
case SMMU_CMD_PRI_RESP:
case SMMU_CMD_RESUME:
@ -1073,12 +1321,14 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
break;
default:
cmd_error = SMMU_CERROR_ILL;
qemu_log_mask(LOG_GUEST_ERROR,
"Illegal command type: %d\n", CMD_TYPE(&cmd));
break;
}
qemu_mutex_unlock(&s->mutex);
if (cmd_error) {
if (cmd_error == SMMU_CERROR_ILL) {
qemu_log_mask(LOG_GUEST_ERROR,
"Illegal command type: %d\n", CMD_TYPE(&cmd));
}
break;
}
/*
@ -1555,6 +1805,17 @@ static const VMStateDescription vmstate_smmuv3 = {
}
};
static Property smmuv3_properties[] = {
/*
* Stages of translation advertised.
* "1": Stage 1
* "2": Stage 2
* Defaults to stage 1
*/
DEFINE_PROP_STRING("stage", SMMUv3State, stage),
DEFINE_PROP_END_OF_LIST()
};
static void smmuv3_instance_init(Object *obj)
{
/* Nothing much to do here as of now */
@ -1571,6 +1832,7 @@ static void smmuv3_class_init(ObjectClass *klass, void *data)
&c->parent_phases);
c->parent_realize = dc->realize;
dc->realize = smmu_realize;
device_class_set_props(dc, smmuv3_properties);
}
static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,

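The new "stage" property is the knob this series adds for choosing the advertised translation stage. As a sketch only (not taken from the patch), this is how a board or qtest might opt in to stage-2 before realize; the property name and its "1"/"2" values come from the diff above, while the surrounding creation code is illustrative.

/* Hypothetical sketch: create an SMMUv3 that advertises IDR0.S2P. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/arm/smmuv3.h"

static DeviceState *create_stage2_smmu(void)
{
    DeviceState *dev = qdev_new(TYPE_ARM_SMMUV3);

    /* see smmuv3_init_regs(): "2" selects S2P, anything else keeps S1P */
    object_property_set_str(OBJECT(dev), "stage", "2", &error_fatal);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    return dev;
}

From the command line the equivalent would presumably be a global, e.g. -global arm-smmuv3.stage=2 on a machine that instantiates the SMMU (such as virt with iommu=smmuv3), though that invocation is an assumption and not shown in this series.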

@ -5,18 +5,19 @@ virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out."
# smmu-common.c
smmu_add_mr(const char *name) "%s"
smmu_ptw_level(int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64
smmu_ptw_level(int stage, int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64
smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64
smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64
smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB"
smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
smmu_iotlb_inv_all(void) "IOTLB invalidate all"
smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d"
smmu_iotlb_inv_vmid(uint16_t vmid) "IOTLB invalidate vmid=%d"
smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64
smmu_inv_notifiers_mr(const char *name) "iommu mr=%s"
smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
smmu_iotlb_lookup_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
smmu_iotlb_insert(uint16_t asid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d addr=0x%"PRIx64" tg=%d level=%d"
smmu_iotlb_lookup_hit(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
smmu_iotlb_lookup_miss(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
smmu_iotlb_insert(uint16_t asid, uint16_t vmid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d vmid=%d addr=0x%"PRIx64" tg=%d level=%d"
# smmuv3.c
smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)"
@ -45,11 +46,12 @@ smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%x - end=0x%x"
smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x"
smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit rate=%d)"
smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit rate=%d)"
smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d"
smmuv3_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d"
smmuv3_cmdq_tlbi_nh(void) ""
smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d"
smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d"
smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x"
smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s"
smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s"
smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64
smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d vmid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64


@ -213,7 +213,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s,
const char *boot_cpu, Error **errp)
{
int i;
int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS,
int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS),
XLNX_ZYNQMP_NUM_RPU_CPUS);
if (num_rpus <= 0) {

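The cast above matters because ms->smp.cpus is unsigned: with fewer CPUs than XLNX_ZYNQMP_NUM_APU_CPUS the subtraction wraps around and the num_rpus <= 0 guard below never fires. A standalone demonstration of the pitfall (not part of the patch, constants and the MIN() macro simplified):

/* Unsigned-wraparound demo for the MIN() above; values are illustrative. */
#include <stdio.h>

#define MIN(a, b)     ((a) < (b) ? (a) : (b))
#define NUM_APU_CPUS  4
#define NUM_RPU_CPUS  2

int main(void)
{
    unsigned int cpus = 2;  /* fewer CPUs than APUs, so no RPUs should exist */

    /* wraps to a huge unsigned value, MIN() then picks NUM_RPU_CPUS (2) */
    int without_cast = MIN(cpus - NUM_APU_CPUS, NUM_RPU_CPUS);
    /* converts back to a negative count first, so MIN() yields -2 */
    int with_cast = MIN((int)(cpus - NUM_APU_CPUS), NUM_RPU_CPUS);

    printf("without cast: %d, with cast: %d\n", without_cast, with_cast);
    return 0;
}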

@ -168,6 +168,11 @@ static inline int stream_idle(struct Stream *s)
return !!(s->regs[R_DMASR] & DMASR_IDLE);
}
static inline int stream_halted(struct Stream *s)
{
return !!(s->regs[R_DMASR] & DMASR_HALTED);
}
static void stream_reset(struct Stream *s)
{
s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */
@ -269,7 +274,7 @@ static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev,
uint64_t addr;
bool eop;
if (!stream_running(s) || stream_idle(s)) {
if (!stream_running(s) || stream_idle(s) || stream_halted(s)) {
return;
}
@ -326,7 +331,7 @@ static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
unsigned int rxlen;
size_t pos = 0;
if (!stream_running(s) || stream_idle(s)) {
if (!stream_running(s) || stream_idle(s) || stream_halted(s)) {
return 0;
}
@ -407,7 +412,7 @@ xilinx_axidma_data_stream_can_push(StreamSink *obj,
XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
struct Stream *s = &ds->dma->streams[1];
if (!stream_running(s) || stream_idle(s)) {
if (!stream_running(s) || stream_idle(s) || stream_halted(s)) {
ds->dma->notify = notify;
ds->dma->notify_opaque = notify_opaque;
return false;


@ -21,6 +21,7 @@
#include "hw/cpu/a9mpcore.h"
#include "hw/misc/imx6_ccm.h"
#include "hw/misc/imx6_src.h"
#include "hw/misc/imx7_snvs.h"
#include "hw/watchdog/wdt_imx2.h"
#include "hw/char/imx_serial.h"
#include "hw/timer/imx_gpt.h"
@ -59,6 +60,7 @@ struct FslIMX6State {
A9MPPrivState a9mpcore;
IMX6CCMState ccm;
IMX6SRCState src;
IMX7SNVSState snvs;
IMXSerialState uart[FSL_IMX6_NUM_UARTS];
IMXGPTState gpt;
IMXEPITState epit[FSL_IMX6_NUM_EPITS];


@ -23,9 +23,19 @@
#include "hw/pci/pci.h"
#include "qom/object.h"
#define SMMU_PCI_BUS_MAX 256
#define SMMU_PCI_DEVFN_MAX 256
#define SMMU_PCI_DEVFN(sid) (sid & 0xFF)
#define SMMU_PCI_BUS_MAX 256
#define SMMU_PCI_DEVFN_MAX 256
#define SMMU_PCI_DEVFN(sid) (sid & 0xFF)
/* VMSAv8-64 Translation constants and functions */
#define VMSA_LEVELS 4
#define VMSA_MAX_S2_CONCAT 16
#define VMSA_STRIDE(gran) ((gran) - VMSA_LEVELS + 1)
#define VMSA_BIT_LVL(isz, strd, lvl) ((isz) - (strd) * \
(VMSA_LEVELS - (lvl)))
#define VMSA_IDXMSK(isz, strd, lvl) ((1ULL << \
VMSA_BIT_LVL(isz, strd, lvl)) - 1)
/*
* Page table walk error types
@ -40,6 +50,7 @@ typedef enum {
} SMMUPTWEventType;
typedef struct SMMUPTWEventInfo {
int stage;
SMMUPTWEventType type;
dma_addr_t addr; /* fetched address that induced an abort, if any */
} SMMUPTWEventInfo;
@ -58,25 +69,41 @@ typedef struct SMMUTLBEntry {
uint8_t granule;
} SMMUTLBEntry;
/* Stage-2 configuration. */
typedef struct SMMUS2Cfg {
uint8_t tsz; /* Size of IPA input region (S2T0SZ) */
uint8_t sl0; /* Start level of translation (S2SL0) */
bool affd; /* AF Fault Disable (S2AFFD) */
bool record_faults; /* Record fault events (S2R) */
uint8_t granule_sz; /* Granule page shift (based on S2TG) */
uint8_t eff_ps; /* Effective PA output range (based on S2PS) */
uint16_t vmid; /* Virtual Machine ID (S2VMID) */
uint64_t vttb; /* Address of translation table base (S2TTB) */
} SMMUS2Cfg;
/*
* Generic structure populated by derived SMMU devices
* after decoding the configuration information and used as
* input to the page table walk
*/
typedef struct SMMUTransCfg {
/* Shared fields between stage-1 and stage-2. */
int stage; /* translation stage */
bool aa64; /* arch64 or aarch32 translation table */
bool disabled; /* smmu is disabled */
bool bypassed; /* translation is bypassed */
bool aborted; /* translation is aborted */
uint32_t iotlb_hits; /* counts IOTLB hits */
uint32_t iotlb_misses; /* counts IOTLB misses*/
/* Used by stage-1 only. */
bool aa64; /* arch64 or aarch32 translation table */
bool record_faults; /* record fault events */
uint64_t ttb; /* TT base address */
uint8_t oas; /* output address width */
uint8_t tbi; /* Top Byte Ignore */
uint16_t asid;
SMMUTransTableInfo tt[2];
uint32_t iotlb_hits; /* counts IOTLB hits for this asid */
uint32_t iotlb_misses; /* counts IOTLB misses for this asid */
/* Used by stage-2 only. */
struct SMMUS2Cfg s2cfg;
} SMMUTransCfg;
typedef struct SMMUDevice {
@ -98,6 +125,7 @@ typedef struct SMMUPciBus {
typedef struct SMMUIOTLBKey {
uint64_t iova;
uint16_t asid;
uint16_t vmid;
uint8_t tg;
uint8_t level;
} SMMUIOTLBKey;
@ -161,11 +189,12 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid);
SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
SMMUTransTableInfo *tt, hwaddr iova);
void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *entry);
SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova,
SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova,
uint8_t tg, uint8_t level);
void smmu_iotlb_inv_all(SMMUState *s);
void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid);
void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid);
void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
uint8_t tg, uint64_t num_pages, uint8_t ttl);
/* Unmap the range of all the notifiers registered to any IOMMU mr */

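The widened SMMUIOTLBKey and the new smmu_iotlb_inv_vmid() exist because, once stage-2 is in the picture, the same IOVA/ASID pair can be cached on behalf of different VMs and must be invalidated per VM. A trivial standalone illustration (not part of the patch) of why the VMID has to participate in key equality:

/* Self-contained mirror of the SMMUIOTLBKey comparison; values arbitrary. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t iova;
    uint16_t asid;
    uint16_t vmid;
    uint8_t tg;
    uint8_t level;
} IOTLBKey;

static bool key_equal(IOTLBKey a, IOTLBKey b)
{
    return a.asid == b.asid && a.vmid == b.vmid && a.iova == b.iova &&
           a.tg == b.tg && a.level == b.level;
}

int main(void)
{
    IOTLBKey k1 = { .iova = 0x8000, .asid = 1, .vmid = 1, .tg = 0, .level = 3 };
    IOTLBKey k2 = k1;

    k2.vmid = 2;   /* same ASID/IOVA, but cached for another VM */
    printf("entries kept distinct: %s\n", key_equal(k1, k2) ? "no" : "yes");
    return 0;
}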

@ -62,6 +62,7 @@ struct SMMUv3State {
qemu_irq irq[4];
QemuMutex mutex;
char *stage;
};
typedef enum {
@ -83,4 +84,7 @@ struct SMMUv3Class {
#define TYPE_ARM_SMMUV3 "arm-smmuv3"
OBJECT_DECLARE_TYPE(SMMUv3State, SMMUv3Class, ARM_SMMUV3)
#define STAGE1_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S1P)
#define STAGE2_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S2P)
#endif


@ -2,7 +2,7 @@
#define QEMU_HELP_TEXTS_H
/* Copyright string for -version arguments, About dialogs, etc */
#define QEMU_COPYRIGHT "Copyright (c) 2003-2022 " \
#define QEMU_COPYRIGHT "Copyright (c) 2003-2023 " \
"Fabrice Bellard and the QEMU Project developers"
/* Bug reporting information for --help arguments, About dialogs, etc */


@ -1,6 +1,9 @@
config ARM
bool
select ARM_COMPATIBLE_SEMIHOSTING if TCG
# We need to select this until we move m_helper.c and the
# translate.c v7m helpers under ARM_V7M.
select ARM_V7M if TCG
config AARCH64


@ -75,8 +75,17 @@ static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
uint32_t fsr, fsc;
if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
/*
* For M-profile there is no guest-facing FSR. We compute a
* short-form value for env->exception.fsr which we will then
* examine in arm_v7m_cpu_do_interrupt(). In theory we could
* use the LPAE format instead as long as both bits of code agree
* (and arm_fi_to_lfsc() handled the M-profile specific
* ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases).
*/
if (!arm_feature(env, ARM_FEATURE_M) &&
(target_el == 2 || arm_el_is_aa64(env, target_el) ||
arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) {
/*
* LPAE format fault status register : bottom 6 bits are
* status code in the same form as needed for syndrome


@ -198,14 +198,15 @@ qtests_arm = \
(config_all_devices.has_key('CONFIG_CMSDK_APB_DUALTIMER') ? ['cmsdk-apb-dualtimer-test'] : []) + \
(config_all_devices.has_key('CONFIG_CMSDK_APB_TIMER') ? ['cmsdk-apb-timer-test'] : []) + \
(config_all_devices.has_key('CONFIG_CMSDK_APB_WATCHDOG') ? ['cmsdk-apb-watchdog-test'] : []) + \
(config_all_devices.has_key('CONFIG_PFLASH_CFI02') ? ['pflash-cfi02-test'] : []) + \
(config_all_devices.has_key('CONFIG_PFLASH_CFI02') and
config_all_devices.has_key('CONFIG_MUSICPAL') ? ['pflash-cfi02-test'] : []) + \
(config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed : []) + \
(config_all_devices.has_key('CONFIG_NPCM7XX') ? qtests_npcm7xx : []) + \
(config_all_devices.has_key('CONFIG_GENERIC_LOADER') ? ['hexloader-test'] : []) + \
(config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \
(config_all_devices.has_key('CONFIG_VEXPRESS') ? ['test-arm-mptimer'] : []) + \
(config_all_devices.has_key('CONFIG_MICROBIT') ? ['microbit-test'] : []) + \
['arm-cpu-features',
'microbit-test',
'test-arm-mptimer',
'boot-serial-test']
# TODO: once aarch64 TCG is fixed on ARM 32 bit host, make bios-tables-test unconditional