diff --git a/MAINTAINERS b/MAINTAINERS index 0255113470..45e2e2009b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -543,7 +543,7 @@ F: include/hw/*/xlnx*.h ARM ACPI Subsystem M: Shannon Zhao -M: Shannon Zhao +M: Shannon Zhao L: qemu-arm@nongnu.org S: Maintained F: hw/arm/virt-acpi-build.c diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak index d37edc4312..b0d6e65038 100644 --- a/default-configs/arm-softmmu.mak +++ b/default-configs/arm-softmmu.mak @@ -130,5 +130,5 @@ CONFIG_SMBIOS=y CONFIG_ASPEED_SOC=y CONFIG_GPIO_KEY=y CONFIG_MSF2=y - CONFIG_FW_CFG_DMA=y +CONFIG_XILINX_AXI=y diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c index bbe7d046e4..b126cf148b 100644 --- a/hw/arm/xlnx-zcu102.c +++ b/hw/arm/xlnx-zcu102.c @@ -151,6 +151,29 @@ static void xlnx_zynqmp_init(XlnxZCU102 *s, MachineState *machine) sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi[i]), 1, cs_line); } + for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_FLASH; i++) { + SSIBus *spi_bus; + DeviceState *flash_dev; + qemu_irq cs_line; + DriveInfo *dinfo = drive_get_next(IF_MTD); + int bus = i / XLNX_ZYNQMP_NUM_QSPI_BUS_CS; + gchar *bus_name = g_strdup_printf("qspi%d", bus); + + spi_bus = (SSIBus *)qdev_get_child_bus(DEVICE(&s->soc), bus_name); + g_free(bus_name); + + flash_dev = ssi_create_slave_no_init(spi_bus, "n25q512a11"); + if (dinfo) { + qdev_prop_set_drive(flash_dev, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + qdev_init_nofail(flash_dev); + + cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.qspi), i + 1, cs_line); + } + /* TODO create and connect IDE devices for ide_drive_get() */ xlnx_zcu102_binfo.ram_size = ram_size; diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index c707c66322..325642058b 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -40,6 +40,10 @@ #define SATA_ADDR 0xFD0C0000 #define SATA_NUM_PORTS 2 +#define QSPI_ADDR 0xff0f0000 +#define LQSPI_ADDR 0xc0000000 +#define QSPI_IRQ 15 + #define DP_ADDR 0xfd4a0000 #define DP_IRQ 113 @@ -171,6 +175,9 @@ static void xlnx_zynqmp_init(Object *obj) qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default()); } + object_initialize(&s->qspi, sizeof(s->qspi), TYPE_XLNX_ZYNQMP_QSPIPS); + qdev_set_parent_bus(DEVICE(&s->qspi), sysbus_get_default()); + object_initialize(&s->dp, sizeof(s->dp), TYPE_XLNX_DP); qdev_set_parent_bus(DEVICE(&s->dp), sysbus_get_default()); @@ -411,6 +418,25 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) g_free(bus_name); } + object_property_set_bool(OBJECT(&s->qspi), true, "realized", &err); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 0, QSPI_ADDR); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 1, LQSPI_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi), 0, gic_spi[QSPI_IRQ]); + + for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_BUS; i++) { + gchar *bus_name; + gchar *target_bus; + + /* Alias controller SPI bus to the SoC itself */ + bus_name = g_strdup_printf("qspi%d", i); + target_bus = g_strdup_printf("spi%d", i); + object_property_add_alias(OBJECT(s), bus_name, + OBJECT(&s->qspi), target_bus, + &error_abort); + g_free(bus_name); + g_free(target_bus); + } + object_property_set_bool(OBJECT(&s->dp), true, "realized", &err); if (err) { error_propagate(errp, err); diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index a2438b9ed2..ea142160b3 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -240,6 +240,8 @@ static const FlashPartInfo known_devices[] = { { INFO("n25q128a13", 0x20ba18, 0, 64 << 10, 256, ER_4K) }, { 
INFO("n25q256a11", 0x20bb19, 0, 64 << 10, 512, ER_4K) }, { INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, + { INFO("n25q512a11", 0x20bb20, 0, 64 << 10, 1024, ER_4K) }, + { INFO("n25q512a13", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, { INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) }, { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, @@ -331,7 +333,10 @@ typedef enum { WRDI = 0x4, RDSR = 0x5, WREN = 0x6, + BRRD = 0x16, + BRWR = 0x17, JEDEC_READ = 0x9f, + BULK_ERASE_60 = 0x60, BULK_ERASE = 0xc7, READ_FSR = 0x70, RDCR = 0x15, @@ -355,6 +360,8 @@ typedef enum { DPP = 0xa2, QPP = 0x32, QPP_4 = 0x34, + RDID_90 = 0x90, + RDID_AB = 0xab, ERASE_4K = 0x20, ERASE4_4K = 0x21, @@ -405,6 +412,7 @@ typedef enum { MAN_MACRONIX, MAN_NUMONYX, MAN_WINBOND, + MAN_SST, MAN_GENERIC, } Manufacturer; @@ -423,6 +431,7 @@ typedef struct Flash { uint8_t data[M25P80_INTERNAL_DATA_BUFFER_SZ]; uint32_t len; uint32_t pos; + bool data_read_loop; uint8_t needed_bytes; uint8_t cmd_in_progress; uint32_t cur_addr; @@ -475,6 +484,8 @@ static inline Manufacturer get_man(Flash *s) return MAN_SPANSION; case 0xC2: return MAN_MACRONIX; + case 0xBF: + return MAN_SST; default: return MAN_GENERIC; } @@ -698,6 +709,7 @@ static void complete_collecting_data(Flash *s) s->write_enable = false; } break; + case BRWR: case EXTEND_ADDR_WRITE: s->ear = s->data[0]; break; @@ -710,6 +722,31 @@ static void complete_collecting_data(Flash *s) case WEVCR: s->enh_volatile_cfg = s->data[0]; break; + case RDID_90: + case RDID_AB: + if (get_man(s) == MAN_SST) { + if (s->cur_addr <= 1) { + if (s->cur_addr) { + s->data[0] = s->pi->id[2]; + s->data[1] = s->pi->id[0]; + } else { + s->data[0] = s->pi->id[0]; + s->data[1] = s->pi->id[2]; + } + s->pos = 0; + s->len = 2; + s->data_read_loop = true; + s->state = STATE_READING_DATA; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Invalid read id address\n"); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Read id (command 0x90/0xAB) is not supported" + " by device\n"); + } + break; default: break; } @@ -925,6 +962,8 @@ static void decode_new_cmd(Flash *s, uint32_t value) case PP4: case PP4_4: case DIE_ERASE: + case RDID_90: + case RDID_AB: s->needed_bytes = get_addr_length(s); s->pos = 0; s->len = 0; @@ -983,6 +1022,7 @@ static void decode_new_cmd(Flash *s, uint32_t value) } s->pos = 0; s->len = 1; + s->data_read_loop = true; s->state = STATE_READING_DATA; break; @@ -993,6 +1033,7 @@ static void decode_new_cmd(Flash *s, uint32_t value) } s->pos = 0; s->len = 1; + s->data_read_loop = true; s->state = STATE_READING_DATA; break; @@ -1015,6 +1056,7 @@ static void decode_new_cmd(Flash *s, uint32_t value) s->state = STATE_READING_DATA; break; + case BULK_ERASE_60: case BULK_ERASE: if (s->write_enable) { DB_PRINT_L(0, "chip erase\n"); @@ -1032,12 +1074,14 @@ static void decode_new_cmd(Flash *s, uint32_t value) case EX_4BYTE_ADDR: s->four_bytes_address_mode = false; break; + case BRRD: case EXTEND_ADDR_READ: s->data[0] = s->ear; s->pos = 0; s->len = 1; s->state = STATE_READING_DATA; break; + case BRWR: case EXTEND_ADDR_WRITE: if (s->write_enable) { s->needed_bytes = 1; @@ -1133,6 +1177,7 @@ static int m25p80_cs(SSISlave *ss, bool select) s->pos = 0; s->state = STATE_IDLE; flash_sync_dirty(s, -1); + s->data_read_loop = false; } DB_PRINT_L(0, "%sselect\n", select ? 
"de" : ""); @@ -1198,7 +1243,9 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx) s->pos++; if (s->pos == s->len) { s->pos = 0; - s->state = STATE_IDLE; + if (!s->data_read_loop) { + s->state = STATE_IDLE; + } } break; @@ -1269,11 +1316,38 @@ static Property m25p80_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +static int m25p80_pre_load(void *opaque) +{ + Flash *s = (Flash *)opaque; + + s->data_read_loop = false; + return 0; +} + +static bool m25p80_data_read_loop_needed(void *opaque) +{ + Flash *s = (Flash *)opaque; + + return s->data_read_loop; +} + +static const VMStateDescription vmstate_m25p80_data_read_loop = { + .name = "m25p80/data_read_loop", + .version_id = 1, + .minimum_version_id = 1, + .needed = m25p80_data_read_loop_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(data_read_loop, Flash), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_m25p80 = { .name = "m25p80", .version_id = 0, .minimum_version_id = 0, .pre_save = m25p80_pre_save, + .pre_load = m25p80_pre_load, .fields = (VMStateField[]) { VMSTATE_UINT8(state, Flash), VMSTATE_UINT8_ARRAY(data, Flash, M25P80_INTERNAL_DATA_BUFFER_SZ), @@ -1295,6 +1369,10 @@ static const VMStateDescription vmstate_m25p80 = { VMSTATE_UINT8(spansion_cr3nv, Flash), VMSTATE_UINT8(spansion_cr4nv, Flash), VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_m25p80_data_read_loop, + NULL } }; diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c index 74d10af3d4..0ae63605f0 100644 --- a/hw/display/tc6393xb.c +++ b/hw/display/tc6393xb.c @@ -172,6 +172,7 @@ static void tc6393xb_gpio_handler_update(TC6393xbState *s) int bit; level = s->gpio_level & s->gpio_dir; + level &= MAKE_64BIT_MASK(0, TC6393XB_GPIOS); for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { bit = ctz32(diff); diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index f2cce597a9..2bd2f0f3c9 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -131,8 +131,6 @@ static void gicv3_its_common_reset(DeviceState *dev) s->creadr = 0; s->iidr = 0; memset(&s->baser, 0, sizeof(s->baser)); - - gicv3_its_post_load(s, 0); } static void gicv3_its_common_class_init(ObjectClass *klass, void *data) diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index 6fb45dffd7..bf290b8bff 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -28,6 +28,16 @@ #define TYPE_KVM_ARM_ITS "arm-its-kvm" #define KVM_ARM_ITS(obj) OBJECT_CHECK(GICv3ITSState, (obj), TYPE_KVM_ARM_ITS) +#define KVM_ARM_ITS_CLASS(klass) \ + OBJECT_CLASS_CHECK(KVMARMITSClass, (klass), TYPE_KVM_ARM_ITS) +#define KVM_ARM_ITS_GET_CLASS(obj) \ + OBJECT_GET_CLASS(KVMARMITSClass, (obj), TYPE_KVM_ARM_ITS) + +typedef struct KVMARMITSClass { + GICv3ITSCommonClass parent_class; + void (*parent_reset)(DeviceState *dev); +} KVMARMITSClass; + static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid) { @@ -155,10 +165,6 @@ static void kvm_arm_its_post_load(GICv3ITSState *s) { int i; - if (!s->iidr) { - return; - } - kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, GITS_IIDR, &s->iidr, true, &error_abort); @@ -190,6 +196,41 @@ static void kvm_arm_its_post_load(GICv3ITSState *s) GITS_CTLR, &s->ctlr, true, &error_abort); } +static void kvm_arm_its_reset(DeviceState *dev) +{ + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + KVMARMITSClass *c = KVM_ARM_ITS_GET_CLASS(s); + int i; + + c->parent_reset(dev); + + if (kvm_device_check_attr(s->dev_fd, 
KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_ITS_CTRL_RESET)) { + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_ITS_CTRL_RESET, NULL, true, &error_abort); + return; + } + + error_report("ITS KVM: full reset is not supported by the host kernel"); + + if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CTLR)) { + return; + } + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CTLR, &s->ctlr, true, &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CBASER, &s->cbaser, true, &error_abort); + + for (i = 0; i < 8; i++) { + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_BASER + i * 8, &s->baser[i], true, + &error_abort); + } +} + static Property kvm_arm_its_props[] = { DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "kvm-arm-gicv3", GICv3State *), @@ -200,12 +241,15 @@ static void kvm_arm_its_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass); + KVMARMITSClass *ic = KVM_ARM_ITS_CLASS(klass); dc->realize = kvm_arm_its_realize; dc->props = kvm_arm_its_props; + ic->parent_reset = dc->reset; icc->send_msi = kvm_its_send_msi; icc->pre_save = kvm_arm_its_pre_save; icc->post_load = kvm_arm_its_post_load; + dc->reset = kvm_arm_its_reset; } static const TypeInfo kvm_arm_its_info = { @@ -213,6 +257,7 @@ static const TypeInfo kvm_arm_its_info = { .parent = TYPE_ARM_GICV3_ITS_COMMON, .instance_size = sizeof(GICv3ITSState), .class_init = kvm_arm_its_class_init, + .class_size = sizeof(KVMARMITSClass), }; static void kvm_arm_its_register_types(void) diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 5d9c8834ad..dd49b6c335 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -1786,10 +1786,12 @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, uint64_t value, unsigned size, MemTxAttrs attrs) { + MemoryRegion *mr = opaque; + if (attrs.secure) { /* S accesses to the alias act like NS accesses to the real region */ attrs.secure = 0; - return nvic_sysreg_write(opaque, addr, value, size, attrs); + return memory_region_dispatch_write(mr, addr, value, size, attrs); } else { /* NS attrs are RAZ/WI for privileged, and BusFault for user */ if (attrs.user) { @@ -1803,10 +1805,12 @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, uint64_t *data, unsigned size, MemTxAttrs attrs) { + MemoryRegion *mr = opaque; + if (attrs.secure) { /* S accesses to the alias act like NS accesses to the real region */ attrs.secure = 0; - return nvic_sysreg_read(opaque, addr, data, size, attrs); + return memory_region_dispatch_read(mr, addr, data, size, attrs); } else { /* NS attrs are RAZ/WI for privileged, and BusFault for user */ if (attrs.user) { @@ -1823,6 +1827,36 @@ static const MemoryRegionOps nvic_sysreg_ns_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; +static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + NVICState *s = opaque; + MemoryRegion *mr; + + /* Direct the access to the correct systick */ + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); + return memory_region_dispatch_write(mr, addr, value, size, attrs); +} + +static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + NVICState *s = opaque; + MemoryRegion *mr; + + /* Direct the access to the correct systick */ + mr = 
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); + return memory_region_dispatch_read(mr, addr, data, size, attrs); +} + +static const MemoryRegionOps nvic_systick_ops = { + .read_with_attrs = nvic_systick_read, + .write_with_attrs = nvic_systick_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + static int nvic_post_load(void *opaque, int version_id) { NVICState *s = opaque; @@ -2001,17 +2035,16 @@ static void nvic_systick_trigger(void *opaque, int n, int level) /* SysTick just asked us to pend its exception. * (This is different from an external interrupt line's * behaviour.) - * TODO: when we implement the banked systicks we must make - * this pend the correct banked exception. + * n == 0 : NonSecure systick + * n == 1 : Secure systick */ - armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, false); + armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n); } } static void armv7m_nvic_realize(DeviceState *dev, Error **errp) { NVICState *s = NVIC(dev); - SysBusDevice *systick_sbd; Error *err = NULL; int regionlen; @@ -2028,14 +2061,35 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp) /* include space for internal exception vectors */ s->num_irq += NVIC_FIRST_IRQ; - object_property_set_bool(OBJECT(&s->systick), true, "realized", &err); + object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true, + "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } - systick_sbd = SYS_BUS_DEVICE(&s->systick); - sysbus_connect_irq(systick_sbd, 0, - qdev_get_gpio_in_named(dev, "systick-trigger", 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0, + qdev_get_gpio_in_named(dev, "systick-trigger", + M_REG_NS)); + + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { + /* We couldn't init the secure systick device in instance_init + * as we didn't know then if the CPU had the security extensions; + * so we have to do it here. 
+ */ + object_initialize(&s->systick[M_REG_S], sizeof(s->systick[M_REG_S]), + TYPE_SYSTICK); + qdev_set_parent_bus(DEVICE(&s->systick[M_REG_S]), sysbus_get_default()); + + object_property_set_bool(OBJECT(&s->systick[M_REG_S]), true, + "realized", &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0, + qdev_get_gpio_in_named(dev, "systick-trigger", + M_REG_S)); + } /* The NVIC and System Control Space (SCS) starts at 0xe000e000 * and looks like this: @@ -2069,15 +2123,24 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp) memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s, "nvic_sysregs", 0x1000); memory_region_add_subregion(&s->container, 0, &s->sysregmem); + + memory_region_init_io(&s->systickmem, OBJECT(s), + &nvic_systick_ops, s, + "nvic_systick", 0xe0); + memory_region_add_subregion_overlap(&s->container, 0x10, - sysbus_mmio_get_region(systick_sbd, 0), - 1); + &s->systickmem, 1); if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s), - &nvic_sysreg_ns_ops, s, + &nvic_sysreg_ns_ops, &s->sysregmem, "nvic_sysregs_ns", 0x1000); memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem); + memory_region_init_io(&s->systick_ns_mem, OBJECT(s), + &nvic_sysreg_ns_ops, &s->systickmem, + "nvic_systick_ns", 0xe0); + memory_region_add_subregion_overlap(&s->container, 0x20010, + &s->systick_ns_mem, 1); } sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container); @@ -2095,12 +2158,17 @@ static void armv7m_nvic_instance_init(Object *obj) NVICState *nvic = NVIC(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - object_initialize(&nvic->systick, sizeof(nvic->systick), TYPE_SYSTICK); - qdev_set_parent_bus(DEVICE(&nvic->systick), sysbus_get_default()); + object_initialize(&nvic->systick[M_REG_NS], + sizeof(nvic->systick[M_REG_NS]), TYPE_SYSTICK); + qdev_set_parent_bus(DEVICE(&nvic->systick[M_REG_NS]), sysbus_get_default()); + /* We can't initialize the secure systick here, as we don't know + * yet if we need it. 
+ */ sysbus_init_irq(sbd, &nvic->excpout); qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1); - qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", 1); + qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", + M_REG_NUM_BANKS); } static void armv7m_nvic_class_init(ObjectClass *klass, void *data) diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c index ef56d35f2c..d8187fadd1 100644 --- a/hw/ssi/xilinx_spips.c +++ b/hw/ssi/xilinx_spips.c @@ -27,11 +27,11 @@ #include "sysemu/sysemu.h" #include "hw/ptimer.h" #include "qemu/log.h" -#include "qemu/fifo8.h" -#include "hw/ssi/ssi.h" #include "qemu/bitops.h" #include "hw/ssi/xilinx_spips.h" #include "qapi/error.h" +#include "hw/register.h" +#include "sysemu/dma.h" #include "migration/blocker.h" #ifndef XILINX_SPIPS_ERR_DEBUG @@ -48,7 +48,7 @@ /* config register */ #define R_CONFIG (0x00 / 4) #define IFMODE (1U << 31) -#define ENDIAN (1 << 26) +#define R_CONFIG_ENDIAN (1 << 26) #define MODEFAIL_GEN_EN (1 << 17) #define MAN_START_COM (1 << 16) #define MAN_START_EN (1 << 15) @@ -66,17 +66,35 @@ /* interrupt mechanism */ #define R_INTR_STATUS (0x04 / 4) +#define R_INTR_STATUS_RESET (0x104) #define R_INTR_EN (0x08 / 4) #define R_INTR_DIS (0x0C / 4) #define R_INTR_MASK (0x10 / 4) #define IXR_TX_FIFO_UNDERFLOW (1 << 6) +/* Poll timeout not implemented */ +#define IXR_RX_FIFO_EMPTY (1 << 11) +#define IXR_GENERIC_FIFO_FULL (1 << 10) +#define IXR_GENERIC_FIFO_NOT_FULL (1 << 9) +#define IXR_TX_FIFO_EMPTY (1 << 8) +#define IXR_GENERIC_FIFO_EMPTY (1 << 7) #define IXR_RX_FIFO_FULL (1 << 5) #define IXR_RX_FIFO_NOT_EMPTY (1 << 4) #define IXR_TX_FIFO_FULL (1 << 3) #define IXR_TX_FIFO_NOT_FULL (1 << 2) #define IXR_TX_FIFO_MODE_FAIL (1 << 1) #define IXR_RX_FIFO_OVERFLOW (1 << 0) -#define IXR_ALL ((IXR_TX_FIFO_UNDERFLOW<<1)-1) +#define IXR_ALL ((1 << 13) - 1) +#define GQSPI_IXR_MASK 0xFBE +#define IXR_SELF_CLEAR \ +(IXR_GENERIC_FIFO_EMPTY \ +| IXR_GENERIC_FIFO_FULL \ +| IXR_GENERIC_FIFO_NOT_FULL \ +| IXR_TX_FIFO_EMPTY \ +| IXR_TX_FIFO_FULL \ +| IXR_TX_FIFO_NOT_FULL \ +| IXR_RX_FIFO_EMPTY \ +| IXR_RX_FIFO_FULL \ +| IXR_RX_FIFO_NOT_EMPTY) #define R_EN (0x14 / 4) #define R_DELAY (0x18 / 4) @@ -85,6 +103,9 @@ #define R_SLAVE_IDLE_COUNT (0x24 / 4) #define R_TX_THRES (0x28 / 4) #define R_RX_THRES (0x2C / 4) +#define R_GPIO (0x30 / 4) +#define R_LPBK_DLY_ADJ (0x38 / 4) +#define R_LPBK_DLY_ADJ_RESET (0x33) #define R_TXD1 (0x80 / 4) #define R_TXD2 (0x84 / 4) #define R_TXD3 (0x88 / 4) @@ -93,8 +114,9 @@ #define R_LQSPI_CFG_RESET 0x03A002EB #define LQSPI_CFG_LQ_MODE (1U << 31) #define LQSPI_CFG_TWO_MEM (1 << 30) -#define LQSPI_CFG_SEP_BUS (1 << 30) +#define LQSPI_CFG_SEP_BUS (1 << 29) #define LQSPI_CFG_U_PAGE (1 << 28) +#define LQSPI_CFG_ADDR4 (1 << 27) #define LQSPI_CFG_MODE_EN (1 << 25) #define LQSPI_CFG_MODE_WIDTH 8 #define LQSPI_CFG_MODE_SHIFT 16 @@ -102,115 +124,168 @@ #define LQSPI_CFG_DUMMY_SHIFT 8 #define LQSPI_CFG_INST_CODE 0xFF +#define R_CMND (0xc0 / 4) + #define R_CMND_RXFIFO_DRAIN (1 << 19) + FIELD(CMND, PARTIAL_BYTE_LEN, 16, 3) +#define R_CMND_EXT_ADD (1 << 15) + FIELD(CMND, RX_DISCARD, 8, 7) + FIELD(CMND, DUMMY_CYCLES, 2, 6) +#define R_CMND_DMA_EN (1 << 1) +#define R_CMND_PUSH_WAIT (1 << 0) +#define R_TRANSFER_SIZE (0xc4 / 4) #define R_LQSPI_STS (0xA4 / 4) #define LQSPI_STS_WR_RECVD (1 << 1) #define R_MOD_ID (0xFC / 4) +#define R_GQSPI_SELECT (0x144 / 4) + FIELD(GQSPI_SELECT, GENERIC_QSPI_EN, 0, 1) +#define R_GQSPI_ISR (0x104 / 4) +#define R_GQSPI_IER (0x108 / 4) +#define R_GQSPI_IDR (0x10c / 4) +#define 
R_GQSPI_IMR (0x110 / 4) +#define R_GQSPI_IMR_RESET (0xfbe) +#define R_GQSPI_TX_THRESH (0x128 / 4) +#define R_GQSPI_RX_THRESH (0x12c / 4) +#define R_GQSPI_GPIO (0x130 / 4) +#define R_GQSPI_LPBK_DLY_ADJ (0x138 / 4) +#define R_GQSPI_LPBK_DLY_ADJ_RESET (0x33) +#define R_GQSPI_CNFG (0x100 / 4) + FIELD(GQSPI_CNFG, MODE_EN, 30, 2) + FIELD(GQSPI_CNFG, GEN_FIFO_START_MODE, 29, 1) + FIELD(GQSPI_CNFG, GEN_FIFO_START, 28, 1) + FIELD(GQSPI_CNFG, ENDIAN, 26, 1) + /* Poll timeout not implemented */ + FIELD(GQSPI_CNFG, EN_POLL_TIMEOUT, 20, 1) + /* QEMU doesn't care about any of these last three */ + FIELD(GQSPI_CNFG, BR, 3, 3) + FIELD(GQSPI_CNFG, CPH, 2, 1) + FIELD(GQSPI_CNFG, CPL, 1, 1) +#define R_GQSPI_GEN_FIFO (0x140 / 4) +#define R_GQSPI_TXD (0x11c / 4) +#define R_GQSPI_RXD (0x120 / 4) +#define R_GQSPI_FIFO_CTRL (0x14c / 4) + FIELD(GQSPI_FIFO_CTRL, RX_FIFO_RESET, 2, 1) + FIELD(GQSPI_FIFO_CTRL, TX_FIFO_RESET, 1, 1) + FIELD(GQSPI_FIFO_CTRL, GENERIC_FIFO_RESET, 0, 1) +#define R_GQSPI_GFIFO_THRESH (0x150 / 4) +#define R_GQSPI_DATA_STS (0x15c / 4) +/* We use the snapshot register to hold the core state for the currently + * or most recently executed command. So the generic fifo format is defined + * for the snapshot register + */ +#define R_GQSPI_GF_SNAPSHOT (0x160 / 4) + FIELD(GQSPI_GF_SNAPSHOT, POLL, 19, 1) + FIELD(GQSPI_GF_SNAPSHOT, STRIPE, 18, 1) + FIELD(GQSPI_GF_SNAPSHOT, RECIEVE, 17, 1) + FIELD(GQSPI_GF_SNAPSHOT, TRANSMIT, 16, 1) + FIELD(GQSPI_GF_SNAPSHOT, DATA_BUS_SELECT, 14, 2) + FIELD(GQSPI_GF_SNAPSHOT, CHIP_SELECT, 12, 2) + FIELD(GQSPI_GF_SNAPSHOT, SPI_MODE, 10, 2) + FIELD(GQSPI_GF_SNAPSHOT, EXPONENT, 9, 1) + FIELD(GQSPI_GF_SNAPSHOT, DATA_XFER, 8, 1) + FIELD(GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA, 0, 8) +#define R_GQSPI_MOD_ID (0x1fc / 4) +#define R_GQSPI_MOD_ID_RESET (0x10a0000) + +#define R_QSPIDMA_DST_CTRL (0x80c / 4) +#define R_QSPIDMA_DST_CTRL_RESET (0x803ffa00) +#define R_QSPIDMA_DST_I_MASK (0x820 / 4) +#define R_QSPIDMA_DST_I_MASK_RESET (0xfe) +#define R_QSPIDMA_DST_CTRL2 (0x824 / 4) +#define R_QSPIDMA_DST_CTRL2_RESET (0x081bfff8) + /* size of TXRX FIFOs */ -#define RXFF_A 32 -#define TXFF_A 32 +#define RXFF_A (128) +#define TXFF_A (128) #define RXFF_A_Q (64 * 4) #define TXFF_A_Q (64 * 4) /* 16MB per linear region */ #define LQSPI_ADDRESS_BITS 24 -/* Bite off 4k chunks at a time */ -#define LQSPI_CACHE_SIZE 1024 #define SNOOP_CHECKING 0xFF -#define SNOOP_NONE 0xFE +#define SNOOP_ADDR 0xF0 +#define SNOOP_NONE 0xEE #define SNOOP_STRIPING 0 -typedef enum { - READ = 0x3, - FAST_READ = 0xb, - DOR = 0x3b, - QOR = 0x6b, - DIOR = 0xbb, - QIOR = 0xeb, - - PP = 0x2, - DPP = 0xa2, - QPP = 0x32, -} FlashCMD; - -typedef struct { - XilinxSPIPS parent_obj; - - uint8_t lqspi_buf[LQSPI_CACHE_SIZE]; - hwaddr lqspi_cached_addr; - Error *migration_blocker; - bool mmio_execution_enabled; -} XilinxQSPIPS; - -typedef struct XilinxSPIPSClass { - SysBusDeviceClass parent_class; - - const MemoryRegionOps *reg_ops; - - uint32_t rx_fifo_size; - uint32_t tx_fifo_size; -} XilinxSPIPSClass; - static inline int num_effective_busses(XilinxSPIPS *s) { return (s->regs[R_LQSPI_CFG] & LQSPI_CFG_SEP_BUS && s->regs[R_LQSPI_CFG] & LQSPI_CFG_TWO_MEM) ? 
s->num_busses : 1; } -static inline bool xilinx_spips_cs_is_set(XilinxSPIPS *s, int i, int field) +static void xilinx_spips_update_cs(XilinxSPIPS *s, int field) { - return ~field & (1 << i) && (s->regs[R_CONFIG] & MANUAL_CS - || !fifo8_is_empty(&s->tx_fifo)); -} - -static void xilinx_spips_update_cs_lines(XilinxSPIPS *s) -{ - int i, j; - bool found = false; - int field = s->regs[R_CONFIG] >> CS_SHIFT; + int i; for (i = 0; i < s->num_cs; i++) { - for (j = 0; j < num_effective_busses(s); j++) { - int upage = !!(s->regs[R_LQSPI_STS] & LQSPI_CFG_U_PAGE); - int cs_to_set = (j * s->num_cs + i + upage) % - (s->num_cs * s->num_busses); + bool old_state = s->cs_lines_state[i]; + bool new_state = field & (1 << i); - if (xilinx_spips_cs_is_set(s, i, field) && !found) { - DB_PRINT_L(0, "selecting slave %d\n", i); - qemu_set_irq(s->cs_lines[cs_to_set], 0); - } else { - DB_PRINT_L(0, "deselecting slave %d\n", i); - qemu_set_irq(s->cs_lines[cs_to_set], 1); - } - } - if (xilinx_spips_cs_is_set(s, i, field)) { - found = true; + if (old_state != new_state) { + s->cs_lines_state[i] = new_state; + s->rx_discard = ARRAY_FIELD_EX32(s->regs, CMND, RX_DISCARD); + DB_PRINT_L(1, "%sselecting slave %d\n", new_state ? "" : "de", i); } + qemu_set_irq(s->cs_lines[i], !new_state); } - if (!found) { + if (!(field & ((1 << s->num_cs) - 1))) { s->snoop_state = SNOOP_CHECKING; + s->cmd_dummies = 0; + s->link_state = 1; + s->link_state_next = 1; + s->link_state_next_when = 0; DB_PRINT_L(1, "moving to snoop check state\n"); } } +static void xlnx_zynqmp_qspips_update_cs_lines(XlnxZynqMPQSPIPS *s) +{ + if (s->regs[R_GQSPI_GF_SNAPSHOT]) { + int field = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, CHIP_SELECT); + xilinx_spips_update_cs(XILINX_SPIPS(s), field); + } +} + +static void xilinx_spips_update_cs_lines(XilinxSPIPS *s) +{ + int field = ~((s->regs[R_CONFIG] & CS) >> CS_SHIFT); + + /* In dual parallel, mirror low CS to both */ + if (num_effective_busses(s) == 2) { + /* Single bit chip-select for qspi */ + field &= 0x1; + field |= field << 1; + /* Dual stack U-Page */ + } else if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_TWO_MEM && + s->regs[R_LQSPI_STS] & LQSPI_CFG_U_PAGE) { + /* Single bit chip-select for qspi */ + field &= 0x1; + /* change from CS0 to CS1 */ + field <<= 1; + } + /* Auto CS */ + if (!(s->regs[R_CONFIG] & MANUAL_CS) && + fifo8_is_empty(&s->tx_fifo)) { + field = 0; + } + xilinx_spips_update_cs(s, field); +} + static void xilinx_spips_update_ixr(XilinxSPIPS *s) { - if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_LQ_MODE) { - return; + if (!(s->regs[R_LQSPI_CFG] & LQSPI_CFG_LQ_MODE)) { + s->regs[R_INTR_STATUS] &= ~IXR_SELF_CLEAR; + s->regs[R_INTR_STATUS] |= + (fifo8_is_full(&s->rx_fifo) ? IXR_RX_FIFO_FULL : 0) | + (s->rx_fifo.num >= s->regs[R_RX_THRES] ? + IXR_RX_FIFO_NOT_EMPTY : 0) | + (fifo8_is_full(&s->tx_fifo) ? IXR_TX_FIFO_FULL : 0) | + (fifo8_is_empty(&s->tx_fifo) ? IXR_TX_FIFO_EMPTY : 0) | + (s->tx_fifo.num < s->regs[R_TX_THRES] ? IXR_TX_FIFO_NOT_FULL : 0); } - /* These are set/cleared as they occur */ - s->regs[R_INTR_STATUS] &= (IXR_TX_FIFO_UNDERFLOW | IXR_RX_FIFO_OVERFLOW | - IXR_TX_FIFO_MODE_FAIL); - /* these are pure functions of fifo state, set them here */ - s->regs[R_INTR_STATUS] |= - (fifo8_is_full(&s->rx_fifo) ? IXR_RX_FIFO_FULL : 0) | - (s->rx_fifo.num >= s->regs[R_RX_THRES] ? IXR_RX_FIFO_NOT_EMPTY : 0) | - (fifo8_is_full(&s->tx_fifo) ? IXR_TX_FIFO_FULL : 0) | - (s->tx_fifo.num < s->regs[R_TX_THRES] ? 
IXR_TX_FIFO_NOT_FULL : 0); - /* drive external interrupt pin */ int new_irqline = !!(s->regs[R_INTR_MASK] & s->regs[R_INTR_STATUS] & IXR_ALL); if (new_irqline != s->irqline) { @@ -219,14 +294,42 @@ static void xilinx_spips_update_ixr(XilinxSPIPS *s) } } +static void xlnx_zynqmp_qspips_update_ixr(XlnxZynqMPQSPIPS *s) +{ + uint32_t gqspi_int; + int new_irqline; + + s->regs[R_GQSPI_ISR] &= ~IXR_SELF_CLEAR; + s->regs[R_GQSPI_ISR] |= + (fifo32_is_empty(&s->fifo_g) ? IXR_GENERIC_FIFO_EMPTY : 0) | + (fifo32_is_full(&s->fifo_g) ? IXR_GENERIC_FIFO_FULL : 0) | + (s->fifo_g.fifo.num < s->regs[R_GQSPI_GFIFO_THRESH] ? + IXR_GENERIC_FIFO_NOT_FULL : 0) | + (fifo8_is_empty(&s->rx_fifo_g) ? IXR_RX_FIFO_EMPTY : 0) | + (fifo8_is_full(&s->rx_fifo_g) ? IXR_RX_FIFO_FULL : 0) | + (s->rx_fifo_g.num >= s->regs[R_GQSPI_RX_THRESH] ? + IXR_RX_FIFO_NOT_EMPTY : 0) | + (fifo8_is_empty(&s->tx_fifo_g) ? IXR_TX_FIFO_EMPTY : 0) | + (fifo8_is_full(&s->tx_fifo_g) ? IXR_TX_FIFO_FULL : 0) | + (s->tx_fifo_g.num < s->regs[R_GQSPI_TX_THRESH] ? + IXR_TX_FIFO_NOT_FULL : 0); + + /* GQSPI Interrupt Trigger Status */ + gqspi_int = (~s->regs[R_GQSPI_IMR]) & s->regs[R_GQSPI_ISR] & GQSPI_IXR_MASK; + new_irqline = !!(gqspi_int & IXR_ALL); + + /* drive external interrupt pin */ + if (new_irqline != s->gqspi_irqline) { + s->gqspi_irqline = new_irqline; + qemu_set_irq(XILINX_SPIPS(s)->irq, s->gqspi_irqline); + } +} + static void xilinx_spips_reset(DeviceState *d) { XilinxSPIPS *s = XILINX_SPIPS(d); - int i; - for (i = 0; i < XLNX_SPIPS_R_MAX; i++) { - s->regs[i] = 0; - } + memset(s->regs, 0, sizeof(s->regs)); fifo8_reset(&s->rx_fifo); fifo8_reset(&s->rx_fifo); @@ -238,19 +341,54 @@ static void xilinx_spips_reset(DeviceState *d) /* FIXME: move magic number definition somewhere sensible */ s->regs[R_MOD_ID] = 0x01090106; s->regs[R_LQSPI_CFG] = R_LQSPI_CFG_RESET; + s->link_state = 1; + s->link_state_next = 1; + s->link_state_next_when = 0; s->snoop_state = SNOOP_CHECKING; + s->cmd_dummies = 0; + s->man_start_com = false; xilinx_spips_update_ixr(s); xilinx_spips_update_cs_lines(s); } -/* N way (num) in place bit striper. Lay out row wise bits (LSB to MSB) +static void xlnx_zynqmp_qspips_reset(DeviceState *d) +{ + XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(d); + + xilinx_spips_reset(d); + + memset(s->regs, 0, sizeof(s->regs)); + + fifo8_reset(&s->rx_fifo_g); + fifo8_reset(&s->tx_fifo_g); + fifo32_reset(&s->fifo_g); + s->regs[R_INTR_STATUS] = R_INTR_STATUS_RESET; + s->regs[R_GPIO] = 1; + s->regs[R_LPBK_DLY_ADJ] = R_LPBK_DLY_ADJ_RESET; + s->regs[R_GQSPI_GFIFO_THRESH] = 0x10; + s->regs[R_MOD_ID] = 0x01090101; + s->regs[R_GQSPI_IMR] = R_GQSPI_IMR_RESET; + s->regs[R_GQSPI_TX_THRESH] = 1; + s->regs[R_GQSPI_RX_THRESH] = 1; + s->regs[R_GQSPI_GPIO] = 1; + s->regs[R_GQSPI_LPBK_DLY_ADJ] = R_GQSPI_LPBK_DLY_ADJ_RESET; + s->regs[R_GQSPI_MOD_ID] = R_GQSPI_MOD_ID_RESET; + s->regs[R_QSPIDMA_DST_CTRL] = R_QSPIDMA_DST_CTRL_RESET; + s->regs[R_QSPIDMA_DST_I_MASK] = R_QSPIDMA_DST_I_MASK_RESET; + s->regs[R_QSPIDMA_DST_CTRL2] = R_QSPIDMA_DST_CTRL2_RESET; + s->man_start_com_g = false; + s->gqspi_irqline = 0; + xlnx_zynqmp_qspips_update_ixr(s); +} + +/* N way (num) in place bit striper. Lay out row wise bits (MSB to LSB) * column wise (from element 0 to N-1). num is the length of x, and dir * reverses the direction of the transform. 
Best illustrated by example: * Each digit in the below array is a single bit (num == 3): * - * {{ 76543210, } ----- stripe (dir == false) -----> {{ FCheb630, } - * { hgfedcba, } { GDAfc741, } - * { HGFEDCBA, }} <---- upstripe (dir == true) ----- { HEBgda52, }} + * {{ 76543210, } ----- stripe (dir == false) -----> {{ 741gdaFC, } + * { hgfedcba, } { 630fcHEB, } + * { HGFEDCBA, }} <---- upstripe (dir == true) ----- { 52hebGDA, }} */ static inline void stripe8(uint8_t *x, int num, bool dir) @@ -258,34 +396,188 @@ { uint8_t r[num]; memset(r, 0, sizeof(uint8_t) * num); int idx[2] = {0, 0}; - int bit[2] = {0, 0}; + int bit[2] = {0, 7}; int d = dir; for (idx[0] = 0; idx[0] < num; ++idx[0]) { - for (bit[0] = 0; bit[0] < 8; ++bit[0]) { - r[idx[d]] |= x[idx[!d]] & 1 << bit[!d] ? 1 << bit[d] : 0; + for (bit[0] = 7; bit[0] >= 0; bit[0]--) { + r[idx[!d]] |= x[idx[d]] & 1 << bit[d] ? 1 << bit[!d] : 0; idx[1] = (idx[1] + 1) % num; if (!idx[1]) { - bit[1]++; + bit[1]--; } } } memcpy(x, r, sizeof(uint8_t) * num); } +static void xlnx_zynqmp_qspips_flush_fifo_g(XlnxZynqMPQSPIPS *s) +{ + while (s->regs[R_GQSPI_DATA_STS] || !fifo32_is_empty(&s->fifo_g)) { + uint8_t tx_rx[2] = { 0 }; + int num_stripes = 1; + uint8_t busses; + int i; + + if (!s->regs[R_GQSPI_DATA_STS]) { + uint8_t imm; + + s->regs[R_GQSPI_GF_SNAPSHOT] = fifo32_pop(&s->fifo_g); + DB_PRINT_L(0, "GQSPI command: %x\n", s->regs[R_GQSPI_GF_SNAPSHOT]); + if (!s->regs[R_GQSPI_GF_SNAPSHOT]) { + DB_PRINT_L(0, "Dummy GQSPI Delay Command Entry, Do nothing"); + continue; + } + xlnx_zynqmp_qspips_update_cs_lines(s); + + imm = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA); + if (!ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_XFER)) { + /* immediate transfer */ + if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT) || + ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE)) { + s->regs[R_GQSPI_DATA_STS] = 1; + /* CS setup/hold - do nothing */ + } else { + s->regs[R_GQSPI_DATA_STS] = 0; + } + } else if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, EXPONENT)) { + if (imm > 31) { + qemu_log_mask(LOG_UNIMP, "QSPI exponential transfer too" + " long - 2 ^ %" PRId8 " requested\n", imm); + } + s->regs[R_GQSPI_DATA_STS] = 1ul << imm; + } else { + s->regs[R_GQSPI_DATA_STS] = imm; + } + } + /* Zero length transfer check */ + if (!s->regs[R_GQSPI_DATA_STS]) { + continue; + } + if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE) && + fifo8_is_full(&s->rx_fifo_g)) { + /* No space in RX fifo for transfer - try again later */ + return; + } + if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, STRIPE) && + (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT) || + ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE))) { + num_stripes = 2; + } + if (!ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_XFER)) { + tx_rx[0] = ARRAY_FIELD_EX32(s->regs, + GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA); + } else if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT)) { + for (i = 0; i < num_stripes; ++i) { + if (!fifo8_is_empty(&s->tx_fifo_g)) { + tx_rx[i] = fifo8_pop(&s->tx_fifo_g); + s->tx_fifo_g_align++; + } else { + return; + } + } + } + if (num_stripes == 1) { + /* mirror */ + tx_rx[1] = tx_rx[0]; + } + busses = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_BUS_SELECT); + for (i = 0; i < 2; ++i) { + DB_PRINT_L(1, "bus %d tx = %02x\n", i, tx_rx[i]); + tx_rx[i] = ssi_transfer(XILINX_SPIPS(s)->spi[i], tx_rx[i]); + DB_PRINT_L(1, "bus %d rx = %02x\n", i, tx_rx[i]); + } + if (s->regs[R_GQSPI_DATA_STS] > 1 && 
+ busses == 0x3 && num_stripes == 2) { + s->regs[R_GQSPI_DATA_STS] -= 2; + } else if (s->regs[R_GQSPI_DATA_STS] > 0) { + s->regs[R_GQSPI_DATA_STS]--; + } + if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE)) { + for (i = 0; i < 2; ++i) { + if (busses & (1 << i)) { + DB_PRINT_L(1, "bus %d push_byte = %02x\n", i, tx_rx[i]); + fifo8_push(&s->rx_fifo_g, tx_rx[i]); + s->rx_fifo_g_align++; + } + } + } + if (!s->regs[R_GQSPI_DATA_STS]) { + for (; s->tx_fifo_g_align % 4; s->tx_fifo_g_align++) { + fifo8_pop(&s->tx_fifo_g); + } + for (; s->rx_fifo_g_align % 4; s->rx_fifo_g_align++) { + fifo8_push(&s->rx_fifo_g, 0); + } + } + } +} + +static int xilinx_spips_num_dummies(XilinxQSPIPS *qs, uint8_t command) +{ + if (!qs) { + /* The SPI device is not a QSPI device */ + return -1; + } + + switch (command) { /* check for dummies */ + case READ: /* no dummy bytes/cycles */ + case PP: + case DPP: + case QPP: + case READ_4: + case PP_4: + case QPP_4: + return 0; + case FAST_READ: + case DOR: + case QOR: + case DOR_4: + case QOR_4: + return 1; + case DIOR: + case FAST_READ_4: + case DIOR_4: + return 2; + case QIOR: + case QIOR_4: + return 5; + default: + return -1; + } +} + +static inline uint8_t get_addr_length(XilinxSPIPS *s, uint8_t cmd) +{ + switch (cmd) { + case PP_4: + case QPP_4: + case READ_4: + case QIOR_4: + case FAST_READ_4: + case DOR_4: + case QOR_4: + case DIOR_4: + return 4; + default: + return (s->regs[R_CMND] & R_CMND_EXT_ADD) ? 4 : 3; + } +} + static void xilinx_spips_flush_txfifo(XilinxSPIPS *s) { int debug_level = 0; + XilinxQSPIPS *q = (XilinxQSPIPS *) object_dynamic_cast(OBJECT(s), + TYPE_XILINX_QSPIPS); for (;;) { int i; uint8_t tx = 0; uint8_t tx_rx[num_effective_busses(s)]; + uint8_t dummy_cycles = 0; + uint8_t addr_length; if (fifo8_is_empty(&s->tx_fifo)) { - if (!(s->regs[R_LQSPI_CFG] & LQSPI_CFG_LQ_MODE)) { - s->regs[R_INTR_STATUS] |= IXR_TX_FIFO_UNDERFLOW; - } xilinx_spips_update_ixr(s); return; } else if (s->snoop_state == SNOOP_STRIPING) { @@ -293,53 +585,102 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s) tx_rx[i] = fifo8_pop(&s->tx_fifo); } stripe8(tx_rx, num_effective_busses(s), false); - } else { + } else if (s->snoop_state >= SNOOP_ADDR) { tx = fifo8_pop(&s->tx_fifo); for (i = 0; i < num_effective_busses(s); ++i) { tx_rx[i] = tx; } + } else { + /* Extract a dummy byte and generate dummy cycles according to the + * link state */ + tx = fifo8_pop(&s->tx_fifo); + dummy_cycles = 8 / s->link_state; } for (i = 0; i < num_effective_busses(s); ++i) { + int bus = num_effective_busses(s) - 1 - i; + if (dummy_cycles) { + int d; + for (d = 0; d < dummy_cycles; ++d) { + tx_rx[0] = ssi_transfer(s->spi[bus], (uint32_t)tx_rx[0]); + } + } else { + DB_PRINT_L(debug_level, "tx = %02x\n", tx_rx[i]); + tx_rx[i] = ssi_transfer(s->spi[bus], (uint32_t)tx_rx[i]); + DB_PRINT_L(debug_level, "rx = %02x\n", tx_rx[i]); + } } - if (fifo8_is_full(&s->rx_fifo)) { + if (s->regs[R_CMND] & R_CMND_RXFIFO_DRAIN) { + DB_PRINT_L(debug_level, "discarding drained rx byte\n"); + /* Do nothing */ + } else if (s->rx_discard) { + DB_PRINT_L(debug_level, "discarding discarded rx byte\n"); + s->rx_discard -= 8 / s->link_state; + } else if (fifo8_is_full(&s->rx_fifo)) { s->regs[R_INTR_STATUS] |= IXR_RX_FIFO_OVERFLOW; DB_PRINT_L(0, "rx FIFO overflow"); } else if (s->snoop_state == SNOOP_STRIPING) { stripe8(tx_rx, num_effective_busses(s), true); for 
(i = 0; i < num_effective_busses(s); ++i) { fifo8_push(&s->rx_fifo, (uint8_t)tx_rx[i]); + DB_PRINT_L(debug_level, "pushing striped rx byte\n"); } } else { + DB_PRINT_L(debug_level, "pushing unstriped rx byte\n"); fifo8_push(&s->rx_fifo, (uint8_t)tx_rx[0]); } + if (s->link_state_next_when) { + s->link_state_next_when--; + if (!s->link_state_next_when) { + s->link_state = s->link_state_next; + } + } + DB_PRINT_L(debug_level, "initial snoop state: %x\n", (unsigned)s->snoop_state); switch (s->snoop_state) { case (SNOOP_CHECKING): - switch (tx) { /* new instruction code */ - case READ: /* 3 address bytes, no dummy bytes/cycles */ - case PP: - case DPP: - case QPP: - s->snoop_state = 3; - break; - case FAST_READ: /* 3 address bytes, 1 dummy byte */ - case DOR: - case QOR: - case DIOR: /* FIXME: these vary between vendor - set to spansion */ - s->snoop_state = 4; - break; - case QIOR: /* 3 address bytes, 2 dummy bytes */ - s->snoop_state = 6; - break; - default: + /* Store the count of dummy bytes in the txfifo */ + s->cmd_dummies = xilinx_spips_num_dummies(q, tx); + addr_length = get_addr_length(s, tx); + if (s->cmd_dummies < 0) { s->snoop_state = SNOOP_NONE; + } else { + s->snoop_state = SNOOP_ADDR + addr_length - 1; + } + switch (tx) { + case DPP: + case DOR: + case DOR_4: + s->link_state_next = 2; + s->link_state_next_when = addr_length + s->cmd_dummies; + break; + case QPP: + case QPP_4: + case QOR: + case QOR_4: + s->link_state_next = 4; + s->link_state_next_when = addr_length + s->cmd_dummies; + break; + case DIOR: + case DIOR_4: + s->link_state = 2; + break; + case QIOR: + case QIOR_4: + s->link_state = 4; + break; + } + break; + case (SNOOP_ADDR): + /* Address has been transmitted, transmit dummy cycles now if + * needed */ + if (s->cmd_dummies < 0) { + s->snoop_state = SNOOP_NONE; + } else { + s->snoop_state = s->cmd_dummies; } break; case (SNOOP_STRIPING): @@ -358,12 +699,128 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s) } } -static inline void rx_data_bytes(XilinxSPIPS *s, uint8_t *value, int max) +static inline void tx_data_bytes(Fifo8 *fifo, uint32_t value, int num, bool be) +{ + int i; + for (i = 0; i < num && !fifo8_is_full(fifo); ++i) { + if (be) { + fifo8_push(fifo, (uint8_t)(value >> 24)); + value <<= 8; + } else { + fifo8_push(fifo, (uint8_t)value); + value >>= 8; + } + } +} + +static void xilinx_spips_check_zero_pump(XilinxSPIPS *s) +{ + if (!s->regs[R_TRANSFER_SIZE]) { + return; + } + if (!fifo8_is_empty(&s->tx_fifo) && s->regs[R_CMND] & R_CMND_PUSH_WAIT) { + return; + } + /* + * The zero pump must never fill tx fifo such that rx overflow is + * possible + */ + while (s->regs[R_TRANSFER_SIZE] && + s->rx_fifo.num + s->tx_fifo.num < RXFF_A_Q - 3) { + /* endianess just doesn't matter when zero pumping */ + tx_data_bytes(&s->tx_fifo, 0, 4, false); + s->regs[R_TRANSFER_SIZE] &= ~0x03ull; + s->regs[R_TRANSFER_SIZE] -= 4; + } +} + +static void xilinx_spips_check_flush(XilinxSPIPS *s) +{ + if (s->man_start_com || + (!fifo8_is_empty(&s->tx_fifo) && + !(s->regs[R_CONFIG] & MAN_START_EN))) { + xilinx_spips_check_zero_pump(s); + xilinx_spips_flush_txfifo(s); + } + if (fifo8_is_empty(&s->tx_fifo) && !s->regs[R_TRANSFER_SIZE]) { + s->man_start_com = false; + } + xilinx_spips_update_ixr(s); +} + +static void xlnx_zynqmp_qspips_check_flush(XlnxZynqMPQSPIPS *s) +{ + bool gqspi_has_work = s->regs[R_GQSPI_DATA_STS] || + !fifo32_is_empty(&s->fifo_g); + + if (ARRAY_FIELD_EX32(s->regs, GQSPI_SELECT, GENERIC_QSPI_EN)) { + if (s->man_start_com_g || (gqspi_has_work && + 
!ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, GEN_FIFO_START_MODE))) { + xlnx_zynqmp_qspips_flush_fifo_g(s); + } + } else { + xilinx_spips_check_flush(XILINX_SPIPS(s)); + } + if (!gqspi_has_work) { + s->man_start_com_g = false; + } + xlnx_zynqmp_qspips_update_ixr(s); +} + +static inline int rx_data_bytes(Fifo8 *fifo, uint8_t *value, int max) { int i; - for (i = 0; i < max && !fifo8_is_empty(&s->rx_fifo); ++i) { - value[i] = fifo8_pop(&s->rx_fifo); + for (i = 0; i < max && !fifo8_is_empty(fifo); ++i) { + value[i] = fifo8_pop(fifo); + } + return max - i; +} + +static const void *pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num) +{ + void *ret; + + if (max == 0 || max > fifo->num) { + abort(); + } + *num = MIN(fifo->capacity - fifo->head, max); + ret = &fifo->data[fifo->head]; + fifo->head += *num; + fifo->head %= fifo->capacity; + fifo->num -= *num; + return ret; +} + +static void xlnx_zynqmp_qspips_notify(void *opaque) +{ + XlnxZynqMPQSPIPS *rq = XLNX_ZYNQMP_QSPIPS(opaque); + XilinxSPIPS *s = XILINX_SPIPS(rq); + Fifo8 *recv_fifo; + + if (ARRAY_FIELD_EX32(rq->regs, GQSPI_SELECT, GENERIC_QSPI_EN)) { + if (!(ARRAY_FIELD_EX32(rq->regs, GQSPI_CNFG, MODE_EN) == 2)) { + return; + } + recv_fifo = &rq->rx_fifo_g; + } else { + if (!(s->regs[R_CMND] & R_CMND_DMA_EN)) { + return; + } + recv_fifo = &s->rx_fifo; + } + while (recv_fifo->num >= 4 + && stream_can_push(rq->dma, xlnx_zynqmp_qspips_notify, rq)) + { + size_t ret; + uint32_t num; + const void *rxd = pop_buf(recv_fifo, 4, &num); + + memcpy(rq->dma_buf, rxd, num); + + ret = stream_push(rq->dma, rq->dma_buf, 4); + assert(ret == 4); + xlnx_zynqmp_qspips_check_flush(rq); } } @@ -374,6 +831,7 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr, uint32_t mask = ~0; uint32_t ret; uint8_t rx_buf[4]; + int shortfall; addr >>= 2; switch (addr) { @@ -384,6 +842,7 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr, ret = s->regs[addr] & IXR_ALL; s->regs[addr] = 0; DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret); + xilinx_spips_update_ixr(s); return ret; case R_INTR_MASK: mask = IXR_ALL; @@ -404,10 +863,15 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr, break; case R_RX_DATA: memset(rx_buf, 0, sizeof(rx_buf)); - rx_data_bytes(s, rx_buf, s->num_txrx_bytes); - ret = s->regs[R_CONFIG] & ENDIAN ? cpu_to_be32(*(uint32_t *)rx_buf) - : cpu_to_le32(*(uint32_t *)rx_buf); + shortfall = rx_data_bytes(&s->rx_fifo, rx_buf, s->num_txrx_bytes); + ret = s->regs[R_CONFIG] & R_CONFIG_ENDIAN ? 
+ cpu_to_be32(*(uint32_t *)rx_buf) : + cpu_to_le32(*(uint32_t *)rx_buf); + if (!(s->regs[R_CONFIG] & R_CONFIG_ENDIAN)) { + ret <<= 8 * shortfall; + } DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret); + xilinx_spips_check_flush(s); xilinx_spips_update_ixr(s); return ret; } @@ -417,16 +881,39 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr, } -static inline void tx_data_bytes(XilinxSPIPS *s, uint32_t value, int num) +static uint64_t xlnx_zynqmp_qspips_read(void *opaque, + hwaddr addr, unsigned size) { - int i; - for (i = 0; i < num && !fifo8_is_full(&s->tx_fifo); ++i) { - if (s->regs[R_CONFIG] & ENDIAN) { - fifo8_push(&s->tx_fifo, (uint8_t)(value >> 24)); - value <<= 8; - } else { - fifo8_push(&s->tx_fifo, (uint8_t)value); - value >>= 8; + XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(opaque); + uint32_t reg = addr / 4; + uint32_t ret; + uint8_t rx_buf[4]; + int shortfall; + + if (reg <= R_MOD_ID) { + return xilinx_spips_read(opaque, addr, size); + } else { + switch (reg) { + case R_GQSPI_RXD: + if (fifo8_is_empty(&s->rx_fifo_g)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read from empty GQSPI RX FIFO\n"); + return 0; + } + memset(rx_buf, 0, sizeof(rx_buf)); + shortfall = rx_data_bytes(&s->rx_fifo_g, rx_buf, + XILINX_SPIPS(s)->num_txrx_bytes); + ret = ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, ENDIAN) ? + cpu_to_be32(*(uint32_t *)rx_buf) : + cpu_to_le32(*(uint32_t *)rx_buf); + if (!ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, ENDIAN)) { + ret <<= 8 * shortfall; + } + xlnx_zynqmp_qspips_check_flush(s); + xlnx_zynqmp_qspips_update_ixr(s); + return ret; + default: + return s->regs[reg]; } } } @@ -435,7 +922,6 @@ static void xilinx_spips_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { int mask = ~0; - int man_start_com = 0; XilinxSPIPS *s = opaque; DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr, (unsigned)value); @@ -443,8 +929,8 @@ static void xilinx_spips_write(void *opaque, hwaddr addr, switch (addr) { case R_CONFIG: mask = ~(R_CONFIG_RSVD | MAN_START_COM); - if (value & MAN_START_COM) { - man_start_com = 1; + if ((value & MAN_START_COM) && (s->regs[R_CONFIG] & MAN_START_EN)) { + s->man_start_com = true; } break; case R_INTR_STATUS: @@ -471,25 +957,26 @@ static void xilinx_spips_write(void *opaque, hwaddr addr, mask = 0; break; case R_TX_DATA: - tx_data_bytes(s, (uint32_t)value, s->num_txrx_bytes); + tx_data_bytes(&s->tx_fifo, (uint32_t)value, s->num_txrx_bytes, + s->regs[R_CONFIG] & R_CONFIG_ENDIAN); goto no_reg_update; case R_TXD1: - tx_data_bytes(s, (uint32_t)value, 1); + tx_data_bytes(&s->tx_fifo, (uint32_t)value, 1, + s->regs[R_CONFIG] & R_CONFIG_ENDIAN); goto no_reg_update; case R_TXD2: - tx_data_bytes(s, (uint32_t)value, 2); + tx_data_bytes(&s->tx_fifo, (uint32_t)value, 2, + s->regs[R_CONFIG] & R_CONFIG_ENDIAN); goto no_reg_update; case R_TXD3: - tx_data_bytes(s, (uint32_t)value, 3); + tx_data_bytes(&s->tx_fifo, (uint32_t)value, 3, + s->regs[R_CONFIG] & R_CONFIG_ENDIAN); goto no_reg_update; } s->regs[addr] = (s->regs[addr] & ~mask) | (value & mask); no_reg_update: xilinx_spips_update_cs_lines(s); - if ((man_start_com && s->regs[R_CONFIG] & MAN_START_EN) || - (fifo8_is_empty(&s->tx_fifo) && s->regs[R_CONFIG] & MAN_START_EN)) { - xilinx_spips_flush_txfifo(s); - } + xilinx_spips_check_flush(s); xilinx_spips_update_cs_lines(s); xilinx_spips_update_ixr(s); } @@ -517,6 +1004,7 @@ static void xilinx_qspips_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { XilinxQSPIPS *q = XILINX_QSPIPS(opaque); + XilinxSPIPS *s = XILINX_SPIPS(opaque); 
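/*
 * Worked example for the "shortfall" shift in the RX data read paths
 * above (a reading of the model code, not a statement about hardware):
 * in the little-endian case (ENDIAN clear), popping only three bytes
 * 0xAA 0xBB 0xCC when four were requested leaves shortfall == 1 and
 * rx_buf == { 0xAA, 0xBB, 0xCC, 0x00 }, so ret starts out as 0x00CCBBAA;
 * "ret <<= 8 * shortfall" then left-aligns the valid bytes: 0xCCBBAA00.
 */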
xilinx_spips_write(opaque, addr, value, size); addr >>= 2; @@ -524,6 +1012,72 @@ static void xilinx_qspips_write(void *opaque, hwaddr addr, if (addr == R_LQSPI_CFG) { xilinx_qspips_invalidate_mmio_ptr(q); } + if (s->regs[R_CMND] & R_CMND_RXFIFO_DRAIN) { + fifo8_reset(&s->rx_fifo); + } +} + +static void xlnx_zynqmp_qspips_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(opaque); + uint32_t reg = addr / 4; + + if (reg <= R_MOD_ID) { + xilinx_qspips_write(opaque, addr, value, size); + } else { + switch (reg) { + case R_GQSPI_CNFG: + if (FIELD_EX32(value, GQSPI_CNFG, GEN_FIFO_START) && + ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, GEN_FIFO_START_MODE)) { + s->man_start_com_g = true; + } + s->regs[reg] = value & ~(R_GQSPI_CNFG_GEN_FIFO_START_MASK); + break; + case R_GQSPI_GEN_FIFO: + if (!fifo32_is_full(&s->fifo_g)) { + fifo32_push(&s->fifo_g, value); + } + break; + case R_GQSPI_TXD: + tx_data_bytes(&s->tx_fifo_g, (uint32_t)value, 4, + ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, ENDIAN)); + break; + case R_GQSPI_FIFO_CTRL: + if (FIELD_EX32(value, GQSPI_FIFO_CTRL, GENERIC_FIFO_RESET)) { + fifo32_reset(&s->fifo_g); + } + if (FIELD_EX32(value, GQSPI_FIFO_CTRL, TX_FIFO_RESET)) { + fifo8_reset(&s->tx_fifo_g); + } + if (FIELD_EX32(value, GQSPI_FIFO_CTRL, RX_FIFO_RESET)) { + fifo8_reset(&s->rx_fifo_g); + } + break; + case R_GQSPI_IDR: + s->regs[R_GQSPI_IMR] |= value; + break; + case R_GQSPI_IER: + s->regs[R_GQSPI_IMR] &= ~value; + break; + case R_GQSPI_ISR: + s->regs[R_GQSPI_ISR] &= ~value; + break; + case R_GQSPI_IMR: + case R_GQSPI_RXD: + case R_GQSPI_GF_SNAPSHOT: + case R_GQSPI_MOD_ID: + break; + default: + s->regs[reg] = value; + break; + } + xlnx_zynqmp_qspips_update_cs_lines(s); + xlnx_zynqmp_qspips_check_flush(s); + xlnx_zynqmp_qspips_update_cs_lines(s); + xlnx_zynqmp_qspips_update_ixr(s); + } + xlnx_zynqmp_qspips_notify(s); } static const MemoryRegionOps qspips_ops = { @@ -532,6 +1086,12 @@ static const MemoryRegionOps qspips_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; +static const MemoryRegionOps xlnx_zynqmp_qspips_ops = { + .read = xlnx_zynqmp_qspips_read, + .write = xlnx_zynqmp_qspips_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + #define LQSPI_CACHE_SIZE 1024 static void lqspi_load_cache(void *opaque, hwaddr addr) @@ -563,6 +1123,9 @@ static void lqspi_load_cache(void *opaque, hwaddr addr) fifo8_push(&s->tx_fifo, s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE); /* read address */ DB_PRINT_L(0, "pushing read address %06x\n", flash_addr); + if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_ADDR4) { + fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 24)); + } fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 16)); fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 8)); fifo8_push(&s->tx_fifo, (uint8_t)flash_addr); @@ -586,11 +1149,11 @@ static void lqspi_load_cache(void *opaque, hwaddr addr) while (cache_entry < LQSPI_CACHE_SIZE) { for (i = 0; i < 64; ++i) { - tx_data_bytes(s, 0, 1); + tx_data_bytes(&s->tx_fifo, 0, 1, false); } xilinx_spips_flush_txfifo(s); for (i = 0; i < 64; ++i) { - rx_data_bytes(s, &q->lqspi_buf[cache_entry++], 1); + rx_data_bytes(&s->rx_fifo, &q->lqspi_buf[cache_entry++], 1); } } @@ -666,6 +1229,7 @@ static void xilinx_spips_realize(DeviceState *dev, Error **errp) } s->cs_lines = g_new0(qemu_irq, s->num_cs * s->num_busses); + s->cs_lines_state = g_new0(bool, s->num_cs * s->num_busses); for (i = 0, cs = s->cs_lines; i < s->num_busses; ++i, cs += s->num_cs) { ssi_auto_connect_slaves(DEVICE(s), cs, s->spi[i]); } @@ -676,7 +1240,7 @@ 
static void xilinx_spips_realize(DeviceState *dev, Error **errp) } memory_region_init_io(&s->iomem, OBJECT(s), xsc->reg_ops, s, - "spi", XLNX_SPIPS_R_MAX * 4); + "spi", XLNX_ZYNQMP_SPIPS_R_MAX * 4); sysbus_init_mmio(sbd, &s->iomem); s->irqline = -1; @@ -714,6 +1278,28 @@ static void xilinx_qspips_realize(DeviceState *dev, Error **errp) } } +static void xlnx_zynqmp_qspips_realize(DeviceState *dev, Error **errp) +{ + XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(dev); + XilinxSPIPSClass *xsc = XILINX_SPIPS_GET_CLASS(s); + + xilinx_qspips_realize(dev, errp); + fifo8_create(&s->rx_fifo_g, xsc->rx_fifo_size); + fifo8_create(&s->tx_fifo_g, xsc->tx_fifo_size); + fifo32_create(&s->fifo_g, 32); +} + +static void xlnx_zynqmp_qspips_init(Object *obj) +{ + XlnxZynqMPQSPIPS *rq = XLNX_ZYNQMP_QSPIPS(obj); + + object_property_add_link(obj, "stream-connected-dma", TYPE_STREAM_SLAVE, + (Object **)&rq->dma, + object_property_allow_set_link, + OBJ_PROP_LINK_UNREF_ON_RELEASE, + NULL); +} + static int xilinx_spips_post_load(void *opaque, int version_id) { xilinx_spips_update_ixr((XilinxSPIPS *)opaque); @@ -735,6 +1321,46 @@ static const VMStateDescription vmstate_xilinx_spips = { } }; +static int xlnx_zynqmp_qspips_post_load(void *opaque, int version_id) +{ + XlnxZynqMPQSPIPS *s = (XlnxZynqMPQSPIPS *)opaque; + XilinxSPIPS *qs = XILINX_SPIPS(s); + + if (ARRAY_FIELD_EX32(s->regs, GQSPI_SELECT, GENERIC_QSPI_EN) && + fifo8_is_empty(&qs->rx_fifo) && fifo8_is_empty(&qs->tx_fifo)) { + xlnx_zynqmp_qspips_update_ixr(s); + xlnx_zynqmp_qspips_update_cs_lines(s); + } + return 0; +} + +static const VMStateDescription vmstate_xilinx_qspips = { + .name = "xilinx_qspips", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, XilinxQSPIPS, 0, + vmstate_xilinx_spips, XilinxSPIPS), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xlnx_zynqmp_qspips = { + .name = "xlnx_zynqmp_qspips", + .version_id = 1, + .minimum_version_id = 1, + .post_load = xlnx_zynqmp_qspips_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, XlnxZynqMPQSPIPS, 0, + vmstate_xilinx_qspips, XilinxQSPIPS), + VMSTATE_FIFO8(tx_fifo_g, XlnxZynqMPQSPIPS), + VMSTATE_FIFO8(rx_fifo_g, XlnxZynqMPQSPIPS), + VMSTATE_FIFO32(fifo_g, XlnxZynqMPQSPIPS), + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPQSPIPS, XLNX_ZYNQMP_SPIPS_R_MAX), + VMSTATE_END_OF_LIST() + } +}; + static Property xilinx_qspips_properties[] = { /* We had to turn this off for 2.10 as it is not compatible with migration. * It can be enabled but will prevent the device from being migrated. 
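The VMState definitions above combine two idioms: VMSTATE_STRUCT embeds the parent class's migration state in the subclass's stream, and optional state travels in a subsection whose .needed callback keeps the wire format compatible with older QEMU versions (the same pattern as vmstate_m25p80_data_read_loop earlier in this series). A minimal sketch of the subsection idiom, assuming QEMU's vmstate API; the device and field names are hypothetical:

    typedef struct DemoDev {
        /* ... fields that have always been migrated ... */
        bool new_feature_active; /* state added after the format shipped */
    } DemoDev;

    static bool demo_new_feature_needed(void *opaque)
    {
        DemoDev *s = opaque;

        /* Send the subsection only when it actually carries information */
        return s->new_feature_active;
    }

    static const VMStateDescription vmstate_demo_new_feature = {
        .name = "demo/new_feature",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = demo_new_feature_needed,
        .fields = (VMStateField[]) {
            VMSTATE_BOOL(new_feature_active, DemoDev),
            VMSTATE_END_OF_LIST()
        }
    };

On the receiving end a missing subsection simply leaves the field at its default, which is why m25p80_pre_load() above clears data_read_loop before an incoming migration.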
@@ -779,6 +1405,19 @@ static void xilinx_spips_class_init(ObjectClass *klass, void *data) xsc->tx_fifo_size = TXFF_A; } +static void xlnx_zynqmp_qspips_class_init(ObjectClass *klass, void * data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); + + dc->realize = xlnx_zynqmp_qspips_realize; + dc->reset = xlnx_zynqmp_qspips_reset; + dc->vmsd = &vmstate_xlnx_zynqmp_qspips; + xsc->reg_ops = &xlnx_zynqmp_qspips_ops; + xsc->rx_fifo_size = RXFF_A_Q; + xsc->tx_fifo_size = TXFF_A_Q; +} + static const TypeInfo xilinx_spips_info = { .name = TYPE_XILINX_SPIPS, .parent = TYPE_SYS_BUS_DEVICE, @@ -794,10 +1433,19 @@ static const TypeInfo xilinx_qspips_info = { .class_init = xilinx_qspips_class_init, }; +static const TypeInfo xlnx_zynqmp_qspips_info = { + .name = TYPE_XLNX_ZYNQMP_QSPIPS, + .parent = TYPE_XILINX_QSPIPS, + .instance_size = sizeof(XlnxZynqMPQSPIPS), + .instance_init = xlnx_zynqmp_qspips_init, + .class_init = xlnx_zynqmp_qspips_class_init, +}; + static void xilinx_spips_register_types(void) { type_register_static(&xilinx_spips_info); type_register_static(&xilinx_qspips_info); + type_register_static(&xlnx_zynqmp_qspips_info); } type_init(xilinx_spips_register_types) diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h index 6eff81a995..3e6fb9b7bd 100644 --- a/include/hw/arm/xlnx-zynqmp.h +++ b/include/hw/arm/xlnx-zynqmp.h @@ -40,6 +40,10 @@ #define XLNX_ZYNQMP_NUM_SDHCI 2 #define XLNX_ZYNQMP_NUM_SPIS 2 +#define XLNX_ZYNQMP_NUM_QSPI_BUS 2 +#define XLNX_ZYNQMP_NUM_QSPI_BUS_CS 2 +#define XLNX_ZYNQMP_NUM_QSPI_FLASH 4 + #define XLNX_ZYNQMP_NUM_OCM_BANKS 4 #define XLNX_ZYNQMP_OCM_RAM_0_ADDRESS 0xFFFC0000 #define XLNX_ZYNQMP_OCM_RAM_SIZE 0x10000 @@ -83,6 +87,7 @@ typedef struct XlnxZynqMPState { SysbusAHCIState sata; SDHCIState sdhci[XLNX_ZYNQMP_NUM_SDHCI]; XilinxSPIPS spi[XLNX_ZYNQMP_NUM_SPIS]; + XlnxZynqMPQSPIPS qspi; XlnxDPState dp; XlnxDPDMAState dpdma; diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h index ac7997ca8c..8bc29112e3 100644 --- a/include/hw/intc/armv7m_nvic.h +++ b/include/hw/intc/armv7m_nvic.h @@ -78,13 +78,15 @@ typedef struct NVICState { MemoryRegion sysregmem; MemoryRegion sysreg_ns_mem; + MemoryRegion systickmem; + MemoryRegion systick_ns_mem; MemoryRegion container; uint32_t num_irq; qemu_irq excpout; qemu_irq sysresetreq; - SysTickState systick; + SysTickState systick[M_REG_NUM_BANKS]; } NVICState; #endif diff --git a/include/hw/ssi/xilinx_spips.h b/include/hw/ssi/xilinx_spips.h index 06aa09629d..d398a4e81c 100644 --- a/include/hw/ssi/xilinx_spips.h +++ b/include/hw/ssi/xilinx_spips.h @@ -26,11 +26,29 @@ #define XILINX_SPIPS_H #include "hw/ssi/ssi.h" -#include "qemu/fifo8.h" +#include "qemu/fifo32.h" +#include "hw/stream.h" typedef struct XilinxSPIPS XilinxSPIPS; #define XLNX_SPIPS_R_MAX (0x100 / 4) +#define XLNX_ZYNQMP_SPIPS_R_MAX (0x830 / 4) + +/* Bite off 4k chunks at a time */ +#define LQSPI_CACHE_SIZE 1024 + +typedef enum { + READ = 0x3, READ_4 = 0x13, + FAST_READ = 0xb, FAST_READ_4 = 0x0c, + DOR = 0x3b, DOR_4 = 0x3c, + QOR = 0x6b, QOR_4 = 0x6c, + DIOR = 0xbb, DIOR_4 = 0xbc, + QIOR = 0xeb, QIOR_4 = 0xec, + + PP = 0x2, PP_4 = 0x12, + DPP = 0xa2, + QPP = 0x32, QPP_4 = 0x34, +} FlashCMD; struct XilinxSPIPS { SysBusDevice parent_obj; @@ -45,19 +63,70 @@ struct XilinxSPIPS { uint8_t num_busses; uint8_t snoop_state; + int cmd_dummies; + uint8_t link_state; + uint8_t link_state_next; + uint8_t link_state_next_when; qemu_irq *cs_lines; + bool *cs_lines_state; SSIBus **spi; Fifo8 rx_fifo; 
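/*
 * How the model uses the link fields declared above (a summary of the
 * code, not a hardware claim): link_state is the current number of data
 * lines on the flash link (1, 2 or 4), so one dummy byte popped from the
 * TX FIFO stands for 8 / link_state clock cycles - e.g. four cycles in
 * dual I/O after a DIOR command. link_state_next and link_state_next_when
 * defer a width change until the address and dummy bytes of the current
 * command have been clocked out at the old width.
 */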
Fifo8 tx_fifo; uint8_t num_txrx_bytes; + uint32_t rx_discard; uint32_t regs[XLNX_SPIPS_R_MAX]; + + bool man_start_com; }; +typedef struct { + XilinxSPIPS parent_obj; + + uint8_t lqspi_buf[LQSPI_CACHE_SIZE]; + hwaddr lqspi_cached_addr; + Error *migration_blocker; + bool mmio_execution_enabled; +} XilinxQSPIPS; + +typedef struct { + XilinxQSPIPS parent_obj; + + StreamSlave *dma; + uint8_t dma_buf[4]; + int gqspi_irqline; + + uint32_t regs[XLNX_ZYNQMP_SPIPS_R_MAX]; + + /* GQSPI has separate tx/rx fifos */ + Fifo8 rx_fifo_g; + Fifo8 tx_fifo_g; + Fifo32 fifo_g; + /* + * At the end of each generic command, misaligned extra bytes are discarded + * (tx) or padded (rx) respectively to round it out (and avoid the need for + * individual byte access). Since we use byte fifos, keep track of the + * alignment WRT word access. + */ + uint8_t rx_fifo_g_align; + uint8_t tx_fifo_g_align; + bool man_start_com_g; +} XlnxZynqMPQSPIPS; + +typedef struct XilinxSPIPSClass { + SysBusDeviceClass parent_class; + + const MemoryRegionOps *reg_ops; + + uint32_t rx_fifo_size; + uint32_t tx_fifo_size; +} XilinxSPIPSClass; + #define TYPE_XILINX_SPIPS "xlnx.ps7-spi" #define TYPE_XILINX_QSPIPS "xlnx.ps7-qspi" +#define TYPE_XLNX_ZYNQMP_QSPIPS "xlnx.usmp-gqspi" #define XILINX_SPIPS(obj) \ OBJECT_CHECK(XilinxSPIPS, (obj), TYPE_XILINX_SPIPS) @@ -69,4 +138,7 @@ struct XilinxSPIPS { #define XILINX_QSPIPS(obj) \ OBJECT_CHECK(XilinxQSPIPS, (obj), TYPE_XILINX_QSPIPS) +#define XLNX_ZYNQMP_QSPIPS(obj) \ + OBJECT_CHECK(XlnxZynqMPQSPIPS, (obj), TYPE_XLNX_ZYNQMP_QSPIPS) + #endif /* XILINX_SPIPS_H */ diff --git a/include/standard-headers/asm-s390/virtio-ccw.h b/include/standard-headers/asm-s390/virtio-ccw.h index a9a4ebf79f..967aad3901 100644 --- a/include/standard-headers/asm-s390/virtio-ccw.h +++ b/include/standard-headers/asm-s390/virtio-ccw.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Definitions for virtio-ccw devices. * diff --git a/include/standard-headers/asm-x86/hyperv.h b/include/standard-headers/asm-x86/hyperv.h index 5f95d5ed02..ce87d0c344 100644 --- a/include/standard-headers/asm-x86/hyperv.h +++ b/include/standard-headers/asm-x86/hyperv.h @@ -1,393 +1 @@ -#ifndef _ASM_X86_HYPERV_H -#define _ASM_X86_HYPERV_H - -#include "standard-headers/linux/types.h" - -/* - * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent - * is set by CPUID(HvCpuIdFunctionVersionAndFeatures). - */ -#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 -#define HYPERV_CPUID_INTERFACE 0x40000001 -#define HYPERV_CPUID_VERSION 0x40000002 -#define HYPERV_CPUID_FEATURES 0x40000003 -#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 -#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 - -#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000 -#define HYPERV_CPUID_MIN 0x40000005 -#define HYPERV_CPUID_MAX 0x4000ffff - -/* - * Feature identification. EAX indicates which features are available - * to the partition based upon the current partition privileges.
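 * (Aside, referring back to the XlnxZynqMPQSPIPS alignment counters defined
 * in xilinx_spips.h above: since rx_fifo_g_align/tx_fifo_g_align track the
 * byte position mod 4, the number of pad (rx) or discard (tx) bytes needed
 * to round out a finished generic command is just
 *     (4 - (fifo_g_align & 3)) & 3
 * -- an illustration of the scheme, not a quote of the device model.)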
- */ - -/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */ -#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) -/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ -#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) -/* Partition reference TSC MSR is available */ -#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE (1 << 9) - -/* A partition's reference time stamp counter (TSC) page */ -#define HV_X64_MSR_REFERENCE_TSC 0x40000021 - -/* - * There is a single feature flag that signifies if the partition has access - * to MSRs with local APIC and TSC frequencies. - */ -#define HV_X64_ACCESS_FREQUENCY_MSRS (1 << 11) - -/* - * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM - * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available - */ -#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2) -/* - * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through - * HV_X64_MSR_STIMER3_COUNT) available - */ -#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3) -/* - * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) - * are available - */ -#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4) -/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/ -#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5) -/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/ -#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6) -/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/ -#define HV_X64_MSR_RESET_AVAILABLE (1 << 7) - /* - * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, - * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, - * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available - */ -#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8) - -/* Frequency MSRs available */ -#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE (1 << 8) - -/* Crash MSR available */ -#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10) - -/* - * Feature identification: EBX indicates which flags were specified at - * partition creation. The format is the same as the partition creation - * flag structure defined in section Partition Creation Flags. - */ -#define HV_X64_CREATE_PARTITIONS (1 << 0) -#define HV_X64_ACCESS_PARTITION_ID (1 << 1) -#define HV_X64_ACCESS_MEMORY_POOL (1 << 2) -#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3) -#define HV_X64_POST_MESSAGES (1 << 4) -#define HV_X64_SIGNAL_EVENTS (1 << 5) -#define HV_X64_CREATE_PORT (1 << 6) -#define HV_X64_CONNECT_PORT (1 << 7) -#define HV_X64_ACCESS_STATS (1 << 8) -#define HV_X64_DEBUGGING (1 << 11) -#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12) -#define HV_X64_CONFIGURE_PROFILER (1 << 13) - -/* - * Feature identification. EDX indicates which miscellaneous features - * are available to the partition. 
- */ -/* The MWAIT instruction is available (per section MONITOR / MWAIT) */ -#define HV_X64_MWAIT_AVAILABLE (1 << 0) -/* Guest debugging support is available */ -#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1) -/* Performance Monitor support is available*/ -#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2) -/* Support for physical CPU dynamic partitioning events is available*/ -#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3) -/* - * Support for passing hypercall input parameter block via XMM - * registers is available - */ -#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) -/* Support for a virtual guest idle state is available */ -#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) -/* Guest crash data handler available */ -#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10) - -/* - * Implementation recommendations. Indicates which behaviors the hypervisor - * recommends the OS implement for optimal performance. - */ - /* - * Recommend using hypercall for address space switches rather - * than MOV to CR3 instruction - */ -#define HV_X64_AS_SWITCH_RECOMMENDED (1 << 0) -/* Recommend using hypercall for local TLB flushes rather - * than INVLPG or MOV to CR3 instructions */ -#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1) -/* - * Recommend using hypercall for remote TLB flushes rather - * than inter-processor interrupts - */ -#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2) -/* - * Recommend using MSRs for accessing APIC registers - * EOI, ICR and TPR rather than their memory-mapped counterparts - */ -#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3) -/* Recommend using the hypervisor-provided MSR to initiate a system RESET */ -#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4) -/* - * Recommend using relaxed timing for this partition. If used, - * the VM should disable any watchdog timeouts that rely on the - * timely delivery of external interrupts - */ -#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5) - -/* - * Virtual APIC support - */ -#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9) - -/* Recommend using the newer ExProcessorMasks interface */ -#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) - -/* - * Crash notification flag. - */ -#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63) - -/* MSR used to identify the guest OS. */ -#define HV_X64_MSR_GUEST_OS_ID 0x40000000 - -/* MSR used to setup pages used to communicate with the hypervisor. */ -#define HV_X64_MSR_HYPERCALL 0x40000001 - -/* MSR used to provide vcpu index */ -#define HV_X64_MSR_VP_INDEX 0x40000002 - -/* MSR used to reset the guest OS. */ -#define HV_X64_MSR_RESET 0x40000003 - -/* MSR used to provide vcpu runtime in 100ns units */ -#define HV_X64_MSR_VP_RUNTIME 0x40000010 - -/* MSR used to read the per-partition time reference counter */ -#define HV_X64_MSR_TIME_REF_COUNT 0x40000020 - -/* MSR used to retrieve the TSC frequency */ -#define HV_X64_MSR_TSC_FREQUENCY 0x40000022 - -/* MSR used to retrieve the local APIC timer frequency */ -#define HV_X64_MSR_APIC_FREQUENCY 0x40000023 - -/* Define the virtual APIC registers */ -#define HV_X64_MSR_EOI 0x40000070 -#define HV_X64_MSR_ICR 0x40000071 -#define HV_X64_MSR_TPR 0x40000072 -#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073 - -/* Define synthetic interrupt controller model specific registers. 
*/ -#define HV_X64_MSR_SCONTROL 0x40000080 -#define HV_X64_MSR_SVERSION 0x40000081 -#define HV_X64_MSR_SIEFP 0x40000082 -#define HV_X64_MSR_SIMP 0x40000083 -#define HV_X64_MSR_EOM 0x40000084 -#define HV_X64_MSR_SINT0 0x40000090 -#define HV_X64_MSR_SINT1 0x40000091 -#define HV_X64_MSR_SINT2 0x40000092 -#define HV_X64_MSR_SINT3 0x40000093 -#define HV_X64_MSR_SINT4 0x40000094 -#define HV_X64_MSR_SINT5 0x40000095 -#define HV_X64_MSR_SINT6 0x40000096 -#define HV_X64_MSR_SINT7 0x40000097 -#define HV_X64_MSR_SINT8 0x40000098 -#define HV_X64_MSR_SINT9 0x40000099 -#define HV_X64_MSR_SINT10 0x4000009A -#define HV_X64_MSR_SINT11 0x4000009B -#define HV_X64_MSR_SINT12 0x4000009C -#define HV_X64_MSR_SINT13 0x4000009D -#define HV_X64_MSR_SINT14 0x4000009E -#define HV_X64_MSR_SINT15 0x4000009F - -/* - * Synthetic Timer MSRs. Four timers per vcpu. - */ -#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 -#define HV_X64_MSR_STIMER0_COUNT 0x400000B1 -#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 -#define HV_X64_MSR_STIMER1_COUNT 0x400000B3 -#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 -#define HV_X64_MSR_STIMER2_COUNT 0x400000B5 -#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 -#define HV_X64_MSR_STIMER3_COUNT 0x400000B7 - -/* Hyper-V guest crash notification MSR's */ -#define HV_X64_MSR_CRASH_P0 0x40000100 -#define HV_X64_MSR_CRASH_P1 0x40000101 -#define HV_X64_MSR_CRASH_P2 0x40000102 -#define HV_X64_MSR_CRASH_P3 0x40000103 -#define HV_X64_MSR_CRASH_P4 0x40000104 -#define HV_X64_MSR_CRASH_CTL 0x40000105 -#define HV_X64_MSR_CRASH_CTL_NOTIFY (1ULL << 63) -#define HV_X64_MSR_CRASH_PARAMS \ - (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) - -#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 -#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 -#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ - (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) - -/* Declare the various hypercall operations. */ -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 -#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 -#define HVCALL_POST_MESSAGE 0x005c -#define HVCALL_SIGNAL_EVENT 0x005d - -#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001 -#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12 -#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \ - (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) - -#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001 -#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12 - -#define HV_PROCESSOR_POWER_STATE_C0 0 -#define HV_PROCESSOR_POWER_STATE_C1 1 -#define HV_PROCESSOR_POWER_STATE_C2 2 -#define HV_PROCESSOR_POWER_STATE_C3 3 - -#define HV_FLUSH_ALL_PROCESSORS BIT(0) -#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) -#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) -#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) - -enum HV_GENERIC_SET_FORMAT { - HV_GENERIC_SET_SPARCE_4K, - HV_GENERIC_SET_ALL, -}; - -/* hypercall status code */ -#define HV_STATUS_SUCCESS 0 -#define HV_STATUS_INVALID_HYPERCALL_CODE 2 -#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 -#define HV_STATUS_INVALID_ALIGNMENT 4 -#define HV_STATUS_INSUFFICIENT_MEMORY 11 -#define HV_STATUS_INVALID_CONNECTION_ID 18 -#define HV_STATUS_INSUFFICIENT_BUFFERS 19 - -typedef struct _HV_REFERENCE_TSC_PAGE { - uint32_t tsc_sequence; - uint32_t res1; - uint64_t tsc_scale; - int64_t tsc_offset; -} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE; - -/* Define the number of synthetic interrupt sources. 
*/ -#define HV_SYNIC_SINT_COUNT (16) -/* Define the expected SynIC version. */ -#define HV_SYNIC_VERSION_1 (0x1) - -#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0) -#define HV_SYNIC_SIMP_ENABLE (1ULL << 0) -#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0) -#define HV_SYNIC_SINT_MASKED (1ULL << 16) -#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17) -#define HV_SYNIC_SINT_VECTOR_MASK (0xFF) - -#define HV_SYNIC_STIMER_COUNT (4) - -/* Define synthetic interrupt controller message constants. */ -#define HV_MESSAGE_SIZE (256) -#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) -#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) - -/* Define hypervisor message types. */ -enum hv_message_type { - HVMSG_NONE = 0x00000000, - - /* Memory access messages. */ - HVMSG_UNMAPPED_GPA = 0x80000000, - HVMSG_GPA_INTERCEPT = 0x80000001, - - /* Timer notification messages. */ - HVMSG_TIMER_EXPIRED = 0x80000010, - - /* Error messages. */ - HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020, - HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021, - HVMSG_UNSUPPORTED_FEATURE = 0x80000022, - - /* Trace buffer complete messages. */ - HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, - - /* Platform-specific processor intercept messages. */ - HVMSG_X64_IOPORT_INTERCEPT = 0x80010000, - HVMSG_X64_MSR_INTERCEPT = 0x80010001, - HVMSG_X64_CPUID_INTERCEPT = 0x80010002, - HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, - HVMSG_X64_APIC_EOI = 0x80010004, - HVMSG_X64_LEGACY_FP_ERROR = 0x80010005 -}; - -/* Define synthetic interrupt controller message flags. */ -union hv_message_flags { - uint8_t asu8; - struct { - uint8_t msg_pending:1; - uint8_t reserved:7; - }; -}; - -/* Define port identifier type. */ -union hv_port_id { - uint32_t asu32; - struct { - uint32_t id:24; - uint32_t reserved:8; - } u; -}; - -/* Define synthetic interrupt controller message header. */ -struct hv_message_header { - uint32_t message_type; - uint8_t payload_size; - union hv_message_flags message_flags; - uint8_t reserved[2]; - union { - uint64_t sender; - union hv_port_id port; - }; -}; - -/* Define synthetic interrupt controller message format. */ -struct hv_message { - struct hv_message_header header; - union { - uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; - } u; -}; - -/* Define the synthetic interrupt message page layout. */ -struct hv_message_page { - struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; -}; - -/* Define timer message payload structure. 
*/ -struct hv_timer_message_payload { - uint32_t timer_index; - uint32_t reserved; - uint64_t expiration_time; /* When the timer expired */ - uint64_t delivery_time; /* When the message was delivered */ -}; - -#define HV_STIMER_ENABLE (1ULL << 0) -#define HV_STIMER_PERIODIC (1ULL << 1) -#define HV_STIMER_LAZY (1ULL << 2) -#define HV_STIMER_AUTOENABLE (1ULL << 3) -#define HV_STIMER_SINT(config) (uint8_t)(((config) >> 16) & 0x0F) - -#endif + /* this is a temporary placeholder until kvm_para.h stops including it */ diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h index 2fa0f4ea6b..79841b543f 100644 --- a/include/standard-headers/linux/input-event-codes.h +++ b/include/standard-headers/linux/input-event-codes.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Input event codes * @@ -406,6 +407,7 @@ #define BTN_TOOL_MOUSE 0x146 #define BTN_TOOL_LENS 0x147 #define BTN_TOOL_QUINTTAP 0x148 /* Five fingers on trackpad */ +#define BTN_STYLUS3 0x149 #define BTN_TOUCH 0x14a #define BTN_STYLUS 0x14b #define BTN_STYLUS2 0x14c diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h index 666e201ddb..bc3e6d3d5b 100644 --- a/include/standard-headers/linux/input.h +++ b/include/standard-headers/linux/input.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (c) 1999-2002 Vojtech Pavlik * diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index f8d5804592..70c2b2ade0 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * pci_regs.h * @@ -746,6 +747,7 @@ #define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */ #define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */ #define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */ +#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */ #define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ /* Virtual Channel */ @@ -939,9 +941,13 @@ #define PCI_SATA_SIZEOF_LONG 16 /* Resizable BARs */ +#define PCI_REBAR_CAP 4 /* capability register */ +#define PCI_REBAR_CAP_SIZES 0x00FFFFF0 /* supported BAR sizes */ #define PCI_REBAR_CTRL 8 /* control register */ -#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */ -#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */ +#define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */ +#define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */ +#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */ +#define PCI_REBAR_CTRL_BAR_SIZE 0x00001F00 /* BAR size */ /* Dynamic Power Allocation */ #define PCI_DPA_CAP 4 /* capability register */ @@ -960,6 +966,7 @@ /* Downstream Port Containment */ #define PCI_EXP_DPC_CAP 4 /* DPC Capability */ +#define PCI_EXP_DPC_IRQ 0x1f /* DPC Interrupt Message Number */ #define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */ #define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */ #define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */ @@ -995,19 +1002,25 @@ #define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */ #define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */ -/* L1 PM Substates */ -#define PCI_L1SS_CAP 4 /* capability register */ -#define PCI_L1SS_CAP_PCIPM_L1_2 1 /* PCI PM L1.2 
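 * (Aside on the reworked Resizable BAR macros above: now that the NBAR field
 * has an explicit mask as well as a shift, extraction is simply
 *     nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
 * i.e. bits 7:5 of the control register -- a sketch, not a quote of any
 * in-tree caller.)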
Support */ -#define PCI_L1SS_CAP_PCIPM_L1_1 2 /* PCI PM L1.1 Support */ -#define PCI_L1SS_CAP_ASPM_L1_2 4 /* ASPM L1.2 Support */ -#define PCI_L1SS_CAP_ASPM_L1_1 8 /* ASPM L1.1 Support */ -#define PCI_L1SS_CAP_L1_PM_SS 16 /* L1 PM Substates Support */ -#define PCI_L1SS_CTL1 8 /* Control Register 1 */ -#define PCI_L1SS_CTL1_PCIPM_L1_2 1 /* PCI PM L1.2 Enable */ -#define PCI_L1SS_CTL1_PCIPM_L1_1 2 /* PCI PM L1.1 Support */ -#define PCI_L1SS_CTL1_ASPM_L1_2 4 /* ASPM L1.2 Support */ -#define PCI_L1SS_CTL1_ASPM_L1_1 8 /* ASPM L1.1 Support */ -#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000F -#define PCI_L1SS_CTL2 0xC /* Control Register 2 */ +/* ASPM L1 PM Substates */ +#define PCI_L1SS_CAP 0x04 /* Capabilities Register */ +#define PCI_L1SS_CAP_PCIPM_L1_2 0x00000001 /* PCI-PM L1.2 Supported */ +#define PCI_L1SS_CAP_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Supported */ +#define PCI_L1SS_CAP_ASPM_L1_2 0x00000004 /* ASPM L1.2 Supported */ +#define PCI_L1SS_CAP_ASPM_L1_1 0x00000008 /* ASPM L1.1 Supported */ +#define PCI_L1SS_CAP_L1_PM_SS 0x00000010 /* L1 PM Substates Supported */ +#define PCI_L1SS_CAP_CM_RESTORE_TIME 0x0000ff00 /* Port Common_Mode_Restore_Time */ +#define PCI_L1SS_CAP_P_PWR_ON_SCALE 0x00030000 /* Port T_POWER_ON scale */ +#define PCI_L1SS_CAP_P_PWR_ON_VALUE 0x00f80000 /* Port T_POWER_ON value */ +#define PCI_L1SS_CTL1 0x08 /* Control 1 Register */ +#define PCI_L1SS_CTL1_PCIPM_L1_2 0x00000001 /* PCI-PM L1.2 Enable */ +#define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */ +#define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */ +#define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */ +#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f +#define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */ +#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */ +#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ +#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ #endif /* LINUX_PCI_REGS_H */ diff --git a/linux-headers/asm-arm/kvm.h b/linux-headers/asm-arm/kvm.h index fa9fae8dc2..4392955081 100644 --- a/linux-headers/asm-arm/kvm.h +++ b/linux-headers/asm-arm/kvm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2012 - Virtual Open Systems and Columbia University * Author: Christoffer Dall @@ -151,6 +152,12 @@ struct kvm_arch_memory_slot { (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) #define ARM_CP15_REG64(...) 
__ARM_CP15_REG64(__VA_ARGS__) +/* PL1 Physical Timer Registers */ +#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) +#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) +#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) + +/* Virtual Timer Registers */ #define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) #define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) #define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) @@ -215,6 +222,7 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 +#define KVM_DEV_ARM_ITS_CTRL_RESET 4 /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 diff --git a/linux-headers/asm-arm/kvm_para.h b/linux-headers/asm-arm/kvm_para.h index 14fab8f0b9..baacc4996d 100644 --- a/linux-headers/asm-arm/kvm_para.h +++ b/linux-headers/asm-arm/kvm_para.h @@ -1 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #include diff --git a/linux-headers/asm-arm/unistd.h b/linux-headers/asm-arm/unistd.h index 155571b874..18b0825885 100644 --- a/linux-headers/asm-arm/unistd.h +++ b/linux-headers/asm-arm/unistd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * arch/arm/include/asm/unistd.h * @@ -35,5 +36,6 @@ #define __ARM_NR_usr26 (__ARM_NR_BASE+3) #define __ARM_NR_usr32 (__ARM_NR_BASE+4) #define __ARM_NR_set_tls (__ARM_NR_BASE+5) +#define __ARM_NR_get_tls (__ARM_NR_BASE+6) #endif /* __ASM_ARM_UNISTD_H */ diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index d254700b08..4e80651efe 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2012,2013 - ARM Ltd * Author: Marc Zyngier @@ -195,6 +196,12 @@ struct kvm_arch_memory_slot { #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64) +/* Physical Timer EL0 Registers */ +#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1) +#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2) +#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1) + +/* EL0 Virtual Timer Registers */ #define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1) #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) @@ -227,6 +234,7 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 +#define KVM_DEV_ARM_ITS_CTRL_RESET 4 /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 diff --git a/linux-headers/asm-arm64/unistd.h b/linux-headers/asm-arm64/unistd.h index 043d17a213..5072cbd15c 100644 --- a/linux-headers/asm-arm64/unistd.h +++ b/linux-headers/asm-arm64/unistd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. 
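 * (Aside on the KVM_REG_ARM_PTIMER_* IDs added above: they are ordinary
 * ONE_REG identifiers, so a VMM could read the physical counter with
 *     struct kvm_one_reg reg = { .id = KVM_REG_ARM_PTIMER_CNT,
 *                                .addr = (uintptr_t)&cnt };
 *     ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 * -- a sketch assuming a valid vcpu fd, with error handling elided.)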
* diff --git a/linux-headers/asm-powerpc/epapr_hcalls.h b/linux-headers/asm-powerpc/epapr_hcalls.h index 33b3f89f55..6cca559993 100644 --- a/linux-headers/asm-powerpc/epapr_hcalls.h +++ b/linux-headers/asm-powerpc/epapr_hcalls.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */ /* * ePAPR hcall interface * diff --git a/linux-headers/asm-powerpc/kvm.h b/linux-headers/asm-powerpc/kvm.h index 8cf8f0c969..61d6049f4c 100644 --- a/linux-headers/asm-powerpc/kvm.h +++ b/linux-headers/asm-powerpc/kvm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as diff --git a/linux-headers/asm-powerpc/kvm_para.h b/linux-headers/asm-powerpc/kvm_para.h index 2abcc46382..9beb49cc10 100644 --- a/linux-headers/asm-powerpc/kvm_para.h +++ b/linux-headers/asm-powerpc/kvm_para.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as diff --git a/linux-headers/asm-powerpc/unistd.h b/linux-headers/asm-powerpc/unistd.h index a1786340e9..36abf58582 100644 --- a/linux-headers/asm-powerpc/unistd.h +++ b/linux-headers/asm-powerpc/unistd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* * This file contains the system call numbers. * diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h index 7b750ef7ee..32d372e977 100644 --- a/linux-headers/asm-s390/kvm.h +++ b/linux-headers/asm-s390/kvm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __LINUX_KVM_S390_H #define __LINUX_KVM_S390_H /* diff --git a/linux-headers/asm-s390/kvm_para.h b/linux-headers/asm-s390/kvm_para.h index ff1f4e7b30..0dc86b3a7c 100644 --- a/linux-headers/asm-s390/kvm_para.h +++ b/linux-headers/asm-s390/kvm_para.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * User API definitions for paravirtual devices on s390 * diff --git a/linux-headers/asm-s390/unistd.h b/linux-headers/asm-s390/unistd.h index 65e7e59dbb..99223b874a 100644 --- a/linux-headers/asm-s390/unistd.h +++ b/linux-headers/asm-s390/unistd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * S390 version * @@ -315,7 +316,8 @@ #define __NR_pwritev2 377 #define __NR_s390_guarded_storage 378 #define __NR_statx 379 -#define NR_syscalls 380 +#define __NR_s390_sthyi 380 +#define NR_syscalls 381 /* * There are some system calls that are not present on 64 bit, some diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h index c2824d02ba..f3a960488e 100644 --- a/linux-headers/asm-x86/kvm.h +++ b/linux-headers/asm-x86/kvm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_KVM_H #define _ASM_X86_KVM_H diff --git a/linux-headers/asm-x86/kvm_para.h b/linux-headers/asm-x86/kvm_para.h index cefa127d84..4c300f6aaa 100644 --- a/linux-headers/asm-x86/kvm_para.h +++ b/linux-headers/asm-x86/kvm_para.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_KVM_PARA_H #define _ASM_X86_KVM_PARA_H @@ -109,5 +110,4 @@ struct kvm_vcpu_pv_apf_data { #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK #define KVM_PV_EOI_DISABLED 0x0 - #endif /* _ASM_X86_KVM_PARA_H */ diff --git 
a/linux-headers/asm-x86/unistd.h b/linux-headers/asm-x86/unistd.h index 1f99b12843..c04f638154 100644 --- a/linux-headers/asm-x86/unistd.h +++ b/linux-headers/asm-x86/unistd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_UNISTD_H #define _ASM_X86_UNISTD_H diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index dd8a91801e..ce6c2f11f4 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __LINUX_KVM_H #define __LINUX_KVM_H @@ -930,6 +931,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_SMT_POSSIBLE 147 #define KVM_CAP_HYPERV_SYNIC2 148 #define KVM_CAP_HYPERV_VP_INDEX 149 +#define KVM_CAP_S390_AIS_MIGRATION 150 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/linux-headers/linux/kvm_para.h b/linux-headers/linux/kvm_para.h index 15b24ff6cf..8bcd0aa853 100644 --- a/linux-headers/linux/kvm_para.h +++ b/linux-headers/linux/kvm_para.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __LINUX_KVM_PARA_H #define __LINUX_KVM_PARA_H diff --git a/linux-headers/linux/psci.h b/linux-headers/linux/psci.h index 08d443f7cf..ccd17731c6 100644 --- a/linux-headers/linux/psci.h +++ b/linux-headers/linux/psci.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * ARM Power State and Coordination Interface (PSCI) header * diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h index b43cf0d415..ce78878d12 100644 --- a/linux-headers/linux/userfaultfd.h +++ b/linux-headers/linux/userfaultfd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * include/linux/userfaultfd.h * diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index 4e7ab4c52a..4312e961ff 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * VFIO API definition * diff --git a/linux-headers/linux/vfio_ccw.h b/linux-headers/linux/vfio_ccw.h index 3a565511ab..5bf96c3812 100644 --- a/linux-headers/linux/vfio_ccw.h +++ b/linux-headers/linux/vfio_ccw.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Interfaces for vfio-ccw * diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h index 1e86a3dd0d..e336395d67 100644 --- a/linux-headers/linux/vhost.h +++ b/linux-headers/linux/vhost.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _LINUX_VHOST_H #define _LINUX_VHOST_H /* Userspace interface for in-kernel virtio accelerators. */ diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 89d49cdcb2..96316700dd 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -112,7 +112,7 @@ enum { #define ARM_CPU_VIRQ 2 #define ARM_CPU_VFIQ 3 -#define NB_MMU_MODES 7 +#define NB_MMU_MODES 8 /* ARM-specific extra insn start words: * 1: Conditional execution bits * 2: Partial exception syndrome for data aborts @@ -2226,13 +2226,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, * They have the following different MMU indexes: * User * Privileged - * Execution priority negative (this is like privileged, but the - * MPU HFNMIENA bit means that it may have different access permission - * check results to normal privileged code, so can't share a TLB). 
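 * (Why NB_MMU_MODES grows from 7 to 8 above: the single M-profile "execution
 * priority negative" index is split per privilege level and security state
 * below, so M profile now needs all eight user/priv x normal/negpri x NS/S
 * combinations.)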
+ * User, execution priority negative (ie the MPU HFNMIENA bit may apply) + * Privileged, execution priority negative (ditto) * If the CPU supports the v8M Security Extension then there are also: * Secure User * Secure Privileged - * Secure, execution priority negative + * Secure User, execution priority negative + * Secure Privileged, execution priority negative * * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code * are not quite the same -- different CPU types (most notably M profile @@ -2251,11 +2251,18 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, * The constant names here are patterned after the general style of the names * of the AT/ATS operations. * The values used are carefully arranged to make mmu_idx => EL lookup easy. + * For M profile we arrange them to have a bit for priv, a bit for negpri + * and a bit for secure. */ #define ARM_MMU_IDX_A 0x10 /* A profile */ #define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */ #define ARM_MMU_IDX_M 0x40 /* M profile */ +/* meanings of the bits for M profile mmu idx values */ +#define ARM_MMU_IDX_M_PRIV 0x1 +#define ARM_MMU_IDX_M_NEGPRI 0x2 +#define ARM_MMU_IDX_M_S 0x4 + #define ARM_MMU_IDX_TYPE_MASK (~0x7) #define ARM_MMU_IDX_COREIDX_MASK 0x7 @@ -2269,10 +2276,12 @@ typedef enum ARMMMUIdx { ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A, ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M, ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M, - ARMMMUIdx_MNegPri = 2 | ARM_MMU_IDX_M, - ARMMMUIdx_MSUser = 3 | ARM_MMU_IDX_M, - ARMMMUIdx_MSPriv = 4 | ARM_MMU_IDX_M, - ARMMMUIdx_MSNegPri = 5 | ARM_MMU_IDX_M, + ARMMMUIdx_MUserNegPri = 2 | ARM_MMU_IDX_M, + ARMMMUIdx_MPrivNegPri = 3 | ARM_MMU_IDX_M, + ARMMMUIdx_MSUser = 4 | ARM_MMU_IDX_M, + ARMMMUIdx_MSPriv = 5 | ARM_MMU_IDX_M, + ARMMMUIdx_MSUserNegPri = 6 | ARM_MMU_IDX_M, + ARMMMUIdx_MSPrivNegPri = 7 | ARM_MMU_IDX_M, /* Indexes below here don't have TLBs and are used only for AT system * instructions or for the first stage of an S12 page table walk. */ @@ -2293,10 +2302,12 @@ typedef enum ARMMMUIdxBit { ARMMMUIdxBit_S2NS = 1 << 6, ARMMMUIdxBit_MUser = 1 << 0, ARMMMUIdxBit_MPriv = 1 << 1, - ARMMMUIdxBit_MNegPri = 1 << 2, - ARMMMUIdxBit_MSUser = 1 << 3, - ARMMMUIdxBit_MSPriv = 1 << 4, - ARMMMUIdxBit_MSNegPri = 1 << 5, + ARMMMUIdxBit_MUserNegPri = 1 << 2, + ARMMMUIdxBit_MPrivNegPri = 1 << 3, + ARMMMUIdxBit_MSUser = 1 << 4, + ARMMMUIdxBit_MSPriv = 1 << 5, + ARMMMUIdxBit_MSUserNegPri = 1 << 6, + ARMMMUIdxBit_MSPrivNegPri = 1 << 7, } ARMMMUIdxBit; #define MMU_USER_IDX 0 @@ -2322,31 +2333,43 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) case ARM_MMU_IDX_A: return mmu_idx & 3; case ARM_MMU_IDX_M: - return (mmu_idx == ARMMMUIdx_MUser || mmu_idx == ARMMMUIdx_MSUser) - ? 
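/* Worked example of the new encoding, following directly from the defines
 * above: ARMMMUIdx_MSPrivNegPri == ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV |
 * ARM_MMU_IDX_M_NEGPRI | ARM_MMU_IDX_M_S == 0x40 | 0x1 | 0x2 | 0x4 == 0x47,
 * i.e. core index 7 -- which is why arm_mmu_idx_to_el() just below can
 * collapse to a single AND against ARM_MMU_IDX_M_PRIV.
 */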
0 : 1; + return mmu_idx & ARM_MMU_IDX_M_PRIV; default: g_assert_not_reached(); } } +/* Return the MMU index for a v7M CPU in the specified security and + * privilege state + */ +static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env, + bool secstate, + bool priv) +{ + ARMMMUIdx mmu_idx = ARM_MMU_IDX_M; + + if (priv) { + mmu_idx |= ARM_MMU_IDX_M_PRIV; + } + + if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) { + mmu_idx |= ARM_MMU_IDX_M_NEGPRI; + } + + if (secstate) { + mmu_idx |= ARM_MMU_IDX_M_S; + } + + return mmu_idx; +} + /* Return the MMU index for a v7M CPU in the specified security state */ static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) { - int el = arm_current_el(env); - ARMMMUIdx mmu_idx; + bool priv = arm_current_el(env) != 0; - if (el == 0) { - mmu_idx = secstate ? ARMMMUIdx_MSUser : ARMMMUIdx_MUser; - } else { - mmu_idx = secstate ? ARMMMUIdx_MSPriv : ARMMMUIdx_MPriv; - } - - if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) { - mmu_idx = secstate ? ARMMMUIdx_MSNegPri : ARMMMUIdx_MNegPri; - } - - return mmu_idx; + return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv); } /* Determine the current mmu_idx to use for normal loads/stores */ diff --git a/target/arm/helper.c b/target/arm/helper.c index 91a9300f11..d1395f9b73 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -28,13 +28,13 @@ typedef struct ARMCacheAttrs { static bool get_phys_addr(CPUARMState *env, target_ulong address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, - target_ulong *page_size, uint32_t *fsr, + target_ulong *page_size, ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, - target_ulong *page_size_ptr, uint32_t *fsr, + target_ulong *page_size_ptr, ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); /* Security attributes for an address, as returned by v8m_security_lookup. */ @@ -2160,20 +2160,44 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, hwaddr phys_addr; target_ulong page_size; int prot; - uint32_t fsr; bool ret; uint64_t par64; + bool format64 = false; MemTxAttrs attrs = {}; ARMMMUFaultInfo fi = {}; ARMCacheAttrs cacheattrs = {}; ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, - &prot, &page_size, &fsr, &fi, &cacheattrs); - if (arm_s1_regime_using_lpae_format(env, mmu_idx)) { - /* fsr is a DFSR/IFSR value for the long descriptor - * translation table format, but with WnR always clear. - * Convert it to a 64-bit PAR. + &prot, &page_size, &fi, &cacheattrs); + + if (is_a64(env)) { + format64 = true; + } else if (arm_feature(env, ARM_FEATURE_LPAE)) { + /* + * ATS1Cxx: + * * TTBCR.EAE determines whether the result is returned using the + * 32-bit or the 64-bit PAR format + * * Instructions executed in Hyp mode always use the 64bit format + * + * ATS1S2NSOxx uses the 64bit format if any of the following is true: + * * The Non-secure TTBCR.EAE bit is set to 1 + * * The implementation includes EL2, and the value of HCR.VM is 1 + * + * ATS1Hx always uses the 64bit format (not supported yet). 
*/ + format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); + + if (arm_feature(env, ARM_FEATURE_EL2)) { + if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { + format64 |= env->cp15.hcr_el2 & HCR_VM; + } else { + format64 |= arm_current_el(env) == 2; + } + } + } + + if (format64) { + /* Create a 64-bit PAR */ par64 = (1 << 11); /* LPAE bit always set */ if (!ret) { par64 |= phys_addr & ~0xfffULL; @@ -2183,6 +2207,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ par64 |= cacheattrs.shareability << 7; /* SH */ } else { + uint32_t fsr = arm_fi_to_lfsc(&fi); + par64 |= 1; /* F */ par64 |= (fsr & 0x3f) << 1; /* FS */ /* Note that S2WLK and FSTAGE are always zero, because we don't @@ -2207,6 +2233,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, par64 |= (1 << 9); /* NS */ } } else { + uint32_t fsr = arm_fi_to_sfsc(&fi); + par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | ((fsr & 0xf) << 1) | 1; } @@ -5947,6 +5975,28 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) g_assert_not_reached(); } +uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) +{ + /* The TT instructions can be used by unprivileged code, but in + * user-only emulation we don't have the MPU. + * Luckily since we know we are NonSecure unprivileged (and that in + * turn means that the A flag wasn't specified), all the bits in the + * register must be zero: + * IREGION: 0 because IRVALID is 0 + * IRVALID: 0 because NS + * S: 0 because NS + * NSRW: 0 because NS + * NSR: 0 because NS + * RW: 0 because unpriv and A flag not set + * R: 0 because unpriv and A flag not set + * SRVALID: 0 because NS + * MRVALID: 0 because unpriv and A flag not set + * SREGION: 0 because SRVALID is 0 + * MREGION: 0 because MRVALID is 0 + */ + return 0; +} + void switch_mode(CPUARMState *env, int mode) { ARMCPU *cpu = arm_env_get_cpu(env); @@ -6955,7 +7005,6 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, target_ulong page_size; hwaddr physaddr; int prot; - uint32_t fsr; v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); if (!sattrs.nsc || sattrs.ns) { @@ -6969,7 +7018,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, return false; } if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, - &physaddr, &attrs, &prot, &page_size, &fsr, &fi, NULL)) { + &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { /* the MPU lookup failed */ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); @@ -7856,11 +7905,13 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) case ARMMMUIdx_S1SE1: case ARMMMUIdx_S1NSE0: case ARMMMUIdx_S1NSE1: + case ARMMMUIdx_MPrivNegPri: + case ARMMMUIdx_MUserNegPri: case ARMMMUIdx_MPriv: - case ARMMMUIdx_MNegPri: case ARMMMUIdx_MUser: + case ARMMMUIdx_MSPrivNegPri: + case ARMMMUIdx_MSUserNegPri: case ARMMMUIdx_MSPriv: - case ARMMMUIdx_MSNegPri: case ARMMMUIdx_MSUser: return 1; default: @@ -7883,8 +7934,7 @@ static inline bool regime_translation_disabled(CPUARMState *env, (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { case R_V7M_MPU_CTRL_ENABLE_MASK: /* Enabled, but not for HardFault and NMI */ - return mmu_idx == ARMMMUIdx_MNegPri || - mmu_idx == ARMMMUIdx_MSNegPri; + return mmu_idx & ARM_MMU_IDX_M_NEGPRI; case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: /* Enabled for all cases */ return false; @@ -8016,6 +8066,9 @@ static
inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) case ARMMMUIdx_S1SE0: case ARMMMUIdx_S1NSE0: case ARMMMUIdx_MUser: + case ARMMMUIdx_MSUser: + case ARMMMUIdx_MUserNegPri: + case ARMMMUIdx_MSUserNegPri: return true; default: return false; @@ -8240,7 +8293,6 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, /* Translate a S1 pagetable walk through S2 if needed. */ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, hwaddr addr, MemTxAttrs txattrs, - uint32_t *fsr, ARMMMUFaultInfo *fi) { if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) && @@ -8251,7 +8303,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, int ret; ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, - &txattrs, &s2prot, &s2size, fsr, fi, NULL); + &txattrs, &s2prot, &s2size, fi, NULL); if (ret) { fi->s2addr = addr; fi->stage2 = true; @@ -8271,8 +8323,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, * (but not if it was for a debug access). */ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, - ARMMMUIdx mmu_idx, uint32_t *fsr, - ARMMMUFaultInfo *fi) + ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; @@ -8281,7 +8332,7 @@ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, attrs.secure = is_secure; as = arm_addressspace(cs, attrs); - addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi); + addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); if (fi->s1ptw) { return 0; } @@ -8293,8 +8344,7 @@ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, } static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, - ARMMMUIdx mmu_idx, uint32_t *fsr, - ARMMMUFaultInfo *fi) + ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; @@ -8303,7 +8353,7 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, attrs.secure = is_secure; as = arm_addressspace(cs, attrs); - addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi); + addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); if (fi->s1ptw) { return 0; } @@ -8317,11 +8367,11 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, int *prot, - target_ulong *page_size, uint32_t *fsr, + target_ulong *page_size, ARMMMUFaultInfo *fi) { CPUState *cs = CPU(arm_env_get_cpu(env)); - int code; + int level = 1; uint32_t table; uint32_t desc; int type; @@ -8335,11 +8385,11 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, /* Lookup l1 descriptor. */ if (!get_level1_table_address(env, mmu_idx, &table, address)) { /* Section translation fault if page walk is disabled by PD0 or PD1 */ - code = 5; + fi->type = ARMFault_Translation; goto do_fault; } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), - mmu_idx, fsr, fi); + mmu_idx, fi); type = (desc & 3); domain = (desc >> 5) & 0x0f; if (regime_el(env, mmu_idx) == 1) { @@ -8350,21 +8400,20 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, domain_prot = (dacr >> (domain * 2)) & 3; if (type == 0) { /* Section translation fault. */ - code = 5; + fi->type = ARMFault_Translation; goto do_fault; } + if (type != 2) { + level = 2; + } if (domain_prot == 0 || domain_prot == 2) { - if (type == 2) - code = 9; /* Section domain fault. 
*/ - else - code = 11; /* Page domain fault. */ + fi->type = ARMFault_Domain; goto do_fault; } if (type == 2) { /* 1Mb section. */ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); ap = (desc >> 10) & 3; - code = 13; *page_size = 1024 * 1024; } else { /* Lookup l2 entry. */ @@ -8376,10 +8425,10 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), - mmu_idx, fsr, fi); + mmu_idx, fi); switch (desc & 3) { case 0: /* Page translation fault. */ - code = 7; + fi->type = ARMFault_Translation; goto do_fault; case 1: /* 64k page. */ phys_addr = (desc & 0xffff0000) | (address & 0xffff); @@ -8402,7 +8451,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, /* UNPREDICTABLE in ARMv5; we choose to take a * page translation fault. */ - code = 7; + fi->type = ARMFault_Translation; goto do_fault; } } else { @@ -8415,29 +8464,29 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, /* Never happens, but compiler isn't smart enough to tell. */ abort(); } - code = 15; } *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); *prot |= *prot ? PAGE_EXEC : 0; if (!(*prot & (1 << access_type))) { /* Access permission fault. */ + fi->type = ARMFault_Permission; goto do_fault; } *phys_ptr = phys_addr; return false; do_fault: - *fsr = code | (domain << 4); + fi->domain = domain; + fi->level = level; return true; } static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, - target_ulong *page_size, uint32_t *fsr, - ARMMMUFaultInfo *fi) + target_ulong *page_size, ARMMMUFaultInfo *fi) { CPUState *cs = CPU(arm_env_get_cpu(env)); - int code; + int level = 1; uint32_t table; uint32_t desc; uint32_t xn; @@ -8454,17 +8503,17 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, /* Lookup l1 descriptor. */ if (!get_level1_table_address(env, mmu_idx, &table, address)) { /* Section translation fault if page walk is disabled by PD0 or PD1 */ - code = 5; + fi->type = ARMFault_Translation; goto do_fault; } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), - mmu_idx, fsr, fi); + mmu_idx, fi); type = (desc & 3); if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { /* Section translation fault, or attempt to use the encoding * which is Reserved on implementations without PXN. */ - code = 5; + fi->type = ARMFault_Translation; goto do_fault; } if ((type == 1) || !(desc & (1 << 18))) { @@ -8476,13 +8525,13 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, } else { dacr = env->cp15.dacr_s; } + if (type == 1) { + level = 2; + } domain_prot = (dacr >> (domain * 2)) & 3; if (domain_prot == 0 || domain_prot == 2) { - if (type != 1) { - code = 9; /* Section domain fault. */ - } else { - code = 11; /* Page domain fault. */ - } + /* Section or Page domain fault */ + fi->type = ARMFault_Domain; goto do_fault; } if (type != 1) { @@ -8500,7 +8549,6 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); xn = desc & (1 << 4); pxn = desc & 1; - code = 13; ns = extract32(desc, 19, 1); } else { if (arm_feature(env, ARM_FEATURE_PXN)) { @@ -8510,11 +8558,11 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, /* Lookup l2 entry. 
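 * (For reference, the numeric short-descriptor fault codes being deleted
 * throughout this file map onto the new (fi->type, fi->level) pairs as:
 *     3/6   -> ARMFault_AccessFlag,  level 1/2  (section/page)
 *     5/7   -> ARMFault_Translation, level 1/2
 *     9/11  -> ARMFault_Domain,      level 1/2
 *     13/15 -> ARMFault_Permission,  level 1/2
 * with the domain number carried separately in fi->domain.)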
*/ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), - mmu_idx, fsr, fi); + mmu_idx, fi); ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); switch (desc & 3) { case 0: /* Page translation fault. */ - code = 7; + fi->type = ARMFault_Translation; goto do_fault; case 1: /* 64k page. */ phys_addr = (desc & 0xffff0000) | (address & 0xffff); @@ -8530,7 +8578,6 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, /* Never happens, but compiler isn't smart enough to tell. */ abort(); } - code = 15; } if (domain_prot == 3) { *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; @@ -8538,15 +8585,17 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, if (pxn && !regime_is_user(env, mmu_idx)) { xn = 1; } - if (xn && access_type == MMU_INST_FETCH) + if (xn && access_type == MMU_INST_FETCH) { + fi->type = ARMFault_Permission; goto do_fault; + } if (arm_feature(env, ARM_FEATURE_V6K) && (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { /* The simplified model uses AP[0] as an access control bit. */ if ((ap & 1) == 0) { /* Access flag fault. */ - code = (code == 15) ? 6 : 3; + fi->type = ARMFault_AccessFlag; goto do_fault; } *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); @@ -8558,6 +8607,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, } if (!(*prot & (1 << access_type))) { /* Access permission fault. */ + fi->type = ARMFault_Permission; goto do_fault; } } @@ -8571,19 +8621,11 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, *phys_ptr = phys_addr; return false; do_fault: - *fsr = code | (domain << 4); + fi->domain = domain; + fi->level = level; return true; } -/* Fault type for long-descriptor MMU fault reporting; this corresponds - * to bits [5..2] in the STATUS field in long-format DFSR/IFSR. - */ -typedef enum { - translation_fault = 1, - access_fault = 2, - permission_fault = 3, -} MMUFaultType; - /* * check_s2_mmu_setup * @cpu: ARMCPU @@ -8685,13 +8727,13 @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, - target_ulong *page_size_ptr, uint32_t *fsr, + target_ulong *page_size_ptr, ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) { ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); /* Read an LPAE long-descriptor translation table. 
*/ - MMUFaultType fault_type = translation_fault; + ARMFaultType fault_type = ARMFault_Translation; uint32_t level; uint32_t epd = 0; int32_t t0sz, t1sz; @@ -8801,7 +8843,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, ttbr_select = 1; } else { /* in the gap between the two regions, this is a Translation fault */ - fault_type = translation_fault; + fault_type = ARMFault_Translation; goto do_fault; } @@ -8887,7 +8929,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, ok = check_s2_mmu_setup(cpu, aarch64, startlevel, inputsize, stride); if (!ok) { - fault_type = translation_fault; + fault_type = ARMFault_Translation; goto do_fault; } level = startlevel; @@ -8921,7 +8963,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, descaddr |= (address >> (stride * (4 - level))) & indexmask; descaddr &= ~7ULL; nstable = extract32(tableattrs, 4, 1); - descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi); + descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); if (fi->s1ptw) { goto do_fault; } @@ -8973,7 +9015,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, /* Here descaddr is the final physical address, and attributes * are all in attrs. */ - fault_type = access_fault; + fault_type = ARMFault_AccessFlag; if ((attrs & (1 << 8)) == 0) { /* Access flag */ goto do_fault; @@ -8991,7 +9033,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); } - fault_type = permission_fault; + fault_type = ARMFault_Permission; if (!(*prot & (1 << access_type))) { goto do_fault; } @@ -9023,8 +9065,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, return false; do_fault: - /* Long-descriptor format IFSR/DFSR value */ - *fsr = (1 << 9) | (fault_type << 2) | level; + fi->type = fault_type; + fi->level = level; /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. 
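 * (The deleted inline encoding just above, (1 << 9) | (fault_type << 2) |
 * level, is the long-descriptor FSR layout: bit 9 flags the LPAE format,
 * bits [5:2] hold the fault status and bits [1:0] the failing lookup level.
 * That value is now reconstructed on demand, e.g. via arm_fi_to_lfsc() in
 * do_ats_write() earlier in this file.)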
*/ fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS); return true; @@ -9108,7 +9150,8 @@ static inline bool m_is_system_region(CPUARMState *env, uint32_t address) static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, int *prot, uint32_t *fsr) + hwaddr *phys_ptr, int *prot, + ARMMMUFaultInfo *fi) { ARMCPU *cpu = arm_env_get_cpu(env); int n; @@ -9203,7 +9246,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, if (n == -1) { /* no hits */ if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { /* background fault */ - *fsr = 0; + fi->type = ARMFault_Background; return true; } get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); @@ -9261,7 +9304,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, } } - *fsr = 0x00d; /* Permission fault */ + fi->type = ARMFault_Permission; + fi->level = 1; return !(*prot & (1 << access_type)); } @@ -9344,67 +9388,28 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, } } -static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, MemTxAttrs *txattrs, - int *prot, uint32_t *fsr) +static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *txattrs, + int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion) { + /* Perform a PMSAv8 MPU lookup (without also doing the SAU check + * that a full virt-to-phys translation does). + * mregion is (if not NULL) set to the region number which matched, + * or -1 if no region number is returned (MPU off, address did not + * hit a region, address hit in multiple regions). + */ ARMCPU *cpu = arm_env_get_cpu(env); bool is_user = regime_is_user(env, mmu_idx); uint32_t secure = regime_is_secure(env, mmu_idx); int n; int matchregion = -1; bool hit = false; - V8M_SAttributes sattrs = {}; *phys_ptr = address; *prot = 0; - - if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { - v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); - if (access_type == MMU_INST_FETCH) { - /* Instruction fetches always use the MMU bank and the - * transaction attribute determined by the fetch address, - * regardless of CPU state. This is painful for QEMU - * to handle, because it would mean we need to encode - * into the mmu_idx not just the (user, negpri) information - * for the current security state but also that for the - * other security state, which would balloon the number - * of mmu_idx values needed alarmingly. - * Fortunately we can avoid this because it's not actually - * possible to arbitrarily execute code from memory with - * the wrong security attribute: it will always generate - * an exception of some kind or another, apart from the - * special case of an NS CPU executing an SG instruction - * in S&NSC memory. So we always just fail the translation - * here and sort things out in the exception handler - * (including possibly emulating an SG instruction). - */ - if (sattrs.ns != !secure) { - *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT; - return true; - } - } else { - /* For data accesses we always use the MMU bank indicated - * by the current CPU state, but the security attributes - * might downgrade a secure access to nonsecure. - */ - if (sattrs.ns) { - txattrs->secure = false; - } else if (!secure) { - /* NS access to S memory must fault.
@@ -9344,67 +9388,28 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
     }
 }
 
-static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
-                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
-                                 int *prot, uint32_t *fsr)
+static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                              int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion)
 {
+    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
+     * that a full phys-to-virt translation does).
+     * mregion is (if not NULL) set to the region number which matched,
+     * or -1 if no region number is returned (MPU off, address did not
+     * hit a region, address hit in multiple regions).
+     */
     ARMCPU *cpu = arm_env_get_cpu(env);
     bool is_user = regime_is_user(env, mmu_idx);
     uint32_t secure = regime_is_secure(env, mmu_idx);
     int n;
     int matchregion = -1;
     bool hit = false;
-    V8M_SAttributes sattrs = {};
 
     *phys_ptr = address;
     *prot = 0;
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
-        if (access_type == MMU_INST_FETCH) {
-            /* Instruction fetches always use the MMU bank and the
-             * transaction attribute determined by the fetch address,
-             * regardless of CPU state. This is painful for QEMU
-             * to handle, because it would mean we need to encode
-             * into the mmu_idx not just the (user, negpri) information
-             * for the current security state but also that for the
-             * other security state, which would balloon the number
-             * of mmu_idx values needed alarmingly.
-             * Fortunately we can avoid this because it's not actually
-             * possible to arbitrarily execute code from memory with
-             * the wrong security attribute: it will always generate
-             * an exception of some kind or another, apart from the
-             * special case of an NS CPU executing an SG instruction
-             * in S&NSC memory. So we always just fail the translation
-             * here and sort things out in the exception handler
-             * (including possibly emulating an SG instruction).
-             */
-            if (sattrs.ns != !secure) {
-                *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT;
-                return true;
-            }
-        } else {
-            /* For data accesses we always use the MMU bank indicated
-             * by the current CPU state, but the security attributes
-             * might downgrade a secure access to nonsecure.
-             */
-            if (sattrs.ns) {
-                txattrs->secure = false;
-            } else if (!secure) {
-                /* NS access to S memory must fault.
-                 * Architecturally we should first check whether the
-                 * MPU information for this address indicates that we
-                 * are doing an unaligned access to Device memory, which
-                 * should generate a UsageFault instead. QEMU does not
-                 * currently check for that kind of unaligned access though.
-                 * If we added it we would need to do so as a special case
-                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
-                 */
-                *fsr = M_FAKE_FSR_SFAULT;
-                return true;
-            }
-        }
+    if (mregion) {
+        *mregion = -1;
     }
 
     /* Unlike the ARM ARM pseudocode, we don't need to check whether this
@@ -9442,7 +9447,8 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                     /* Multiple regions match -- always a failure (unlike
                      * PMSAv7 where highest-numbered-region wins)
                      */
-                    *fsr = 0x00d; /* permission fault */
+                    fi->type = ARMFault_Permission;
+                    fi->level = 1;
                     return true;
                 }
 
@@ -9470,7 +9476,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
 
     if (!hit) {
         /* background fault */
-        *fsr = 0;
+        fi->type = ARMFault_Background;
         return true;
     }
 
@@ -9493,15 +9499,88 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
         /* We don't need to look the attribute up in the MAIR0/MAIR1
          * registers because that only tells us about cacheability.
          */
+        if (mregion) {
+            *mregion = matchregion;
+        }
     }
 
-    *fsr = 0x00d; /* Permission fault */
+    fi->type = ARMFault_Permission;
+    fi->level = 1;
     return !(*prot & (1 << access_type));
 }
 
+
+static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                                 int *prot, ARMMMUFaultInfo *fi)
+{
+    uint32_t secure = regime_is_secure(env, mmu_idx);
+    V8M_SAttributes sattrs = {};
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+        if (access_type == MMU_INST_FETCH) {
+            /* Instruction fetches always use the MMU bank and the
+             * transaction attribute determined by the fetch address,
+             * regardless of CPU state. This is painful for QEMU
+             * to handle, because it would mean we need to encode
+             * into the mmu_idx not just the (user, negpri) information
+             * for the current security state but also that for the
+             * other security state, which would balloon the number
+             * of mmu_idx values needed alarmingly.
+             * Fortunately we can avoid this because it's not actually
+             * possible to arbitrarily execute code from memory with
+             * the wrong security attribute: it will always generate
+             * an exception of some kind or another, apart from the
+             * special case of an NS CPU executing an SG instruction
+             * in S&NSC memory. So we always just fail the translation
+             * here and sort things out in the exception handler
+             * (including possibly emulating an SG instruction).
+             */
+            if (sattrs.ns != !secure) {
+                if (sattrs.nsc) {
+                    fi->type = ARMFault_QEMU_NSCExec;
+                } else {
+                    fi->type = ARMFault_QEMU_SFault;
+                }
+                *phys_ptr = address;
+                *prot = 0;
+                return true;
+            }
+        } else {
+            /* For data accesses we always use the MMU bank indicated
+             * by the current CPU state, but the security attributes
+             * might downgrade a secure access to nonsecure.
+             */
+            if (sattrs.ns) {
+                txattrs->secure = false;
+            } else if (!secure) {
+                /* NS access to S memory must fault.
+                 * Architecturally we should first check whether the
+                 * MPU information for this address indicates that we
+                 * are doing an unaligned access to Device memory, which
+                 * should generate a UsageFault instead. QEMU does not
+                 * currently check for that kind of unaligned access though.
+                 * If we added it we would need to do so as a special case
+                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
+                 */
+                fi->type = ARMFault_QEMU_SFault;
+                *phys_ptr = address;
+                *prot = 0;
+                return true;
+            }
+        }
+    }
+
+    return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
+                             txattrs, prot, fi, NULL);
+}
+
 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+                                 hwaddr *phys_ptr, int *prot,
+                                 ARMMMUFaultInfo *fi)
 {
     int n;
     uint32_t mask;
@@ -9530,7 +9609,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
         }
     }
     if (n < 0) {
-        *fsr = 2;
+        fi->type = ARMFault_Background;
         return true;
     }
 
@@ -9542,11 +9621,13 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
     mask = (mask >> (n * 4)) & 0xf;
     switch (mask) {
     case 0:
-        *fsr = 1;
+        fi->type = ARMFault_Permission;
+        fi->level = 1;
         return true;
     case 1:
         if (is_user) {
-            *fsr = 1;
+            fi->type = ARMFault_Permission;
+            fi->level = 1;
             return true;
         }
         *prot = PAGE_READ | PAGE_WRITE;
@@ -9562,7 +9643,8 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
         break;
     case 5:
         if (is_user) {
-            *fsr = 1;
+            fi->type = ARMFault_Permission;
+            fi->level = 1;
             return true;
         }
         *prot = PAGE_READ;
@@ -9572,7 +9654,8 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
         break;
     default:
         /* Bad permission. */
-        *fsr = 1;
+        fi->type = ARMFault_Permission;
+        fi->level = 1;
         return true;
     }
     *prot |= PAGE_EXEC;
@@ -9689,14 +9772,13 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
  * @attrs: set to the memory transaction attributes to use
  * @prot: set to the permissions for the page containing phys_ptr
  * @page_size: set to the size of the page containing phys_ptr
- * @fsr: set to the DFSR/IFSR value on failure
  * @fi: set to fault info if the translation fails
  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  */
 static bool get_phys_addr(CPUARMState *env, target_ulong address,
                           MMUAccessType access_type, ARMMMUIdx mmu_idx,
                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                          target_ulong *page_size, uint32_t *fsr,
+                          target_ulong *page_size,
                           ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
 {
     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
@@ -9711,7 +9793,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
 
             ret = get_phys_addr(env, address, access_type,
                                 stage_1_mmu_idx(mmu_idx), &ipa, attrs,
-                                prot, page_size, fsr, fi, cacheattrs);
+                                prot, page_size, fi, cacheattrs);
 
             /* If S1 fails or S2 is disabled, return early. */
             if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
@@ -9722,7 +9804,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
             /* S1 is done. Now do S2 translation. */
             ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                      phys_ptr, attrs, &s2_prot,
-                                     page_size, fsr, fi,
+                                     page_size, fi,
                                      cacheattrs != NULL ? &cacheattrs2 : NULL);
             fi->s2addr = ipa;
             /* Combine the S1 and S2 perms. */
@@ -9768,15 +9850,15 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
         if (arm_feature(env, ARM_FEATURE_V8)) {
             /* PMSAv8 */
             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
-                                       phys_ptr, attrs, prot, fsr);
+                                       phys_ptr, attrs, prot, fi);
         } else if (arm_feature(env, ARM_FEATURE_V7)) {
             /* PMSAv7 */
             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
-                                       phys_ptr, prot, fsr);
+                                       phys_ptr, prot, fi);
         } else {
             /* Pre-v7 MPU */
             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
-                                       phys_ptr, prot, fsr);
+                                       phys_ptr, prot, fi);
         }
         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                       " mmu_idx %u -> %s (prot %c%c%c)\n",
@@ -9802,14 +9884,15 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
-                                  attrs, prot, page_size, fsr, fi, cacheattrs);
+        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
+                                  phys_ptr, attrs, prot, page_size,
+                                  fi, cacheattrs);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
-        return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
-                                attrs, prot, page_size, fsr, fi);
+        return get_phys_addr_v6(env, address, access_type, mmu_idx,
+                                phys_ptr, attrs, prot, page_size, fi);
     } else {
-        return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
-                                prot, page_size, fsr, fi);
+        return get_phys_addr_v5(env, address, access_type, mmu_idx,
+                                phys_ptr, prot, page_size, fi);
     }
 }
 
@@ -9818,7 +9901,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
  * fsr with ARM DFSR/IFSR fault register format value on failure.
  */
 bool arm_tlb_fill(CPUState *cs, vaddr address,
-                  MMUAccessType access_type, int mmu_idx, uint32_t *fsr,
+                  MMUAccessType access_type, int mmu_idx,
                   ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = ARM_CPU(cs);
@@ -9831,7 +9914,7 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
 
     ret = get_phys_addr(env, address, access_type,
                         core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
-                        &attrs, &prot, &page_size, fsr, fi, NULL);
+                        &attrs, &prot, &page_size, fi, NULL);
     if (!ret) {
         /* Map a single [sub]page. */
         phys_addr &= TARGET_PAGE_MASK;
@@ -9853,14 +9936,13 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     target_ulong page_size;
     int prot;
     bool ret;
-    uint32_t fsr;
    ARMMMUFaultInfo fi = {};
     ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
 
     *attrs = (MemTxAttrs) {};
 
     ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
-                        attrs, &prot, &page_size, &fsr, &fi, NULL);
+                        attrs, &prot, &page_size, &fi, NULL);
 
     if (ret) {
         return -1;
@@ -9953,11 +10035,9 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
 
     switch (reg) {
     case 8: /* MSP */
-        return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
-            env->v7m.other_sp : env->regs[13];
+        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
     case 9: /* PSP */
-        return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
-            env->regs[13] : env->v7m.other_sp;
+        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
     case 16: /* PRIMASK */
         return env->v7m.primask[env->v7m.secure];
     case 17: /* BASEPRI */
@@ -10059,14 +10139,14 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
         }
         break;
     case 8: /* MSP */
-        if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
+        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
         } else {
             env->regs[13] = val;
         }
         break;
     case 9: /* PSP */
-        if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
+        if (v7m_using_psp(env)) {
             env->regs[13] = val;
         } else {
             env->v7m.other_sp = val;
@@ -10093,8 +10173,11 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
          * thread mode; other bits can be updated by any privileged code.
          * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
+         * For v7M, we must just ignore explicit writes to SPSEL in handler
+         * mode; for v8M the write is permitted but will have no effect.
          */
-        if (!arm_v7m_is_handler_mode(env)) {
+        if (arm_feature(env, ARM_FEATURE_V8) ||
+            !arm_v7m_is_handler_mode(env)) {
             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
         }
         env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
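
[The MSP/PSP cases above now funnel through v7m_using_psp() instead of
open-coding the CONTROL.SPSEL test; this matters once v8M allows handler mode
to have SPSEL set. The helper itself is outside these hunks. A hedged sketch of
its expected logic, matching the definition in target/arm/internals.h (treat
the exact wording as an assumption here):

    /* Sketch of the v7m_using_psp() predicate relied on above; the real
     * definition lives in target/arm/internals.h and may differ in detail.
     */
    static inline bool v7m_using_psp(CPUARMState *env)
    {
        /* Handler mode always uses the main stack; in thread mode the
         * CONTROL.SPSEL bit of the current security state selects the stack.
         * In v7M handler mode cannot have SPSEL set, but in v8M it can,
         * which is why both conditions must be checked.
         */
        return !arm_v7m_is_handler_mode(env) &&
            (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK);
    }
]
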
@@ -10107,6 +10190,92 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
     }
 }
 
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+    /* Implement the TT instruction. op is bits [7:6] of the insn. */
+    bool forceunpriv = op & 1;
+    bool alt = op & 2;
+    V8M_SAttributes sattrs = {};
+    uint32_t tt_resp;
+    bool r, rw, nsr, nsrw, mrvalid;
+    int prot;
+    ARMMMUFaultInfo fi = {};
+    MemTxAttrs attrs = {};
+    hwaddr phys_addr;
+    ARMMMUIdx mmu_idx;
+    uint32_t mregion;
+    bool targetpriv;
+    bool targetsec = env->v7m.secure;
+
+    /* Work out what the security state and privilege level we're
+     * interested in is...
+     */
+    if (alt) {
+        targetsec = !targetsec;
+    }
+
+    if (forceunpriv) {
+        targetpriv = false;
+    } else {
+        targetpriv = arm_v7m_is_handler_mode(env) ||
+            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
+    }
+
+    /* ...and then figure out which MMU index this is */
+    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
+
+    /* We know that the MPU and SAU don't care about the access type
+     * for our purposes beyond that we don't want to claim to be
+     * an insn fetch, so we arbitrarily call this a read.
+     */
+
+    /* MPU region info only available for privileged or if
+     * inspecting the other MPU state.
+     */
+    if (arm_current_el(env) != 0 || alt) {
+        /* We can ignore the return value as prot is always set */
+        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
+                          &phys_addr, &attrs, &prot, &fi, &mregion);
+        if (mregion == -1) {
+            mrvalid = false;
+            mregion = 0;
+        } else {
+            mrvalid = true;
+        }
+        r = prot & PAGE_READ;
+        rw = prot & PAGE_WRITE;
+    } else {
+        r = false;
+        rw = false;
+        mrvalid = false;
+        mregion = 0;
+    }
+
+    if (env->v7m.secure) {
+        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+        nsr = sattrs.ns && r;
+        nsrw = sattrs.ns && rw;
+    } else {
+        sattrs.ns = true;
+        nsr = false;
+        nsrw = false;
+    }
+
+    tt_resp = (sattrs.iregion << 24) |
+        (sattrs.irvalid << 23) |
+        ((!sattrs.ns) << 22) |
+        (nsrw << 21) |
+        (nsr << 20) |
+        (rw << 19) |
+        (r << 18) |
+        (sattrs.srvalid << 17) |
+        (mrvalid << 16) |
+        (sattrs.sregion << 8) |
+        mregion;
+
+    return tt_resp;
+}
+
 #endif
 
 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 439d228420..066729e8ad 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -66,6 +66,8 @@ DEF_HELPER_2(v7m_mrs, i32, env, i32)
 DEF_HELPER_2(v7m_bxns, void, env, i32)
 DEF_HELPER_2(v7m_blxns, void, env, i32)
 
+DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
+
 DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
 DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
 DEF_HELPER_2(get_cp_reg, i32, env, ptr)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index d9cc75e4c5..876854d876 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -487,8 +487,40 @@ static inline void arm_clear_exclusive(CPUARMState *env)
     env->exclusive_addr = -1;
 }
 
+/**
+ * ARMFaultType: type of an ARM MMU fault
+ * This corresponds to the v8A pseudocode's Fault enumeration,
+ * with extensions for QEMU internal conditions.
+ */
+typedef enum ARMFaultType {
+    ARMFault_None,
+    ARMFault_AccessFlag,
+    ARMFault_Alignment,
+    ARMFault_Background,
+    ARMFault_Domain,
+    ARMFault_Permission,
+    ARMFault_Translation,
+    ARMFault_AddressSize,
+    ARMFault_SyncExternal,
+    ARMFault_SyncExternalOnWalk,
+    ARMFault_SyncParity,
+    ARMFault_SyncParityOnWalk,
+    ARMFault_AsyncParity,
+    ARMFault_AsyncExternal,
+    ARMFault_Debug,
+    ARMFault_TLBConflict,
+    ARMFault_Lockdown,
+    ARMFault_Exclusive,
+    ARMFault_ICacheMaint,
+    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
+    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
+} ARMFaultType;
+
 /**
  * ARMMMUFaultInfo: Information describing an ARM MMU Fault
+ * @type: Type of fault
+ * @level: Table walk level (for translation, access flag and permission faults)
+ * @domain: Domain of the fault address (for non-LPAE CPUs only)
  * @s2addr: Address that caused a fault at stage 2
  * @stage2: True if we faulted at stage 2
  * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
@@ -496,16 +528,169 @@ static inline void arm_clear_exclusive(CPUARMState *env)
  */
 typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
 struct ARMMMUFaultInfo {
+    ARMFaultType type;
     target_ulong s2addr;
+    int level;
+    int domain;
     bool stage2;
     bool s1ptw;
     bool ea;
 };
 
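
[The tt_resp word assembled in HELPER(v7m_tt) above packs eleven fields into 32
bits. A standalone decoder mirroring that layout may make the bit positions
easier to audit; the struct and its names are illustrative, not QEMU source:

    #include <stdbool.h>
    #include <stdint.h>

    /* Unpacks a TT response word exactly as HELPER(v7m_tt) packs it. */
    struct tt_resp_fields {
        uint8_t mregion;   /* [7:0]   MPU region number */
        uint8_t sregion;   /* [15:8]  SAU region number */
        bool mrvalid;      /* [16]    MPU region number valid */
        bool srvalid;      /* [17]    SAU region number valid */
        bool r, rw;        /* [18], [19]  readable / read-writable */
        bool nsr, nsrw;    /* [20], [21]  same, from Non-secure state */
        bool s;            /* [22]    target address is Secure (!sattrs.ns) */
        bool irvalid;      /* [23]    IDAU region number valid */
        uint8_t iregion;   /* [31:24] IDAU region number */
    };

    static struct tt_resp_fields decode_tt_resp(uint32_t v)
    {
        struct tt_resp_fields t = {
            .mregion = v & 0xff,
            .sregion = (v >> 8) & 0xff,
            .mrvalid = v & (1u << 16),
            .srvalid = v & (1u << 17),
            .r       = v & (1u << 18),
            .rw      = v & (1u << 19),
            .nsr     = v & (1u << 20),
            .nsrw    = v & (1u << 21),
            .s       = v & (1u << 22),
            .irvalid = v & (1u << 23),
            .iregion = (v >> 24) & 0xff,
        };
        return t;
    }

Note that bit 22 is set from !sattrs.ns, i.e. it reports whether the queried
address is Secure.]
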
+/**
+ * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
+ * Compare pseudocode EncodeSDFSC(), though unlike that function
+ * we set up a whole FSR-format code including domain field and
+ * putting the high bit of the FSC into bit 10.
+ */
+static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
+{
+    uint32_t fsc;
+
+    switch (fi->type) {
+    case ARMFault_None:
+        return 0;
+    case ARMFault_AccessFlag:
+        fsc = fi->level == 1 ? 0x3 : 0x6;
+        break;
+    case ARMFault_Alignment:
+        fsc = 0x1;
+        break;
+    case ARMFault_Permission:
+        fsc = fi->level == 1 ? 0xd : 0xf;
+        break;
+    case ARMFault_Domain:
+        fsc = fi->level == 1 ? 0x9 : 0xb;
+        break;
+    case ARMFault_Translation:
+        fsc = fi->level == 1 ? 0x5 : 0x7;
+        break;
+    case ARMFault_SyncExternal:
+        fsc = 0x8 | (fi->ea << 12);
+        break;
+    case ARMFault_SyncExternalOnWalk:
+        fsc = fi->level == 1 ? 0xc : 0xe;
+        fsc |= (fi->ea << 12);
+        break;
+    case ARMFault_SyncParity:
+        fsc = 0x409;
+        break;
+    case ARMFault_SyncParityOnWalk:
+        fsc = fi->level == 1 ? 0x40c : 0x40e;
+        break;
+    case ARMFault_AsyncParity:
+        fsc = 0x408;
+        break;
+    case ARMFault_AsyncExternal:
+        fsc = 0x406 | (fi->ea << 12);
+        break;
+    case ARMFault_Debug:
+        fsc = 0x2;
+        break;
+    case ARMFault_TLBConflict:
+        fsc = 0x400;
+        break;
+    case ARMFault_Lockdown:
+        fsc = 0x404;
+        break;
+    case ARMFault_Exclusive:
+        fsc = 0x405;
+        break;
+    case ARMFault_ICacheMaint:
+        fsc = 0x4;
+        break;
+    case ARMFault_Background:
+        fsc = 0x0;
+        break;
+    case ARMFault_QEMU_NSCExec:
+        fsc = M_FAKE_FSR_NSC_EXEC;
+        break;
+    case ARMFault_QEMU_SFault:
+        fsc = M_FAKE_FSR_SFAULT;
+        break;
+    default:
+        /* Other faults can't occur in a context that requires a
+         * short-format status code.
+         */
+        g_assert_not_reached();
+    }
+
+    fsc |= (fi->domain << 4);
+    return fsc;
+}
+
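
[A worked example of the encoder above, as it might be used from code with
internals.h in scope (a fragment under that assumption, not standalone): a
level-2 domain fault in domain 3 encodes to 0x3b, i.e. FSC 0xb with the domain
number in FSR bits [7:4].

    ARMMMUFaultInfo fi = {
        .type = ARMFault_Domain,
        .level = 2,
        .domain = 3,
    };

    /* 0xb (domain fault, level 2) | (3 << 4) == 0x3b */
    uint32_t fsr = arm_fi_to_sfsc(&fi);
]
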
+/**
+ * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
+ * Compare pseudocode EncodeLDFSC(), though unlike that function
+ * we fill in also the LPAE bit 9 of a DFSR format.
+ */
+static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
+{
+    uint32_t fsc;
+
+    switch (fi->type) {
+    case ARMFault_None:
+        return 0;
+    case ARMFault_AddressSize:
+        fsc = fi->level & 3;
+        break;
+    case ARMFault_AccessFlag:
+        fsc = (fi->level & 3) | (0x2 << 2);
+        break;
+    case ARMFault_Permission:
+        fsc = (fi->level & 3) | (0x3 << 2);
+        break;
+    case ARMFault_Translation:
+        fsc = (fi->level & 3) | (0x1 << 2);
+        break;
+    case ARMFault_SyncExternal:
+        fsc = 0x10 | (fi->ea << 12);
+        break;
+    case ARMFault_SyncExternalOnWalk:
+        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
+        break;
+    case ARMFault_SyncParity:
+        fsc = 0x18;
+        break;
+    case ARMFault_SyncParityOnWalk:
+        fsc = (fi->level & 3) | (0x7 << 2);
+        break;
+    case ARMFault_AsyncParity:
+        fsc = 0x19;
+        break;
+    case ARMFault_AsyncExternal:
+        fsc = 0x11 | (fi->ea << 12);
+        break;
+    case ARMFault_Alignment:
+        fsc = 0x21;
+        break;
+    case ARMFault_Debug:
+        fsc = 0x22;
+        break;
+    case ARMFault_TLBConflict:
+        fsc = 0x30;
+        break;
+    case ARMFault_Lockdown:
+        fsc = 0x34;
+        break;
+    case ARMFault_Exclusive:
+        fsc = 0x35;
+        break;
+    default:
+        /* Other faults can't occur in a context that requires a
+         * long-format status code.
+         */
+        g_assert_not_reached();
+    }
+
+    fsc |= 1 << 9;
+    return fsc;
+}
+
 /* Do a page table walk and add page to TLB if possible */
 bool arm_tlb_fill(CPUState *cpu, vaddr address,
                   MMUAccessType access_type, int mmu_idx,
-                  uint32_t *fsr, ARMMMUFaultInfo *fi);
+                  ARMMMUFaultInfo *fi);
 
 /* Return true if the stage 1 translation regime is using LPAE format page
  * tables */
@@ -544,15 +729,17 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
     case ARMMMUIdx_S1NSE1:
     case ARMMMUIdx_S1E2:
     case ARMMMUIdx_S2NS:
+    case ARMMMUIdx_MPrivNegPri:
+    case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
-    case ARMMMUIdx_MNegPri:
     case ARMMMUIdx_MUser:
         return false;
     case ARMMMUIdx_S1E3:
     case ARMMMUIdx_S1SE0:
     case ARMMMUIdx_S1SE1:
+    case ARMMMUIdx_MSPrivNegPri:
+    case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPriv:
-    case ARMMMUIdx_MSNegPri:
     case ARMMMUIdx_MSUser:
         return true;
     default:
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index a40a84ac24..c2bb4f3a43 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -116,12 +116,13 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
 }
 
 static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
-                          uint32_t fsr, uint32_t fsc, ARMMMUFaultInfo *fi)
+                          int mmu_idx, ARMMMUFaultInfo *fi)
 {
     CPUARMState *env = &cpu->env;
     int target_el;
     bool same_el;
-    uint32_t syn, exc;
+    uint32_t syn, exc, fsr, fsc;
+    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
 
     target_el = exception_target_el(env);
     if (fi->stage2) {
@@ -130,14 +131,21 @@ static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
     }
     same_el = (arm_current_el(env) == target_el);
 
-    if (fsc == 0x3f) {
-        /* Caller doesn't have a long-format fault status code. This
-         * should only happen if this fault will never actually be reported
-         * to an EL that uses a syndrome register. Check that here.
-         * 0x3f is a (currently) reserved FSC code, in case the constructed
-         * syndrome does leak into the guest somehow.
+    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
+        /* LPAE format fault status register : bottom 6 bits are
+         * status code in the same form as needed for syndrome
          */
-        assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
+        fsr = arm_fi_to_lfsc(fi);
+        fsc = extract32(fsr, 0, 6);
+    } else {
+        fsr = arm_fi_to_sfsc(fi);
+        /* Short format FSR : this fault will never actually be reported
+         * to an EL that uses a syndrome register. Use a (currently)
+         * reserved FSR code in case the constructed syndrome does leak
+         * into the guest somehow.
+         */
+        fsc = 0x3f;
     }
 
     if (access_type == MMU_INST_FETCH) {
@@ -168,35 +176,18 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
               int mmu_idx, uintptr_t retaddr)
 {
     bool ret;
-    uint32_t fsr = 0;
     ARMMMUFaultInfo fi = {};
 
-    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
+    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
     if (unlikely(ret)) {
         ARMCPU *cpu = ARM_CPU(cs);
-        uint32_t fsc;
 
         if (retaddr) {
             /* now we have a real cpu fault */
             cpu_restore_state(cs, retaddr);
         }
 
-        if (fsr & (1 << 9)) {
-            /* LPAE format fault status register : bottom 6 bits are
-             * status code in the same form as needed for syndrome
-             */
-            fsc = extract32(fsr, 0, 6);
-        } else {
-            /* Short format FSR : this fault will never actually be reported
-             * to an EL that uses a syndrome register. Use a (currently)
-             * reserved FSR code in case the constructed syndrome does leak
-             * into the guest somehow. deliver_fault will assert that
-             * we don't target an EL using the syndrome.
-             */
-            fsc = 0x3f;
-        }
-
-        deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
+        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
     }
 }
 
@@ -206,27 +197,15 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                  int mmu_idx, uintptr_t retaddr)
 {
     ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    uint32_t fsr, fsc;
     ARMMMUFaultInfo fi = {};
-    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
 
     if (retaddr) {
         /* now we have a real cpu fault */
         cpu_restore_state(cs, retaddr);
     }
 
-    /* the DFSR for an alignment fault depends on whether we're using
-     * the LPAE long descriptor format, or the short descriptor format
-     */
-    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
-        fsr = (1 << 9) | 0x21;
-    } else {
-        fsr = 0x1;
-    }
-    fsc = 0x21;
-
-    deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
+    fi.type = ARMFault_Alignment;
+    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
 }
 
 /* arm_cpu_do_transaction_failed: handle a memory system error response
@@ -240,10 +219,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    MemTxResult response, uintptr_t retaddr)
 {
     ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    uint32_t fsr, fsc;
     ARMMMUFaultInfo fi = {};
-    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
 
     if (retaddr) {
         /* now we have a real cpu fault */
@@ -256,20 +232,8 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
      * Slave error (1); in QEMU we follow that.
      */
     fi.ea = (response != MEMTX_DECODE_ERROR);
-
-    /* The fault status register format depends on whether we're using
-     * the LPAE long descriptor format, or the short descriptor format.
-     */
-    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
-        /* long descriptor form, STATUS 0b010000: synchronous ext abort */
-        fsr = (fi.ea << 12) | (1 << 9) | 0x10;
-    } else {
-        /* short descriptor form, FSR 0b01000 : synchronous ext abort */
-        fsr = (fi.ea << 12) | 0x8;
-    }
-    fsc = 0x10;
-
-    deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
+    fi.type = ARMFault_SyncExternal;
+    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
 }
 
 #endif /* !defined(CONFIG_USER_ONLY) */
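
[With the op_helper.c changes above, callers no longer pre-compute fsr/fsc;
deliver_fault() picks the format itself: long format when the target EL is 2,
the target EL is AArch64, or the stage-1 regime uses LPAE descriptors,
otherwise short format with the reserved 0x3f syndrome code. A self-contained
sketch of that selection, with the QEMU predicates reduced to a parameter
(an assumption for illustration only):

    #include <stdbool.h>
    #include <stdint.h>

    /* lfsc/sfsc stand in for arm_fi_to_lfsc()/arm_fi_to_sfsc() results. */
    static uint32_t choose_fsr(bool lpae_or_aa64_target,
                               uint32_t lfsc, uint32_t sfsc, uint32_t *fsc)
    {
        if (lpae_or_aa64_target) {
            *fsc = lfsc & 0x3f;  /* long-format status feeds the syndrome */
            return lfsc;
        }
        /* Short format: the syndrome must never reach the guest, so a
         * (currently) reserved code is used in case it leaks anyway.
         */
        *fsc = 0x3f;
        return sfsc;
    }

    int main(void)
    {
        uint32_t fsc;
        /* 0x221 is the long-format alignment FSR (0x21 with bit 9 set). */
        uint32_t fsr = choose_fsr(true, 0x221, 0x1, &fsc);
        return (fsr == 0x221 && fsc == 0x21) ? 0 : 1;
    }
]
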
diff --git a/target/arm/translate.c b/target/arm/translate.c
index f120932f44..e15192d5d6 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -159,12 +159,16 @@ static inline int get_a32_user_mem_index(DisasContext *s)
         return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
     case ARMMMUIdx_MUser:
     case ARMMMUIdx_MPriv:
-    case ARMMMUIdx_MNegPri:
         return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
+    case ARMMMUIdx_MUserNegPri:
+    case ARMMMUIdx_MPrivNegPri:
+        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
     case ARMMMUIdx_MSUser:
     case ARMMMUIdx_MSPriv:
-    case ARMMMUIdx_MSNegPri:
         return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
+    case ARMMMUIdx_MSUserNegPri:
+    case ARMMMUIdx_MSPrivNegPri:
+        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
     case ARMMMUIdx_S2NS:
     default:
         g_assert_not_reached();
@@ -9806,7 +9810,7 @@ static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
         if (insn & (1 << 22)) {
             /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
              * - load/store doubleword, load/store exclusive, ldacq/strel,
-             * table branch.
+             * table branch, TT.
              */
             if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
                 arm_dc_feature(s, ARM_FEATURE_V8)) {
@@ -9883,8 +9887,36 @@ static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
             } else if ((insn & (1 << 23)) == 0) {
                 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
                  * - load/store exclusive word
+                 * - TT (v8M only)
                  */
                 if (rs == 15) {
+                    if (!(insn & (1 << 20)) &&
+                        arm_dc_feature(s, ARM_FEATURE_M) &&
+                        arm_dc_feature(s, ARM_FEATURE_V8)) {
+                        /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
+                         *  - TT (v8M only)
+                         */
+                        bool alt = insn & (1 << 7);
+                        TCGv_i32 addr, op, ttresp;
+
+                        if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
+                            /* we UNDEF for these UNPREDICTABLE cases */
+                            goto illegal_op;
+                        }
+
+                        if (alt && !s->v8m_secure) {
+                            goto illegal_op;
+                        }
+
+                        addr = load_reg(s, rn);
+                        op = tcg_const_i32(extract32(insn, 6, 2));
+                        ttresp = tcg_temp_new_i32();
+                        gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
+                        tcg_temp_free_i32(addr);
+                        tcg_temp_free_i32(op);
+                        store_reg(s, rd, ttresp);
+                        break;
+                    }
                     goto illegal_op;
                 }
                 addr = tcg_temp_local_new_i32();