Merge tag 'hw-cpus-20240119' of https://github.com/philmd/qemu into staging

HW core patch queue

. Deprecate unmaintained SH-4 models (Samuel)
. HPET: Convert DPRINTF calls to trace events (Daniel)
. Implement buffered block writes in Intel PFlash (Gerd)
. Ignore ELF loadable segments with zero size (Bin)
. ESP/NCR53C9x: PCI DMA fixes (Mark)
. PIIX: Simplify Xen PCI IRQ routing (Bernhard)
. Restrict CPU 'start-powered-off' property to sysemu (Phil)

. target/alpha: Only build sys_helper.c on system emulation (Phil)
. target/xtensa: Use generic instruction breakpoint API & add test (Max)
. Restrict icount to system emulation (Phil)
. Do not set CPUState TCG-specific flags in non-TCG accels (Phil)
. Cleanup TCG tb_invalidate API (Phil)
. Correct LoongArch/KVM include path (Bibo)
. Do not ignore throttle errors in crypto backends (Phil)

. MAINTAINERS updates (Raphael, Zhao)

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEE+qvnXhKRciHc/Wuy4+MsLN6twN4FAmWqXbkACgkQ4+MsLN6t
# wN6VVBAAkP/Bs2JfQYobPZVV868wceM97KeUJMXP2YWf6dSLpHRCQN5KtuJcACM9
# y3k3R7nMeVJSGmzl/1gF1G9JhjoCLoVLX/ejeBppv4Wq//9sEdggaQfdCwkhWw2o
# IK/gPjTZpimE7Er4hPlxmuhSRuM1MX4duKFRRfuZpE7XY14Y7/Hk12VIG7LooO0x
# 2Sl8CaU0DN7CWmRVDoUkwVx7JBy28UVarRDsgpBim7oKmjjBFnCJkH6B6NJXEiYr
# z1BmIcHa87S09kG1ek+y8aZpG9iPC7nUWjPIQyJGhnfrnBuO7hQHwCLIjHHp5QBR
# BoMr8YQNTI34/M/D8pBfg96LrGDjkQOfwRyRddkMP/jJcNPMAPMNGbfVaIrfij1e
# T+jFF4gQenOvy1XKCY3Uk/a11P3tIRFBEeOlzzQg4Aje9W2MhUNwK2HTlRfBbrRr
# V30R764FDmHlsyOu6/E3jqp4GVCgryF1bglPOBjVEU5uytbQTP8jshIpGVnxBbF+
# OpFwtsoDbsousNKVcO5+B0mlHcB9Ru9h11M5/YD/jfLMk95Ga90JGdgYpqQ5tO5Y
# aqQhKfCKbfgKuKhysxpsdWAwHZzVrlSf+UrObF0rl2lMXXfcppjCqNaw4QJ0oedc
# DNBxTPcCE2vWhUzP3A60VH7jLh4nLaqSTrxxQKkbx+Je1ERGrxs=
# =KmQh
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 19 Jan 2024 11:32:09 GMT
# gpg:                using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [full]
# Primary key fingerprint: FAAB E75E 1291 7221 DCFD  6BB2 E3E3 2C2C DEAD C0DE

* tag 'hw-cpus-20240119' of https://github.com/philmd/qemu: (36 commits)
  configure: Add linux header compile support for LoongArch
  MAINTAINERS: Update hw/core/cpu.c entry
  MAINTAINERS: Update Raphael Norwitz email
  hw/elf_ops: Ignore loadable segments with zero size
  hw/scsi/esp-pci: set DMA_STAT_BCMBLT when BLAST command issued
  hw/scsi/esp-pci: synchronise setting of DMA_STAT_DONE with ESP completion interrupt
  hw/scsi/esp-pci: generate PCI interrupt from separate ESP and PCI sources
  hw/scsi/esp-pci: use correct address register for PCI DMA transfers
  target/riscv: Rename tcg_cpu_FOO() to include 'riscv'
  target/i386: Rename tcg_cpu_FOO() to include 'x86'
  hw/s390x: Rename cpu_class_init() to include 'sclp'
  hw/core/cpu: Rename cpu_class_init() to include 'common'
  accel: Rename accel_init_ops_interfaces() to include 'system'
  cpus: Restrict 'start-powered-off' property to system emulation
  system/watchpoint: Move TCG specific code to accel/tcg/
  system/replay: Restrict icount to system emulation
  hw/pflash: implement update buffer for block writes
  hw/pflash: use ldn_{be,le}_p and stn_{be,le}_p
  hw/pflash: refactor pflash_data_write()
  hw/i386/pc_piix: Make piix_intx_routing_notifier_xen() more device independent
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2024-01-19 11:39:38 +00:00
commit 3f2a357b95
50 changed files with 603 additions and 520 deletions

@@ -1867,7 +1867,8 @@ M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
 R: Philippe Mathieu-Daudé <philmd@linaro.org>
 R: Yanan Wang <wangyanan55@huawei.com>
 S: Supported
-F: hw/core/cpu.c
+F: hw/core/cpu-common.c
+F: hw/core/cpu-sysemu.c
 F: hw/core/machine-qmp-cmds.c
 F: hw/core/machine.c
 F: hw/core/machine-smp.c
@@ -2555,7 +2556,7 @@ F: include/hw/virtio/virtio-gpu.h
 F: docs/system/devices/virtio-gpu.rst

 vhost-user-blk
-M: Raphael Norwitz <raphael.norwitz@nutanix.com>
+M: Raphael Norwitz <raphael.s.norwitz@gmail.com>
 S: Maintained
 F: contrib/vhost-user-blk/
 F: contrib/vhost-user-scsi/

@@ -62,7 +62,7 @@ void accel_setup_post(MachineState *ms)
 }

 /* initialize the arch-independent accel operation interfaces */
-void accel_init_ops_interfaces(AccelClass *ac)
+void accel_system_init_ops_interfaces(AccelClass *ac)
 {
     const char *ac_name;
     char *ops_name;

@@ -10,6 +10,6 @@
 #ifndef ACCEL_SYSTEM_H
 #define ACCEL_SYSTEM_H

-void accel_init_ops_interfaces(AccelClass *ac);
+void accel_system_init_ops_interfaces(AccelClass *ac);

 #endif /* ACCEL_SYSTEM_H */

@@ -104,7 +104,7 @@ static void accel_init_cpu_interfaces(AccelClass *ac)
 void accel_init_interfaces(AccelClass *ac)
 {
 #ifndef CONFIG_USER_ONLY
-    accel_init_ops_interfaces(ac);
+    accel_system_init_ops_interfaces(ac);
 #endif /* !CONFIG_USER_ONLY */

     accel_init_cpu_interfaces(ac);

@@ -27,7 +27,6 @@ static void *dummy_cpu_thread_fn(void *arg)
     bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->neg.can_do_io = true;
     current_cpu = cpu;

 #ifndef _WIN32

@@ -428,7 +428,6 @@ static void *hvf_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);

     cpu->thread_id = qemu_get_thread_id();
-    cpu->neg.can_do_io = true;
     current_cpu = cpu;

     hvf_init_vcpu(cpu);

@@ -36,7 +36,6 @@ static void *kvm_vcpu_thread_fn(void *arg)
     bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->neg.can_do_io = true;
     current_cpu = cpu;

     r = kvm_init_vcpu(cpu, &error_fatal);

@ -49,21 +49,19 @@ static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */ /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10 #define MAX_ICOUNT_SHIFT 10
/* /* Do not count executed instructions */
* 0 = Do not count executed instructions. ICountMode use_icount = ICOUNT_DISABLED;
* 1 = Fixed conversion of insn to ns via "shift" option
* 2 = Runtime adaptive algorithm to compute shift
*/
int use_icount;
static void icount_enable_precise(void) static void icount_enable_precise(void)
{ {
use_icount = 1; /* Fixed conversion of insn to ns via "shift" option */
use_icount = ICOUNT_PRECISE;
} }
static void icount_enable_adaptive(void) static void icount_enable_adaptive(void)
{ {
use_icount = 2; /* Runtime adaptive algorithm to compute shift */
use_icount = ICOUNT_ADAPTATIVE;
} }
/* /*
@ -256,7 +254,7 @@ static void icount_warp_rt(void)
int64_t warp_delta; int64_t warp_delta;
warp_delta = clock - timers_state.vm_clock_warp_start; warp_delta = clock - timers_state.vm_clock_warp_start;
if (icount_enabled() == 2) { if (icount_enabled() == ICOUNT_ADAPTATIVE) {
/* /*
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far
* ahead of real time (it might already be ahead so careful not * ahead of real time (it might already be ahead so careful not
@ -419,7 +417,7 @@ void icount_account_warp_timer(void)
icount_warp_rt(); icount_warp_rt();
} }
void icount_configure(QemuOpts *opts, Error **errp) bool icount_configure(QemuOpts *opts, Error **errp)
{ {
const char *option = qemu_opt_get(opts, "shift"); const char *option = qemu_opt_get(opts, "shift");
bool sleep = qemu_opt_get_bool(opts, "sleep", true); bool sleep = qemu_opt_get_bool(opts, "sleep", true);
@ -429,27 +427,28 @@ void icount_configure(QemuOpts *opts, Error **errp)
if (!option) { if (!option) {
if (qemu_opt_get(opts, "align") != NULL) { if (qemu_opt_get(opts, "align") != NULL) {
error_setg(errp, "Please specify shift option when using align"); error_setg(errp, "Please specify shift option when using align");
return false;
} }
return; return true;
} }
if (align && !sleep) { if (align && !sleep) {
error_setg(errp, "align=on and sleep=off are incompatible"); error_setg(errp, "align=on and sleep=off are incompatible");
return; return false;
} }
if (strcmp(option, "auto") != 0) { if (strcmp(option, "auto") != 0) {
if (qemu_strtol(option, NULL, 0, &time_shift) < 0 if (qemu_strtol(option, NULL, 0, &time_shift) < 0
|| time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) { || time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
error_setg(errp, "icount: Invalid shift value"); error_setg(errp, "icount: Invalid shift value");
return; return false;
} }
} else if (icount_align_option) { } else if (icount_align_option) {
error_setg(errp, "shift=auto and align=on are incompatible"); error_setg(errp, "shift=auto and align=on are incompatible");
return; return false;
} else if (!icount_sleep) { } else if (!icount_sleep) {
error_setg(errp, "shift=auto and sleep=off are incompatible"); error_setg(errp, "shift=auto and sleep=off are incompatible");
return; return false;
} }
icount_sleep = sleep; icount_sleep = sleep;
@ -463,7 +462,7 @@ void icount_configure(QemuOpts *opts, Error **errp)
if (time_shift >= 0) { if (time_shift >= 0) {
timers_state.icount_time_shift = time_shift; timers_state.icount_time_shift = time_shift;
icount_enable_precise(); icount_enable_precise();
return; return true;
} }
icount_enable_adaptive(); icount_enable_adaptive();
@ -491,11 +490,14 @@ void icount_configure(QemuOpts *opts, Error **errp)
timer_mod(timers_state.icount_vm_timer, timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10); NANOSECONDS_PER_SECOND / 10);
return true;
} }
void icount_notify_exit(void) void icount_notify_exit(void)
{ {
if (icount_enabled() && current_cpu) { assert(icount_enabled());
if (current_cpu) {
qemu_cpu_kick(current_cpu); qemu_cpu_kick(current_cpu);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL); qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
} }

@@ -24,6 +24,7 @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)

 specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
   'cputlb.c',
+  'watchpoint.c',
 ))

 system_ss.add(when: ['CONFIG_TCG'], if_true: files(

@@ -1021,7 +1021,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
  * Called with mmap_lock held for user-mode emulation
  * NOTE: this function must not be called while a TB is running.
  */
-void tb_invalidate_phys_page(tb_page_addr_t addr)
+static void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
     tb_page_addr_t start, last;

@@ -1160,28 +1160,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 #endif
 }

-/*
- * Invalidate all TBs which intersect with the target physical
- * address page @addr.
- */
-void tb_invalidate_phys_page(tb_page_addr_t addr)
-{
-    struct page_collection *pages;
-    tb_page_addr_t start, last;
-    PageDesc *p;
-
-    p = page_find(addr >> TARGET_PAGE_BITS);
-    if (p == NULL) {
-        return;
-    }
-
-    start = addr & TARGET_PAGE_MASK;
-    last = addr | ~TARGET_PAGE_MASK;
-    pages = page_collection_lock(start, last);
-    tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
-    page_collection_unlock(pages);
-}
-
 /*
  * Invalidate all TBs which intersect with the target physical address range
  * [start;last]. NOTE: start and end may refer to *different* physical pages.

accel/tcg/watchpoint.c (new file)

@@ -0,0 +1,143 @@
/*
* CPU watchpoints
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "hw/core/tcg-cpu-ops.h"
#include "hw/core/cpu.h"
/*
* Return true if this watchpoint address matches the specified
* access (ie the address range covered by the watchpoint overlaps
* partially or completely with the address range covered by the
* access).
*/
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
vaddr addr, vaddr len)
{
/*
* We know the lengths are non-zero, but a little caution is
* required to avoid errors in the case where the range ends
* exactly at the top of the address space and so addr + len
* wraps round to zero.
*/
vaddr wpend = wp->vaddr + wp->len - 1;
vaddr addrend = addr + len - 1;
return !(addr > wpend || wp->vaddr > addrend);
}
/* Return flags for watchpoints that match addr + prot. */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
CPUWatchpoint *wp;
int ret = 0;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (watchpoint_address_matches(wp, addr, len)) {
ret |= wp->flags;
}
}
return ret;
}
/* Generate a debug exception if a watchpoint has been hit. */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
assert(tcg_enabled());
if (cpu->watchpoint_hit) {
/*
* We re-entered the check after replacing the TB.
* Now raise the debug interrupt so that it will
* trigger after the current instruction.
*/
bql_lock();
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
bql_unlock();
return;
}
if (cc->tcg_ops->adjust_watchpoint_address) {
/* this is currently used only by ARM BE32 */
addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
}
assert((flags & ~BP_MEM_ACCESS) == 0);
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
int hit_flags = wp->flags & flags;
if (hit_flags && watchpoint_address_matches(wp, addr, len)) {
if (replay_running_debug()) {
/*
* replay_breakpoint reads icount.
* Force recompile to succeed, because icount may
* be read only at the end of the block.
*/
if (!cpu->neg.can_do_io) {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
cpu_loop_exit_restore(cpu, ra);
}
/*
* Don't process the watchpoints when we are
* in a reverse debugging operation.
*/
replay_breakpoint();
return;
}
wp->flags |= hit_flags << BP_HIT_SHIFT;
wp->hitaddr = MAX(addr, wp->vaddr);
wp->hitattrs = attrs;
if (wp->flags & BP_CPU
&& cc->tcg_ops->debug_check_watchpoint
&& !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
wp->flags &= ~BP_WATCHPOINT_HIT;
continue;
}
cpu->watchpoint_hit = wp;
mmap_lock();
/* This call also restores vCPU state */
tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
cpu_loop_exit(cpu);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
} else {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}

@@ -398,6 +398,7 @@ static void cryptodev_backend_set_ops(Object *obj, Visitor *v,
 static void
 cryptodev_backend_complete(UserCreatable *uc, Error **errp)
 {
+    ERRP_GUARD();
     CryptoDevBackend *backend = CRYPTODEV_BACKEND(uc);
     CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(uc);
     uint32_t services;
@@ -406,11 +407,20 @@ cryptodev_backend_complete(UserCreatable *uc, Error **errp)
     QTAILQ_INIT(&backend->opinfos);
     value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg;
     cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp);
+    if (*errp) {
+        return;
+    }
     value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg;
     cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp);
+    if (*errp) {
+        return;
+    }

     if (bc->init) {
         bc->init(backend, errp);
+        if (*errp) {
+            return;
+        }
     }

     services = backend->conf.crypto_services;

configure

@@ -445,6 +445,7 @@ case "$cpu" in
   loongarch*)
     cpu=loongarch64
     host_arch=loongarch64
+    linux_arch=loongarch
     ;;
   mips64*)

@ -204,6 +204,7 @@ static Property cpu_common_props[] = {
DEFINE_PROP_END_OF_LIST(), DEFINE_PROP_END_OF_LIST(),
}; };
#ifndef CONFIG_USER_ONLY
static bool cpu_get_start_powered_off(Object *obj, Error **errp) static bool cpu_get_start_powered_off(Object *obj, Error **errp)
{ {
CPUState *cpu = CPU(obj); CPUState *cpu = CPU(obj);
@ -215,12 +216,13 @@ static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
CPUState *cpu = CPU(obj); CPUState *cpu = CPU(obj);
cpu->start_powered_off = value; cpu->start_powered_off = value;
} }
#endif
void cpu_class_init_props(DeviceClass *dc) void cpu_class_init_props(DeviceClass *dc)
{ {
#ifndef CONFIG_USER_ONLY
ObjectClass *oc = OBJECT_CLASS(dc); ObjectClass *oc = OBJECT_CLASS(dc);
device_class_set_props(dc, cpu_common_props);
/* /*
* We can't use DEFINE_PROP_BOOL in the Property array for this * We can't use DEFINE_PROP_BOOL in the Property array for this
* property, because we want this to be settable after realize. * property, because we want this to be settable after realize.
@ -228,6 +230,9 @@ void cpu_class_init_props(DeviceClass *dc)
object_class_property_add_bool(oc, "start-powered-off", object_class_property_add_bool(oc, "start-powered-off",
cpu_get_start_powered_off, cpu_get_start_powered_off,
cpu_set_start_powered_off); cpu_set_start_powered_off);
#endif
device_class_set_props(dc, cpu_common_props);
} }
void cpu_exec_initfn(CPUState *cpu) void cpu_exec_initfn(CPUState *cpu)
@ -314,35 +319,6 @@ void list_cpus(void)
cpu_list(); cpu_list();
} }
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
mmap_lock();
tb_invalidate_phys_page(addr);
mmap_unlock();
}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
ram_addr_t ram_addr;
MemoryRegion *mr;
hwaddr l = 1;
if (!tcg_enabled()) {
return;
}
RCU_READ_LOCK_GUARD();
mr = address_space_translate(as, addr, &addr, &l, false, attrs);
if (!(memory_region_is_ram(mr)
|| memory_region_is_romd(mr))) {
return;
}
ram_addr = memory_region_get_ram_addr(mr) + addr;
tb_invalidate_phys_page(ram_addr);
}
#endif
/* enable or disable single step mode. EXCP_DEBUG is returned by the /* enable or disable single step mode. EXCP_DEBUG is returned by the
CPU loop after each instruction */ CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled) void cpu_single_step(CPUState *cpu, int enabled)

@@ -269,6 +269,11 @@ Nios II ``10m50-ghrd`` and ``nios2-generic-nommu`` machines (since 8.2)

 The Nios II architecture is orphan.

+``shix`` (since 9.0)
+''''''''''''''''''''
+
+The machine is no longer in existence and has been long unmaintained
+in QEMU. This also holds for the TC51828 16MiB flash that it uses.

 Backend options
 ---------------

@ -80,16 +80,39 @@ struct PFlashCFI01 {
uint16_t ident3; uint16_t ident3;
uint8_t cfi_table[0x52]; uint8_t cfi_table[0x52];
uint64_t counter; uint64_t counter;
unsigned int writeblock_size; uint32_t writeblock_size;
MemoryRegion mem; MemoryRegion mem;
char *name; char *name;
void *storage; void *storage;
VMChangeStateEntry *vmstate; VMChangeStateEntry *vmstate;
bool old_multiple_chip_handling; bool old_multiple_chip_handling;
/* block update buffer */
unsigned char *blk_bytes;
uint32_t blk_offset;
}; };
static int pflash_post_load(void *opaque, int version_id); static int pflash_post_load(void *opaque, int version_id);
static bool pflash_blk_write_state_needed(void *opaque)
{
PFlashCFI01 *pfl = opaque;
return (pfl->blk_offset != -1);
}
static const VMStateDescription vmstate_pflash_blk_write = {
.name = "pflash_cfi01_blk_write",
.version_id = 1,
.minimum_version_id = 1,
.needed = pflash_blk_write_state_needed,
.fields = (const VMStateField[]) {
VMSTATE_VBUFFER_UINT32(blk_bytes, PFlashCFI01, 0, NULL, writeblock_size),
VMSTATE_UINT32(blk_offset, PFlashCFI01),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_pflash = { static const VMStateDescription vmstate_pflash = {
.name = "pflash_cfi01", .name = "pflash_cfi01",
.version_id = 1, .version_id = 1,
@ -101,6 +124,10 @@ static const VMStateDescription vmstate_pflash = {
VMSTATE_UINT8(status, PFlashCFI01), VMSTATE_UINT8(status, PFlashCFI01),
VMSTATE_UINT64(counter, PFlashCFI01), VMSTATE_UINT64(counter, PFlashCFI01),
VMSTATE_END_OF_LIST() VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {
&vmstate_pflash_blk_write,
NULL
} }
}; };
@ -225,34 +252,10 @@ static uint32_t pflash_data_read(PFlashCFI01 *pfl, hwaddr offset,
uint32_t ret; uint32_t ret;
p = pfl->storage; p = pfl->storage;
switch (width) { if (be) {
case 1: ret = ldn_be_p(p + offset, width);
ret = p[offset]; } else {
break; ret = ldn_le_p(p + offset, width);
case 2:
if (be) {
ret = p[offset] << 8;
ret |= p[offset + 1];
} else {
ret = p[offset];
ret |= p[offset + 1] << 8;
}
break;
case 4:
if (be) {
ret = p[offset] << 24;
ret |= p[offset + 1] << 16;
ret |= p[offset + 2] << 8;
ret |= p[offset + 3];
} else {
ret = p[offset];
ret |= p[offset + 1] << 8;
ret |= p[offset + 2] << 16;
ret |= p[offset + 3] << 24;
}
break;
default:
abort();
} }
trace_pflash_data_read(pfl->name, offset, width, ret); trace_pflash_data_read(pfl->name, offset, width, ret);
return ret; return ret;
@ -400,40 +403,61 @@ static void pflash_update(PFlashCFI01 *pfl, int offset,
} }
} }
/* copy current flash content to block update buffer */
static void pflash_blk_write_start(PFlashCFI01 *pfl, hwaddr offset)
{
hwaddr mask = ~(pfl->writeblock_size - 1);
trace_pflash_write_block_start(pfl->name, pfl->counter);
pfl->blk_offset = offset & mask;
memcpy(pfl->blk_bytes, pfl->storage + pfl->blk_offset,
pfl->writeblock_size);
}
/* commit block update buffer changes */
static void pflash_blk_write_flush(PFlashCFI01 *pfl)
{
g_assert(pfl->blk_offset != -1);
trace_pflash_write_block_flush(pfl->name);
memcpy(pfl->storage + pfl->blk_offset, pfl->blk_bytes,
pfl->writeblock_size);
pflash_update(pfl, pfl->blk_offset, pfl->writeblock_size);
pfl->blk_offset = -1;
}
/* discard block update buffer changes */
static void pflash_blk_write_abort(PFlashCFI01 *pfl)
{
trace_pflash_write_block_abort(pfl->name);
pfl->blk_offset = -1;
}
static inline void pflash_data_write(PFlashCFI01 *pfl, hwaddr offset, static inline void pflash_data_write(PFlashCFI01 *pfl, hwaddr offset,
uint32_t value, int width, int be) uint32_t value, int width, int be)
{ {
uint8_t *p = pfl->storage; uint8_t *p;
trace_pflash_data_write(pfl->name, offset, width, value, pfl->counter); if (pfl->blk_offset != -1) {
switch (width) { /* block write: redirect writes to block update buffer */
case 1: if ((offset < pfl->blk_offset) ||
p[offset] = value; (offset + width > pfl->blk_offset + pfl->writeblock_size)) {
break; pfl->status |= 0x10; /* Programming error */
case 2: return;
if (be) {
p[offset] = value >> 8;
p[offset + 1] = value;
} else {
p[offset] = value;
p[offset + 1] = value >> 8;
} }
break; trace_pflash_data_write_block(pfl->name, offset, width, value,
case 4: pfl->counter);
if (be) { p = pfl->blk_bytes + (offset - pfl->blk_offset);
p[offset] = value >> 24; } else {
p[offset + 1] = value >> 16; /* write directly to storage */
p[offset + 2] = value >> 8; trace_pflash_data_write(pfl->name, offset, width, value);
p[offset + 3] = value; p = pfl->storage + offset;
} else {
p[offset] = value;
p[offset + 1] = value >> 8;
p[offset + 2] = value >> 16;
p[offset + 3] = value >> 24;
}
break;
} }
if (be) {
stn_be_p(p, width, value);
} else {
stn_le_p(p, width, value);
}
} }
static void pflash_write(PFlashCFI01 *pfl, hwaddr offset, static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
@ -548,9 +572,9 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
} else { } else {
value = extract32(value, 0, pfl->bank_width * 8); value = extract32(value, 0, pfl->bank_width * 8);
} }
trace_pflash_write_block(pfl->name, value);
pfl->counter = value; pfl->counter = value;
pfl->wcycle++; pfl->wcycle++;
pflash_blk_write_start(pfl, offset);
break; break;
case 0x60: case 0x60:
if (cmd == 0xd0) { if (cmd == 0xd0) {
@ -581,12 +605,7 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
switch (pfl->cmd) { switch (pfl->cmd) {
case 0xe8: /* Block write */ case 0xe8: /* Block write */
/* FIXME check @offset, @width */ /* FIXME check @offset, @width */
if (!pfl->ro) { if (!pfl->ro && (pfl->blk_offset != -1)) {
/*
* FIXME writing straight to memory is *wrong*. We
* should write to a buffer, and flush it to memory
* only on confirm command (see below).
*/
pflash_data_write(pfl, offset, value, width, be); pflash_data_write(pfl, offset, value, width, be);
} else { } else {
pfl->status |= 0x10; /* Programming error */ pfl->status |= 0x10; /* Programming error */
@ -595,18 +614,8 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
pfl->status |= 0x80; pfl->status |= 0x80;
if (!pfl->counter) { if (!pfl->counter) {
hwaddr mask = pfl->writeblock_size - 1;
mask = ~mask;
trace_pflash_write(pfl->name, "block write finished"); trace_pflash_write(pfl->name, "block write finished");
pfl->wcycle++; pfl->wcycle++;
if (!pfl->ro) {
/* Flush the entire write buffer onto backing storage. */
/* FIXME premature! */
pflash_update(pfl, offset & mask, pfl->writeblock_size);
} else {
pfl->status |= 0x10; /* Programming error */
}
} }
pfl->counter--; pfl->counter--;
@ -618,20 +627,17 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
case 3: /* Confirm mode */ case 3: /* Confirm mode */
switch (pfl->cmd) { switch (pfl->cmd) {
case 0xe8: /* Block write */ case 0xe8: /* Block write */
if (cmd == 0xd0) { if ((cmd == 0xd0) && !(pfl->status & 0x10)) {
/* FIXME this is where we should write out the buffer */ pflash_blk_write_flush(pfl);
pfl->wcycle = 0; pfl->wcycle = 0;
pfl->status |= 0x80; pfl->status |= 0x80;
} else { } else {
qemu_log_mask(LOG_UNIMP, pflash_blk_write_abort(pfl);
"%s: Aborting write to buffer not implemented,"
" the data is already written to storage!\n"
"Flash device reset into READ mode.\n",
__func__);
goto mode_read_array; goto mode_read_array;
} }
break; break;
default: default:
pflash_blk_write_abort(pfl);
goto error_flash; goto error_flash;
} }
break; break;
@ -865,6 +871,9 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
pfl->cmd = 0x00; pfl->cmd = 0x00;
pfl->status = 0x80; /* WSM ready */ pfl->status = 0x80; /* WSM ready */
pflash_cfi01_fill_cfi_table(pfl); pflash_cfi01_fill_cfi_table(pfl);
pfl->blk_bytes = g_malloc(pfl->writeblock_size);
pfl->blk_offset = -1;
} }
static void pflash_cfi01_system_reset(DeviceState *dev) static void pflash_cfi01_system_reset(DeviceState *dev)
@ -884,6 +893,8 @@ static void pflash_cfi01_system_reset(DeviceState *dev)
* This model deliberately ignores this delay. * This model deliberately ignores this delay.
*/ */
pfl->status = 0x80; pfl->status = 0x80;
pfl->blk_offset = -1;
} }
static Property pflash_cfi01_properties[] = { static Property pflash_cfi01_properties[] = {

@@ -546,7 +546,7 @@ static void pflash_write(void *opaque, hwaddr offset, uint64_t value,
             }
             goto reset_flash;
         }
-        trace_pflash_data_write(pfl->name, offset, width, value, 0);
+        trace_pflash_data_write(pfl->name, offset, width, value);
         if (!pfl->ro) {
             p = (uint8_t *)pfl->storage + offset;
             if (pfl->be) {

@@ -202,6 +202,7 @@ static sh7750_io_device tc58128 = {

 int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2)
 {
+    warn_report_once("The TC58128 flash device is deprecated");
     init_dev(&tc58128_devs[0], zone1);
     init_dev(&tc58128_devs[1], zone2);
     return sh7750_register_io_device(s, &tc58128);

@@ -12,7 +12,8 @@ fdctrl_tc_pulse(int level) "TC pulse: %u"
 pflash_chip_erase_invalid(const char *name, uint64_t offset) "%s: chip erase: invalid address 0x%" PRIx64
 pflash_chip_erase_start(const char *name) "%s: start chip erase"
 pflash_data_read(const char *name, uint64_t offset, unsigned size, uint32_t value) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x"
-pflash_data_write(const char *name, uint64_t offset, unsigned size, uint32_t value, uint64_t counter) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x counter:0x%016"PRIx64
+pflash_data_write(const char *name, uint64_t offset, unsigned size, uint32_t value) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x"
+pflash_data_write_block(const char *name, uint64_t offset, unsigned size, uint32_t value, uint64_t counter) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x counter:0x%016"PRIx64
 pflash_device_id(const char *name, uint16_t id) "%s: read device ID: 0x%04x"
 pflash_device_info(const char *name, uint64_t offset) "%s: read device information offset:0x%04" PRIx64
 pflash_erase_complete(const char *name) "%s: sector erase complete"
@@ -32,7 +33,9 @@ pflash_unlock0_failed(const char *name, uint64_t offset, uint8_t cmd, uint16_t a
 pflash_unlock1_failed(const char *name, uint64_t offset, uint8_t cmd) "%s: unlock0 failed 0x%" PRIx64 " 0x%02x"
 pflash_unsupported_device_configuration(const char *name, uint8_t width, uint8_t max) "%s: unsupported device configuration: device_width:%d max_device_width:%d"
 pflash_write(const char *name, const char *str) "%s: %s"
-pflash_write_block(const char *name, uint32_t value) "%s: block write: bytes:0x%x"
+pflash_write_block_start(const char *name, uint32_t value) "%s: block write start: bytes:0x%x"
+pflash_write_block_flush(const char *name) "%s: block write flush"
+pflash_write_block_abort(const char *name) "%s: block write abort"
 pflash_write_block_erase(const char *name, uint64_t offset, uint64_t len) "%s: block erase offset:0x%" PRIx64 " bytes:0x%" PRIx64
 pflash_write_failed(const char *name, uint64_t offset, uint8_t cmd) "%s: command failed 0x%" PRIx64 " 0x%02x"
 pflash_write_invalid(const char *name, uint8_t cmd) "%s: invalid write for command 0x%02x"

@@ -273,7 +273,7 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
     return cpu->cpu_index;
 }

-static void cpu_class_init(ObjectClass *klass, void *data)
+static void cpu_common_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -304,7 +304,7 @@ static const TypeInfo cpu_type_info = {
     .instance_finalize = cpu_common_finalize,
     .abstract = true,
     .class_size = sizeof(CPUClass),
-    .class_init = cpu_class_init,
+    .class_init = cpu_common_class_init,
 };

 static void cpu_register_types(void)

@@ -92,13 +92,10 @@ static void piix_intx_routing_notifier_xen(PCIDevice *dev)
 {
     int i;

-    /* Scan for updates to PCI link routes (0x60-0x63). */
+    /* Scan for updates to PCI link routes. */
     for (i = 0; i < PIIX_NUM_PIRQS; i++) {
-        uint8_t v = dev->config_read(dev, PIIX_PIRQCA + i, 1);
-        if (v & 0x80) {
-            v = 0;
-        }
-        v &= 0xf;
+        const PCIINTxRoute route = pci_device_route_intx_to_irq(dev, i);
+        const uint8_t v = route.mode == PCI_INTX_ENABLED ? route.irq : 0;
         xen_set_pci_link_route(i, v);
     }
 }

@@ -73,7 +73,7 @@ static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
     return 1;
 }

-static void cpu_class_init(ObjectClass *oc, void *data)
+static void sclp_cpu_class_init(ObjectClass *oc, void *data)
 {
     SCLPEventClass *k = SCLP_EVENT_CLASS(oc);
     DeviceClass *dc = DEVICE_CLASS(oc);
@@ -94,7 +94,7 @@ static const TypeInfo sclp_cpu_info = {
     .name = TYPE_SCLP_CPU_HOTPLUG,
     .parent = TYPE_SCLP_EVENT,
     .instance_size = sizeof(SCLPEvent),
-    .class_init = cpu_class_init,
+    .class_init = sclp_cpu_class_init,
     .class_size = sizeof(SCLPEventClass),
 };

@ -77,6 +77,41 @@ struct PCIESPState {
ESPState esp; ESPState esp;
}; };
static void esp_pci_update_irq(PCIESPState *pci)
{
int scsi_level = !!(pci->dma_regs[DMA_STAT] & DMA_STAT_SCSIINT);
int dma_level = (pci->dma_regs[DMA_CMD] & DMA_CMD_INTE_D) ?
!!(pci->dma_regs[DMA_STAT] & DMA_STAT_DONE) : 0;
int level = scsi_level || dma_level;
pci_set_irq(PCI_DEVICE(pci), level);
}
static void esp_irq_handler(void *opaque, int irq_num, int level)
{
PCIESPState *pci = PCI_ESP(opaque);
if (level) {
pci->dma_regs[DMA_STAT] |= DMA_STAT_SCSIINT;
/*
* If raising the ESP IRQ to indicate end of DMA transfer, set
* DMA_STAT_DONE at the same time. In theory this should be done in
* esp_pci_dma_memory_rw(), however there is a delay between setting
* DMA_STAT_DONE and the ESP IRQ arriving which is visible to the
* guest that can cause confusion e.g. Linux
*/
if ((pci->dma_regs[DMA_CMD] & DMA_CMD_MASK) == 0x3 &&
pci->dma_regs[DMA_WBC] == 0) {
pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
}
} else {
pci->dma_regs[DMA_STAT] &= ~DMA_STAT_SCSIINT;
}
esp_pci_update_irq(pci);
}
static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val) static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
{ {
ESPState *s = &pci->esp; ESPState *s = &pci->esp;
@ -89,6 +124,7 @@ static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
{ {
trace_esp_pci_dma_blast(val); trace_esp_pci_dma_blast(val);
qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n"); qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
pci->dma_regs[DMA_STAT] |= DMA_STAT_BCMBLT;
} }
static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val) static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
@ -151,6 +187,7 @@ static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
/* clear some bits on write */ /* clear some bits on write */
uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE; uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
pci->dma_regs[DMA_STAT] &= ~(val & mask); pci->dma_regs[DMA_STAT] &= ~(val & mask);
esp_pci_update_irq(pci);
} }
break; break;
default: default:
@ -161,17 +198,14 @@ static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr) static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
{ {
ESPState *s = &pci->esp;
uint32_t val; uint32_t val;
val = pci->dma_regs[saddr]; val = pci->dma_regs[saddr];
if (saddr == DMA_STAT) { if (saddr == DMA_STAT) {
if (s->rregs[ESP_RSTAT] & STAT_INT) {
val |= DMA_STAT_SCSIINT;
}
if (!(pci->sbac & SBAC_STATUS)) { if (!(pci->sbac & SBAC_STATUS)) {
pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT | pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
DMA_STAT_DONE); DMA_STAT_DONE);
esp_pci_update_irq(pci);
} }
} }
@ -275,7 +309,7 @@ static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n"); qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
} }
addr = pci->dma_regs[DMA_SPA]; addr = pci->dma_regs[DMA_WAC];
if (pci->dma_regs[DMA_WBC] < len) { if (pci->dma_regs[DMA_WBC] < len) {
len = pci->dma_regs[DMA_WBC]; len = pci->dma_regs[DMA_WBC];
} }
@ -285,9 +319,6 @@ static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
/* update status registers */ /* update status registers */
pci->dma_regs[DMA_WBC] -= len; pci->dma_regs[DMA_WBC] -= len;
pci->dma_regs[DMA_WAC] += len; pci->dma_regs[DMA_WAC] += len;
if (pci->dma_regs[DMA_WBC] == 0) {
pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
}
} }
static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len) static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
@ -342,23 +373,13 @@ static const VMStateDescription vmstate_esp_pci_scsi = {
} }
}; };
static void esp_pci_command_complete(SCSIRequest *req, size_t resid)
{
ESPState *s = req->hba_private;
PCIESPState *pci = container_of(s, PCIESPState, esp);
esp_command_complete(req, resid);
pci->dma_regs[DMA_WBC] = 0;
pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
}
static const struct SCSIBusInfo esp_pci_scsi_info = { static const struct SCSIBusInfo esp_pci_scsi_info = {
.tcq = false, .tcq = false,
.max_target = ESP_MAX_DEVS, .max_target = ESP_MAX_DEVS,
.max_lun = 7, .max_lun = 7,
.transfer_data = esp_transfer_data, .transfer_data = esp_transfer_data,
.complete = esp_pci_command_complete, .complete = esp_command_complete,
.cancel = esp_request_cancelled, .cancel = esp_request_cancelled,
}; };
@ -386,7 +407,7 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
"esp-io", 0x80); "esp-io", 0x80);
pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io); pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
s->irq = pci_allocate_irq(dev); s->irq = qemu_allocate_irq(esp_irq_handler, pci, 0);
scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info); scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info);
} }

@@ -80,6 +80,7 @@ static void shix_machine_init(MachineClass *mc)
     mc->init = shix_init;
     mc->is_default = true;
     mc->default_cpu_type = TYPE_SH7750R_CPU;
+    mc->deprecation_reason = "old and unmaintained";
 }

 DEFINE_MACHINE("shix", shix_machine_init)

@ -39,13 +39,7 @@
#include "hw/timer/i8254.h" #include "hw/timer/i8254.h"
#include "exec/address-spaces.h" #include "exec/address-spaces.h"
#include "qom/object.h" #include "qom/object.h"
#include "trace.h"
//#define HPET_DEBUG
#ifdef HPET_DEBUG
#define DPRINTF printf
#else
#define DPRINTF(...)
#endif
#define HPET_MSI_SUPPORT 0 #define HPET_MSI_SUPPORT 0
@ -431,7 +425,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
HPETState *s = opaque; HPETState *s = opaque;
uint64_t cur_tick, index; uint64_t cur_tick, index;
DPRINTF("qemu: Enter hpet_ram_readl at %" PRIx64 "\n", addr); trace_hpet_ram_read(addr);
index = addr; index = addr;
/*address range of all TN regs*/ /*address range of all TN regs*/
if (index >= 0x100 && index <= 0x3ff) { if (index >= 0x100 && index <= 0x3ff) {
@ -439,7 +433,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
HPETTimer *timer = &s->timer[timer_id]; HPETTimer *timer = &s->timer[timer_id];
if (timer_id > s->num_timers) { if (timer_id > s->num_timers) {
DPRINTF("qemu: timer id out of range\n"); trace_hpet_timer_id_out_of_range(timer_id);
return 0; return 0;
} }
@ -457,7 +451,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
case HPET_TN_ROUTE + 4: case HPET_TN_ROUTE + 4:
return timer->fsb >> 32; return timer->fsb >> 32;
default: default:
DPRINTF("qemu: invalid hpet_ram_readl\n"); trace_hpet_ram_read_invalid();
break; break;
} }
} else { } else {
@ -469,7 +463,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
case HPET_CFG: case HPET_CFG:
return s->config; return s->config;
case HPET_CFG + 4: case HPET_CFG + 4:
DPRINTF("qemu: invalid HPET_CFG + 4 hpet_ram_readl\n"); trace_hpet_invalid_hpet_cfg(4);
return 0; return 0;
case HPET_COUNTER: case HPET_COUNTER:
if (hpet_enabled(s)) { if (hpet_enabled(s)) {
@ -477,7 +471,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
} else { } else {
cur_tick = s->hpet_counter; cur_tick = s->hpet_counter;
} }
DPRINTF("qemu: reading counter = %" PRIx64 "\n", cur_tick); trace_hpet_ram_read_reading_counter(0, cur_tick);
return cur_tick; return cur_tick;
case HPET_COUNTER + 4: case HPET_COUNTER + 4:
if (hpet_enabled(s)) { if (hpet_enabled(s)) {
@ -485,12 +479,12 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
} else { } else {
cur_tick = s->hpet_counter; cur_tick = s->hpet_counter;
} }
DPRINTF("qemu: reading counter + 4 = %" PRIx64 "\n", cur_tick); trace_hpet_ram_read_reading_counter(4, cur_tick);
return cur_tick >> 32; return cur_tick >> 32;
case HPET_STATUS: case HPET_STATUS:
return s->isr; return s->isr;
default: default:
DPRINTF("qemu: invalid hpet_ram_readl\n"); trace_hpet_ram_read_invalid();
break; break;
} }
} }
@ -504,8 +498,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
HPETState *s = opaque; HPETState *s = opaque;
uint64_t old_val, new_val, val, index; uint64_t old_val, new_val, val, index;
DPRINTF("qemu: Enter hpet_ram_writel at %" PRIx64 " = 0x%" PRIx64 "\n", trace_hpet_ram_write(addr, value);
addr, value);
index = addr; index = addr;
old_val = hpet_ram_read(opaque, addr, 4); old_val = hpet_ram_read(opaque, addr, 4);
new_val = value; new_val = value;
@ -515,14 +508,14 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
uint8_t timer_id = (addr - 0x100) / 0x20; uint8_t timer_id = (addr - 0x100) / 0x20;
HPETTimer *timer = &s->timer[timer_id]; HPETTimer *timer = &s->timer[timer_id];
DPRINTF("qemu: hpet_ram_writel timer_id = 0x%x\n", timer_id); trace_hpet_ram_write_timer_id(timer_id);
if (timer_id > s->num_timers) { if (timer_id > s->num_timers) {
DPRINTF("qemu: timer id out of range\n"); trace_hpet_timer_id_out_of_range(timer_id);
return; return;
} }
switch ((addr - 0x100) % 0x20) { switch ((addr - 0x100) % 0x20) {
case HPET_TN_CFG: case HPET_TN_CFG:
DPRINTF("qemu: hpet_ram_writel HPET_TN_CFG\n"); trace_hpet_ram_write_tn_cfg();
if (activating_bit(old_val, new_val, HPET_TN_FSB_ENABLE)) { if (activating_bit(old_val, new_val, HPET_TN_FSB_ENABLE)) {
update_irq(timer, 0); update_irq(timer, 0);
} }
@ -540,10 +533,10 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
} }
break; break;
case HPET_TN_CFG + 4: // Interrupt capabilities case HPET_TN_CFG + 4: // Interrupt capabilities
DPRINTF("qemu: invalid HPET_TN_CFG+4 write\n"); trace_hpet_ram_write_invalid_tn_cfg(4);
break; break;
case HPET_TN_CMP: // comparator register case HPET_TN_CMP: // comparator register
DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP\n"); trace_hpet_ram_write_tn_cmp(0);
if (timer->config & HPET_TN_32BIT) { if (timer->config & HPET_TN_32BIT) {
new_val = (uint32_t)new_val; new_val = (uint32_t)new_val;
} }
@ -566,7 +559,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
} }
break; break;
case HPET_TN_CMP + 4: // comparator register high order case HPET_TN_CMP + 4: // comparator register high order
DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP + 4\n"); trace_hpet_ram_write_tn_cmp(4);
if (!timer_is_periodic(timer) if (!timer_is_periodic(timer)
|| (timer->config & HPET_TN_SETVAL)) { || (timer->config & HPET_TN_SETVAL)) {
timer->cmp = (timer->cmp & 0xffffffffULL) | new_val << 32; timer->cmp = (timer->cmp & 0xffffffffULL) | new_val << 32;
@ -591,7 +584,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
timer->fsb = (new_val << 32) | (timer->fsb & 0xffffffff); timer->fsb = (new_val << 32) | (timer->fsb & 0xffffffff);
break; break;
default: default:
DPRINTF("qemu: invalid hpet_ram_writel\n"); trace_hpet_ram_write_invalid();
break; break;
} }
return; return;
@ -631,7 +624,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
} }
break; break;
case HPET_CFG + 4: case HPET_CFG + 4:
DPRINTF("qemu: invalid HPET_CFG+4 write\n"); trace_hpet_invalid_hpet_cfg(4);
break; break;
case HPET_STATUS: case HPET_STATUS:
val = new_val & s->isr; val = new_val & s->isr;
@ -643,24 +636,20 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
break; break;
case HPET_COUNTER: case HPET_COUNTER:
if (hpet_enabled(s)) { if (hpet_enabled(s)) {
DPRINTF("qemu: Writing counter while HPET enabled!\n"); trace_hpet_ram_write_counter_write_while_enabled();
} }
s->hpet_counter = s->hpet_counter =
(s->hpet_counter & 0xffffffff00000000ULL) | value; (s->hpet_counter & 0xffffffff00000000ULL) | value;
DPRINTF("qemu: HPET counter written. ctr = 0x%" PRIx64 " -> " trace_hpet_ram_write_counter_written(0, value, s->hpet_counter);
"%" PRIx64 "\n", value, s->hpet_counter);
break; break;
case HPET_COUNTER + 4: case HPET_COUNTER + 4:
if (hpet_enabled(s)) { trace_hpet_ram_write_counter_write_while_enabled();
DPRINTF("qemu: Writing counter while HPET enabled!\n");
}
s->hpet_counter = s->hpet_counter =
(s->hpet_counter & 0xffffffffULL) | (((uint64_t)value) << 32); (s->hpet_counter & 0xffffffffULL) | (((uint64_t)value) << 32);
DPRINTF("qemu: HPET counter + 4 written. ctr = 0x%" PRIx64 " -> " trace_hpet_ram_write_counter_written(4, value, s->hpet_counter);
"%" PRIx64 "\n", value, s->hpet_counter);
break; break;
default: default:
DPRINTF("qemu: invalid hpet_ram_writel\n"); trace_hpet_ram_write_invalid();
break; break;
} }
} }

@@ -99,3 +99,18 @@ sifive_pwm_write(uint64_t data, uint64_t offset) "Write 0x%" PRIx64 " at address
 sh_timer_start_stop(int enable, int current) "%d (%d)"
 sh_timer_read(uint64_t offset) "tmu012_read 0x%" PRIx64
 sh_timer_write(uint64_t offset, uint64_t value) "tmu012_write 0x%" PRIx64 " 0x%08" PRIx64
+
+# hpet.c
+hpet_timer_id_out_of_range(uint8_t timer_id) "timer id out of range: 0x%" PRIx8
+hpet_invalid_hpet_cfg(uint8_t reg_off) "invalid HPET_CFG + %u" PRIx8
+hpet_ram_read(uint64_t addr) "enter hpet_ram_readl at 0x%" PRIx64
+hpet_ram_read_reading_counter(uint8_t reg_off, uint64_t cur_tick) "reading counter + %" PRIu8 " = 0x%" PRIx64
+hpet_ram_read_invalid(void) "invalid hpet_ram_readl"
+hpet_ram_write(uint64_t addr, uint64_t value) "enter hpet_ram_writel at 0x%" PRIx64 " = 0x%" PRIx64
+hpet_ram_write_timer_id(uint64_t timer_id) "hpet_ram_writel timer_id = 0x%" PRIx64
+hpet_ram_write_tn_cfg(void) "hpet_ram_writel HPET_TN_CFG"
+hpet_ram_write_invalid_tn_cfg(uint8_t reg_off) "invalid HPET_TN_CFG + %" PRIu8 " write"
+hpet_ram_write_tn_cmp(uint8_t reg_off) "hpet_ram_writel HPET_TN_CMP + %" PRIu8
+hpet_ram_write_invalid(void) "invalid hpet_ram_writel"
+hpet_ram_write_counter_write_while_enabled(void) "Writing counter while HPET enabled!"
+hpet_ram_write_counter_written(uint8_t reg_off, uint64_t value, uint64_t counter) "HPET counter + %" PRIu8 "written. crt = 0x%" PRIx64 " -> 0x%" PRIx64

@@ -518,11 +518,6 @@ static inline void tb_set_page_addr1(TranslationBlock *tb,
 uint32_t curr_cflags(CPUState *cpu);

 /* TranslationBlock invalidate API */
-#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(hwaddr addr);
-#else
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
-#endif
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

@@ -23,7 +23,6 @@

 /* translate-all.c */
-void tb_invalidate_phys_page(tb_page_addr_t addr);
 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);

 #ifdef CONFIG_USER_ONLY

@ -427,6 +427,16 @@ static ssize_t glue(load_elf, SZ)(const char *name, int fd,
file_size = ph->p_filesz; /* Size of the allocated data */ file_size = ph->p_filesz; /* Size of the allocated data */
data_offset = ph->p_offset; /* Offset where the data is located */ data_offset = ph->p_offset; /* Offset where the data is located */
/*
* Some ELF files really do have segments of zero size;
* just ignore them rather than trying to set the wrong addr,
* or create empty ROM blobs, because the zero-length blob can
* falsely trigger the overlapping-ROM-blobs check.
*/
if (mem_size == 0) {
continue;
}
if (file_size > 0) { if (file_size > 0) {
if (g_mapped_file_get_length(mapped_file) < if (g_mapped_file_get_length(mapped_file) <
file_size + data_offset) { file_size + data_offset) {
@ -530,45 +540,38 @@ static ssize_t glue(load_elf, SZ)(const char *name, int fd,
*pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr; *pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr;
} }
/* Some ELF files really do have segments of zero size; if (load_rom) {
* just ignore them rather than trying to create empty g_autofree char *label =
* ROM blobs, because the zero-length blob can falsely g_strdup_printf("%s ELF program header segment %d",
* trigger the overlapping-ROM-blobs check. name, i);
*/
if (mem_size != 0) {
if (load_rom) {
g_autofree char *label =
g_strdup_printf("%s ELF program header segment %d",
name, i);
/* /*
* rom_add_elf_program() takes its own reference to * rom_add_elf_program() takes its own reference to
* 'mapped_file'. * 'mapped_file'.
*/ */
rom_add_elf_program(label, mapped_file, data, file_size, rom_add_elf_program(label, mapped_file, data, file_size,
mem_size, addr, as); mem_size, addr, as);
} else { } else {
MemTxResult res; MemTxResult res;
res = address_space_write(as ? as : &address_space_memory, res = address_space_write(as ? as : &address_space_memory,
addr, MEMTXATTRS_UNSPECIFIED, addr, MEMTXATTRS_UNSPECIFIED,
data, file_size); data, file_size);
if (res != MEMTX_OK) {
goto fail;
}
/*
* We need to zero'ify the space that is not copied
* from file
*/
if (file_size < mem_size) {
res = address_space_set(as ? as : &address_space_memory,
addr + file_size, 0,
mem_size - file_size,
MEMTXATTRS_UNSPECIFIED);
if (res != MEMTX_OK) { if (res != MEMTX_OK) {
goto fail; goto fail;
} }
/*
* We need to zero'ify the space that is not copied
* from file
*/
if (file_size < mem_size) {
res = address_space_set(as ? as : &address_space_memory,
addr + file_size, 0,
mem_size - file_size,
MEMTXATTRS_UNSPECIFIED);
if (res != MEMTX_OK) {
goto fail;
}
}
} }
} }

@ -17,18 +17,24 @@ void cpu_timers_init(void);
/* icount - Instruction Counter API */ /* icount - Instruction Counter API */
/* /**
* icount enablement state: * ICountMode: icount enablement state:
* *
* 0 = Disabled - Do not count executed instructions. * @ICOUNT_DISABLED: Disabled - Do not count executed instructions.
* 1 = Enabled - Fixed conversion of insn to ns via "shift" option * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option
* 2 = Enabled - Runtime adaptive algorithm to compute shift * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift
*/ */
#ifdef CONFIG_TCG typedef enum {
extern int use_icount; ICOUNT_DISABLED = 0,
ICOUNT_PRECISE,
ICOUNT_ADAPTATIVE,
} ICountMode;
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
extern ICountMode use_icount;
#define icount_enabled() (use_icount) #define icount_enabled() (use_icount)
#else #else
#define icount_enabled() 0 #define icount_enabled() ICOUNT_DISABLED
#endif #endif
/* /*
@ -50,8 +56,14 @@ int64_t icount_get(void);
*/ */
int64_t icount_to_ns(int64_t icount); int64_t icount_to_ns(int64_t icount);
/* configure the icount options, including "shift" */ /**
void icount_configure(QemuOpts *opts, Error **errp); * icount_configure: configure the icount options, including "shift"
* @opts: Options to parse
* @errp: pointer to a NULL-initialized error object
*
* Return: true on success, else false setting @errp with error
*/
bool icount_configure(QemuOpts *opts, Error **errp);
/* used by tcg vcpu thread to calc icount budget */ /* used by tcg vcpu thread to calc icount budget */
int64_t icount_round(int64_t count); int64_t icount_round(int64_t count);

@ -1,6 +1,3 @@
#ifndef SYSEMU_REPLAY_H
#define SYSEMU_REPLAY_H
/* /*
* QEMU replay (system interface) * QEMU replay (system interface)
* *
@ -11,6 +8,12 @@
* See the COPYING file in the top-level directory. * See the COPYING file in the top-level directory.
* *
*/ */
#ifndef SYSEMU_REPLAY_H
#define SYSEMU_REPLAY_H
#ifdef CONFIG_USER_ONLY
#error Cannot include this header from user emulation
#endif
#include "exec/replay-core.h" #include "exec/replay-core.h"
#include "qapi/qapi-types-misc.h" #include "qapi/qapi-types-misc.h"
@ -84,12 +87,14 @@ int64_t replay_save_clock(ReplayClockKind kind, int64_t clock,
int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount); int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount);
/*! Saves or reads the clock depending on the current replay mode. */ /*! Saves or reads the clock depending on the current replay mode. */
#define REPLAY_CLOCK(clock, value) \ #define REPLAY_CLOCK(clock, value) \
!icount_enabled() ? (value) : \
(replay_mode == REPLAY_MODE_PLAY \ (replay_mode == REPLAY_MODE_PLAY \
? replay_read_clock((clock), icount_get_raw()) \ ? replay_read_clock((clock), icount_get_raw()) \
: replay_mode == REPLAY_MODE_RECORD \ : replay_mode == REPLAY_MODE_RECORD \
? replay_save_clock((clock), (value), icount_get_raw()) \ ? replay_save_clock((clock), (value), icount_get_raw()) \
: (value)) : (value))
#define REPLAY_CLOCK_LOCKED(clock, value) \ #define REPLAY_CLOCK_LOCKED(clock, value) \
!icount_enabled() ? (value) : \
(replay_mode == REPLAY_MODE_PLAY \ (replay_mode == REPLAY_MODE_PLAY \
? replay_read_clock((clock), icount_get_raw_locked()) \ ? replay_read_clock((clock), icount_get_raw_locked()) \
: replay_mode == REPLAY_MODE_RECORD \ : replay_mode == REPLAY_MODE_RECORD \

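Reading aid, not from the patch: with the leading !icount_enabled() test, both macros collapse to the plain value when icount is off, so a call site never reaches icount_get_raw() in that case. A sketch of a typical call site, using the existing REPLAY_CLOCK_HOST kind and get_clock_realtime(); the function name is made up:

#include "sysemu/replay.h"
#include "qemu/timer.h"

/* Hypothetical call site: read the host clock through the record/replay
 * machinery; with icount disabled this is just the raw clock value. */
static int64_t sample_host_clock(void)
{
    return REPLAY_CLOCK(REPLAY_CLOCK_HOST, get_clock_realtime());
}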

@@ -4,37 +4,20 @@

 /* icount - Instruction Counter API */

-int use_icount;
+ICountMode use_icount = ICOUNT_DISABLED;

-void icount_update(CPUState *cpu)
-{
-    abort();
-}
-
-void icount_configure(QemuOpts *opts, Error **errp)
+bool icount_configure(QemuOpts *opts, Error **errp)
 {
     /* signal error */
     error_setg(errp, "cannot configure icount, TCG support not available");
+    return false;
 }

 int64_t icount_get_raw(void)
 {
     abort();
     return 0;
 }

-int64_t icount_get(void)
-{
-    abort();
-    return 0;
-}
-
-int64_t icount_to_ns(int64_t icount)
-{
-    abort();
-    return 0;
-}
-
-int64_t icount_round(int64_t count)
-{
-    abort();
-    return 0;
-}
-
 void icount_start_warp_timer(void)
 {
     abort();
@@ -43,7 +26,7 @@ void icount_account_warp_timer(void)
 {
     abort();
 }

 void icount_notify_exit(void)
 {
-    abort();
 }


@@ -154,7 +154,7 @@ static bool adjust_timers_state_needed(void *opaque)

 static bool icount_shift_state_needed(void *opaque)
 {
-    return icount_enabled() == 2;
+    return icount_enabled() == ICOUNT_ADAPTATIVE;
 }

 /*


@@ -2270,8 +2270,7 @@ static void user_register_global_props(void)

 static int do_configure_icount(void *opaque, QemuOpts *opts, Error **errp)
 {
-    icount_configure(opts, errp);
-    return 0;
+    return !icount_configure(opts, errp);
 }

 static int accelerator_set_property(void *opaque,

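Context for the inversion above (a sketch, not from the patch): the callback runs under the usual qemu_opts_foreach() convention, where a non-zero return stops the walk and signals failure, so returning !icount_configure(...) now propagates the configuration error instead of discarding it. The surrounding error handling below is hypothetical:

/* Hypothetical caller, assuming the standard qemu_opts_foreach() rule that
 * a non-zero callback return aborts the walk with an error. */
static void configure_icount_from_opts(void)
{
    Error *err = NULL;

    if (qemu_opts_foreach(qemu_find_opts("icount"),
                          do_configure_icount, NULL, &err)) {
        error_report_err(err);      /* @err was filled in by icount_configure() */
        exit(EXIT_FAILURE);
    }
}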

@@ -18,13 +18,8 @@
  */

 #include "qemu/osdep.h"
-#include "qemu/main-loop.h"
 #include "qemu/error-report.h"
 #include "exec/exec-all.h"
-#include "exec/translate-all.h"
-#include "sysemu/tcg.h"
-#include "sysemu/replay.h"
-#include "hw/core/tcg-cpu-ops.h"
 #include "hw/core/cpu.h"

 /* Add a watchpoint. */
@@ -103,122 +98,3 @@
         }
     }
 }
-
-#ifdef CONFIG_TCG
-/*
- * Return true if this watchpoint address matches the specified
- * access (ie the address range covered by the watchpoint overlaps
- * partially or completely with the address range covered by the
- * access).
- */
-static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
-                                              vaddr addr, vaddr len)
-{
-    /*
-     * We know the lengths are non-zero, but a little caution is
-     * required to avoid errors in the case where the range ends
-     * exactly at the top of the address space and so addr + len
-     * wraps round to zero.
-     */
-    vaddr wpend = wp->vaddr + wp->len - 1;
-    vaddr addrend = addr + len - 1;
-
-    return !(addr > wpend || wp->vaddr > addrend);
-}
-
-/* Return flags for watchpoints that match addr + prot.  */
-int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
-{
-    CPUWatchpoint *wp;
-    int ret = 0;
-
-    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
-        if (watchpoint_address_matches(wp, addr, len)) {
-            ret |= wp->flags;
-        }
-    }
-    return ret;
-}
-
-/* Generate a debug exception if a watchpoint has been hit.  */
-void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
-                          MemTxAttrs attrs, int flags, uintptr_t ra)
-{
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUWatchpoint *wp;
-
-    assert(tcg_enabled());
-    if (cpu->watchpoint_hit) {
-        /*
-         * We re-entered the check after replacing the TB.
-         * Now raise the debug interrupt so that it will
-         * trigger after the current instruction.
-         */
-        bql_lock();
-        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
-        bql_unlock();
-        return;
-    }
-
-    if (cc->tcg_ops->adjust_watchpoint_address) {
-        /* this is currently used only by ARM BE32 */
-        addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
-    }
-
-    assert((flags & ~BP_MEM_ACCESS) == 0);
-    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
-        int hit_flags = wp->flags & flags;
-
-        if (hit_flags && watchpoint_address_matches(wp, addr, len)) {
-            if (replay_running_debug()) {
-                /*
-                 * replay_breakpoint reads icount.
-                 * Force recompile to succeed, because icount may
-                 * be read only at the end of the block.
-                 */
-                if (!cpu->neg.can_do_io) {
-                    /* Force execution of one insn next time.  */
-                    cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
-                    cpu_loop_exit_restore(cpu, ra);
-                }
-                /*
-                 * Don't process the watchpoints when we are
-                 * in a reverse debugging operation.
-                 */
-                replay_breakpoint();
-                return;
-            }
-
-            wp->flags |= hit_flags << BP_HIT_SHIFT;
-            wp->hitaddr = MAX(addr, wp->vaddr);
-            wp->hitattrs = attrs;
-
-            if (wp->flags & BP_CPU
-                && cc->tcg_ops->debug_check_watchpoint
-                && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
-                wp->flags &= ~BP_WATCHPOINT_HIT;
-                continue;
-            }
-            cpu->watchpoint_hit = wp;
-
-            mmap_lock();
-            /* This call also restores vCPU state */
-            tb_check_watchpoint(cpu, ra);
-            if (wp->flags & BP_STOP_BEFORE_ACCESS) {
-                cpu->exception_index = EXCP_DEBUG;
-                mmap_unlock();
-                cpu_loop_exit(cpu);
-            } else {
-                /* Force execution of one insn next time.  */
-                cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
-                mmap_unlock();
-                cpu_loop_exit_noexc(cpu);
-            }
-        } else {
-            wp->flags &= ~BP_WATCHPOINT_HIT;
-        }
-    }
-}
-#endif /* CONFIG_TCG */

target/alpha/clk_helper.c (new file)

@@ -0,0 +1,32 @@
+/*
+ * QEMU Alpha clock helpers.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "exec/helper-proto.h"
+#include "cpu.h"
+
+uint64_t helper_load_pcc(CPUAlphaState *env)
+{
+#ifndef CONFIG_USER_ONLY
+    /*
+     * In system mode we have access to a decent high-resolution clock.
+     * In order to make OS-level time accounting work with the RPCC,
+     * present it with a well-timed clock fixed at 250MHz.
+     */
+    return (((uint64_t)env->pcc_ofs << 32)
+            | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2));
+#else
+    /*
+     * In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.  Just pass through
+     * the host cpu clock ticks.  Also, don't bother taking PCC_OFS into
+     * account.
+     */
+    return (uint32_t)cpu_get_host_ticks();
+#endif
+}
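
Aside (not from the patch), a worked check of the 250MHz figure in the comment, since QEMU_CLOCK_VIRTUAL counts nanoseconds; the helper below is only illustrative:

/* One increment of (ns >> 2) corresponds to 4 ns, i.e. 10^9 / 4 =
 * 250,000,000 counts per second, the advertised 250MHz rate. */
static uint32_t pcc_low_bits(CPUAlphaState *env)
{
    uint64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    return (uint32_t)(ns >> 2);     /* low half of the RPCC value */
}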


@@ -4,15 +4,18 @@ alpha_ss.add(files(
   'fpu_helper.c',
   'gdbstub.c',
   'helper.c',
+  'clk_helper.c',
   'int_helper.c',
   'mem_helper.c',
-  'sys_helper.c',
   'translate.c',
   'vax_helper.c',
 ))

 alpha_system_ss = ss.source_set()
-alpha_system_ss.add(files('machine.c'))
+alpha_system_ss.add(files(
+  'machine.c',
+  'sys_helper.c',
+))

 target_arch += {'alpha': alpha_ss}
 target_system_arch += {'alpha': alpha_system_ss}


@@ -27,23 +27,7 @@
 #include "qemu/timer.h"

-uint64_t helper_load_pcc(CPUAlphaState *env)
-{
-#ifndef CONFIG_USER_ONLY
-    /* In system mode we have access to a decent high-resolution clock.
-       In order to make OS-level time accounting work with the RPCC,
-       present it with a well-timed clock fixed at 250MHz.  */
-    return (((uint64_t)env->pcc_ofs << 32)
-            | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2));
-#else
-    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.  Just pass through the host cpu
-       clock ticks.  Also, don't bother taking PCC_OFS into account.  */
-    return (uint32_t)cpu_get_host_ticks();
-#endif
-}
-
 /* PALcode support special instructions */
-#ifndef CONFIG_USER_ONLY
 void helper_tbia(CPUAlphaState *env)
 {
     tlb_flush(env_cpu(env));
@@ -89,5 +73,3 @@ void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
         timer_del(cpu->alarm_timer);
     }
 }
-
-#endif /* CONFIG_USER_ONLY */


@@ -1796,8 +1796,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
     int pagebits;
     Error *local_err = NULL;

+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
     /* Use pc-relative instructions in system-mode */
-#ifndef CONFIG_USER_ONLY
     cs->tcg_cflags |= CF_PCREL;
 #endif


@@ -948,16 +948,19 @@ static int64_t cycles_ns_per(uint64_t cycles)

 static bool instructions_supported(CPUARMState *env)
 {
-    return icount_enabled() == 1; /* Precise instruction counting */
+    /* Precise instruction counting */
+    return icount_enabled() == ICOUNT_PRECISE;
 }

 static uint64_t instructions_get_count(CPUARMState *env)
 {
+    assert(icount_enabled() == ICOUNT_PRECISE);
     return (uint64_t)icount_get_raw();
 }

 static int64_t instructions_ns_per(uint64_t icount)
 {
+    assert(icount_enabled() == ICOUNT_PRECISE);
     return icount_to_ns((int64_t)icount);
 }
 #endif

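For orientation (a hedged sketch, not code from this series): the two new asserts are safe because these callbacks are only reachable through a PMU event whose ->supported hook is instructions_supported(), which now requires ICOUNT_PRECISE. A standalone entry of that shape would look roughly as follows; the entry name is made up, the field names follow the pm_event table in target/arm/helper.c:

/* Hypothetical INST_RETIRED (0x008) event entry: registration is gated by
 * .supported, so .get_count/.ns_per_count only run under ICOUNT_PRECISE. */
static const pm_event inst_retired_event = {
    .number = 0x008,
    .supported = instructions_supported,
    .get_count = instructions_get_count,
    .ns_per_count = instructions_ns_per,
};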

@@ -7221,8 +7221,8 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
     static bool ht_warned;
     unsigned requested_lbr_fmt;

+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
     /* Use pc-relative instructions in system-mode */
-#ifndef CONFIG_USER_ONLY
     cs->tcg_cflags |= CF_PCREL;
 #endif


@@ -126,18 +126,18 @@ static const struct TCGCPUOps x86_tcg_ops = {
 #endif /* !CONFIG_USER_ONLY */
 };

-static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
+static void x86_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
 {
     /* for x86, all cpus use the same set of operations */
     cc->tcg_ops = &x86_tcg_ops;
 }

-static void tcg_cpu_class_init(CPUClass *cc)
+static void x86_tcg_cpu_class_init(CPUClass *cc)
 {
-    cc->init_accel_cpu = tcg_cpu_init_ops;
+    cc->init_accel_cpu = x86_tcg_cpu_init_ops;
 }

-static void tcg_cpu_xsave_init(void)
+static void x86_tcg_cpu_xsave_init(void)
 {
 #define XO(bit, field) \
     x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
@@ -159,25 +159,25 @@ static void tcg_cpu_xsave_init(void)
  * TCG-specific defaults that override cpudef models when using TCG.
  * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
  */
-static PropValue tcg_default_props[] = {
+static PropValue x86_tcg_default_props[] = {
     { "vme", "off" },
     { NULL, NULL },
 };

-static void tcg_cpu_instance_init(CPUState *cs)
+static void x86_tcg_cpu_instance_init(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
     X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);

     if (xcc->model) {
         /* Special cases not set in the X86CPUDefinition structs: */
-        x86_cpu_apply_props(cpu, tcg_default_props);
+        x86_cpu_apply_props(cpu, x86_tcg_default_props);
     }

-    tcg_cpu_xsave_init();
+    x86_tcg_cpu_xsave_init();
 }

-static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
 {
     AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
@@ -185,18 +185,18 @@ static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
     acc->cpu_target_realize = tcg_cpu_realizefn;
 #endif /* CONFIG_USER_ONLY */

-    acc->cpu_class_init = tcg_cpu_class_init;
-    acc->cpu_instance_init = tcg_cpu_instance_init;
+    acc->cpu_class_init = x86_tcg_cpu_class_init;
+    acc->cpu_instance_init = x86_tcg_cpu_instance_init;
 }

-static const TypeInfo tcg_cpu_accel_type_info = {
+static const TypeInfo x86_tcg_cpu_accel_type_info = {
     .name = ACCEL_CPU_NAME("tcg"),
     .parent = TYPE_ACCEL_CPU,
-    .class_init = tcg_cpu_accel_class_init,
+    .class_init = x86_tcg_cpu_accel_class_init,
     .abstract = true,
 };

-static void tcg_cpu_accel_register_types(void)
+static void x86_tcg_cpu_accel_register_types(void)
 {
-    type_register_static(&tcg_cpu_accel_type_info);
+    type_register_static(&x86_tcg_cpu_accel_type_info);
 }

-type_init(tcg_cpu_accel_register_types);
+type_init(x86_tcg_cpu_accel_register_types);


@@ -929,7 +929,7 @@ static bool riscv_cpu_is_vendor(Object *cpu_obj)
  * -> cpu_exec_realizefn()
  *    -> tcg_cpu_realize() (via accel_cpu_common_realize())
  */
-static bool tcg_cpu_realize(CPUState *cs, Error **errp)
+static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
 {
     RISCVCPU *cpu = RISCV_CPU(cs);
     Error *local_err = NULL;
@@ -1372,7 +1372,7 @@ static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
     return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
 }

-static void tcg_cpu_instance_init(CPUState *cs)
+static void riscv_tcg_cpu_instance_init(CPUState *cs)
 {
     RISCVCPU *cpu = RISCV_CPU(cs);
     Object *obj = OBJECT(cpu);
@@ -1386,7 +1386,7 @@ static void tcg_cpu_instance_init(CPUState *cs)
     }
 }

-static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
+static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
 {
     /*
      * All cpus use the same set of operations.
@@ -1394,30 +1394,30 @@ static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
     cc->tcg_ops = &riscv_tcg_ops;
 }

-static void tcg_cpu_class_init(CPUClass *cc)
+static void riscv_tcg_cpu_class_init(CPUClass *cc)
 {
-    cc->init_accel_cpu = tcg_cpu_init_ops;
+    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
 }

-static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
 {
     AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

-    acc->cpu_class_init = tcg_cpu_class_init;
-    acc->cpu_instance_init = tcg_cpu_instance_init;
-    acc->cpu_target_realize = tcg_cpu_realize;
+    acc->cpu_class_init = riscv_tcg_cpu_class_init;
+    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
+    acc->cpu_target_realize = riscv_tcg_cpu_realize;
 }

-static const TypeInfo tcg_cpu_accel_type_info = {
+static const TypeInfo riscv_tcg_cpu_accel_type_info = {
     .name = ACCEL_CPU_NAME("tcg"),
     .parent = TYPE_ACCEL_CPU,
-    .class_init = tcg_cpu_accel_class_init,
+    .class_init = riscv_tcg_cpu_accel_class_init,
     .abstract = true,
 };

-static void tcg_cpu_accel_register_types(void)
+static void riscv_tcg_cpu_accel_register_types(void)
 {
-    type_register_static(&tcg_cpu_accel_type_info);
+    type_register_static(&riscv_tcg_cpu_accel_type_info);
 }

-type_init(tcg_cpu_accel_register_types);
+type_init(riscv_tcg_cpu_accel_register_types);


@@ -233,6 +233,7 @@ static const struct TCGCPUOps xtensa_tcg_ops = {
     .do_interrupt = xtensa_cpu_do_interrupt,
     .do_transaction_failed = xtensa_cpu_do_transaction_failed,
     .do_unaligned_access = xtensa_cpu_do_unaligned_access,
+    .debug_check_breakpoint = xtensa_debug_check_breakpoint,
 #endif /* !CONFIG_USER_ONLY */
 };


@@ -229,6 +229,7 @@ enum {
 #define MAX_NCCOMPARE 3
 #define MAX_TLB_WAY_SIZE 8
 #define MAX_NDBREAK 2
+#define MAX_NIBREAK 2
 #define MAX_NMEMORY 4
 #define MAX_MPU_FOREGROUND_SEGMENTS 32
@@ -547,6 +548,8 @@ struct CPUArchState {

     /* Watchpoints for DBREAK registers */
     struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK];
+    /* Breakpoints for IBREAK registers */
+    struct CPUBreakpoint *cpu_breakpoint[MAX_NIBREAK];
 };

 /**
@@ -590,6 +593,7 @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                                       int mmu_idx, MemTxAttrs attrs,
                                       MemTxResult response, uintptr_t retaddr);
 hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+bool xtensa_debug_check_breakpoint(CPUState *cs);
 #endif
 void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
 void xtensa_count_regs(const XtensaConfig *config,


@@ -33,27 +33,21 @@
 #include "exec/exec-all.h"
 #include "exec/address-spaces.h"

-static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
-{
-    uint32_t paddr;
-    uint32_t page_size;
-    unsigned access;
-    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
-                                       &paddr, &page_size, &access);
-
-    if (ret == 0) {
-        tb_invalidate_phys_addr(&address_space_memory, paddr,
-                                MEMTXATTRS_UNSPECIFIED);
-    }
-}
-
 void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
 {
+    CPUState *cs = env_cpu(env);
     uint32_t change = v ^ env->sregs[IBREAKENABLE];
     unsigned i;

     for (i = 0; i < env->config->nibreak; ++i) {
         if (change & (1 << i)) {
-            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+            if (v & (1 << i)) {
+                cpu_breakpoint_insert(cs, env->sregs[IBREAKA + i],
+                                      BP_CPU, &env->cpu_breakpoint[i]);
+            } else {
+                cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[i]);
+                env->cpu_breakpoint[i] = NULL;
+            }
         }
     }
     env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
@@ -62,12 +56,32 @@ void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)

 void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
 {
     if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
-        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
-        tb_invalidate_virtual_addr(env, v);
+        CPUState *cs = env_cpu(env);
+
+        cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[i]);
+        cpu_breakpoint_insert(cs, v, BP_CPU, &env->cpu_breakpoint[i]);
     }
     env->sregs[IBREAKA + i] = v;
 }

+bool xtensa_debug_check_breakpoint(CPUState *cs)
+{
+    XtensaCPU *cpu = XTENSA_CPU(cs);
+    CPUXtensaState *env = &cpu->env;
+    unsigned int i;
+
+    if (xtensa_get_cintlevel(env) >= env->config->debug_level) {
+        return false;
+    }
+    for (i = 0; i < env->config->nibreak; ++i) {
+        if (env->sregs[IBREAKENABLE] & (1 << i) &&
+            env->sregs[IBREAKA + i] == env->pc) {
+            return true;
+        }
+    }
+    return false;
+}
+
 static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
                        uint32_t dbreakc)
 {

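For orientation (a rough sketch, not code from this series): with the IBREAK addresses registered as BP_CPU breakpoints, the common TCG code finds them by PC and asks the target whether they should fire through the TCGCPUOps hook wired up above, along these lines:

/* Paraphrased shape of the common-code decision; the helper name is made up. */
static bool cpu_bp_should_fire(CPUState *cpu, const CPUBreakpoint *bp)
{
    const struct TCGCPUOps *ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (bp->flags & BP_GDB) {
        return true;                          /* gdbstub breakpoints always hit */
    }
    if (bp->flags & BP_CPU && ops->debug_check_breakpoint) {
        /* xtensa: suppressed while cintlevel >= debug_level */
        return ops->debug_check_breakpoint(cpu);
    }
    return false;
}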

@@ -231,6 +231,18 @@ void xtensa_breakpoint_handler(CPUState *cs)
             }
             cpu_loop_exit_noexc(cs);
         }
+    } else {
+        if (cpu_breakpoint_test(cs, env->pc, BP_GDB)
+            || !cpu_breakpoint_test(cs, env->pc, BP_CPU)) {
+            return;
+        }
+        if (env->sregs[ICOUNT] == 0xffffffff &&
+            xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
+            debug_exception_env(env, DEBUGCAUSE_IC);
+        } else {
+            debug_exception_env(env, DEBUGCAUSE_IB);
+        }
+        cpu_loop_exit_noexc(cs);
     }
 }


@@ -1123,19 +1123,6 @@ static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
     return xtensa_op0_insn_len(dc, b0);
 }

-static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
-{
-    unsigned i;
-
-    for (i = 0; i < dc->config->nibreak; ++i) {
-        if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
-            env->sregs[IBREAKA + i] == dc->pc) {
-            gen_debug_exception(dc, DEBUGCAUSE_IB);
-            break;
-        }
-    }
-}
-
 static void xtensa_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
 {
@@ -1205,10 +1192,6 @@ static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         gen_set_label(label);
     }

-    if (dc->debug) {
-        gen_ibreak_check(env, dc);
-    }
-
     disas_xtensa_insn(env, dc);

     if (dc->icount) {


@@ -129,7 +129,7 @@ test ibreak_remove
 4:
 test_end

-test ibreak_priority
+test ibreak_break_priority
     set_vector debug_vector, 2f
     rsil    a2, debug_level - 1
     movi    a2, 1f
@@ -145,6 +145,29 @@ test ibreak_priority
     movi    a3, 0x2
     assert  eq, a2, a3
 test_end

+test ibreak_icount_priority
+    set_vector debug_vector, 2f
+    rsil    a2, debug_level - 1
+    movi    a2, 1f
+    wsr     a2, ibreaka0
+    movi    a2, 1
+    wsr     a2, ibreakenable
+    movi    a2, -2
+    wsr     a2, icount
+    movi    a2, 1
+    wsr     a2, icountlevel
+    isync
+    rsil    a2, 0
+    nop
+1:
+    break   0, 0
+    test_fail
+2:
+    rsr     a2, debugcause
+    movi    a3, 0x1
+    assert  eq, a2, a3
+test_end
+
 #endif

 test icount


@@ -94,13 +94,15 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
     }
     aio_notify(ctx);

-    /*
-     * Workaround for record/replay.
-     * vCPU execution should be suspended when new BH is set.
-     * This is needed to avoid guest timeouts caused
-     * by the long cycles of the execution.
-     */
-    icount_notify_exit();
+    if (unlikely(icount_enabled())) {
+        /*
+         * Workaround for record/replay.
+         * vCPU execution should be suspended when new BH is set.
+         * This is needed to avoid guest timeouts caused
+         * by the long cycles of the execution.
+         */
+        icount_notify_exit();
+    }
 }

 /* Only called from aio_bh_poll() and aio_ctx_finalize() */