Merge remote-tracking branch 'remotes/bonzini-gitlab/tags/for-upstream' into staging

* AccelCPUClass and sysemu/user split for i386 (Claudio)
* i386 page walk unification
* Fix detection of gdbus-codegen
* Misc refactoring

# gpg: Signature made Wed 12 May 2021 09:39:29 BST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini-gitlab/tags/for-upstream: (32 commits)
  coverity-scan: list components, move model to scripts/coverity-scan
  configure: fix detection of gdbus-codegen
  qemu-option: support accept-any QemuOptsList in qemu_opts_absorb_qdict
  main-loop: remove dead code
  target/i386: use mmu_translate for NPT walk
  target/i386: allow customizing the next phase of the translation
  target/i386: extend pg_mode to more CR0 and CR4 bits
  target/i386: pass cr3 to mmu_translate
  target/i386: extract mmu_translate
  target/i386: move paging mode constants from SVM to cpu.h
  target/i386: merge SVM_NPTEXIT_* with PF_ERROR_* constants
  accel: add init_accel_cpu for adapting accel behavior to CPU type
  accel: move call to accel_init_interfaces
  i386: make cpu_load_efer sysemu-only
  target/i386: gdbstub: only write CR0/CR2/CR3/EFER for sysemu
  target/i386: gdbstub: introduce aux functions to read/write CS64 regs
  i386: split off sysemu part of cpu.c
  i386: split seg_helper into user-only and sysemu parts
  i386: split svm_helper into sysemu and stub-only user
  i386: separate fpu_helper sysemu-only parts
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
master
Peter Maydell 2021-05-12 16:07:50 +01:00
commit 31589644ba
59 changed files with 3299 additions and 2674 deletions
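What the new AccelCPUClass split looks like from an accelerator's point of view, as a hedged sketch (the my_accel names are invented for illustration; the hooks themselves are the ones this merge adds to accel-cpu.h and accel-common.c):

static bool my_accel_cpu_realizefn(CPUState *cpu, Error **errp)
{
    /* accelerator-specific realize work; returning false fails realize */
    return true;
}

static void my_accel_cpu_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_realizefn = my_accel_cpu_realizefn;
}

cpu_exec_realizefn() reaches such a hook through the new accel_cpu_realizefn() wrapper, as the cpu.c hunk below shows.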


@@ -351,7 +351,7 @@ M: Paolo Bonzini <pbonzini@redhat.com>
M: Richard Henderson <richard.henderson@linaro.org>
M: Eduardo Habkost <ehabkost@redhat.com>
S: Maintained
F: target/i386/
F: target/i386/tcg/
F: tests/tcg/i386/
F: tests/tcg/x86_64/
F: hw/i386/


@@ -54,10 +54,23 @@ static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
CPUClass *cc = CPU_CLASS(klass);
AccelCPUClass *accel_cpu = opaque;
/*
* The first callback allows accel-cpu to run initializations
* for the CPU, customizing CPU behavior according to the accelerator.
*
* The second one allows the CPU to customize the accel-cpu
* behavior according to the CPU.
*
* The second is currently only used by TCG, to specialize the
* TCGCPUOps depending on the CPU type.
*/
cc->accel_cpu = accel_cpu;
if (accel_cpu->cpu_class_init) {
accel_cpu->cpu_class_init(cc);
}
if (cc->init_accel_cpu) {
cc->init_accel_cpu(accel_cpu, cc);
}
}
/* initialize the arch-specific accel CpuClass interfaces */
@@ -89,6 +102,25 @@ void accel_init_interfaces(AccelClass *ac)
accel_init_cpu_interfaces(ac);
}
void accel_cpu_instance_init(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) {
cc->accel_cpu->cpu_instance_init(cpu);
}
}
bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
return cc->accel_cpu->cpu_realizefn(cpu, errp);
}
return true;
}
static const TypeInfo accel_cpu_type = {
.name = TYPE_ACCEL_CPU,
.parent = TYPE_OBJECT,


@@ -913,8 +913,8 @@ int main(int argc, char **argv)
{
AccelClass *ac = ACCEL_GET_CLASS(current_accel());
ac->init_machine(NULL);
accel_init_interfaces(ac);
ac->init_machine(NULL);
}
cpu = cpu_create(cpu_type);
env = cpu->env_ptr;
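Both user-mode main() hunks in this merge (this one and its counterpart further down) make the same swap: accel_init_interfaces(ac) now runs before ac->init_machine(NULL). On the sysemu side the call moves correspondingly, out of qemu_init() in vl.c and into machine_run_board_init(), as later hunks show.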

configure

@@ -3341,7 +3341,7 @@ if ! test "$gio" = "no"; then
gio_cflags=$($pkg_config --cflags gio-2.0)
gio_libs=$($pkg_config --libs gio-2.0)
gdbus_codegen=$($pkg_config --variable=gdbus_codegen gio-2.0)
if [ ! -x "$gdbus_codegen" ]; then
if ! has "$gdbus_codegen"; then
gdbus_codegen=
fi
# Check that the libraries actually work -- Ubuntu 18.04 ships
@@ -5711,6 +5711,8 @@ if test "$gio" = "yes" ; then
echo "CONFIG_GIO=y" >> $config_host_mak
echo "GIO_CFLAGS=$gio_cflags" >> $config_host_mak
echo "GIO_LIBS=$gio_libs" >> $config_host_mak
fi
if test "$gdbus_codegen" != "" ; then
echo "GDBUS_CODEGEN=$gdbus_codegen" >> $config_host_mak
fi
echo "CONFIG_TLS_PRIORITY=\"$tls_priority\"" >> $config_host_mak
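A note on the gdbus-codegen hunk above: pkg-config --variable=gdbus_codegen gio-2.0 commonly prints a bare program name such as gdbus-codegen rather than an absolute path, so the test -x check could fail even when the tool is installed; configure's has helper searches $PATH instead, which is the likely motivation for this change.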

cpu.c

@@ -36,6 +36,7 @@
#include "sysemu/replay.h"
#include "exec/translate-all.h"
#include "exec/log.h"
#include "hw/core/accel-cpu.h"
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
@@ -129,7 +130,9 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
CPUClass *cc = CPU_GET_CLASS(cpu);
cpu_list_add(cpu);
if (!accel_cpu_realizefn(cpu, errp)) {
return;
}
#ifdef CONFIG_TCG
/* NB: errp parameter is unused currently */
if (tcg_enabled()) {


@@ -1234,6 +1234,7 @@ void machine_run_board_init(MachineState *machine)
"on", false);
}
accel_init_interfaces(ACCEL_GET_CLASS(machine->accelerator));
machine_class->init(machine);
phase_advance(PHASE_MACHINE_INITIALIZED);
}


@@ -61,6 +61,7 @@
#include "hw/hyperv/vmbus-bridge.h"
#include "hw/mem/nvdimm.h"
#include "hw/i386/acpi-build.h"
#include "kvm/kvm-cpu.h"
#define MAX_IDE_BUS 2


@@ -32,7 +32,7 @@ typedef struct AccelCPUClass {
void (*cpu_class_init)(CPUClass *cc);
void (*cpu_instance_init)(CPUState *cpu);
void (*cpu_realizefn)(CPUState *cpu, Error **errp);
bool (*cpu_realizefn)(CPUState *cpu, Error **errp);
} AccelCPUClass;
#endif /* ACCEL_CPU_H */


@@ -192,6 +192,12 @@ struct CPUClass {
/* when TCG is not available, this pointer is NULL */
struct TCGCPUOps *tcg_ops;
/*
* if not NULL, this is called in order for the CPUClass to initialize
* class data that depends on the accelerator, see accel/accel-common.c.
*/
void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);
};
/*

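Per the comment added to accel-common.c above, TCG is currently the only user of this second callback, specializing TCGCPUOps by CPU type. A minimal sketch of such a hook (the function and ops names here are assumptions for illustration, not taken from this diff):

static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /* all CPU models of this target share one set of TCG ops */
    cc->tcg_ops = &x86_tcg_ops;
}

with the target's CPU class setting cc->init_accel_cpu to this function.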

@@ -78,4 +78,17 @@ int accel_init_machine(AccelState *accel, MachineState *ms);
void accel_setup_post(MachineState *ms);
#endif /* !CONFIG_USER_ONLY */
/**
* accel_cpu_instance_init:
* @cpu: The CPU that needs to do accel-specific object initializations.
*/
void accel_cpu_instance_init(CPUState *cpu);
/**
* accel_cpu_realizefn:
* @cpu: The CPU that needs to call accel-specific cpu realization.
* @errp: currently unused.
*/
bool accel_cpu_realizefn(CPUState *cpu, Error **errp);
#endif /* QEMU_ACCEL_H */


@@ -234,24 +234,6 @@ void event_notifier_set_handler(EventNotifier *e,
GSource *iohandler_get_g_source(void);
AioContext *iohandler_get_aio_context(void);
#ifdef CONFIG_POSIX
/**
* qemu_add_child_watch: Register a child process for reaping.
*
* Under POSIX systems, a parent process must read the exit status of
* its child processes using waitpid, or the operating system will not
* free some of the resources attached to that process.
*
* This function directs the QEMU main loop to observe a child process
* and call waitpid as soon as it exits; the watch is then removed
* automatically. It is useful whenever QEMU forks a child process
* but will find out about its termination by other means such as a
* "broken pipe".
*
* @pid: The pid that QEMU should observe.
*/
int qemu_add_child_watch(pid_t pid);
#endif
/**
* qemu_mutex_iothread_locked: Return lock status of the main loop mutex.


@@ -729,8 +729,8 @@ int main(int argc, char **argv, char **envp)
{
AccelClass *ac = ACCEL_GET_CLASS(current_accel());
ac->init_machine(NULL);
accel_init_interfaces(ac);
ac->init_machine(NULL);
}
cpu = cpu_create(cpu_type);
env = cpu->env_ptr;


@@ -0,0 +1,154 @@
This is the list of currently configured Coverity components:
alpha
~ (/qemu)?((/include)?/hw/alpha/.*|/target/alpha/.*)
arm
~ (/qemu)?((/include)?/hw/arm/.*|(/include)?/hw/.*/(arm|allwinner-a10|bcm28|digic|exynos|imx|omap|stellaris|pxa2xx|versatile|zynq|cadence).*|/hw/net/xgmac.c|/hw/ssi/xilinx_spips.c|/target/arm/.*)
avr
~ (/qemu)?((/include)?/hw/avr/.*|/target/avr/.*)
cris
~ (/qemu)?((/include)?/hw/cris/.*|/target/cris/.*)
hexagon
~ (/qemu)?(/target/hexagon/.*)
hppa
~ (/qemu)?((/include)?/hw/hppa/.*|/target/hppa/.*)
i386
~ (/qemu)?((/include)?/hw/i386/.*|/target/i386/.*|/hw/intc/[^/]*apic[^/]*\.c)
lm32
~ (/qemu)?((/include)?/hw/lm32/.*|/target/lm32/.*|/hw/.*/(milkymist|lm32).*)
m68k
~ (/qemu)?((/include)?/hw/m68k/.*|/target/m68k/.*|(/include)?/hw(/.*)?/mcf.*)
microblaze
~ (/qemu)?((/include)?/hw/microblaze/.*|/target/microblaze/.*)
mips
~ (/qemu)?((/include)?/hw/mips/.*|/target/mips/.*)
nios2
~ (/qemu)?((/include)?/hw/nios2/.*|/target/nios2/.*)
ppc
~ (/qemu)?((/include)?/hw/ppc/.*|/target/ppc/.*|/hw/pci-host/(uninorth.*|dec.*|prep.*|ppc.*)|/hw/misc/macio/.*|(/include)?/hw/.*/(xics|openpic|spapr).*)
riscv
~ (/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*)
rx
~ (/qemu)?((/include)?/hw/rx/.*|/target/rx/.*)
s390
~ (/qemu)?((/include)?/hw/s390x/.*|/target/s390x/.*|/hw/.*/s390_.*)
sh4
~ (/qemu)?((/include)?/hw/sh4/.*|/target/sh4/.*)
sparc
~ (/qemu)?((/include)?/hw/sparc(64)?.*|/target/sparc/.*|/hw/.*/grlib.*|/hw/display/cg3.c)
tilegx
~ (/qemu)?(/target/tilegx/.*)
tricore
~ (/qemu)?((/include)?/hw/tricore/.*|/target/tricore/.*)
unicore32
~ (/qemu)?((/include)?/hw/unicore32/.*|/target/unicore32/.*)
9pfs
~ (/qemu)?(/hw/9pfs/.*|/fsdev/.*)
audio
~ (/qemu)?((/include)?/(audio|hw/audio)/.*)
block
~ (/qemu)?(/block.*|(/include?)(/hw)?/(block|storage-daemon)/.*|(/include)?/hw/ide/.*|/qemu-(img|io).*|/util/(aio|async|thread-pool).*)
char
~ (/qemu)?(/qemu-char\.c|/include/sysemu/char\.h|(/include)?/hw/char/.*)
capstone
~ (/qemu)?(/capstone/.*)
crypto
~ (/qemu)?((/include)?/crypto/.*|/hw/.*/crypto.*)
disas
~ (/qemu)?((/include)?/disas.*)
fpu
~ (/qemu)?((/include)?(/fpu|/libdecnumber)/.*)
io
~ (/qemu)?((/include)?/io/.*)
ipmi
~ (/qemu)?((/include)?/hw/ipmi/.*)
libvixl
~ (/qemu)?(/disas/libvixl/.*)
migration
~ (/qemu)?((/include)?/migration/.*)
monitor
~ (/qemu)?(/qapi.*|/qobject/.*|/monitor\..*|/[hq]mp\..*)
nbd
~ (/qemu)?(/nbd/.*|/include/block/nbd.*|/qemu-nbd\.c)
net
~ (/qemu)?((/include)?(/hw)?/(net|rdma)/.*)
pci
~ (/qemu)?(/hw/pci.*|/include/hw/pci.*)
qemu-ga
~ (/qemu)?(/qga/.*)
scsi
~ (/qemu)?(/scsi/.*|/hw/scsi/.*|/include/hw/scsi/.*)
slirp
~ (/qemu)?(/.*slirp.*)
tcg
~ (/qemu)?(/accel/tcg/.*|/replay/.*|/(.*/)?softmmu.*)
trace
~ (/qemu)?(/.*trace.*\.[ch])
ui
~ (/qemu)?((/include)?(/ui|/hw/display|/hw/input)/.*)
usb
~ (/qemu)?(/hw/usb/.*|/include/hw/usb/.*)
user
~ (/qemu)?(/linux-user/.*|/bsd-user/.*|/user-exec\.c|/thunk\.c|/include/exec/user/.*)
util
~ (/qemu)?(/util/.*|/include/qemu/.*)
xen
~ (/qemu)?(.*/xen.*)
virtiofsd
~ (/qemu)?(/tools/virtiofsd/.*)
(headers)
~ (/qemu)?(/include/.*)
testlibs
~ (/qemu)?(/tests/qtest(/libqos/.*|/libqtest.*))
tests
~ (/qemu)?(/tests/.*)
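Each component claims the paths its regular expression matches: /qemu/target/i386/tcg/seg_helper.c, for instance, lands in the i386 component via its /target/i386/.* alternative, and /qemu/tests/qtest/libqtest.c matches the testlibs pattern.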


@@ -3595,7 +3595,6 @@ void qemu_init(int argc, char **argv, char **envp)
current_machine->cpu_type = parse_cpu_option(cpu_option);
}
/* NB: for machine none cpu_type could STILL be NULL here! */
accel_init_interfaces(ACCEL_GET_CLASS(current_machine->accelerator));
qemu_resolve_machine_memdev();
parse_numa_opts(current_machine);


@@ -0,0 +1,70 @@
/*
* i386 CPU internal definitions to be shared between cpu.c and cpu-sysemu.c
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef I386_CPU_INTERNAL_H
#define I386_CPU_INTERNAL_H
typedef enum FeatureWordType {
CPUID_FEATURE_WORD,
MSR_FEATURE_WORD,
} FeatureWordType;
typedef struct FeatureWordInfo {
FeatureWordType type;
/* feature flags names are taken from "Intel Processor Identification and
* the CPUID Instruction" and AMD's "CPUID Specification".
* In cases of disagreement between feature naming conventions,
* aliases may be added.
*/
const char *feat_names[64];
union {
/* If type==CPUID_FEATURE_WORD */
struct {
uint32_t eax; /* Input EAX for CPUID */
bool needs_ecx; /* CPUID instruction uses ECX as input */
uint32_t ecx; /* Input ECX value for CPUID */
int reg; /* output register (R_* constant) */
} cpuid;
/* If type==MSR_FEATURE_WORD */
struct {
uint32_t index;
} msr;
};
uint64_t tcg_features; /* Feature flags supported by TCG */
uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
uint64_t migratable_flags; /* Feature flags known to be migratable */
/* Features that shouldn't be auto-enabled by "-cpu host" */
uint64_t no_autoenable_flags;
} FeatureWordInfo;
extern FeatureWordInfo feature_word_info[];
void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
#ifndef CONFIG_USER_ONLY
GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs);
void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp);
void x86_cpu_apic_create(X86CPU *cpu, Error **errp);
void x86_cpu_apic_realize(X86CPU *cpu, Error **errp);
void x86_cpu_machine_reset_cb(void *opaque);
#endif /* !CONFIG_USER_ONLY */
#endif /* I386_CPU_INTERNAL_H */

target/i386/cpu-sysemu.c (new file)

@@ -0,0 +1,352 @@
/*
* i386 CPUID, CPU class, definitions, models: sysemu-only code
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/xen.h"
#include "sysemu/whpx.h"
#include "kvm/kvm_i386.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qom/qom-qobject.h"
#include "qapi/qapi-commands-machine-target.h"
#include "hw/qdev-properties.h"
#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "cpu-internal.h"
/* Return a QDict containing keys for all properties that can be included
* in static expansion of CPU models. All properties set by x86_cpu_load_model()
* must be included in the dictionary.
*/
static QDict *x86_cpu_static_props(void)
{
FeatureWord w;
int i;
static const char *props[] = {
"min-level",
"min-xlevel",
"family",
"model",
"stepping",
"model-id",
"vendor",
"lmce",
NULL,
};
static QDict *d;
if (d) {
return d;
}
d = qdict_new();
for (i = 0; props[i]; i++) {
qdict_put_null(d, props[i]);
}
for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *fi = &feature_word_info[w];
int bit;
for (bit = 0; bit < 64; bit++) {
if (!fi->feat_names[bit]) {
continue;
}
qdict_put_null(d, fi->feat_names[bit]);
}
}
return d;
}
/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
&error_abort);
qdict_put_obj(props, prop, value);
}
/* Convert CPU model data from X86CPU object to a property dictionary
* that can recreate exactly the same CPU model.
*/
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
QDict *sprops = x86_cpu_static_props();
const QDictEntry *e;
for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
const char *prop = qdict_entry_key(e);
x86_cpu_expand_prop(cpu, props, prop);
}
}
/* Convert CPU model data from X86CPU object to a property dictionary
* that can recreate exactly the same CPU model, including every
* writeable QOM property.
*/
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
ObjectPropertyIterator iter;
ObjectProperty *prop;
object_property_iter_init(&iter, OBJECT(cpu));
while ((prop = object_property_iter_next(&iter))) {
/* skip read-only or write-only properties */
if (!prop->get || !prop->set) {
continue;
}
/* "hotplugged" is the only property that is configurable
* on the command-line but will be set differently on CPUs
* created using "-cpu ... -smp ..." and by CPUs created
* on the fly by x86_cpu_from_model() for querying. Skip it.
*/
if (!strcmp(prop->name, "hotplugged")) {
continue;
}
x86_cpu_expand_prop(cpu, props, prop->name);
}
}
static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
const QDictEntry *prop;
for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
if (!object_property_set_qobject(obj, qdict_entry_key(prop),
qdict_entry_value(prop), errp)) {
break;
}
}
}
/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
X86CPU *xc = NULL;
X86CPUClass *xcc;
Error *err = NULL;
xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
if (xcc == NULL) {
error_setg(&err, "CPU model '%s' not found", model);
goto out;
}
xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
if (props) {
object_apply_props(OBJECT(xc), props, &err);
if (err) {
goto out;
}
}
x86_cpu_expand_features(xc, &err);
if (err) {
goto out;
}
out:
if (err) {
error_propagate(errp, err);
object_unref(OBJECT(xc));
xc = NULL;
}
return xc;
}
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
CpuModelInfo *model,
Error **errp)
{
X86CPU *xc = NULL;
Error *err = NULL;
CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
QDict *props = NULL;
const char *base_name;
xc = x86_cpu_from_model(model->name,
model->has_props ?
qobject_to(QDict, model->props) :
NULL, &err);
if (err) {
goto out;
}
props = qdict_new();
ret->model = g_new0(CpuModelInfo, 1);
ret->model->props = QOBJECT(props);
ret->model->has_props = true;
switch (type) {
case CPU_MODEL_EXPANSION_TYPE_STATIC:
/* Static expansion will be based on "base" only */
base_name = "base";
x86_cpu_to_dict(xc, props);
break;
case CPU_MODEL_EXPANSION_TYPE_FULL:
/* As we don't return every single property, full expansion needs
* to keep the original model name+props, and add extra
* properties on top of that.
*/
base_name = model->name;
x86_cpu_to_dict_full(xc, props);
break;
default:
error_setg(&err, "Unsupported expansion type");
goto out;
}
x86_cpu_to_dict(xc, props);
ret->model->name = g_strdup(base_name);
out:
object_unref(OBJECT(xc));
if (err) {
error_propagate(errp, err);
qapi_free_CpuModelExpansionInfo(ret);
ret = NULL;
}
return ret;
}
void cpu_clear_apic_feature(CPUX86State *env)
{
env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
bool cpu_is_bsp(X86CPU *cpu)
{
return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented */
void x86_cpu_machine_reset_cb(void *opaque)
{
X86CPU *cpu = opaque;
cpu_reset(CPU(cpu));
}
APICCommonClass *apic_get_class(void)
{
const char *apic_type = "apic";
/* TODO: in-kernel irqchip for hvf */
if (kvm_apic_in_kernel()) {
apic_type = "kvm-apic";
} else if (xen_enabled()) {
apic_type = "xen-apic";
} else if (whpx_apic_in_platform()) {
apic_type = "whpx-apic";
}
return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}
void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
APICCommonState *apic;
ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
cpu->apic_state = DEVICE(object_new_with_class(apic_class));
object_property_add_child(OBJECT(cpu), "lapic",
OBJECT(cpu->apic_state));
object_unref(OBJECT(cpu->apic_state));
qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
/* TODO: convert to link<> */
apic = APIC_COMMON(cpu->apic_state);
apic->cpu = cpu;
apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
APICCommonState *apic;
static bool apic_mmio_map_once;
if (cpu->apic_state == NULL) {
return;
}
qdev_realize(DEVICE(cpu->apic_state), NULL, errp);
/* Map APIC MMIO area */
apic = APIC_COMMON(cpu->apic_state);
if (!apic_mmio_map_once) {
memory_region_add_subregion_overlap(get_system_memory(),
apic->apicbase &
MSR_IA32_APICBASE_BASE,
&apic->io_memory,
0x1000);
apic_mmio_map_once = true;
}
}
GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
GuestPanicInformation *panic_info = NULL;
if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
panic_info = g_malloc0(sizeof(GuestPanicInformation));
panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
assert(HV_CRASH_PARAMS >= 5);
panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
}
return panic_info;
}
void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
CPUState *cs = CPU(obj);
GuestPanicInformation *panic_info;
if (!cs->crash_occurred) {
error_setg(errp, "No crash occurred");
return;
}
panic_info = x86_cpu_get_crash_info(cs);
if (panic_info == NULL) {
error_setg(errp, "No crash information");
return;
}
visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
errp);
qapi_free_GuestPanicInformation(panic_info);
}
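The qmp_query_cpu_model_expansion() entry point moved here backs the QMP command query-cpu-model-expansion; a typical invocation (illustrative values) is

{ "execute": "query-cpu-model-expansion",
  "arguments": { "type": "static", "model": { "name": "max" } } }

which, for static expansion, returns a model named "base" plus the property dictionary built by x86_cpu_to_dict() above.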


@@ -1,5 +1,5 @@
/*
* i386 CPUID helper functions
* i386 CPUID, CPU class, definitions, models
*
* Copyright (c) 2003 Fabrice Bellard
*
@@ -20,49 +20,26 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "tcg/tcg-cpu.h"
#include "tcg/helper-tcg.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "sysemu/xen.h"
#include "sysemu/whpx.h"
#include "kvm/kvm_i386.h"
#include "sev_i386.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"
#include "qapi/qapi-commands-machine-target.h"
#include "standard-headers/asm-x86/kvm_para.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#endif
#include "disas/capstone.h"
#include "cpu-internal.h"
/* Helpers for building CPUID[2] descriptors: */
@@ -595,8 +572,8 @@ static CPUCacheInfo legacy_l3_cache = {
#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
uint32_t vendor2, uint32_t vendor3)
void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
uint32_t vendor2, uint32_t vendor3)
{
int i;
for (i = 0; i < 4; i++) {
@@ -677,40 +654,7 @@ static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
#define TCG_14_0_ECX_FEATURES 0
typedef enum FeatureWordType {
CPUID_FEATURE_WORD,
MSR_FEATURE_WORD,
} FeatureWordType;
typedef struct FeatureWordInfo {
FeatureWordType type;
/* feature flags names are taken from "Intel Processor Identification and
* the CPUID Instruction" and AMD's "CPUID Specification".
* In cases of disagreement between feature naming conventions,
* aliases may be added.
*/
const char *feat_names[64];
union {
/* If type==CPUID_FEATURE_WORD */
struct {
uint32_t eax; /* Input EAX for CPUID */
bool needs_ecx; /* CPUID instruction uses ECX as input */
uint32_t ecx; /* Input ECX value for CPUID */
int reg; /* output register (R_* constant) */
} cpuid;
/* If type==MSR_FEATURE_WORD */
struct {
uint32_t index;
} msr;
};
uint64_t tcg_features; /* Feature flags supported by TCG */
uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
uint64_t migratable_flags; /* Feature flags known to be migratable */
/* Features that shouldn't be auto-enabled by "-cpu host" */
uint64_t no_autoenable_flags;
} FeatureWordInfo;
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -1589,25 +1533,6 @@ void host_cpuid(uint32_t function, uint32_t count,
*edx = vec[3];
}
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
uint32_t eax, ebx, ecx, edx;
host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
if (family) {
*family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
}
if (model) {
*model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
}
if (stepping) {
*stepping = eax & 0x0F;
}
}
/* CPU class name definitions: */
/* Return type name for a given CPU model name
@@ -1632,10 +1557,6 @@ static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
typedef struct PropValue {
const char *prop, *value;
} PropValue;
typedef struct X86CPUVersionDefinition {
X86CPUVersion version;
const char *alias;
@@ -4249,32 +4170,6 @@ static X86CPUDefinition builtin_x86_defs[] = {
},
};
/* KVM-specific features that are automatically added/removed
* from all CPU models when KVM is enabled.
*/
static PropValue kvm_default_props[] = {
{ "kvmclock", "on" },
{ "kvm-nopiodelay", "on" },
{ "kvm-asyncpf", "on" },
{ "kvm-steal-time", "on" },
{ "kvm-pv-eoi", "on" },
{ "kvmclock-stable-bit", "on" },
{ "x2apic", "on" },
{ "kvm-msi-ext-dest-id", "off" },
{ "acpi", "off" },
{ "monitor", "off" },
{ "svm", "off" },
{ NULL, NULL },
};
/* TCG-specific defaults that override all CPU models when using TCG
*/
static PropValue tcg_default_props[] = {
{ "vme", "off" },
{ NULL, NULL },
};
/*
* We resolve CPU model aliases using -v1 when using "-machine
* none", but this is just for compatibility while libvirt isn't
@@ -4316,61 +4211,6 @@ static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
return v;
}
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
PropValue *pv;
for (pv = kvm_default_props; pv->prop; pv++) {
if (!strcmp(pv->prop, prop)) {
pv->value = value;
break;
}
}
/* It is valid to call this function only for properties that
* are already present in the kvm_default_props table.
*/
assert(pv->prop);
}
static bool lmce_supported(void)
{
uint64_t mce_cap = 0;
#ifdef CONFIG_KVM
if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
return false;
}
#endif
return !!(mce_cap & MCG_LMCE_P);
}
#define CPUID_MODEL_ID_SZ 48
/**
* cpu_x86_fill_model_id:
* Get CPUID model ID string from host CPU.
*
* @str should have at least CPUID_MODEL_ID_SZ bytes
*
* The function does NOT add a null terminator to the string
* automatically.
*/
static int cpu_x86_fill_model_id(char *str)
{
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
int i;
for (i = 0; i < 3; i++) {
host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
memcpy(str + i * 16 + 0, &eax, 4);
memcpy(str + i * 16 + 4, &ebx, 4);
memcpy(str + i * 16 + 8, &ecx, 4);
memcpy(str + i * 16 + 12, &edx, 4);
}
return 0;
}
static Property max_x86_cpu_properties[] = {
DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
@@ -4393,62 +4233,25 @@ static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
static void max_x86_cpu_initfn(Object *obj)
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
KVMState *s = kvm_state;
/* We can't fill the features array here because we don't know yet if
* "migratable" is true or false.
*/
cpu->max_features = true;
if (accel_uses_host_cpuid()) {
char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
int family, model, stepping;
host_vendor_fms(vendor, &family, &model, &stepping);
cpu_x86_fill_model_id(model_id);
object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort);
object_property_set_int(OBJECT(cpu), "family", family, &error_abort);
object_property_set_int(OBJECT(cpu), "model", model, &error_abort);
object_property_set_int(OBJECT(cpu), "stepping", stepping,
&error_abort);
object_property_set_str(OBJECT(cpu), "model-id", model_id,
&error_abort);
if (kvm_enabled()) {
env->cpuid_min_level =
kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
env->cpuid_min_xlevel =
kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
env->cpuid_min_xlevel2 =
kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
} else {
env->cpuid_min_level =
hvf_get_supported_cpuid(0x0, 0, R_EAX);
env->cpuid_min_xlevel =
hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
env->cpuid_min_xlevel2 =
hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
}
if (lmce_supported()) {
object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort);
}
object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort);
} else {
object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD,
&error_abort);
object_property_set_int(OBJECT(cpu), "family", 6, &error_abort);
object_property_set_int(OBJECT(cpu), "model", 6, &error_abort);
object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort);
object_property_set_str(OBJECT(cpu), "model-id",
"QEMU TCG CPU version " QEMU_HW_VERSION,
&error_abort);
}
object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort);
/*
* these defaults are used for TCG and all other accelerators
* besides KVM and HVF, which overwrite these values
*/
object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD,
&error_abort);
object_property_set_int(OBJECT(cpu), "family", 6, &error_abort);
object_property_set_int(OBJECT(cpu), "model", 6, &error_abort);
object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort);
object_property_set_str(OBJECT(cpu), "model-id",
"QEMU TCG CPU version " QEMU_HW_VERSION,
&error_abort);
}
static const TypeInfo max_x86_cpu_type_info = {
@@ -4458,31 +4261,6 @@ static const TypeInfo max_x86_cpu_type_info = {
.class_init = max_x86_cpu_class_init,
};
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
xcc->host_cpuid_required = true;
xcc->ordering = 8;
#if defined(CONFIG_KVM)
xcc->model_description =
"KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
xcc->model_description =
"HVF processor with all supported host features ";
#endif
}
static const TypeInfo host_x86_cpu_type_info = {
.name = X86_CPU_TYPE_NAME("host"),
.parent = X86_CPU_TYPE_NAME("max"),
.class_init = host_x86_cpu_class_init,
};
#endif
static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
@@ -4930,7 +4708,6 @@ static void x86_cpu_parse_featurestr(const char *typename, char *features,
}
}
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
/* Build a list with the name of all features on a feature word array */
@@ -5201,7 +4978,7 @@ static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
return r;
}
static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
PropValue *pv;
for (pv = props; pv->prop; pv++) {
@@ -5248,8 +5025,6 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
{
X86CPUDefinition *def = model->cpudef;
CPUX86State *env = &cpu->env;
const char *vendor;
char host_vendor[CPUID_VENDOR_SZ + 1];
FeatureWord w;
/*NOTE: any property set by this function should be returned by
@@ -5276,20 +5051,6 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
/* legacy-cache defaults to 'off' if CPU model provides cache info */
cpu->legacy_cache = !def->cache_info;
/* Special cases not set in the X86CPUDefinition structs: */
/* TODO: in-kernel irqchip for hvf */
if (kvm_enabled()) {
if (!kvm_irqchip_in_kernel()) {
x86_cpu_change_kvm_default("x2apic", "off");
} else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) {
x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on");
}
x86_cpu_apply_props(cpu, kvm_default_props);
} else if (tcg_enabled()) {
x86_cpu_apply_props(cpu, tcg_default_props);
}
env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
/* sysenter isn't supported in compatibility mode on AMD,
@@ -5299,15 +5060,12 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
* KVM's sysenter/syscall emulation in compatibility mode and
* when doing cross vendor migration
*/
vendor = def->vendor;
if (accel_uses_host_cpuid()) {
uint32_t ebx = 0, ecx = 0, edx = 0;
host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
vendor = host_vendor;
}
object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort);
/*
* vendor property is set here but then overloaded with the
* host cpu vendor for KVM and HVF.
*/
object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort);
x86_cpu_apply_version_props(cpu, model);
@@ -5319,207 +5077,6 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
memset(&env->user_features, 0, sizeof(env->user_features));
}
#ifndef CONFIG_USER_ONLY
/* Return a QDict containing keys for all properties that can be included
* in static expansion of CPU models. All properties set by x86_cpu_load_model()
* must be included in the dictionary.
*/
static QDict *x86_cpu_static_props(void)
{
FeatureWord w;
int i;
static const char *props[] = {
"min-level",
"min-xlevel",
"family",
"model",
"stepping",
"model-id",
"vendor",
"lmce",
NULL,
};
static QDict *d;
if (d) {
return d;
}
d = qdict_new();
for (i = 0; props[i]; i++) {
qdict_put_null(d, props[i]);
}
for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *fi = &feature_word_info[w];
int bit;
for (bit = 0; bit < 64; bit++) {
if (!fi->feat_names[bit]) {
continue;
}
qdict_put_null(d, fi->feat_names[bit]);
}
}
return d;
}
/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
&error_abort);
qdict_put_obj(props, prop, value);
}
/* Convert CPU model data from X86CPU object to a property dictionary
* that can recreate exactly the same CPU model.
*/
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
QDict *sprops = x86_cpu_static_props();
const QDictEntry *e;
for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
const char *prop = qdict_entry_key(e);
x86_cpu_expand_prop(cpu, props, prop);
}
}
/* Convert CPU model data from X86CPU object to a property dictionary
* that can recreate exactly the same CPU model, including every
* writeable QOM property.
*/
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
ObjectPropertyIterator iter;
ObjectProperty *prop;
object_property_iter_init(&iter, OBJECT(cpu));
while ((prop = object_property_iter_next(&iter))) {
/* skip read-only or write-only properties */
if (!prop->get || !prop->set) {
continue;
}
/* "hotplugged" is the only property that is configurable
* on the command-line but will be set differently on CPUs
* created using "-cpu ... -smp ..." and by CPUs created
* on the fly by x86_cpu_from_model() for querying. Skip it.
*/
if (!strcmp(prop->name, "hotplugged")) {
continue;
}
x86_cpu_expand_prop(cpu, props, prop->name);
}
}
static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
const QDictEntry *prop;
for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
if (!object_property_set_qobject(obj, qdict_entry_key(prop),
qdict_entry_value(prop), errp)) {
break;
}
}
}
/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
X86CPU *xc = NULL;
X86CPUClass *xcc;
Error *err = NULL;
xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
if (xcc == NULL) {
error_setg(&err, "CPU model '%s' not found", model);
goto out;
}
xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
if (props) {
object_apply_props(OBJECT(xc), props, &err);
if (err) {
goto out;
}
}
x86_cpu_expand_features(xc, &err);
if (err) {
goto out;
}
out:
if (err) {
error_propagate(errp, err);
object_unref(OBJECT(xc));
xc = NULL;
}
return xc;
}
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
CpuModelInfo *model,
Error **errp)
{
X86CPU *xc = NULL;
Error *err = NULL;
CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
QDict *props = NULL;
const char *base_name;
xc = x86_cpu_from_model(model->name,
model->has_props ?
qobject_to(QDict, model->props) :
NULL, &err);
if (err) {
goto out;
}
props = qdict_new();
ret->model = g_new0(CpuModelInfo, 1);
ret->model->props = QOBJECT(props);
ret->model->has_props = true;
switch (type) {
case CPU_MODEL_EXPANSION_TYPE_STATIC:
/* Static expansion will be based on "base" only */
base_name = "base";
x86_cpu_to_dict(xc, props);
break;
case CPU_MODEL_EXPANSION_TYPE_FULL:
/* As we don't return every single property, full expansion needs
* to keep the original model name+props, and add extra
* properties on top of that.
*/
base_name = model->name;
x86_cpu_to_dict_full(xc, props);
break;
default:
error_setg(&err, "Unsupported expansion type");
goto out;
}
x86_cpu_to_dict(xc, props);
ret->model->name = g_strdup(base_name);
out:
object_unref(OBJECT(xc));
if (err) {
error_propagate(errp, err);
qapi_free_CpuModelExpansionInfo(ret);
ret = NULL;
}
return ret;
}
#endif /* !CONFIG_USER_ONLY */
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
@@ -5594,15 +5151,6 @@ static void x86_register_cpudef_types(X86CPUDefinition *def)
}
#if !defined(CONFIG_USER_ONLY)
void cpu_clear_apic_feature(CPUX86State *env)
{
env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
#endif /* !CONFIG_USER_ONLY */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
@@ -6251,20 +5799,6 @@ static void x86_cpu_reset(DeviceState *dev)
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
X86CPU *cpu = opaque;
cpu_reset(CPU(cpu));
}
#endif
static void mce_init(X86CPU *cpu)
{
CPUX86State *cenv = &cpu->env;
@@ -6282,109 +5816,6 @@ static void mce_init(X86CPU *cpu)
}
}
#ifndef CONFIG_USER_ONLY
APICCommonClass *apic_get_class(void)
{
const char *apic_type = "apic";
/* TODO: in-kernel irqchip for hvf */
if (kvm_apic_in_kernel()) {
apic_type = "kvm-apic";
} else if (xen_enabled()) {
apic_type = "xen-apic";
} else if (whpx_apic_in_platform()) {
apic_type = "whpx-apic";
}
return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
APICCommonState *apic;
ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
cpu->apic_state = DEVICE(object_new_with_class(apic_class));
object_property_add_child(OBJECT(cpu), "lapic",
OBJECT(cpu->apic_state));
object_unref(OBJECT(cpu->apic_state));
qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
/* TODO: convert to link<> */
apic = APIC_COMMON(cpu->apic_state);
apic->cpu = cpu;
apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
APICCommonState *apic;
static bool apic_mmio_map_once;
if (cpu->apic_state == NULL) {
return;
}
qdev_realize(DEVICE(cpu->apic_state), NULL, errp);
/* Map APIC MMIO area */
apic = APIC_COMMON(cpu->apic_state);
if (!apic_mmio_map_once) {
memory_region_add_subregion_overlap(get_system_memory(),
apic->apicbase &
MSR_IA32_APICBASE_BASE,
&apic->io_memory,
0x1000);
apic_mmio_map_once = true;
}
}
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
X86CPU *cpu = container_of(n, X86CPU, machine_done);
MemoryRegion *smram =
(MemoryRegion *) object_resolve_path("/machine/smram", NULL);
if (smram) {
cpu->smram = g_new(MemoryRegion, 1);
memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
smram, 0, 4 * GiB);
memory_region_set_enabled(cpu->smram, true);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
}
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
uint32_t eax;
uint32_t host_phys_bits;
host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
if (eax >= 0x80000008) {
host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
/* Note: According to AMD doc 25481 rev 2.34 they have a field
* at 23:16 that can specify a maximum physical address bits for
* the guest that can override this value; but I've not seen
* anything with that set.
*/
host_phys_bits = eax & 0xff;
} else {
/* It's an odd 64 bit machine that doesn't have the leaf for
* physical address bits; fall back to 36 that's most older
* Intel.
*/
host_phys_bits = 36;
}
return host_phys_bits;
}
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
if (*min < value) {
@@ -6488,7 +5919,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
/* Expand CPU configuration data, based on configured features
* and host/accelerator capabilities when appropriate.
*/
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
CPUX86State *env = &cpu->env;
FeatureWord w;
@@ -6702,27 +6133,19 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
Error *local_err = NULL;
static bool ht_warned;
if (xcc->host_cpuid_required) {
if (!accel_uses_host_cpuid()) {
g_autofree char *name = x86_cpu_class_get_model_name(xcc);
error_setg(&local_err, "CPU model '%s' requires KVM", name);
goto out;
}
/* Process Hyper-V enlightenments */
x86_cpu_hyperv_realize(cpu);
cpu_exec_realizefn(cs, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
if (cpu->max_features && accel_uses_host_cpuid()) {
if (enable_cpu_pm) {
host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
&cpu->mwait.ecx, &cpu->mwait.edx);
env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
if (kvm_enabled() && kvm_has_waitpkg()) {
env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG;
}
}
if (kvm_enabled() && cpu->ucode_rev == 0) {
cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
MSR_IA32_UCODE_REV);
}
if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
g_autofree char *name = x86_cpu_class_get_model_name(xcc);
error_setg(&local_err, "CPU model '%s' requires KVM or HVF", name);
goto out;
}
if (cpu->ucode_rev == 0) {
@@ -6774,30 +6197,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
* consumer AMD devices but nothing else.
*/
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
if (accel_uses_host_cpuid()) {
uint32_t host_phys_bits = x86_host_phys_bits();
static bool warned;
/* Print a warning if the user set it to a value that's not the
* host value.
*/
if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
!warned) {
warn_report("Host physical bits (%u)"
" does not match phys-bits property (%u)",
host_phys_bits, cpu->phys_bits);
warned = true;
}
if (cpu->host_phys_bits) {
/* The user asked for us to use the host physical bits */
cpu->phys_bits = host_phys_bits;
if (cpu->host_phys_bits_limit &&
cpu->phys_bits > cpu->host_phys_bits_limit) {
cpu->phys_bits = cpu->host_phys_bits_limit;
}
}
}
if (cpu->phys_bits &&
(cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
cpu->phys_bits < 32)) {
@@ -6806,9 +6205,10 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
return;
}
/* 0 means it was not explicitly set by the user (or by machine
* compat_props or by the host code above). In this case, the default
* is the value used by TCG (40).
/*
* 0 means it was not explicitly set by the user (or by machine
* compat_props or by the host code in host-cpu.c).
* In this case, the default is the value used by TCG (40).
*/
if (cpu->phys_bits == 0) {
cpu->phys_bits = TCG_PHYS_ADDR_BITS;
@@ -6857,15 +6257,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
env->cache_info_amd.l3_cache = &legacy_l3_cache;
}
/* Process Hyper-V enlightenments */
x86_cpu_hyperv_realize(cpu);
cpu_exec_realizefn(cs, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
#ifndef CONFIG_USER_ONLY
MachineState *ms = MACHINE(qdev_get_machine());
qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
@@ -6880,33 +6271,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
mce_init(cpu);
#ifndef CONFIG_USER_ONLY
if (tcg_enabled()) {
cpu->cpu_as_mem = g_new(MemoryRegion, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
/* Outer container... */
memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
memory_region_set_enabled(cpu->cpu_as_root, true);
/* ... with two regions inside: normal system memory with low
* priority, and...
*/
memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
get_system_memory(), 0, ~0ull);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
memory_region_set_enabled(cpu->cpu_as_mem, true);
cs->num_ases = 2;
cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
/* ... SMRAM with higher priority, linked from /machine/smram. */
cpu->machine_done.notify = x86_cpu_machine_done;
qemu_add_machine_init_done_notifier(&cpu->machine_done);
}
#endif
qemu_init_vcpu(cs);
/*
@@ -6929,10 +6293,12 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
ht_warned = true;
}
#ifndef CONFIG_USER_ONLY
x86_cpu_apic_realize(cpu, &local_err);
if (local_err != NULL) {
goto out;
}
#endif /* !CONFIG_USER_ONLY */
cpu_reset(cs);
xcc->parent_realize(dev, &local_err);
@@ -7056,52 +6422,6 @@ static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc,
x86_cpu_register_bit_prop(xcc, name, w, bitnr);
}
#if !defined(CONFIG_USER_ONLY)
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
GuestPanicInformation *panic_info = NULL;
if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
panic_info = g_malloc0(sizeof(GuestPanicInformation));
panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
assert(HV_CRASH_PARAMS >= 5);
panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
}
return panic_info;
}
static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
CPUState *cs = CPU(obj);
GuestPanicInformation *panic_info;
if (!cs->crash_occurred) {
error_setg(errp, "No crash occurred");
return;
}
panic_info = x86_cpu_get_crash_info(cs);
if (panic_info == NULL) {
error_setg(errp, "No crash information");
return;
}
visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
errp);
qapi_free_GuestPanicInformation(panic_info);
}
#endif /* !CONFIG_USER_ONLY */
static void x86_cpu_initfn(Object *obj)
{
X86CPU *cpu = X86_CPU(obj);
@@ -7153,6 +6473,9 @@ static void x86_cpu_initfn(Object *obj)
if (xcc->model) {
x86_cpu_load_model(cpu, xcc->model);
}
/* if required, do accelerator-specific cpu initializations */
accel_cpu_instance_init(CPU(obj));
}
static int64_t x86_cpu_get_arch_id(CPUState *cs)
@@ -7410,11 +6733,6 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->class_by_name = x86_cpu_class_by_name;
cc->parse_features = x86_cpu_parse_featurestr;
cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
tcg_cpu_common_class_init(cc);
#endif /* CONFIG_TCG */
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->gdb_read_register = x86_cpu_gdb_read_register;
@@ -7525,9 +6843,6 @@ static void x86_cpu_register_types(void)
}
type_register_static(&max_x86_cpu_type_info);
type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
type_register_static(&host_x86_cpu_type_info);
#endif
}
type_init(x86_cpu_register_types)


@@ -303,6 +303,19 @@ typedef enum X86Seg {
#define PG_ERROR_I_D_MASK 0x10
#define PG_ERROR_PK_MASK 0x20
#define PG_MODE_PAE (1 << 0)
#define PG_MODE_LMA (1 << 1)
#define PG_MODE_NXE (1 << 2)
#define PG_MODE_PSE (1 << 3)
#define PG_MODE_LA57 (1 << 4)
#define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15)
/* Bits of CR4 that do not affect the NPT page format. */
#define PG_MODE_WP (1 << 16)
#define PG_MODE_PKE (1 << 17)
#define PG_MODE_PKS (1 << 18)
#define PG_MODE_SMEP (1 << 19)
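/*
 * Hedged sketch, not part of this hunk: get_pg_mode(), declared further
 * down in this header and implemented in excp_helper.c by this series,
 * plausibly gathers the bits above along these lines. The CR0, CR4, EFER
 * and hflags masks are existing cpu.h definitions; the body itself is
 * illustrative, not copied from the merge.
 */
int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;

    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->efer & MSR_EFER_NXE) {
        pg_mode |= PG_MODE_NXE;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
    }
    return pg_mode;
}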
#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */
@@ -1817,7 +1830,10 @@ int cpu_x86_support_mca_broadcast(CPUX86State *env);
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void x86_register_ferr_irq(qemu_irq irq);
void fpu_check_raise_ferr_irq(CPUX86State *s);
void cpu_set_ignne(void);
void cpu_clear_ignne(void);
/* mpx_helper.c */
void cpu_sync_bndcs_hflags(CPUX86State *env);
@@ -1926,13 +1942,20 @@ int cpu_x86_signal_handler(int host_signum, void *pinfo,
void *puc);
/* cpu.c */
void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
uint32_t vendor2, uint32_t vendor3);
typedef struct PropValue {
const char *prop, *value;
} PropValue;
void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
/* cpu.c other functions (cpuid) */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
void cpu_clear_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
@@ -1948,6 +1971,11 @@ static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
}
/*
* load efer and update the corresponding hflags. XXX: do consistency
* checks with cpuid bits?
*/
void cpu_load_efer(CPUX86State *env, uint64_t val);
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
@@ -2044,21 +2072,6 @@ static inline uint32_t cpu_compute_eflags(CPUX86State *env)
return eflags;
}
/* load efer and update the corresponding hflags. XXX: do consistency
checks with cpuid bits? */
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
env->efer = val;
env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
if (env->efer & MSR_EFER_LMA) {
env->hflags |= HF_LMA_MASK;
}
if (env->efer & MSR_EFER_SVME) {
env->hflags |= HF_SVME_MASK;
}
}
static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
@@ -2105,6 +2118,9 @@ static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
}
/* excp_helper.c */
int get_pg_mode(CPUX86State *env);
/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
@@ -2137,17 +2153,6 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
TPRAccess access);
/* Change the value of a KVM-specific default
*
* If value is NULL, no default will be set and the original
* value from the CPU model table will be kept.
*
* It is valid to call this function only for properties that
* are already present in the kvm_default_props table.
*/
void x86_cpu_change_kvm_default(const char *prop, const char *value);
/* Special values for X86CPUVersion: */
/* Resolve to latest CPU version */


@@ -78,6 +78,23 @@ static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
#define GDB_FORCE_64 0
#endif
static int gdb_read_reg_cs64(uint32_t hflags, GByteArray *buf, target_ulong val)
{
if ((hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(buf, val);
}
return gdb_get_reg32(buf, val);
}
static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
{
if (hflags & HF_CS64_MASK) {
*val = ldq_p(buf);
return 8;
}
*val = ldl_p(buf);
return 4;
}
int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
@@ -142,25 +159,14 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
case IDX_SEG_REGS + 5:
return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);
case IDX_SEG_REGS + 6:
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, env->segs[R_FS].base);
}
return gdb_get_reg32(mem_buf, env->segs[R_FS].base);
return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_FS].base);
case IDX_SEG_REGS + 7:
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, env->segs[R_GS].base);
}
return gdb_get_reg32(mem_buf, env->segs[R_GS].base);
return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_GS].base);
case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, env->kernelgsbase);
}
return gdb_get_reg32(mem_buf, env->kernelgsbase);
return gdb_read_reg_cs64(env->hflags, mem_buf, env->kernelgsbase);
#else
return gdb_get_reg32(mem_buf, 0);
#endif
@@ -188,45 +194,23 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return gdb_get_reg32(mem_buf, env->mxcsr);
case IDX_CTL_CR0_REG:
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, env->cr[0]);
}
return gdb_get_reg32(mem_buf, env->cr[0]);
return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[0]);
case IDX_CTL_CR2_REG:
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, env->cr[2]);
}
return gdb_get_reg32(mem_buf, env->cr[2]);
return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[2]);
case IDX_CTL_CR3_REG:
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, env->cr[3]);
}
return gdb_get_reg32(mem_buf, env->cr[3]);
return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[3]);
case IDX_CTL_CR4_REG:
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, env->cr[4]);
}
return gdb_get_reg32(mem_buf, env->cr[4]);
return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[4]);
case IDX_CTL_CR8_REG:
#ifdef CONFIG_SOFTMMU
#ifndef CONFIG_USER_ONLY
tpr = cpu_get_apic_tpr(cpu->apic_state);
#else
tpr = 0;
#endif
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, tpr);
}
return gdb_get_reg32(mem_buf, tpr);
return gdb_read_reg_cs64(env->hflags, mem_buf, tpr);
case IDX_CTL_EFER_REG:
if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
return gdb_get_reg64(mem_buf, env->efer);
}
return gdb_get_reg32(mem_buf, env->efer);
return gdb_read_reg_cs64(env->hflags, mem_buf, env->efer);
}
}
return 0;
@@ -266,7 +250,8 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
uint32_t tmp;
target_ulong tmp;
int len;
/* N.B. GDB can't deal with changes in registers or sizes in the middle
of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
@@ -329,30 +314,13 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
case IDX_SEG_REGS + 5:
return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);
case IDX_SEG_REGS + 6:
if (env->hflags & HF_CS64_MASK) {
env->segs[R_FS].base = ldq_p(mem_buf);
return 8;
}
env->segs[R_FS].base = ldl_p(mem_buf);
return 4;
return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_FS].base);
case IDX_SEG_REGS + 7:
if (env->hflags & HF_CS64_MASK) {
env->segs[R_GS].base = ldq_p(mem_buf);
return 8;
}
env->segs[R_GS].base = ldl_p(mem_buf);
return 4;
return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_GS].base);
case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK) {
env->kernelgsbase = ldq_p(mem_buf);
return 8;
}
env->kernelgsbase = ldl_p(mem_buf);
return gdb_write_reg_cs64(env->hflags, mem_buf, &env->kernelgsbase);
#endif
return 4;
@@ -382,57 +350,46 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 4;
case IDX_CTL_CR0_REG:
if (env->hflags & HF_CS64_MASK) {
cpu_x86_update_cr0(env, ldq_p(mem_buf));
return 8;
}
cpu_x86_update_cr0(env, ldl_p(mem_buf));
return 4;
len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
cpu_x86_update_cr0(env, tmp);
#endif
return len;
case IDX_CTL_CR2_REG:
if (env->hflags & HF_CS64_MASK) {
env->cr[2] = ldq_p(mem_buf);
return 8;
}
env->cr[2] = ldl_p(mem_buf);
return 4;
len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
env->cr[2] = tmp;
#endif
return len;
case IDX_CTL_CR3_REG:
if (env->hflags & HF_CS64_MASK) {
cpu_x86_update_cr3(env, ldq_p(mem_buf));
return 8;
}
cpu_x86_update_cr3(env, ldl_p(mem_buf));
return 4;
len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
cpu_x86_update_cr3(env, tmp);
#endif
return len;
case IDX_CTL_CR4_REG:
if (env->hflags & HF_CS64_MASK) {
cpu_x86_update_cr4(env, ldq_p(mem_buf));
return 8;
}
cpu_x86_update_cr4(env, ldl_p(mem_buf));
return 4;
len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
cpu_x86_update_cr4(env, tmp);
#endif
return len;
case IDX_CTL_CR8_REG:
if (env->hflags & HF_CS64_MASK) {
#ifdef CONFIG_SOFTMMU
cpu_set_apic_tpr(cpu->apic_state, ldq_p(mem_buf));
len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
cpu_set_apic_tpr(cpu->apic_state, tmp);
#endif
return 8;
}
#ifdef CONFIG_SOFTMMU
cpu_set_apic_tpr(cpu->apic_state, ldl_p(mem_buf));
#endif
return 4;
return len;
case IDX_CTL_EFER_REG:
if (env->hflags & HF_CS64_MASK) {
cpu_load_efer(env, ldq_p(mem_buf));
return 8;
}
cpu_load_efer(env, ldl_p(mem_buf));
return 4;
len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
cpu_load_efer(env, tmp);
#endif
return len;
}
}
/* Unrecognised register. */

target/i386/helper.c

@@ -574,6 +574,19 @@ void do_cpu_sipi(X86CPU *cpu)
#endif
#ifndef CONFIG_USER_ONLY
void cpu_load_efer(CPUX86State *env, uint64_t val)
{
env->efer = val;
env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
if (env->efer & MSR_EFER_LMA) {
env->hflags |= HF_LMA_MASK;
}
if (env->efer & MSR_EFER_SVME) {
env->hflags |= HF_SVME_MASK;
}
}
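
cpu_load_efer() is the one place where the cached HF_LMA/HF_SVME hflags are rederived from EFER, so EFER writers must go through it. A self-contained sketch of that derivation, using the architectural EFER bit positions (LMA is bit 10, SVME bit 12) but illustrative hflag values rather than QEMU's actual constants:

#include <assert.h>
#include <stdint.h>

#define EFER_LMA   (1u << 10)  /* architectural EFER.LMA */
#define EFER_SVME  (1u << 12)  /* architectural EFER.SVME */
#define HF_LMA     (1u << 0)   /* illustrative, not QEMU's HF_LMA_MASK */
#define HF_SVME    (1u << 1)   /* illustrative, not QEMU's HF_SVME_MASK */

static uint32_t hflags_from_efer(uint64_t efer)
{
    uint32_t hflags = 0;        /* start from a clean LMA/SVME state */
    if (efer & EFER_LMA) {
        hflags |= HF_LMA;
    }
    if (efer & EFER_SVME) {
        hflags |= HF_SVME;
    }
    return hflags;
}

int main(void)
{
    assert(hflags_from_efer(EFER_LMA) == HF_LMA);
    assert(hflags_from_efer(EFER_LMA | EFER_SVME) == (HF_LMA | HF_SVME));
    return 0;
}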
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);

target/i386/helper.h

@@ -46,7 +46,11 @@ DEF_HELPER_2(read_crN, tl, env, int)
DEF_HELPER_3(write_crN, void, env, int, tl)
DEF_HELPER_2(lmsw, void, env, tl)
DEF_HELPER_1(clts, void, env)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl)
#endif /* !CONFIG_USER_ONLY */
DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int)
DEF_HELPER_2(invlpg, void, env, tl)
@@ -70,7 +74,11 @@ DEF_HELPER_1(clac, void, env)
DEF_HELPER_1(stac, void, env)
DEF_HELPER_3(boundw, void, env, tl, int)
DEF_HELPER_3(boundl, void, env, tl, int)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(rsm, void, env)
#endif /* !CONFIG_USER_ONLY */
DEF_HELPER_2(into, void, env, int)
DEF_HELPER_2(cmpxchg8b_unlocked, void, env, tl)
DEF_HELPER_2(cmpxchg8b, void, env, tl)
@@ -96,7 +104,10 @@ DEF_HELPER_3(outw, void, env, i32, i32)
DEF_HELPER_2(inw, tl, env, i32)
DEF_HELPER_3(outl, void, env, i32, i32)
DEF_HELPER_2(inl, tl, env, i32)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl)
#endif /* !CONFIG_USER_ONLY */
DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64)
DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32)

target/i386/host-cpu.c (new file)

@@ -0,0 +1,204 @@
/*
* x86 host CPU functions, and "host" cpu type initialization
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "host-cpu.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t host_cpu_phys_bits(void)
{
uint32_t eax;
uint32_t host_phys_bits;
host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
if (eax >= 0x80000008) {
host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
/*
* Note: according to AMD doc 25481 rev 2.34, a field at bits 23:16
* can specify the maximum physical address bits for the guest and
* override this value; but I've not seen anything with that set.
*/
host_phys_bits = eax & 0xff;
} else {
/*
* It's an odd 64-bit machine that doesn't have the leaf for
* physical address bits; fall back to 36, which matches most
* older Intel parts.
*/
host_phys_bits = 36;
}
return host_phys_bits;
}
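
For reference: leaf 0x80000008 reports MAXPHYADDR in EAX[7:0], so a host where that leaf reads EAX=0x00003027 has 0x27 = 39 physical address bits (EAX[15:8], here 0x30 = 48, is the linear address width).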
static void host_cpu_enable_cpu_pm(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
&cpu->mwait.ecx, &cpu->mwait.edx);
env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
}
static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu)
{
uint32_t host_phys_bits = host_cpu_phys_bits();
uint32_t phys_bits = cpu->phys_bits;
static bool warned;
/*
* Print a warning if the user set it to a value that's not the
* host value.
*/
if (phys_bits != host_phys_bits && phys_bits != 0 &&
!warned) {
warn_report("Host physical bits (%u)"
" does not match phys-bits property (%u)",
host_phys_bits, phys_bits);
warned = true;
}
if (cpu->host_phys_bits) {
/* The user asked for us to use the host physical bits */
phys_bits = host_phys_bits;
if (cpu->host_phys_bits_limit &&
phys_bits > cpu->host_phys_bits_limit) {
phys_bits = cpu->host_phys_bits_limit;
}
}
return phys_bits;
}
bool host_cpu_realizefn(CPUState *cs, Error **errp)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
if (cpu->max_features && enable_cpu_pm) {
host_cpu_enable_cpu_pm(cpu);
}
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
uint32_t phys_bits = host_cpu_adjust_phys_bits(cpu);
if (phys_bits &&
(phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
phys_bits < 32)) {
error_setg(errp, "phys-bits should be between 32 and %u "
" (but is %u)",
TARGET_PHYS_ADDR_SPACE_BITS, phys_bits);
return false;
}
cpu->phys_bits = phys_bits;
}
return true;
}
#define CPUID_MODEL_ID_SZ 48
/**
* host_cpu_fill_model_id:
* Get CPUID model ID string from host CPU.
*
* @str should have at least CPUID_MODEL_ID_SZ bytes
*
* The function does NOT add a null terminator to the string
* automatically.
*/
static int host_cpu_fill_model_id(char *str)
{
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
int i;
for (i = 0; i < 3; i++) {
host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
memcpy(str + i * 16 + 0, &eax, 4);
memcpy(str + i * 16 + 4, &ebx, 4);
memcpy(str + i * 16 + 8, &ecx, 4);
memcpy(str + i * 16 + 12, &edx, 4);
}
return 0;
}
void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
uint32_t eax, ebx, ecx, edx;
host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
if (family) {
*family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
}
if (model) {
*model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
}
if (stepping) {
*stepping = eax & 0x0F;
}
}
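
A worked example of the family/model/stepping decoding above, using a plausible leaf-1 EAX value for a Skylake part (the sample value is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t eax = 0x000506E3;
    /* base family 6 + extended family 0 */
    int family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    /* base model 0xE | (extended model 0x5 << 4) = 0x5E */
    int model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    int stepping = eax & 0x0F;
    printf("family=%d model=%d stepping=%d\n", family, model, stepping);
    /* prints: family=6 model=94 stepping=3 */
    return 0;
}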
void host_cpu_instance_init(X86CPU *cpu)
{
uint32_t ebx = 0, ecx = 0, edx = 0;
char vendor[CPUID_VENDOR_SZ + 1];
host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort);
}
void host_cpu_max_instance_init(X86CPU *cpu)
{
char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
int family, model, stepping;
/* Use max host physical address bits if -cpu max option is applied */
object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort);
host_cpu_vendor_fms(vendor, &family, &model, &stepping);
host_cpu_fill_model_id(model_id);
object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort);
object_property_set_int(OBJECT(cpu), "family", family, &error_abort);
object_property_set_int(OBJECT(cpu), "model", model, &error_abort);
object_property_set_int(OBJECT(cpu), "stepping", stepping,
&error_abort);
object_property_set_str(OBJECT(cpu), "model-id", model_id,
&error_abort);
}
static void host_cpu_class_init(ObjectClass *oc, void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
xcc->host_cpuid_required = true;
xcc->ordering = 8;
xcc->model_description =
g_strdup_printf("processor with all supported host features ");
}
static const TypeInfo host_cpu_type_info = {
.name = X86_CPU_TYPE_NAME("host"),
.parent = X86_CPU_TYPE_NAME("max"),
.class_init = host_cpu_class_init,
};
static void host_cpu_type_init(void)
{
type_register_static(&host_cpu_type_info);
}
type_init(host_cpu_type_init);

target/i386/host-cpu.h (new file)

@@ -0,0 +1,19 @@
/*
* x86 host CPU type initialization and host CPU functions
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef HOST_CPU_H
#define HOST_CPU_H
void host_cpu_instance_init(X86CPU *cpu);
void host_cpu_max_instance_init(X86CPU *cpu);
bool host_cpu_realizefn(CPUState *cs, Error **errp);
void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping);
#endif /* HOST_CPU_H */

target/i386/hvf/hvf-cpu.c (new file)

@@ -0,0 +1,68 @@
/*
* x86 HVF CPU type initialization
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "host-cpu.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "sysemu/hvf.h"
#include "hw/core/accel-cpu.h"
static void hvf_cpu_max_instance_init(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
host_cpu_max_instance_init(cpu);
env->cpuid_min_level =
hvf_get_supported_cpuid(0x0, 0, R_EAX);
env->cpuid_min_xlevel =
hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
env->cpuid_min_xlevel2 =
hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
}
static void hvf_cpu_instance_init(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
host_cpu_instance_init(cpu);
/* Special cases not set in the X86CPUDefinition structs: */
/* TODO: in-kernel irqchip for hvf */
if (cpu->max_features) {
hvf_cpu_max_instance_init(cpu);
}
}
static void hvf_cpu_accel_class_init(ObjectClass *oc, void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
acc->cpu_realizefn = host_cpu_realizefn;
acc->cpu_instance_init = hvf_cpu_instance_init;
}
static const TypeInfo hvf_cpu_accel_type_info = {
.name = ACCEL_CPU_NAME("hvf"),
.parent = TYPE_ACCEL_CPU,
.class_init = hvf_cpu_accel_class_init,
.abstract = true,
};
static void hvf_cpu_accel_register_types(void)
{
type_register_static(&hvf_cpu_accel_type_info);
}
type_init(hvf_cpu_accel_register_types);

target/i386/hvf/meson.build

@@ -10,4 +10,5 @@ i386_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
'x86_mmu.c',
'x86_task.c',
'x86hvf.c',
'hvf-cpu.c',
))

target/i386/kvm/kvm-cpu.c (new file)

@@ -0,0 +1,151 @@
/*
* x86 KVM CPU type initialization
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "host-cpu.h"
#include "kvm-cpu.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "kvm_i386.h"
#include "hw/core/accel-cpu.h"
static bool kvm_cpu_realizefn(CPUState *cs, Error **errp)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
/*
* The realize order is important, since x86_cpu_realize() checks if
* nothing else has been set by the user (or by accelerators) in
* cpu->ucode_rev and cpu->phys_bits.
*
* realize order:
* kvm_cpu -> host_cpu -> x86_cpu
*/
if (cpu->max_features) {
if (enable_cpu_pm && kvm_has_waitpkg()) {
env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG;
}
if (cpu->ucode_rev == 0) {
cpu->ucode_rev =
kvm_arch_get_supported_msr_feature(kvm_state,
MSR_IA32_UCODE_REV);
}
}
return host_cpu_realizefn(cs, errp);
}
/*
* KVM-specific features that are automatically added/removed
* from all CPU models when KVM is enabled.
*/
static PropValue kvm_default_props[] = {
{ "kvmclock", "on" },
{ "kvm-nopiodelay", "on" },
{ "kvm-asyncpf", "on" },
{ "kvm-steal-time", "on" },
{ "kvm-pv-eoi", "on" },
{ "kvmclock-stable-bit", "on" },
{ "x2apic", "on" },
{ "kvm-msi-ext-dest-id", "off" },
{ "acpi", "off" },
{ "monitor", "off" },
{ "svm", "off" },
{ NULL, NULL },
};
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
PropValue *pv;
for (pv = kvm_default_props; pv->prop; pv++) {
if (!strcmp(pv->prop, prop)) {
pv->value = value;
break;
}
}
/*
* It is valid to call this function only for properties that
* are already present in the kvm_default_props table.
*/
assert(pv->prop);
}
static bool lmce_supported(void)
{
uint64_t mce_cap = 0;
if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
return false;
}
return !!(mce_cap & MCG_LMCE_P);
}
static void kvm_cpu_max_instance_init(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
KVMState *s = kvm_state;
host_cpu_max_instance_init(cpu);
if (lmce_supported()) {
object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort);
}
env->cpuid_min_level =
kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
env->cpuid_min_xlevel =
kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
env->cpuid_min_xlevel2 =
kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
}
static void kvm_cpu_instance_init(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
host_cpu_instance_init(cpu);
if (!kvm_irqchip_in_kernel()) {
x86_cpu_change_kvm_default("x2apic", "off");
} else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) {
x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on");
}
/* Special cases not set in the X86CPUDefinition structs: */
x86_cpu_apply_props(cpu, kvm_default_props);
if (cpu->max_features) {
kvm_cpu_max_instance_init(cpu);
}
}
static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
acc->cpu_realizefn = kvm_cpu_realizefn;
acc->cpu_instance_init = kvm_cpu_instance_init;
}
static const TypeInfo kvm_cpu_accel_type_info = {
.name = ACCEL_CPU_NAME("kvm"),
.parent = TYPE_ACCEL_CPU,
.class_init = kvm_cpu_accel_class_init,
.abstract = true,
};
static void kvm_cpu_accel_register_types(void)
{
type_register_static(&kvm_cpu_accel_type_info);
}
type_init(kvm_cpu_accel_register_types);

target/i386/kvm/kvm-cpu.h (new file)

@@ -0,0 +1,41 @@
/*
* i386 KVM CPU type and functions
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef KVM_CPU_H
#define KVM_CPU_H
#ifdef CONFIG_KVM
/*
* Change the value of a KVM-specific default
*
* If value is NULL, no default will be set and the original
* value from the CPU model table will be kept.
*
* It is valid to call this function only for properties that
* are already present in the kvm_default_props table.
*/
void x86_cpu_change_kvm_default(const char *prop, const char *value);
#else /* !CONFIG_KVM */
#define x86_cpu_change_kvm_default(a, b)
#endif /* CONFIG_KVM */
#endif /* KVM_CPU_H */

target/i386/kvm/kvm.c

@@ -22,6 +22,7 @@
#include "standard-headers/asm-x86/kvm_para.h"
#include "cpu.h"
#include "host-cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
@@ -288,7 +289,7 @@ static bool host_tsx_broken(void)
int family, model, stepping;
char vendor[CPUID_VENDOR_SZ + 1];
host_vendor_fms(vendor, &family, &model, &stepping);
host_cpu_vendor_fms(vendor, &family, &model, &stepping);
/* Check if we are running on a Haswell host known to have broken TSX */
return !strcmp(vendor, CPUID_VENDOR_INTEL) &&

target/i386/kvm/meson.build

@@ -1,3 +1,8 @@
i386_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
i386_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'))
i386_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files(
'kvm.c',
'kvm-cpu.c',
))
i386_softmmu_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c'))

target/i386/meson.build

@@ -6,7 +6,11 @@ i386_ss.add(files(
'xsave_helper.c',
'cpu-dump.c',
))
i386_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'), if_false: files('sev-stub.c'))
i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c', 'sev.c'), if_false: files('sev-stub.c'))
# x86 cpu type
i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c'))
i386_ss.add(when: 'CONFIG_HVF', if_true: files('host-cpu.c'))
i386_softmmu_ss = ss.source_set()
i386_softmmu_ss.add(files(
@@ -14,7 +18,9 @@ i386_softmmu_ss.add(files(
'arch_memory_mapping.c',
'machine.c',
'monitor.c',
'cpu-sysemu.c',
))
i386_user_ss = ss.source_set()
subdir('kvm')
subdir('hax')
@@ -25,3 +31,4 @@ subdir('tcg')
target_arch += {'i386': i386_ss}
target_softmmu_arch += {'i386': i386_softmmu_ss}
target_user_arch += {'i386': i386_user_ss}

target/i386/svm.h

@@ -132,16 +132,6 @@
#define SVM_NPT_ENABLED (1 << 0)
#define SVM_NPT_PAE (1 << 0)
#define SVM_NPT_LMA (1 << 1)
#define SVM_NPT_NXE (1 << 2)
#define SVM_NPT_PSE (1 << 3)
#define SVM_NPTEXIT_P (1ULL << 0)
#define SVM_NPTEXIT_RW (1ULL << 1)
#define SVM_NPTEXIT_US (1ULL << 2)
#define SVM_NPTEXIT_RSVD (1ULL << 3)
#define SVM_NPTEXIT_ID (1ULL << 4)
#define SVM_NPTEXIT_GPA (1ULL << 32)
#define SVM_NPTEXIT_GPT (1ULL << 33)

target/i386/tcg/bpt_helper.c

@@ -19,223 +19,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"
#ifndef CONFIG_USER_ONLY
static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
{
return (dr7 >> (index * 2)) & 1;
}
static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
{
return (dr7 >> (index * 2)) & 2;
}
static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
{
return hw_global_breakpoint_enabled(dr7, index) ||
hw_local_breakpoint_enabled(dr7, index);
}
static inline int hw_breakpoint_type(unsigned long dr7, int index)
{
return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
}
static inline int hw_breakpoint_len(unsigned long dr7, int index)
{
int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
return (len == 2) ? 8 : len + 1;
}
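
The LEN decoding above matches the architectural DR7 encoding, where each 2-bit LEN field selects a watchpoint width of 1, 2, 8 or 4 bytes (8-byte watchpoints reuse encoding 2). A one-line check:

#include <stdio.h>

int main(void)
{
    for (int len = 0; len < 4; len++) {
        printf("LEN=%d -> %d byte(s)\n", len, len == 2 ? 8 : len + 1);
    }
    return 0;   /* LEN=0 -> 1, LEN=1 -> 2, LEN=2 -> 8, LEN=3 -> 4 */
}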
static int hw_breakpoint_insert(CPUX86State *env, int index)
{
CPUState *cs = env_cpu(env);
target_ulong dr7 = env->dr[7];
target_ulong drN = env->dr[index];
int err = 0;
switch (hw_breakpoint_type(dr7, index)) {
case DR7_TYPE_BP_INST:
if (hw_breakpoint_enabled(dr7, index)) {
err = cpu_breakpoint_insert(cs, drN, BP_CPU,
&env->cpu_breakpoint[index]);
}
break;
case DR7_TYPE_IO_RW:
/* Notice when we should enable calls to bpt_io. */
return hw_breakpoint_enabled(env->dr[7], index)
? HF_IOBPT_MASK : 0;
case DR7_TYPE_DATA_WR:
if (hw_breakpoint_enabled(dr7, index)) {
err = cpu_watchpoint_insert(cs, drN,
hw_breakpoint_len(dr7, index),
BP_CPU | BP_MEM_WRITE,
&env->cpu_watchpoint[index]);
}
break;
case DR7_TYPE_DATA_RW:
if (hw_breakpoint_enabled(dr7, index)) {
err = cpu_watchpoint_insert(cs, drN,
hw_breakpoint_len(dr7, index),
BP_CPU | BP_MEM_ACCESS,
&env->cpu_watchpoint[index]);
}
break;
}
if (err) {
env->cpu_breakpoint[index] = NULL;
}
return 0;
}
static void hw_breakpoint_remove(CPUX86State *env, int index)
{
CPUState *cs = env_cpu(env);
switch (hw_breakpoint_type(env->dr[7], index)) {
case DR7_TYPE_BP_INST:
if (env->cpu_breakpoint[index]) {
cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
env->cpu_breakpoint[index] = NULL;
}
break;
case DR7_TYPE_DATA_WR:
case DR7_TYPE_DATA_RW:
if (env->cpu_breakpoint[index]) {
cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
env->cpu_breakpoint[index] = NULL;
}
break;
case DR7_TYPE_IO_RW:
/* HF_IOBPT_MASK cleared elsewhere. */
break;
}
}
void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
{
target_ulong old_dr7 = env->dr[7];
int iobpt = 0;
int i;
new_dr7 |= DR7_FIXED_1;
/* If nothing is changing except the global/local enable bits,
then we can make the change more efficient. */
if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
/* Fold the global and local enable bits together into the
global fields, then xor to show which registers have
changed collective enable state. */
int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
for (i = 0; i < DR7_MAX_BP; i++) {
if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
hw_breakpoint_remove(env, i);
}
}
env->dr[7] = new_dr7;
for (i = 0; i < DR7_MAX_BP; i++) {
if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
iobpt |= hw_breakpoint_insert(env, i);
} else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
&& hw_breakpoint_enabled(new_dr7, i)) {
iobpt |= HF_IOBPT_MASK;
}
}
} else {
for (i = 0; i < DR7_MAX_BP; i++) {
hw_breakpoint_remove(env, i);
}
env->dr[7] = new_dr7;
for (i = 0; i < DR7_MAX_BP; i++) {
iobpt |= hw_breakpoint_insert(env, i);
}
}
env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
}
static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
target_ulong dr6;
int reg;
bool hit_enabled = false;
dr6 = env->dr[6] & ~0xf;
for (reg = 0; reg < DR7_MAX_BP; reg++) {
bool bp_match = false;
bool wp_match = false;
switch (hw_breakpoint_type(env->dr[7], reg)) {
case DR7_TYPE_BP_INST:
if (env->dr[reg] == env->eip) {
bp_match = true;
}
break;
case DR7_TYPE_DATA_WR:
case DR7_TYPE_DATA_RW:
if (env->cpu_watchpoint[reg] &&
env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
wp_match = true;
}
break;
case DR7_TYPE_IO_RW:
break;
}
if (bp_match || wp_match) {
dr6 |= 1 << reg;
if (hw_breakpoint_enabled(env->dr[7], reg)) {
hit_enabled = true;
}
}
}
if (hit_enabled || force_dr6_update) {
env->dr[6] = dr6;
}
return hit_enabled;
}
void breakpoint_handler(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
CPUBreakpoint *bp;
if (cs->watchpoint_hit) {
if (cs->watchpoint_hit->flags & BP_CPU) {
cs->watchpoint_hit = NULL;
if (check_hw_breakpoints(env, false)) {
raise_exception(env, EXCP01_DB);
} else {
cpu_loop_exit_noexc(cs);
}
}
} else {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, true);
raise_exception(env, EXCP01_DB);
}
break;
}
}
}
}
#endif
void helper_single_step(CPUX86State *env)
{
#ifndef CONFIG_USER_ONLY
@@ -252,41 +38,6 @@ void helper_rechecking_single_step(CPUX86State *env)
}
}
void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
{
#ifndef CONFIG_USER_ONLY
switch (reg) {
case 0: case 1: case 2: case 3:
if (hw_breakpoint_enabled(env->dr[7], reg)
&& hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
hw_breakpoint_remove(env, reg);
env->dr[reg] = t0;
hw_breakpoint_insert(env, reg);
} else {
env->dr[reg] = t0;
}
return;
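/* DR4/DR5 are legacy aliases of DR6/DR7 while CR4.DE is clear; with
   CR4.DE set, accessing them is undefined, so the breaks below fall
   through to the raise_exception_err_ra() at the end. */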
case 4:
if (env->cr[4] & CR4_DE_MASK) {
break;
}
/* fallthru */
case 6:
env->dr[6] = t0 | DR6_FIXED_1;
return;
case 5:
if (env->cr[4] & CR4_DE_MASK) {
break;
}
/* fallthru */
case 7:
cpu_x86_update_dr7(env, t0);
return;
}
raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
#endif
}
target_ulong helper_get_dr(CPUX86State *env, int reg)
{
switch (reg) {
@@ -307,30 +58,3 @@ target_ulong helper_get_dr(CPUX86State *env, int reg)
}
raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
}
/* Check if Port I/O is trapped by a breakpoint. */
void helper_bpt_io(CPUX86State *env, uint32_t port,
uint32_t size, target_ulong next_eip)
{
#ifndef CONFIG_USER_ONLY
target_ulong dr7 = env->dr[7];
int i, hit = 0;
for (i = 0; i < DR7_MAX_BP; ++i) {
if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
&& hw_breakpoint_enabled(dr7, i)) {
int bpt_len = hw_breakpoint_len(dr7, i);
if (port + size - 1 >= env->dr[i]
&& port <= env->dr[i] + bpt_len - 1) {
hit |= 1 << i;
}
}
}
if (hit) {
env->dr[6] = (env->dr[6] & ~0xf) | hit;
env->eip = next_eip;
raise_exception(env, EXCP01_DB);
}
#endif
}
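
The range test in helper_bpt_io() is the usual closed-interval overlap check between the accessed port range and each I/O breakpoint. A standalone restatement with a couple of sanity checks (the function name is ours, not QEMU's):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool io_ranges_overlap(uint32_t port, uint32_t size,
                              uint32_t bp, uint32_t len)
{
    /* [port, port+size-1] intersects [bp, bp+len-1] */
    return port + size - 1 >= bp && port <= bp + len - 1;
}

int main(void)
{
    assert(io_ranges_overlap(0x60, 1, 0x60, 1));   /* exact hit */
    assert(io_ranges_overlap(0x5f, 2, 0x60, 4));   /* straddles the start */
    assert(!io_ranges_overlap(0x64, 1, 0x60, 4));  /* one past the end */
    return 0;
}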

target/i386/tcg/excp_helper.c

@@ -137,576 +137,3 @@ void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr
{
raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
int *prot)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
uint64_t ptep, pte;
uint64_t exit_info_1 = 0;
target_ulong pde_addr, pte_addr;
uint32_t page_offset;
int page_size;
if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
return gphys;
}
if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
rsvd_mask |= PG_NX_MASK;
}
if (env->nested_pg_mode & SVM_NPT_PAE) {
uint64_t pde, pdpe;
target_ulong pdpe_addr;
#ifdef TARGET_X86_64
if (env->nested_pg_mode & SVM_NPT_LMA) {
uint64_t pml5e;
uint64_t pml4e_addr, pml4e;
pml5e = env->nested_cr3;
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
(((gphys >> 39) & 0x1ff) << 3);
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
goto do_fault_rsvd;
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
}
ptep &= pml4e ^ PG_NX_MASK;
pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
(((gphys >> 30) & 0x1ff) << 3);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pdpe & rsvd_mask) {
goto do_fault_rsvd;
}
ptep &= pdpe ^ PG_NX_MASK;
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
}
if (pdpe & PG_PSE_MASK) {
/* 1 GB page */
page_size = 1024 * 1024 * 1024;
pte_addr = pdpe_addr;
pte = pdpe;
goto do_check_protect;
}
} else
#endif
{
pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
rsvd_mask |= PG_HI_USER_MASK;
if (pdpe & (rsvd_mask | PG_NX_MASK)) {
goto do_fault_rsvd;
}
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
}
pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pde & rsvd_mask) {
goto do_fault_rsvd;
}
ptep &= pde ^ PG_NX_MASK;
if (pde & PG_PSE_MASK) {
/* 2 MB page */
page_size = 2048 * 1024;
pte_addr = pde_addr;
pte = pde;
goto do_check_protect;
}
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
pte = x86_ldq_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pte & rsvd_mask) {
goto do_fault_rsvd;
}
/* combine pde and pte nx, user and rw protections */
ptep &= pte ^ PG_NX_MASK;
page_size = 4096;
} else {
uint32_t pde;
/* page directory entry */
pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
}
ptep = pde | PG_NX_MASK;
/* if host cr4 PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
page_size = 4096 * 1024;
pte_addr = pde_addr;
/* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
* Leave bits 20-13 in place for setting accessed/dirty bits below.
*/
pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
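/* For example, pde bit 13 (0x2000) becomes physical address bit 32:
   (0x2000 & 0x1fe000) << 19 == 1ULL << 32. */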
rsvd_mask = 0x200000;
goto do_check_protect_pse36;
}
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
/* page directory entry */
pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
/* combine pde and pte user and rw protections */
ptep &= pte | PG_NX_MASK;
page_size = 4096;
rsvd_mask = 0;
}
do_check_protect:
rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
if (pte & rsvd_mask) {
goto do_fault_rsvd;
}
ptep ^= PG_NX_MASK;
if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if (ptep & PG_NX_MASK) {
if (access_type == MMU_INST_FETCH) {
goto do_fault_protect;
}
*prot &= ~PAGE_EXEC;
}
if (!(ptep & PG_RW_MASK)) {
if (access_type == MMU_DATA_STORE) {
goto do_fault_protect;
}
*prot &= ~PAGE_WRITE;
}
pte &= PG_ADDRESS_MASK & ~(page_size - 1);
page_offset = gphys & (page_size - 1);
return pte + page_offset;
do_fault_rsvd:
exit_info_1 |= SVM_NPTEXIT_RSVD;
do_fault_protect:
exit_info_1 |= SVM_NPTEXIT_P;
do_fault:
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
gphys);
exit_info_1 |= SVM_NPTEXIT_US;
if (access_type == MMU_DATA_STORE) {
exit_info_1 |= SVM_NPTEXIT_RW;
} else if (access_type == MMU_INST_FETCH) {
exit_info_1 |= SVM_NPTEXIT_ID;
}
if (prot) {
exit_info_1 |= SVM_NPTEXIT_GPA;
} else { /* page table access */
exit_info_1 |= SVM_NPTEXIT_GPT;
}
cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}
/* return value:
* -1 = cannot handle fault
* 0 = nothing more to do
* 1 = generate PF fault
*/
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
int is_write1, int mmu_idx)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
uint64_t ptep, pte;
int32_t a20_mask;
target_ulong pde_addr, pte_addr;
int error_code = 0;
int is_dirty, prot, page_size, is_write, is_user;
hwaddr paddr;
uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
uint32_t page_offset;
target_ulong vaddr;
uint32_t pkr;
is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
addr, is_write1, is_user, env->eip);
#endif
is_write = is_write1 & 1;
a20_mask = x86_get_a20_mask(env);
if (!(env->cr[0] & CR0_PG_MASK)) {
pte = addr;
#ifdef TARGET_X86_64
if (!(env->hflags & HF_LMA_MASK)) {
/* Without long mode we can only address 32 bits in real mode */
pte = (uint32_t)pte;
}
#endif
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
page_size = 4096;
goto do_mapping;
}
if (!(env->efer & MSR_EFER_NXE)) {
rsvd_mask |= PG_NX_MASK;
}
if (env->cr[4] & CR4_PAE_MASK) {
uint64_t pde, pdpe;
target_ulong pdpe_addr;
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
bool la57 = env->cr[4] & CR4_LA57_MASK;
uint64_t pml5e_addr, pml5e;
uint64_t pml4e_addr, pml4e;
int32_t sext;
/* test virtual address sign extension */
sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
if (sext != 0 && sext != -1) {
env->error_code = 0;
cs->exception_index = EXCP0D_GPF;
return 1;
}
if (la57) {
pml5e_addr = ((env->cr[3] & ~0xfff) +
(((addr >> 48) & 0x1ff) << 3)) & a20_mask;
pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
pml5e = x86_ldq_phys(cs, pml5e_addr);
if (!(pml5e & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
goto do_fault_rsvd;
}
if (!(pml5e & PG_ACCESSED_MASK)) {
pml5e |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
}
ptep = pml5e ^ PG_NX_MASK;
} else {
pml5e = env->cr[3];
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
}
pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
(((addr >> 39) & 0x1ff) << 3)) & a20_mask;
pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, false);
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
goto do_fault_rsvd;
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
}
ptep &= pml4e ^ PG_NX_MASK;
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
a20_mask;
pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pdpe & rsvd_mask) {
goto do_fault_rsvd;
}
ptep &= pdpe ^ PG_NX_MASK;
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
}
if (pdpe & PG_PSE_MASK) {
/* 1 GB page */
page_size = 1024 * 1024 * 1024;
pte_addr = pdpe_addr;
pte = pdpe;
goto do_check_protect;
}
} else
#endif
{
/* XXX: load them when cr3 is loaded ? */
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
a20_mask;
pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, false);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
rsvd_mask |= PG_HI_USER_MASK;
if (pdpe & (rsvd_mask | PG_NX_MASK)) {
goto do_fault_rsvd;
}
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
}
pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
a20_mask;
pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pde & rsvd_mask) {
goto do_fault_rsvd;
}
ptep &= pde ^ PG_NX_MASK;
if (pde & PG_PSE_MASK) {
/* 2 MB page */
page_size = 2048 * 1024;
pte_addr = pde_addr;
pte = pde;
goto do_check_protect;
}
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
a20_mask;
pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
pte = x86_ldq_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pte & rsvd_mask) {
goto do_fault_rsvd;
}
/* combine pde and pte nx, user and rw protections */
ptep &= pte ^ PG_NX_MASK;
page_size = 4096;
} else {
uint32_t pde;
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
a20_mask;
pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
}
ptep = pde | PG_NX_MASK;
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
page_size = 4096 * 1024;
pte_addr = pde_addr;
/* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
* Leave bits 20-13 in place for setting accessed/dirty bits below.
*/
pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
rsvd_mask = 0x200000;
goto do_check_protect_pse36;
}
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
a20_mask;
pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
/* combine pde and pte user and rw protections */
ptep &= pte | PG_NX_MASK;
page_size = 4096;
rsvd_mask = 0;
}
do_check_protect:
rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
if (pte & rsvd_mask) {
goto do_fault_rsvd;
}
ptep ^= PG_NX_MASK;
/* can the page be put in the TLB? prot will tell us */
if (is_user && !(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
prot = 0;
if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
prot |= PAGE_READ;
if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
prot |= PAGE_WRITE;
}
}
if (!(ptep & PG_NX_MASK) &&
(mmu_idx == MMU_USER_IDX ||
!((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
prot |= PAGE_EXEC;
}
if (!(env->hflags & HF_LMA_MASK)) {
pkr = 0;
} else if (ptep & PG_USER_MASK) {
pkr = env->cr[4] & CR4_PKE_MASK ? env->pkru : 0;
} else {
pkr = env->cr[4] & CR4_PKS_MASK ? env->pkrs : 0;
}
if (pkr) {
uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
uint32_t pkr_ad = (pkr >> pk * 2) & 1;
uint32_t pkr_wd = (pkr >> pk * 2) & 2;
uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
if (pkr_ad) {
pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
} else if (pkr_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
pkr_prot &= ~PAGE_WRITE;
}
prot &= pkr_prot;
if ((pkr_prot & (1 << is_write1)) == 0) {
assert(is_write1 != 2);
error_code |= PG_ERROR_PK_MASK;
goto do_fault_protect;
}
}
if ((prot & (1 << is_write1)) == 0) {
goto do_fault_protect;
}
/* yes, it can! */
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty) {
pte |= PG_DIRTY_MASK;
}
x86_stl_phys_notdirty(cs, pte_addr, pte);
}
if (!(pte & PG_DIRTY_MASK)) {
/* only set write access if already dirty... otherwise wait
for dirty access */
assert(!is_write);
prot &= ~PAGE_WRITE;
}
do_mapping:
pte = pte & a20_mask;
/* align to page_size */
pte &= PG_ADDRESS_MASK & ~(page_size - 1);
page_offset = addr & (page_size - 1);
paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);
/* Even with 4MB pages, we map only one 4KB page in the TLB to
avoid filling it too fast */
vaddr = addr & TARGET_PAGE_MASK;
paddr &= TARGET_PAGE_MASK;
assert(prot & (1 << is_write1));
tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
prot, mmu_idx, page_size);
return 0;
do_fault_rsvd:
error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
error_code |= PG_ERROR_P_MASK;
do_fault:
error_code |= (is_write << PG_ERROR_W_BIT);
if (is_user)
error_code |= PG_ERROR_U_MASK;
if (is_write1 == 2 &&
(((env->efer & MSR_EFER_NXE) &&
(env->cr[4] & CR4_PAE_MASK)) ||
(env->cr[4] & CR4_SMEP_MASK)))
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
env->cr[2] = addr;
}
env->error_code = error_code;
cs->exception_index = EXCP0E_PAGE;
return 1;
}
#endif
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
#ifdef CONFIG_USER_ONLY
/* user mode only emulation */
env->cr[2] = addr;
env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
env->error_code |= PG_ERROR_U_MASK;
cs->exception_index = EXCP0E_PAGE;
env->exception_is_int = 0;
env->exception_next_eip = -1;
cpu_loop_exit_restore(cs, retaddr);
#else
env->retaddr = retaddr;
if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
/* FIXME: On error in get_hphys we have already jumped out. */
g_assert(!probe);
raise_exception_err_ra(env, cs->exception_index,
env->error_code, retaddr);
}
return true;
#endif
}

target/i386/tcg/fpu_helper.c

@@ -21,17 +21,10 @@
#include <math.h>
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "fpu/softfloat.h"
#include "fpu/softfloat-macros.h"
#include "helper-tcg.h"
#ifdef CONFIG_SOFTMMU
#include "hw/irq.h"
#endif
/* float macros */
#define FT0 (env->ft0)
#define ST0 (env->fpregs[env->fpstt].d)
@@ -75,36 +68,6 @@
#define floatx80_ln2_d make_floatx80(0x3ffe, 0xb17217f7d1cf79abLL)
#define floatx80_pi_d make_floatx80(0x4000, 0xc90fdaa22168c234LL)
#if !defined(CONFIG_USER_ONLY)
static qemu_irq ferr_irq;
void x86_register_ferr_irq(qemu_irq irq)
{
ferr_irq = irq;
}
static void cpu_clear_ignne(void)
{
CPUX86State *env = &X86_CPU(first_cpu)->env;
env->hflags2 &= ~HF2_IGNNE_MASK;
}
void cpu_set_ignne(void)
{
CPUX86State *env = &X86_CPU(first_cpu)->env;
env->hflags2 |= HF2_IGNNE_MASK;
/*
* We get here in response to a write to port F0h. The chipset should
* deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is
* cleared, because FERR# and FP_IRQ are two separate pins on real
* hardware. However, we don't model FERR# as a qemu_irq, so we just
* do directly what the chipset would do, i.e. deassert FP_IRQ.
*/
qemu_irq_lower(ferr_irq);
}
#endif
static inline void fpush(CPUX86State *env)
{
env->fpstt = (env->fpstt - 1) & 7;
@@ -117,8 +80,7 @@ static inline void fpop(CPUX86State *env)
env->fpstt = (env->fpstt + 1) & 7;
}
static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr,
uintptr_t retaddr)
static floatx80 do_fldt(CPUX86State *env, target_ulong ptr, uintptr_t retaddr)
{
CPU_LDoubleU temp;
@@ -127,8 +89,8 @@ static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr,
return temp.d;
}
static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr,
uintptr_t retaddr)
static void do_fstt(CPUX86State *env, floatx80 f, target_ulong ptr,
uintptr_t retaddr)
{
CPU_LDoubleU temp;
@@ -203,8 +165,8 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
raise_exception_ra(env, EXCP10_COPR, retaddr);
}
#if !defined(CONFIG_USER_ONLY)
else if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
qemu_irq_raise(ferr_irq);
else {
fpu_check_raise_ferr_irq(env);
}
#endif
}
@@ -405,14 +367,14 @@ void helper_fldt_ST0(CPUX86State *env, target_ulong ptr)
int new_fpstt;
new_fpstt = (env->fpstt - 1) & 7;
env->fpregs[new_fpstt].d = helper_fldt(env, ptr, GETPC());
env->fpregs[new_fpstt].d = do_fldt(env, ptr, GETPC());
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
}
void helper_fstt_ST0(CPUX86State *env, target_ulong ptr)
{
helper_fstt(env, ST0, ptr, GETPC());
do_fstt(env, ST0, ptr, GETPC());
}
void helper_fpush(CPUX86State *env)
@@ -2458,17 +2420,18 @@ void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32)
do_fldenv(env, ptr, data32, GETPC());
}
void helper_fsave(CPUX86State *env, target_ulong ptr, int data32)
static void do_fsave(CPUX86State *env, target_ulong ptr, int data32,
uintptr_t retaddr)
{
floatx80 tmp;
int i;
do_fstenv(env, ptr, data32, GETPC());
do_fstenv(env, ptr, data32, retaddr);
ptr += (14 << data32);
for (i = 0; i < 8; i++) {
tmp = ST(i);
helper_fstt(env, tmp, ptr, GETPC());
do_fstt(env, tmp, ptr, retaddr);
ptr += 10;
}
@@ -2486,30 +2449,41 @@ void helper_fsave(CPUX86State *env, target_ulong ptr, int data32)
env->fptags[7] = 1;
}
void helper_frstor(CPUX86State *env, target_ulong ptr, int data32)
void helper_fsave(CPUX86State *env, target_ulong ptr, int data32)
{
do_fsave(env, ptr, data32, GETPC());
}
static void do_frstor(CPUX86State *env, target_ulong ptr, int data32,
uintptr_t retaddr)
{
floatx80 tmp;
int i;
do_fldenv(env, ptr, data32, GETPC());
do_fldenv(env, ptr, data32, retaddr);
ptr += (14 << data32);
for (i = 0; i < 8; i++) {
tmp = helper_fldt(env, ptr, GETPC());
tmp = do_fldt(env, ptr, retaddr);
ST(i) = tmp;
ptr += 10;
}
}
void helper_frstor(CPUX86State *env, target_ulong ptr, int data32)
{
do_frstor(env, ptr, data32, GETPC());
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32)
{
helper_fsave(env, ptr, data32);
do_fsave(env, ptr, data32, 0);
}
void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
{
helper_frstor(env, ptr, data32);
do_frstor(env, ptr, data32, 0);
}
#endif
@@ -2539,7 +2513,7 @@ static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
addr = ptr + XO(legacy.fpregs);
for (i = 0; i < 8; i++) {
floatx80 tmp = ST(i);
helper_fstt(env, tmp, addr, ra);
do_fstt(env, tmp, addr, ra);
addr += 16;
}
}
@@ -2594,10 +2568,8 @@ static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
cpu_stq_data_ra(env, ptr, env->pkru, ra);
}
void helper_fxsave(CPUX86State *env, target_ulong ptr)
static void do_fxsave(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
uintptr_t ra = GETPC();
/* The operand must be 16 byte aligned */
if (ptr & 0xf) {
raise_exception_ra(env, EXCP0D_GPF, ra);
@@ -2616,6 +2588,11 @@ void helper_fxsave(CPUX86State *env, target_ulong ptr)
}
}
void helper_fxsave(CPUX86State *env, target_ulong ptr)
{
do_fxsave(env, ptr, GETPC());
}
static uint64_t get_xinuse(CPUX86State *env)
{
uint64_t inuse = -1;
@@ -2703,7 +2680,7 @@ static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
addr = ptr + XO(legacy.fpregs);
for (i = 0; i < 8; i++) {
floatx80 tmp = helper_fldt(env, addr, ra);
floatx80 tmp = do_fldt(env, addr, ra);
ST(i) = tmp;
addr += 16;
}
@@ -2758,10 +2735,8 @@ static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
env->pkru = cpu_ldq_data_ra(env, ptr, ra);
}
void helper_fxrstor(CPUX86State *env, target_ulong ptr)
static void do_fxrstor(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
uintptr_t ra = GETPC();
/* The operand must be 16 byte aligned */
if (ptr & 0xf) {
raise_exception_ra(env, EXCP0D_GPF, ra);
@@ -2780,15 +2755,20 @@ void helper_fxrstor(CPUX86State *env, target_ulong ptr)
}
}
void helper_fxrstor(CPUX86State *env, target_ulong ptr)
{
do_fxrstor(env, ptr, GETPC());
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr)
{
helper_fxsave(env, ptr);
do_fxsave(env, ptr, 0);
}
void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr)
{
helper_fxrstor(env, ptr);
do_fxrstor(env, ptr, 0);
}
#endif

target/i386/tcg/helper-tcg.h

@@ -84,8 +84,16 @@ void do_vmexit(CPUX86State *env);
/* seg_helper.c */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
int error_code, target_ulong next_eip, int is_hw);
void handle_even_inj(CPUX86State *env, int intno, int is_int,
int error_code, int is_hw, int rm);
int exception_has_error_code(int intno);
/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);
/* bpt_helper.c */
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
#endif /* I386_HELPER_TCG_H */

target/i386/tcg/meson.build

@@ -8,7 +8,8 @@ i386_ss.add(when: 'CONFIG_TCG', if_true: files(
'misc_helper.c',
'mpx_helper.c',
'seg_helper.c',
'smm_helper.c',
'svm_helper.c',
'tcg-cpu.c',
'translate.c'), if_false: files('tcg-stub.c'))
subdir('sysemu')
subdir('user')

target/i386/tcg/misc_helper.c

@@ -18,12 +18,9 @@
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "helper-tcg.h"
/*
@@ -39,69 +36,6 @@ void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask)
(eflags & update_mask) | 0x2;
}
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outb: port=0x%04x, data=%02x\n", port, data);
#else
address_space_stb(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
#endif
}
target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "inb: port=0x%04x\n", port);
return 0;
#else
return address_space_ldub(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
#endif
}
void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outw: port=0x%04x, data=%04x\n", port, data);
#else
address_space_stw(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
#endif
}
target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "inw: port=0x%04x\n", port);
return 0;
#else
return address_space_lduw(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
#endif
}
void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outl: port=0x%04x, data=%08x\n", port, data);
#else
address_space_stl(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
#endif
}
target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "inl: port=0x%04x\n", port);
return 0;
#else
return address_space_ldl(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
#endif
}
void helper_into(CPUX86State *env, int next_eip_addend)
{
int eflags;
@@ -126,68 +60,6 @@ void helper_cpuid(CPUX86State *env)
env->regs[R_EDX] = edx;
}
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
return 0;
}
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
target_ulong val;
cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC());
switch (reg) {
default:
val = env->cr[reg];
break;
case 8:
if (!(env->hflags2 & HF2_VINTR_MASK)) {
val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
} else {
val = env->v_tpr;
}
break;
}
return val;
}
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC());
switch (reg) {
case 0:
cpu_x86_update_cr0(env, t0);
break;
case 3:
cpu_x86_update_cr3(env, t0);
break;
case 4:
if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
(env->hflags & HF_CS64_MASK)) {
raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
cpu_x86_update_cr4(env, t0);
break;
case 8:
if (!(env->hflags2 & HF2_VINTR_MASK)) {
qemu_mutex_lock_iothread();
cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
qemu_mutex_unlock_iothread();
}
env->v_tpr = t0 & 0x0f;
break;
default:
env->cr[reg] = t0;
break;
}
}
#endif
void helper_lmsw(CPUX86State *env, target_ulong t0)
{
/* only 4 lower bits of CR0 are modified. PE cannot be set to zero
@@ -237,345 +109,6 @@ void helper_rdpmc(CPUX86State *env)
raise_exception_err(env, EXCP06_ILLOP, 0);
}
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(CPUX86State *env)
{
}
void helper_rdmsr(CPUX86State *env)
{
}
#else
void helper_wrmsr(CPUX86State *env)
{
uint64_t val;
CPUState *cs = env_cpu(env);
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());
val = ((uint32_t)env->regs[R_EAX]) |
((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
switch ((uint32_t)env->regs[R_ECX]) {
case MSR_IA32_SYSENTER_CS:
env->sysenter_cs = val & 0xffff;
break;
case MSR_IA32_SYSENTER_ESP:
env->sysenter_esp = val;
break;
case MSR_IA32_SYSENTER_EIP:
env->sysenter_eip = val;
break;
case MSR_IA32_APICBASE:
cpu_set_apic_base(env_archcpu(env)->apic_state, val);
break;
case MSR_EFER:
{
uint64_t update_mask;
update_mask = 0;
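/* Only EFER bits backed by a CPUID feature become writable; bits
   outside update_mask keep their previous value in the
   cpu_load_efer() call below, where real hardware would raise #GP
   on an attempt to set a reserved bit. */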
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
update_mask |= MSR_EFER_SCE;
}
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
update_mask |= MSR_EFER_LME;
}
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
update_mask |= MSR_EFER_FFXSR;
}
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
update_mask |= MSR_EFER_NXE;
}
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
update_mask |= MSR_EFER_SVME;
}
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
update_mask |= MSR_EFER_FFXSR;
}
cpu_load_efer(env, (env->efer & ~update_mask) |
(val & update_mask));
}
break;
case MSR_STAR:
env->star = val;
break;
case MSR_PAT:
env->pat = val;
break;
case MSR_IA32_PKRS:
if (val & 0xFFFFFFFF00000000ull) {
goto error;
}
env->pkrs = val;
tlb_flush(cs);
break;
case MSR_VM_HSAVE_PA:
env->vm_hsave = val;
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
env->lstar = val;
break;
case MSR_CSTAR:
env->cstar = val;
break;
case MSR_FMASK:
env->fmask = val;
break;
case MSR_FSBASE:
env->segs[R_FS].base = val;
break;
case MSR_GSBASE:
env->segs[R_GS].base = val;
break;
case MSR_KERNELGSBASE:
env->kernelgsbase = val;
break;
#endif
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):
case MSR_MTRRphysBase(2):
case MSR_MTRRphysBase(3):
case MSR_MTRRphysBase(4):
case MSR_MTRRphysBase(5):
case MSR_MTRRphysBase(6):
case MSR_MTRRphysBase(7):
env->mtrr_var[((uint32_t)env->regs[R_ECX] -
MSR_MTRRphysBase(0)) / 2].base = val;
break;
case MSR_MTRRphysMask(0):
case MSR_MTRRphysMask(1):
case MSR_MTRRphysMask(2):
case MSR_MTRRphysMask(3):
case MSR_MTRRphysMask(4):
case MSR_MTRRphysMask(5):
case MSR_MTRRphysMask(6):
case MSR_MTRRphysMask(7):
env->mtrr_var[((uint32_t)env->regs[R_ECX] -
MSR_MTRRphysMask(0)) / 2].mask = val;
break;
case MSR_MTRRfix64K_00000:
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix64K_00000] = val;
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix16K_80000 + 1] = val;
break;
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix4K_C0000 + 3] = val;
break;
case MSR_MTRRdefType:
env->mtrr_deftype = val;
break;
case MSR_MCG_STATUS:
env->mcg_status = val;
break;
case MSR_MCG_CTL:
if ((env->mcg_cap & MCG_CTL_P)
&& (val == 0 || val == ~(uint64_t)0)) {
env->mcg_ctl = val;
}
break;
case MSR_TSC_AUX:
env->tsc_aux = val;
break;
case MSR_IA32_MISC_ENABLE:
env->msr_ia32_misc_enable = val;
break;
case MSR_IA32_BNDCFGS:
/* FIXME: #GP if reserved bits are set. */
/* FIXME: Extend highest implemented bit of linear address. */
env->msr_bndcfgs = val;
cpu_sync_bndcs_hflags(env);
break;
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
(4 * env->mcg_cap & 0xff)) {
uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
if ((offset & 0x3) != 0
|| (val == 0 || val == ~(uint64_t)0)) {
env->mce_banks[offset] = val;
}
break;
}
/* XXX: exception? */
break;
}
return;
error:
raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
void helper_rdmsr(CPUX86State *env)
{
X86CPU *x86_cpu = env_archcpu(env);
uint64_t val;
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());
switch ((uint32_t)env->regs[R_ECX]) {
case MSR_IA32_SYSENTER_CS:
val = env->sysenter_cs;
break;
case MSR_IA32_SYSENTER_ESP:
val = env->sysenter_esp;
break;
case MSR_IA32_SYSENTER_EIP:
val = env->sysenter_eip;
break;
case MSR_IA32_APICBASE:
val = cpu_get_apic_base(env_archcpu(env)->apic_state);
break;
case MSR_EFER:
val = env->efer;
break;
case MSR_STAR:
val = env->star;
break;
case MSR_PAT:
val = env->pat;
break;
case MSR_IA32_PKRS:
val = env->pkrs;
break;
case MSR_VM_HSAVE_PA:
val = env->vm_hsave;
break;
case MSR_IA32_PERF_STATUS:
/* tsc_increment_by_tick */
val = 1000ULL;
/* CPU multiplier */
val |= (((uint64_t)4ULL) << 40);
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
val = env->lstar;
break;
case MSR_CSTAR:
val = env->cstar;
break;
case MSR_FMASK:
val = env->fmask;
break;
case MSR_FSBASE:
val = env->segs[R_FS].base;
break;
case MSR_GSBASE:
val = env->segs[R_GS].base;
break;
case MSR_KERNELGSBASE:
val = env->kernelgsbase;
break;
case MSR_TSC_AUX:
val = env->tsc_aux;
break;
#endif
case MSR_SMI_COUNT:
val = env->msr_smi_count;
break;
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):
case MSR_MTRRphysBase(2):
case MSR_MTRRphysBase(3):
case MSR_MTRRphysBase(4):
case MSR_MTRRphysBase(5):
case MSR_MTRRphysBase(6):
case MSR_MTRRphysBase(7):
val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
MSR_MTRRphysBase(0)) / 2].base;
break;
case MSR_MTRRphysMask(0):
case MSR_MTRRphysMask(1):
case MSR_MTRRphysMask(2):
case MSR_MTRRphysMask(3):
case MSR_MTRRphysMask(4):
case MSR_MTRRphysMask(5):
case MSR_MTRRphysMask(6):
case MSR_MTRRphysMask(7):
val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
MSR_MTRRphysMask(0)) / 2].mask;
break;
case MSR_MTRRfix64K_00000:
val = env->mtrr_fixed[0];
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix16K_80000 + 1];
break;
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix4K_C0000 + 3];
break;
case MSR_MTRRdefType:
val = env->mtrr_deftype;
break;
case MSR_MTRRcap:
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
MSR_MTRRcap_WC_SUPPORTED;
} else {
/* XXX: exception? */
val = 0;
}
break;
case MSR_MCG_CAP:
val = env->mcg_cap;
break;
case MSR_MCG_CTL:
if (env->mcg_cap & MCG_CTL_P) {
val = env->mcg_ctl;
} else {
val = 0;
}
break;
case MSR_MCG_STATUS:
val = env->mcg_status;
break;
case MSR_IA32_MISC_ENABLE:
val = env->msr_ia32_misc_enable;
break;
case MSR_IA32_BNDCFGS:
val = env->msr_bndcfgs;
break;
case MSR_IA32_UCODE_REV:
val = x86_cpu->ucode_rev;
break;
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
(4 * env->mcg_cap & 0xff)) {
uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
val = env->mce_banks[offset];
break;
}
/* XXX: exception? */
val = 0;
break;
}
env->regs[R_EAX] = (uint32_t)(val);
env->regs[R_EDX] = (uint32_t)(val >> 32);
}
#endif
static void do_pause(X86CPU *cpu)
{
CPUState *cs = CPU(cpu);


@@ -26,49 +26,7 @@
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
//#define DEBUG_PCALL
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
/*
* TODO: Convert callers to compute cpu_mmu_index_kernel once
* and use *_mmuidx_ra directly.
*/
#define cpu_ldub_kernel_ra(e, p, r) \
cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_lduw_kernel_ra(e, p, r) \
cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldl_kernel_ra(e, p, r) \
cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldq_kernel_ra(e, p, r) \
cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_stb_kernel_ra(e, p, v, r) \
cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stw_kernel_ra(e, p, v, r) \
cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stl_kernel_ra(e, p, v, r) \
cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stq_kernel_ra(e, p, v, r) \
cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)
#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)
#include "seg_helper.h"
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
@@ -531,7 +489,7 @@ static inline unsigned int get_sp_mask(unsigned int e2)
}
}
static int exception_has_error_code(int intno)
int exception_has_error_code(int intno)
{
switch (intno) {
case 8:
@@ -976,72 +934,6 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
}
#endif
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
CPUState *cs = env_cpu(env);
cs->exception_index = EXCP_SYSCALL;
env->exception_is_int = 0;
env->exception_next_eip = env->eip + next_eip_addend;
cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
int selector;
if (!(env->efer & MSR_EFER_SCE)) {
raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
}
selector = (env->star >> 32) & 0xffff;
if (env->hflags & HF_LMA_MASK) {
int code64;
env->regs[R_ECX] = env->eip + next_eip_addend;
env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
code64 = env->hflags & HF_CS64_MASK;
env->eflags &= ~(env->fmask | RF_MASK);
cpu_load_eflags(env, env->eflags, 0);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
DESC_L_MASK);
cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
if (code64) {
env->eip = env->lstar;
} else {
env->eip = env->cstar;
}
} else {
env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
env->eip = (uint32_t)env->star;
}
}
#endif
#endif
#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
@@ -1136,84 +1028,13 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt. is_int is TRUE if coming from the int
* instruction. next_eip is the env->eip value AFTER the interrupt
* instruction. It is only relevant if is_int is TRUE or if intno
* is EXCP_SYSCALL.
*/
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
int error_code, target_ulong next_eip)
{
if (is_int) {
SegmentCache *dt;
target_ulong ptr;
int dpl, cpl, shift;
uint32_t e2;
dt = &env->idt;
if (env->hflags & HF_LMA_MASK) {
shift = 4;
} else {
shift = 3;
}
ptr = dt->base + (intno << shift);
e2 = cpu_ldl_kernel(env, ptr + 4);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
/* check privilege if software int */
if (dpl < cpl) {
raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
}
}
/* Since we emulate only user space, we cannot do more than
exit the emulation with the suitable exception and error
code, so update EIP for INT 0x80 and EXCP_SYSCALL. */
if (is_int || intno == EXCP_SYSCALL) {
env->eip = next_eip;
}
}
#else
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
int error_code, int is_hw, int rm)
{
CPUState *cs = env_cpu(env);
uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
if (!(event_inj & SVM_EVTINJ_VALID)) {
int type;
if (is_int) {
type = SVM_EVTINJ_TYPE_SOFT;
} else {
type = SVM_EVTINJ_TYPE_EXEPT;
}
event_inj = intno | type | SVM_EVTINJ_VALID;
if (!rm && exception_has_error_code(intno)) {
event_inj |= SVM_EVTINJ_VALID_ERR;
x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj_err),
error_code);
}
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
event_inj);
}
}
#endif
/*
* Begin execution of an interruption. is_int is TRUE if coming from
* the int instruction. next_eip is the env->eip value AFTER the interrupt
* instruction. It is only relevant if is_int is TRUE.
*/
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
int error_code, target_ulong next_eip, int is_hw)
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
int error_code, target_ulong next_eip, int is_hw)
{
CPUX86State *env = &cpu->env;
@@ -1289,36 +1110,6 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
#endif
}
void x86_cpu_do_interrupt(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
#if defined(CONFIG_USER_ONLY)
/* In user-only mode, we simulate a fake exception which
will be handled outside the cpu execution loop */
do_interrupt_user(env, cs->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip);
/* successfully delivered */
env->old_exception = -1;
#else
if (cs->exception_index == EXCP_VMEXIT) {
assert(env->old_exception == -1);
do_vmexit(env);
} else {
do_interrupt_all(cpu, cs->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip, 0);
/* successfully delivered */
env->old_exception = -1;
}
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
@@ -1351,7 +1142,11 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
case CPU_INTERRUPT_SMI:
cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_USER_ONLY
cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
#else
do_smm_enter(cpu);
#endif /* CONFIG_USER_ONLY */
break;
case CPU_INTERRUPT_NMI:
cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
@@ -2622,22 +2417,6 @@ void helper_verw(CPUX86State *env, target_ulong selector1)
CC_SRC = eflags | CC_Z;
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector)
{
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
int dpl = (env->eflags & VM_MASK) ? 3 : 0;
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg_reg, selector,
(selector << 4), 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
} else {
helper_load_seg(env, seg_reg, selector);
}
}
#endif
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
uintptr_t retaddr)


@@ -0,0 +1,66 @@
/*
* x86 segmentation related helpers macros
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef SEG_HELPER_H
#define SEG_HELPER_H
//#define DEBUG_PCALL
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
/*
* TODO: Convert callers to compute cpu_mmu_index_kernel once
* and use *_mmuidx_ra directly.
*/
#define cpu_ldub_kernel_ra(e, p, r) \
cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_lduw_kernel_ra(e, p, r) \
cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldl_kernel_ra(e, p, r) \
cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldq_kernel_ra(e, p, r) \
cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_stb_kernel_ra(e, p, v, r) \
cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stw_kernel_ra(e, p, v, r) \
cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stl_kernel_ra(e, p, v, r) \
cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stq_kernel_ra(e, p, v, r) \
cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)
#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)
#endif /* SEG_HELPER_H */
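
The *_kernel macros above pin every access to the kernel MMU index; as the
TODO notes, cpu_mmu_index_kernel() is re-evaluated on each call. A minimal
standalone sketch of the expansion, using stub types and a made-up MMU index
in place of the QEMU originals:

#include <inttypes.h>
#include <stdio.h>
#include <stddef.h>

typedef struct CPUX86State CPUX86State;   /* opaque stand-in */

static int cpu_mmu_index_kernel(CPUX86State *env)
{
    (void)env;
    return 2;   /* made-up index, for illustration only */
}

static uint32_t cpu_ldl_mmuidx_ra(CPUX86State *env, uint64_t ptr,
                                  int mmu_idx, uintptr_t ra)
{
    (void)env; (void)ra;
    printf("4-byte load at 0x%" PRIx64 ", mmu_idx=%d\n", ptr, mmu_idx);
    return 0;
}

#define cpu_ldl_kernel_ra(e, p, r) \
    cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)

int main(void)
{
    /* each call re-evaluates cpu_mmu_index_kernel(env) */
    cpu_ldl_kernel(NULL, 0x1000);
    cpu_ldl_kernel(NULL, 0x1004);
    return 0;
}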


@@ -0,0 +1,293 @@
/*
* i386 breakpoint helpers - sysemu code
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "tcg/helper-tcg.h"
static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
{
return (dr7 >> (index * 2)) & 1;
}
static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
{
return (dr7 >> (index * 2)) & 2;
}
static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
{
return hw_global_breakpoint_enabled(dr7, index) ||
hw_local_breakpoint_enabled(dr7, index);
}
static inline int hw_breakpoint_type(unsigned long dr7, int index)
{
return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
}
static inline int hw_breakpoint_len(unsigned long dr7, int index)
{
int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
return (len == 2) ? 8 : len + 1;
}
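
The two LEN bits decoded above follow the architectural DR7 encoding:
00b = 1 byte, 01b = 2 bytes, 10b = 8 bytes (on 64-bit capable CPUs) and
11b = 4 bytes, hence the special case for len == 2. A standalone check of
that decode:

#include <assert.h>
#include <stdio.h>

/* mirrors hw_breakpoint_len()'s decode of the DR7 LEN field */
static int decode_dr7_len(int len_bits)
{
    return (len_bits == 2) ? 8 : len_bits + 1;
}

int main(void)
{
    assert(decode_dr7_len(0) == 1);
    assert(decode_dr7_len(1) == 2);
    assert(decode_dr7_len(2) == 8);
    assert(decode_dr7_len(3) == 4);
    printf("DR7 LEN decode ok\n");
    return 0;
}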
static int hw_breakpoint_insert(CPUX86State *env, int index)
{
CPUState *cs = env_cpu(env);
target_ulong dr7 = env->dr[7];
target_ulong drN = env->dr[index];
int err = 0;
switch (hw_breakpoint_type(dr7, index)) {
case DR7_TYPE_BP_INST:
if (hw_breakpoint_enabled(dr7, index)) {
err = cpu_breakpoint_insert(cs, drN, BP_CPU,
&env->cpu_breakpoint[index]);
}
break;
case DR7_TYPE_IO_RW:
/* Notice when we should enable calls to bpt_io. */
return hw_breakpoint_enabled(env->dr[7], index)
? HF_IOBPT_MASK : 0;
case DR7_TYPE_DATA_WR:
if (hw_breakpoint_enabled(dr7, index)) {
err = cpu_watchpoint_insert(cs, drN,
hw_breakpoint_len(dr7, index),
BP_CPU | BP_MEM_WRITE,
&env->cpu_watchpoint[index]);
}
break;
case DR7_TYPE_DATA_RW:
if (hw_breakpoint_enabled(dr7, index)) {
err = cpu_watchpoint_insert(cs, drN,
hw_breakpoint_len(dr7, index),
BP_CPU | BP_MEM_ACCESS,
&env->cpu_watchpoint[index]);
}
break;
}
if (err) {
env->cpu_breakpoint[index] = NULL;
}
return 0;
}
static void hw_breakpoint_remove(CPUX86State *env, int index)
{
CPUState *cs = env_cpu(env);
switch (hw_breakpoint_type(env->dr[7], index)) {
case DR7_TYPE_BP_INST:
if (env->cpu_breakpoint[index]) {
cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
env->cpu_breakpoint[index] = NULL;
}
break;
case DR7_TYPE_DATA_WR:
case DR7_TYPE_DATA_RW:
if (env->cpu_breakpoint[index]) {
cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
env->cpu_breakpoint[index] = NULL;
}
break;
case DR7_TYPE_IO_RW:
/* HF_IOBPT_MASK cleared elsewhere. */
break;
}
}
void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
{
target_ulong old_dr7 = env->dr[7];
int iobpt = 0;
int i;
new_dr7 |= DR7_FIXED_1;
/* If nothing is changing except the global/local enable bits,
then we can make the change more efficient. */
if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
/* Fold the global and local enable bits together into the
global fields, then xor to show which registers have
changed collective enable state. */
int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
for (i = 0; i < DR7_MAX_BP; i++) {
if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
hw_breakpoint_remove(env, i);
}
}
env->dr[7] = new_dr7;
for (i = 0; i < DR7_MAX_BP; i++) {
if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
iobpt |= hw_breakpoint_insert(env, i);
} else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
&& hw_breakpoint_enabled(new_dr7, i)) {
iobpt |= HF_IOBPT_MASK;
}
}
} else {
for (i = 0; i < DR7_MAX_BP; i++) {
hw_breakpoint_remove(env, i);
}
env->dr[7] = new_dr7;
for (i = 0; i < DR7_MAX_BP; i++) {
iobpt |= hw_breakpoint_insert(env, i);
}
}
env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
}
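
The (old_dr7 | old_dr7 * 2) expression above is worth unpacking: local
enable for breakpoint i is bit 2i and global enable is bit 2i+1, so
dr7 * 2 shifts each local bit onto its global neighbour, the OR folds the
two into a single per-breakpoint enable bit, and the XOR of the old and
new folded values flags the breakpoints whose collective enable state
changed. A minimal standalone check of that bit arithmetic:

#include <stdio.h>

int main(void)
{
    unsigned long old_dr7 = 0x1;   /* L0 set: BP0 locally enabled  */
    unsigned long new_dr7 = 0x8;   /* G1 set: BP1 globally enabled */

    unsigned mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;

    for (int i = 0; i < 4; i++) {
        if (mod & (2 << i * 2)) {
            printf("breakpoint %d changed enable state\n", i);
        }
    }
    /* prints breakpoint 0 (disabled) and breakpoint 1 (enabled) */
    return 0;
}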
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
target_ulong dr6;
int reg;
bool hit_enabled = false;
dr6 = env->dr[6] & ~0xf;
for (reg = 0; reg < DR7_MAX_BP; reg++) {
bool bp_match = false;
bool wp_match = false;
switch (hw_breakpoint_type(env->dr[7], reg)) {
case DR7_TYPE_BP_INST:
if (env->dr[reg] == env->eip) {
bp_match = true;
}
break;
case DR7_TYPE_DATA_WR:
case DR7_TYPE_DATA_RW:
if (env->cpu_watchpoint[reg] &&
env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
wp_match = true;
}
break;
case DR7_TYPE_IO_RW:
break;
}
if (bp_match || wp_match) {
dr6 |= 1 << reg;
if (hw_breakpoint_enabled(env->dr[7], reg)) {
hit_enabled = true;
}
}
}
if (hit_enabled || force_dr6_update) {
env->dr[6] = dr6;
}
return hit_enabled;
}
void breakpoint_handler(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
CPUBreakpoint *bp;
if (cs->watchpoint_hit) {
if (cs->watchpoint_hit->flags & BP_CPU) {
cs->watchpoint_hit = NULL;
if (check_hw_breakpoints(env, false)) {
raise_exception(env, EXCP01_DB);
} else {
cpu_loop_exit_noexc(cs);
}
}
} else {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, true);
raise_exception(env, EXCP01_DB);
}
break;
}
}
}
}
void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
{
switch (reg) {
case 0: case 1: case 2: case 3:
if (hw_breakpoint_enabled(env->dr[7], reg)
&& hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
hw_breakpoint_remove(env, reg);
env->dr[reg] = t0;
hw_breakpoint_insert(env, reg);
} else {
env->dr[reg] = t0;
}
return;
case 4:
if (env->cr[4] & CR4_DE_MASK) {
break;
}
/* fallthru */
case 6:
env->dr[6] = t0 | DR6_FIXED_1;
return;
case 5:
if (env->cr[4] & CR4_DE_MASK) {
break;
}
/* fallthru */
case 7:
cpu_x86_update_dr7(env, t0);
return;
}
raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
}
/* Check if Port I/O is trapped by a breakpoint. */
void helper_bpt_io(CPUX86State *env, uint32_t port,
uint32_t size, target_ulong next_eip)
{
target_ulong dr7 = env->dr[7];
int i, hit = 0;
for (i = 0; i < DR7_MAX_BP; ++i) {
if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
&& hw_breakpoint_enabled(dr7, i)) {
int bpt_len = hw_breakpoint_len(dr7, i);
if (port + size - 1 >= env->dr[i]
&& port <= env->dr[i] + bpt_len - 1) {
hit |= 1 << i;
}
}
}
if (hit) {
env->dr[6] = (env->dr[6] & ~0xf) | hit;
env->eip = next_eip;
raise_exception(env, EXCP01_DB);
}
}
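
The hit test in helper_bpt_io() is a plain interval-overlap check between
the accessed port range [port, port + size - 1] and the breakpoint range
[dr[i], dr[i] + len - 1]. The same predicate in isolation:

#include <stdbool.h>
#include <stdio.h>

static bool io_bp_hit(unsigned port, unsigned size,
                      unsigned bp_port, unsigned bp_len)
{
    return port + size - 1 >= bp_port && port <= bp_port + bp_len - 1;
}

int main(void)
{
    printf("%d\n", io_bp_hit(0x60, 2, 0x61, 1));   /* 1: word access covers 0x61 */
    printf("%d\n", io_bp_hit(0x62, 1, 0x61, 1));   /* 0: ranges are disjoint */
    return 0;
}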


@@ -0,0 +1,471 @@
/*
* x86 exception helpers - sysemu code
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/helper-tcg.h"
int get_pg_mode(CPUX86State *env)
{
int pg_mode = 0;
if (env->cr[0] & CR0_WP_MASK) {
pg_mode |= PG_MODE_WP;
}
if (env->cr[4] & CR4_PAE_MASK) {
pg_mode |= PG_MODE_PAE;
}
if (env->cr[4] & CR4_PSE_MASK) {
pg_mode |= PG_MODE_PSE;
}
if (env->cr[4] & CR4_PKE_MASK) {
pg_mode |= PG_MODE_PKE;
}
if (env->cr[4] & CR4_PKS_MASK) {
pg_mode |= PG_MODE_PKS;
}
if (env->cr[4] & CR4_SMEP_MASK) {
pg_mode |= PG_MODE_SMEP;
}
if (env->cr[4] & CR4_LA57_MASK) {
pg_mode |= PG_MODE_LA57;
}
if (env->hflags & HF_LMA_MASK) {
pg_mode |= PG_MODE_LMA;
}
if (env->efer & MSR_EFER_NXE) {
pg_mode |= PG_MODE_NXE;
}
return pg_mode;
}
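
get_pg_mode() condenses the paging-relevant CR0/CR4/EFER/hflags state into
one bitmask, so mmu_translate() can be driven without reading the control
registers directly; that is what lets the same walker serve the nested NPT
case via env->nested_pg_mode. A minimal sketch of assembling such a mask,
with stub flag values assumed for illustration (the real PG_MODE_*
definitions live in cpu.h):

#include <stdio.h>

/* stub values for illustration only; not the cpu.h definitions */
#define PG_MODE_PAE  (1u << 0)
#define PG_MODE_LMA  (1u << 1)
#define PG_MODE_NXE  (1u << 2)
#define PG_MODE_WP   (1u << 16)

int main(void)
{
    /* typical 64-bit guest: CR0.WP, CR4.PAE, EFER.LMA and EFER.NXE set */
    unsigned pg_mode = PG_MODE_WP | PG_MODE_PAE | PG_MODE_LMA | PG_MODE_NXE;

    if (pg_mode & PG_MODE_NXE) {
        printf("walker will honour the NX bit, pg_mode = %#x\n", pg_mode);
    }
    return 0;
}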
#define PG_ERROR_OK (-1)
typedef hwaddr (*MMUTranslateFunc)(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
int *prot);
#define GET_HPHYS(cs, gpa, access_type, prot) \
(get_hphys_func ? get_hphys_func(cs, gpa, access_type, prot) : gpa)
static int mmu_translate(CPUState *cs, hwaddr addr, MMUTranslateFunc get_hphys_func,
uint64_t cr3, int is_write1, int mmu_idx, int pg_mode,
hwaddr *xlat, int *page_size, int *prot)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
uint64_t ptep, pte;
int32_t a20_mask;
target_ulong pde_addr, pte_addr;
int error_code = 0;
int is_dirty, is_write, is_user;
uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
uint32_t page_offset;
uint32_t pkr;
is_user = (mmu_idx == MMU_USER_IDX);
is_write = is_write1 & 1;
a20_mask = x86_get_a20_mask(env);
if (!(pg_mode & PG_MODE_NXE)) {
rsvd_mask |= PG_NX_MASK;
}
if (pg_mode & PG_MODE_PAE) {
uint64_t pde, pdpe;
target_ulong pdpe_addr;
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
bool la57 = pg_mode & PG_MODE_LA57;
uint64_t pml5e_addr, pml5e;
uint64_t pml4e_addr, pml4e;
int32_t sext;
/* test virtual address sign extension */
sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
if (get_hphys_func && sext != 0 && sext != -1) {
env->error_code = 0;
cs->exception_index = EXCP0D_GPF;
return 1;
}
if (la57) {
pml5e_addr = ((cr3 & ~0xfff) +
(((addr >> 48) & 0x1ff) << 3)) & a20_mask;
pml5e_addr = GET_HPHYS(cs, pml5e_addr, MMU_DATA_STORE, NULL);
pml5e = x86_ldq_phys(cs, pml5e_addr);
if (!(pml5e & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
goto do_fault_rsvd;
}
if (!(pml5e & PG_ACCESSED_MASK)) {
pml5e |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
}
ptep = pml5e ^ PG_NX_MASK;
} else {
pml5e = cr3;
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
}
pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
(((addr >> 39) & 0x1ff) << 3)) & a20_mask;
pml4e_addr = GET_HPHYS(cs, pml4e_addr, MMU_DATA_STORE, NULL);
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
goto do_fault_rsvd;
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
}
ptep &= pml4e ^ PG_NX_MASK;
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
a20_mask;
pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pdpe & rsvd_mask) {
goto do_fault_rsvd;
}
ptep &= pdpe ^ PG_NX_MASK;
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
}
if (pdpe & PG_PSE_MASK) {
/* 1 GB page */
*page_size = 1024 * 1024 * 1024;
pte_addr = pdpe_addr;
pte = pdpe;
goto do_check_protect;
}
} else
#endif
{
/* XXX: load them when cr3 is loaded ? */
pdpe_addr = ((cr3 & ~0x1f) + ((addr >> 27) & 0x18)) &
a20_mask;
pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
rsvd_mask |= PG_HI_USER_MASK;
if (pdpe & (rsvd_mask | PG_NX_MASK)) {
goto do_fault_rsvd;
}
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
}
pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
a20_mask;
pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL);
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pde & rsvd_mask) {
goto do_fault_rsvd;
}
ptep &= pde ^ PG_NX_MASK;
if (pde & PG_PSE_MASK) {
/* 2 MB page */
*page_size = 2048 * 1024;
pte_addr = pde_addr;
pte = pde;
goto do_check_protect;
}
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
a20_mask;
pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL);
pte = x86_ldq_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pte & rsvd_mask) {
goto do_fault_rsvd;
}
/* combine pde and pte nx, user and rw protections */
ptep &= pte ^ PG_NX_MASK;
*page_size = 4096;
} else {
uint32_t pde;
/* page directory entry */
pde_addr = ((cr3 & ~0xfff) + ((addr >> 20) & 0xffc)) &
a20_mask;
pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL);
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
}
ptep = pde | PG_NX_MASK;
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) {
*page_size = 4096 * 1024;
pte_addr = pde_addr;
/* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
* Leave bits 20-13 in place for setting accessed/dirty bits below.
*/
pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
rsvd_mask = 0x200000;
goto do_check_protect_pse36;
}
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
/* page table entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
a20_mask;
pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL);
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
/* combine pde and pte user and rw protections */
ptep &= pte | PG_NX_MASK;
*page_size = 4096;
rsvd_mask = 0;
}
do_check_protect:
rsvd_mask |= (*page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
if (pte & rsvd_mask) {
goto do_fault_rsvd;
}
ptep ^= PG_NX_MASK;
/* can the page be put in the TLB? prot will tell us */
if (is_user && !(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
*prot = 0;
if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
*prot |= PAGE_READ;
if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
*prot |= PAGE_WRITE;
}
}
if (!(ptep & PG_NX_MASK) &&
(mmu_idx == MMU_USER_IDX ||
!((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) {
*prot |= PAGE_EXEC;
}
if (!(env->hflags & HF_LMA_MASK)) {
pkr = 0;
} else if (ptep & PG_USER_MASK) {
pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0;
} else {
pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0;
}
if (pkr) {
uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
uint32_t pkr_ad = (pkr >> pk * 2) & 1;
uint32_t pkr_wd = (pkr >> pk * 2) & 2;
uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
if (pkr_ad) {
pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
} else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) {
pkr_prot &= ~PAGE_WRITE;
}
*prot &= pkr_prot;
if ((pkr_prot & (1 << is_write1)) == 0) {
assert(is_write1 != 2);
error_code |= PG_ERROR_PK_MASK;
goto do_fault_protect;
}
}
if ((*prot & (1 << is_write1)) == 0) {
goto do_fault_protect;
}
/* yes, it can! */
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty) {
pte |= PG_DIRTY_MASK;
}
x86_stl_phys_notdirty(cs, pte_addr, pte);
}
if (!(pte & PG_DIRTY_MASK)) {
/* only set write access if already dirty... otherwise wait
for dirty access */
assert(!is_write);
*prot &= ~PAGE_WRITE;
}
pte = pte & a20_mask;
/* align to page_size */
pte &= PG_ADDRESS_MASK & ~(*page_size - 1);
page_offset = addr & (*page_size - 1);
*xlat = GET_HPHYS(cs, pte + page_offset, is_write1, prot);
return PG_ERROR_OK;
do_fault_rsvd:
error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
error_code |= PG_ERROR_P_MASK;
do_fault:
error_code |= (is_write << PG_ERROR_W_BIT);
if (is_user)
error_code |= PG_ERROR_U_MASK;
if (is_write1 == 2 &&
(((pg_mode & PG_MODE_NXE) && (pg_mode & PG_MODE_PAE)) ||
(pg_mode & PG_MODE_SMEP)))
error_code |= PG_ERROR_I_D_MASK;
return error_code;
}
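
The shifts scattered through the long-mode walk above (addr >> 39, >> 30,
>> 21, >> 12, each masked with 0x1ff) are the standard 4-level x86-64
split: every level consumes 9 bits of the virtual address and the low
12 bits are the page offset. A standalone illustration of that
decomposition:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0x7f1234567890ULL;

    /* same bit slicing as the PAE/long-mode walk above */
    unsigned pml4 = (addr >> 39) & 0x1ff;
    unsigned pdpt = (addr >> 30) & 0x1ff;
    unsigned pd   = (addr >> 21) & 0x1ff;
    unsigned pt   = (addr >> 12) & 0x1ff;
    unsigned off  = addr & 0xfff;

    printf("PML4=%u PDPT=%u PD=%u PT=%u offset=0x%03x\n",
           pml4, pdpt, pd, pt, off);
    return 0;
}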
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
int *prot)
{
CPUX86State *env = &X86_CPU(cs)->env;
uint64_t exit_info_1;
int page_size;
int next_prot;
hwaddr hphys;
if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
return gphys;
}
exit_info_1 = mmu_translate(cs, gphys, NULL, env->nested_cr3,
access_type, MMU_USER_IDX, env->nested_pg_mode,
&hphys, &page_size, &next_prot);
if (exit_info_1 == PG_ERROR_OK) {
if (prot) {
*prot &= next_prot;
}
return hphys;
}
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
gphys);
if (prot) {
exit_info_1 |= SVM_NPTEXIT_GPA;
} else { /* page table access */
exit_info_1 |= SVM_NPTEXIT_GPT;
}
cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}
/* return value:
* -1 = cannot handle fault
* 0 = nothing more to do
* 1 = generate PF fault
*/
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
int is_write1, int mmu_idx)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
int error_code = PG_ERROR_OK;
int pg_mode, prot, page_size;
hwaddr paddr;
hwaddr vaddr;
#if defined(DEBUG_MMU)
printf("MMU fault: addr=%" VADDR_PRIx " w=%d mmu=%d eip=" TARGET_FMT_lx "\n",
addr, is_write1, mmu_idx, env->eip);
#endif
if (!(env->cr[0] & CR0_PG_MASK)) {
paddr = addr;
#ifdef TARGET_X86_64
if (!(env->hflags & HF_LMA_MASK)) {
/* Without long mode we can only address 32 bits in real mode */
paddr = (uint32_t)paddr;
}
#endif
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
page_size = 4096;
} else {
pg_mode = get_pg_mode(env);
error_code = mmu_translate(cs, addr, get_hphys, env->cr[3], is_write1,
mmu_idx, pg_mode,
&paddr, &page_size, &prot);
}
if (error_code == PG_ERROR_OK) {
/* Even with 4 MB pages, we map only one 4 KB page in the TLB
to avoid filling it too quickly */
vaddr = addr & TARGET_PAGE_MASK;
paddr &= TARGET_PAGE_MASK;
assert(prot & (1 << is_write1));
tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
prot, mmu_idx, page_size);
return 0;
} else {
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
env->cr[2] = addr;
}
env->error_code = error_code;
cs->exception_index = EXCP0E_PAGE;
return 1;
}
}
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
env->retaddr = retaddr;
if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
/* FIXME: On error in get_hphys we have already jumped out. */
g_assert(!probe);
raise_exception_err_ra(env, cs->exception_index,
env->error_code, retaddr);
}
return true;
}


@@ -0,0 +1,57 @@
/*
* x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers (sysemu code)
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/irq.h"
static qemu_irq ferr_irq;
void x86_register_ferr_irq(qemu_irq irq)
{
ferr_irq = irq;
}
void fpu_check_raise_ferr_irq(CPUX86State *env)
{
if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
qemu_irq_raise(ferr_irq);
return;
}
}
void cpu_clear_ignne(void)
{
CPUX86State *env = &X86_CPU(first_cpu)->env;
env->hflags2 &= ~HF2_IGNNE_MASK;
}
void cpu_set_ignne(void)
{
CPUX86State *env = &X86_CPU(first_cpu)->env;
env->hflags2 |= HF2_IGNNE_MASK;
/*
* We get here in response to a write to port F0h. The chipset should
* deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is
* cleared, because FERR# and FP_IRQ are two separate pins on real
* hardware. However, we don't model FERR# as a qemu_irq, so we just
* do directly what the chipset would do, i.e. deassert FP_IRQ.
*/
qemu_irq_lower(ferr_irq);
}


@@ -0,0 +1,10 @@
i386_softmmu_ss.add(when: ['CONFIG_TCG', 'CONFIG_SOFTMMU'], if_true: files(
'tcg-cpu.c',
'smm_helper.c',
'excp_helper.c',
'bpt_helper.c',
'misc_helper.c',
'fpu_helper.c',
'svm_helper.c',
'seg_helper.c',
))


@@ -0,0 +1,442 @@
/*
* x86 misc helpers - sysemu code
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "tcg/helper-tcg.h"
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
address_space_stb(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
}
target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
return address_space_ldub(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
}
void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
address_space_stw(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
}
target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
return address_space_lduw(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
}
void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
address_space_stl(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
}
target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
return address_space_ldl(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
}
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
target_ulong val;
cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC());
switch (reg) {
default:
val = env->cr[reg];
break;
case 8:
if (!(env->hflags2 & HF2_VINTR_MASK)) {
val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
} else {
val = env->v_tpr;
}
break;
}
return val;
}
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC());
switch (reg) {
case 0:
cpu_x86_update_cr0(env, t0);
break;
case 3:
cpu_x86_update_cr3(env, t0);
break;
case 4:
if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
(env->hflags & HF_CS64_MASK)) {
raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
cpu_x86_update_cr4(env, t0);
break;
case 8:
if (!(env->hflags2 & HF2_VINTR_MASK)) {
qemu_mutex_lock_iothread();
cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
qemu_mutex_unlock_iothread();
}
env->v_tpr = t0 & 0x0f;
break;
default:
env->cr[reg] = t0;
break;
}
}
void helper_wrmsr(CPUX86State *env)
{
uint64_t val;
CPUState *cs = env_cpu(env);
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());
val = ((uint32_t)env->regs[R_EAX]) |
((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
switch ((uint32_t)env->regs[R_ECX]) {
case MSR_IA32_SYSENTER_CS:
env->sysenter_cs = val & 0xffff;
break;
case MSR_IA32_SYSENTER_ESP:
env->sysenter_esp = val;
break;
case MSR_IA32_SYSENTER_EIP:
env->sysenter_eip = val;
break;
case MSR_IA32_APICBASE:
cpu_set_apic_base(env_archcpu(env)->apic_state, val);
break;
case MSR_EFER:
{
uint64_t update_mask;
update_mask = 0;
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
update_mask |= MSR_EFER_SCE;
}
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
update_mask |= MSR_EFER_LME;
}
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
update_mask |= MSR_EFER_FFXSR;
}
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
update_mask |= MSR_EFER_NXE;
}
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
update_mask |= MSR_EFER_SVME;
}
cpu_load_efer(env, (env->efer & ~update_mask) |
(val & update_mask));
}
break;
case MSR_STAR:
env->star = val;
break;
case MSR_PAT:
env->pat = val;
break;
case MSR_IA32_PKRS:
if (val & 0xFFFFFFFF00000000ull) {
goto error;
}
env->pkrs = val;
tlb_flush(cs);
break;
case MSR_VM_HSAVE_PA:
env->vm_hsave = val;
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
env->lstar = val;
break;
case MSR_CSTAR:
env->cstar = val;
break;
case MSR_FMASK:
env->fmask = val;
break;
case MSR_FSBASE:
env->segs[R_FS].base = val;
break;
case MSR_GSBASE:
env->segs[R_GS].base = val;
break;
case MSR_KERNELGSBASE:
env->kernelgsbase = val;
break;
#endif
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):
case MSR_MTRRphysBase(2):
case MSR_MTRRphysBase(3):
case MSR_MTRRphysBase(4):
case MSR_MTRRphysBase(5):
case MSR_MTRRphysBase(6):
case MSR_MTRRphysBase(7):
env->mtrr_var[((uint32_t)env->regs[R_ECX] -
MSR_MTRRphysBase(0)) / 2].base = val;
break;
case MSR_MTRRphysMask(0):
case MSR_MTRRphysMask(1):
case MSR_MTRRphysMask(2):
case MSR_MTRRphysMask(3):
case MSR_MTRRphysMask(4):
case MSR_MTRRphysMask(5):
case MSR_MTRRphysMask(6):
case MSR_MTRRphysMask(7):
env->mtrr_var[((uint32_t)env->regs[R_ECX] -
MSR_MTRRphysMask(0)) / 2].mask = val;
break;
case MSR_MTRRfix64K_00000:
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix64K_00000] = val;
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix16K_80000 + 1] = val;
break;
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix4K_C0000 + 3] = val;
break;
case MSR_MTRRdefType:
env->mtrr_deftype = val;
break;
case MSR_MCG_STATUS:
env->mcg_status = val;
break;
case MSR_MCG_CTL:
if ((env->mcg_cap & MCG_CTL_P)
&& (val == 0 || val == ~(uint64_t)0)) {
env->mcg_ctl = val;
}
break;
case MSR_TSC_AUX:
env->tsc_aux = val;
break;
case MSR_IA32_MISC_ENABLE:
env->msr_ia32_misc_enable = val;
break;
case MSR_IA32_BNDCFGS:
/* FIXME: #GP if reserved bits are set. */
/* FIXME: Extend highest implemented bit of linear address. */
env->msr_bndcfgs = val;
cpu_sync_bndcs_hflags(env);
break;
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
(4 * (env->mcg_cap & 0xff))) {
uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
if ((offset & 0x3) != 0
|| (val == 0 || val == ~(uint64_t)0)) {
env->mce_banks[offset] = val;
}
break;
}
/* XXX: exception? */
break;
}
return;
error:
raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
void helper_rdmsr(CPUX86State *env)
{
X86CPU *x86_cpu = env_archcpu(env);
uint64_t val;
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());
switch ((uint32_t)env->regs[R_ECX]) {
case MSR_IA32_SYSENTER_CS:
val = env->sysenter_cs;
break;
case MSR_IA32_SYSENTER_ESP:
val = env->sysenter_esp;
break;
case MSR_IA32_SYSENTER_EIP:
val = env->sysenter_eip;
break;
case MSR_IA32_APICBASE:
val = cpu_get_apic_base(env_archcpu(env)->apic_state);
break;
case MSR_EFER:
val = env->efer;
break;
case MSR_STAR:
val = env->star;
break;
case MSR_PAT:
val = env->pat;
break;
case MSR_IA32_PKRS:
val = env->pkrs;
break;
case MSR_VM_HSAVE_PA:
val = env->vm_hsave;
break;
case MSR_IA32_PERF_STATUS:
/* tsc_increment_by_tick */
val = 1000ULL;
/* CPU multiplier */
val |= (((uint64_t)4ULL) << 40);
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
val = env->lstar;
break;
case MSR_CSTAR:
val = env->cstar;
break;
case MSR_FMASK:
val = env->fmask;
break;
case MSR_FSBASE:
val = env->segs[R_FS].base;
break;
case MSR_GSBASE:
val = env->segs[R_GS].base;
break;
case MSR_KERNELGSBASE:
val = env->kernelgsbase;
break;
case MSR_TSC_AUX:
val = env->tsc_aux;
break;
#endif
case MSR_SMI_COUNT:
val = env->msr_smi_count;
break;
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):
case MSR_MTRRphysBase(2):
case MSR_MTRRphysBase(3):
case MSR_MTRRphysBase(4):
case MSR_MTRRphysBase(5):
case MSR_MTRRphysBase(6):
case MSR_MTRRphysBase(7):
val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
MSR_MTRRphysBase(0)) / 2].base;
break;
case MSR_MTRRphysMask(0):
case MSR_MTRRphysMask(1):
case MSR_MTRRphysMask(2):
case MSR_MTRRphysMask(3):
case MSR_MTRRphysMask(4):
case MSR_MTRRphysMask(5):
case MSR_MTRRphysMask(6):
case MSR_MTRRphysMask(7):
val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
MSR_MTRRphysMask(0)) / 2].mask;
break;
case MSR_MTRRfix64K_00000:
val = env->mtrr_fixed[0];
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix16K_80000 + 1];
break;
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
MSR_MTRRfix4K_C0000 + 3];
break;
case MSR_MTRRdefType:
val = env->mtrr_deftype;
break;
case MSR_MTRRcap:
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
MSR_MTRRcap_WC_SUPPORTED;
} else {
/* XXX: exception? */
val = 0;
}
break;
case MSR_MCG_CAP:
val = env->mcg_cap;
break;
case MSR_MCG_CTL:
if (env->mcg_cap & MCG_CTL_P) {
val = env->mcg_ctl;
} else {
val = 0;
}
break;
case MSR_MCG_STATUS:
val = env->mcg_status;
break;
case MSR_IA32_MISC_ENABLE:
val = env->msr_ia32_misc_enable;
break;
case MSR_IA32_BNDCFGS:
val = env->msr_bndcfgs;
break;
case MSR_IA32_UCODE_REV:
val = x86_cpu->ucode_rev;
break;
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
(4 * (env->mcg_cap & 0xff))) {
uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
val = env->mce_banks[offset];
break;
}
/* XXX: exception? */
val = 0;
break;
}
env->regs[R_EAX] = (uint32_t)(val);
env->regs[R_EDX] = (uint32_t)(val >> 32);
}
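
Both helpers pass the 64-bit MSR value through the EDX:EAX pair:
helper_wrmsr() joins EAX | (EDX << 32) and helper_rdmsr() splits the value
back as above. A round-trip sketch of that packing:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t val = 0x1122334455667788ULL;

    /* rdmsr-style split ... */
    uint32_t eax = (uint32_t)val;
    uint32_t edx = (uint32_t)(val >> 32);

    /* ... and wrmsr-style join */
    uint64_t joined = eax | ((uint64_t)edx << 32);

    assert(joined == val);
    return 0;
}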


@@ -0,0 +1,125 @@
/*
* x86 segmentation related helpers: (sysemu-only code)
* TSS, interrupts, system calls, jumps and call/task gates, descriptors
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
#ifdef TARGET_X86_64
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
int selector;
if (!(env->efer & MSR_EFER_SCE)) {
raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
}
selector = (env->star >> 32) & 0xffff;
if (env->hflags & HF_LMA_MASK) {
int code64;
env->regs[R_ECX] = env->eip + next_eip_addend;
env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
code64 = env->hflags & HF_CS64_MASK;
env->eflags &= ~(env->fmask | RF_MASK);
cpu_load_eflags(env, env->eflags, 0);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
DESC_L_MASK);
cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
if (code64) {
env->eip = env->lstar;
} else {
env->eip = env->cstar;
}
} else {
env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
env->eip = (uint32_t)env->star;
}
}
#endif /* TARGET_X86_64 */
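
A note on the MSR_STAR layout helper_syscall() relies on: bits 47:32 hold
the SYSCALL CS selector, and the architecture defines the SYSCALL SS
selector as that value plus 8, which is where (selector + 8) & 0xfffc
comes from. A standalone illustration with a made-up STAR value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t star = 0x0023001000000000ULL;   /* made-up STAR contents */

    /* same extraction as helper_syscall() */
    int selector = (star >> 32) & 0xffff;

    printf("SYSCALL CS = 0x%04x, SS = 0x%04x\n",
           selector & 0xfffc, (selector + 8) & 0xfffc);
    return 0;
}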
void handle_even_inj(CPUX86State *env, int intno, int is_int,
int error_code, int is_hw, int rm)
{
CPUState *cs = env_cpu(env);
uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
if (!(event_inj & SVM_EVTINJ_VALID)) {
int type;
if (is_int) {
type = SVM_EVTINJ_TYPE_SOFT;
} else {
type = SVM_EVTINJ_TYPE_EXEPT;
}
event_inj = intno | type | SVM_EVTINJ_VALID;
if (!rm && exception_has_error_code(intno)) {
event_inj |= SVM_EVTINJ_VALID_ERR;
x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj_err),
error_code);
}
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
event_inj);
}
}
void x86_cpu_do_interrupt(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
if (cs->exception_index == EXCP_VMEXIT) {
assert(env->old_exception == -1);
do_vmexit(env);
} else {
do_interrupt_all(cpu, cs->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip, 0);
/* successfully delivered */
env->old_exception = -1;
}
}


@@ -1,5 +1,5 @@
/*
* x86 SMM helpers
* x86 SMM helpers (sysemu-only)
*
* Copyright (c) 2003 Fabrice Bellard
*
@@ -18,27 +18,14 @@
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "tcg/helper-tcg.h"
/* SMM support */
#if defined(CONFIG_USER_ONLY)
void do_smm_enter(X86CPU *cpu)
{
}
void helper_rsm(CPUX86State *env)
{
}
#else
#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
@@ -330,5 +317,3 @@ void helper_rsm(CPUX86State *env)
qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}
#endif /* !CONFIG_USER_ONLY */


@@ -1,5 +1,5 @@
/*
* x86 SVM helpers
* x86 SVM helpers (sysemu only)
*
* Copyright (c) 2003 Fabrice Bellard
*
@@ -22,66 +22,10 @@
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "helper-tcg.h"
#include "tcg/helper-tcg.h"
/* Secure Virtual Machine helpers */
#if defined(CONFIG_USER_ONLY)
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}
void helper_vmmcall(CPUX86State *env)
{
}
void helper_vmload(CPUX86State *env, int aflag)
{
}
void helper_vmsave(CPUX86State *env, int aflag)
{
}
void helper_stgi(CPUX86State *env)
{
}
void helper_clgi(CPUX86State *env)
{
}
void helper_skinit(CPUX86State *env)
{
}
void helper_invlpga(CPUX86State *env, int aflag)
{
}
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
uintptr_t retaddr)
{
assert(0);
}
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
uint64_t param)
{
}
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
uint64_t param, uintptr_t retaddr)
{
}
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
uint32_t next_eip_addend)
{
}
#else
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
const SegmentCache *sc)
{
@@ -219,18 +163,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
control.nested_cr3));
env->hflags2 |= HF2_NPT_MASK;
if (env->cr[4] & CR4_PAE_MASK) {
env->nested_pg_mode |= SVM_NPT_PAE;
}
if (env->cr[4] & CR4_PSE_MASK) {
env->nested_pg_mode |= SVM_NPT_PSE;
}
if (env->hflags & HF_LMA_MASK) {
env->nested_pg_mode |= SVM_NPT_LMA;
}
if (env->efer & MSR_EFER_NXE) {
env->nested_pg_mode |= SVM_NPT_NXE;
}
env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
}
/* enable intercepts */
@@ -796,5 +729,3 @@ void do_vmexit(CPUX86State *env)
host's code segment or non-canonical (in the case of long mode), a
#GP fault is delivered inside the host. */
}
#endif


@@ -0,0 +1,83 @@
/*
* i386 TCG cpu class initialization functions specific to sysemu
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/helper-tcg.h"
#include "sysemu/sysemu.h"
#include "qemu/units.h"
#include "exec/address-spaces.h"
#include "tcg/tcg-cpu.h"
static void tcg_cpu_machine_done(Notifier *n, void *unused)
{
X86CPU *cpu = container_of(n, X86CPU, machine_done);
MemoryRegion *smram =
(MemoryRegion *) object_resolve_path("/machine/smram", NULL);
if (smram) {
cpu->smram = g_new(MemoryRegion, 1);
memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
smram, 0, 4 * GiB);
memory_region_set_enabled(cpu->smram, true);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0,
cpu->smram, 1);
}
}
bool tcg_cpu_realizefn(CPUState *cs, Error **errp)
{
X86CPU *cpu = X86_CPU(cs);
/*
* The realize order is important, since x86_cpu_realize() checks if
* nothing else has been set by the user (or by accelerators) in
* cpu->ucode_rev and cpu->phys_bits, and the memory regions
* initialized here are needed for the vcpu initialization.
*
* realize order:
* tcg_cpu -> host_cpu -> x86_cpu
*/
cpu->cpu_as_mem = g_new(MemoryRegion, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
/* Outer container... */
memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
memory_region_set_enabled(cpu->cpu_as_root, true);
/*
* ... with two regions inside: normal system memory with low
* priority, and...
*/
memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
get_system_memory(), 0, ~0ull);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
memory_region_set_enabled(cpu->cpu_as_mem, true);
cs->num_ases = 2;
cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
/* ... SMRAM with higher priority, linked from /machine/smram. */
cpu->machine_done.notify = tcg_cpu_machine_done;
qemu_add_machine_init_done_notifier(&cpu->machine_done);
return true;
}


@@ -19,14 +19,11 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg-cpu.h"
#include "exec/exec-all.h"
#include "sysemu/runstate.h"
#include "helper-tcg.h"
#include "qemu/accel.h"
#include "hw/core/accel-cpu.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "tcg-cpu.h"
/* Frob eflags into and out of the CPU temporary format. */
@@ -72,7 +69,52 @@ static struct TCGCPUOps x86_tcg_ops = {
#endif /* !CONFIG_USER_ONLY */
};
void tcg_cpu_common_class_init(CPUClass *cc)
static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
/* for x86, all cpus use the same set of operations */
cc->tcg_ops = &x86_tcg_ops;
}
static void tcg_cpu_class_init(CPUClass *cc)
{
cc->init_accel_cpu = tcg_cpu_init_ops;
}
/*
* TCG-specific defaults that override all CPU models when using TCG
*/
static PropValue tcg_default_props[] = {
{ "vme", "off" },
{ NULL, NULL },
};
static void tcg_cpu_instance_init(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
/* Special cases not set in the X86CPUDefinition structs: */
x86_cpu_apply_props(cpu, tcg_default_props);
}
static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
#ifndef CONFIG_USER_ONLY
acc->cpu_realizefn = tcg_cpu_realizefn;
#endif /* !CONFIG_USER_ONLY */
acc->cpu_class_init = tcg_cpu_class_init;
acc->cpu_instance_init = tcg_cpu_instance_init;
}
static const TypeInfo tcg_cpu_accel_type_info = {
.name = ACCEL_CPU_NAME("tcg"),
.parent = TYPE_ACCEL_CPU,
.class_init = tcg_cpu_accel_class_init,
.abstract = true,
};
static void tcg_cpu_accel_register_types(void)
{
type_register_static(&tcg_cpu_accel_type_info);
}
type_init(tcg_cpu_accel_register_types);


@@ -1,15 +1,24 @@
/*
* i386 TCG CPU class initialization
* i386 TCG cpu class initialization functions
*
* Copyright 2020 SUSE LLC
* Copyright (c) 2003 Fabrice Bellard
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TCG_CPU_H
#define TCG_CPU_H
void tcg_cpu_common_class_init(CPUClass *cc);
bool tcg_cpu_realizefn(CPUState *cs, Error **errp);
#endif /* TCG_CPU_H */


@@ -1117,16 +1117,20 @@ static inline void gen_cmps(DisasContext *s, MemOp ot)
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
/* user-mode cpu should not be in IOBPT mode */
g_assert_not_reached();
#else
TCGv_i32 t_size = tcg_const_i32(1 << ot);
TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
tcg_temp_free_i32(t_size);
tcg_temp_free(t_next);
#endif /* CONFIG_USER_ONLY */
}
}
static inline void gen_ins(DisasContext *s, MemOp ot)
{
gen_string_movl_A0_EDI(s);
@@ -8061,6 +8065,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
#ifndef CONFIG_USER_ONLY
modrm = x86_ldub_code(env, s);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
@@ -8089,6 +8094,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
gen_op_mov_reg_v(s, ot, rm, s->T0);
}
#endif /* !CONFIG_USER_ONLY */
}
break;
case 0x106: /* clts */
@@ -8325,9 +8331,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
if (!(s->flags & HF_SMM_MASK))
goto illegal_op;
#ifdef CONFIG_USER_ONLY
/* we should not be in SMM mode */
g_assert_not_reached();
#else
gen_update_cc_op(s);
gen_jmp_im(s, s->pc - s->cs_base);
gen_helper_rsm(cpu_env);
#endif /* CONFIG_USER_ONLY */
gen_eob(s);
break;
case 0x1b8: /* SSE4.2 popcnt */


@@ -0,0 +1,39 @@
/*
* x86 exception helpers - user-mode specific code
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/helper-tcg.h"
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
env->cr[2] = addr;
env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
env->error_code |= PG_ERROR_U_MASK;
cs->exception_index = EXCP0E_PAGE;
env->exception_is_int = 0;
env->exception_next_eip = -1;
cpu_loop_exit_restore(cs, retaddr);
}
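
In the user-only build every fault is by construction a user-mode access,
so the error code combines just two architectural #PF bits: bit 1 (W) for
writes and bit 2 (U), which is always set. A sketch with stub constants
mirroring that layout (the real PG_ERROR_* definitions are in cpu.h):

#include <stdio.h>

/* stub values mirroring the x86 #PF error-code layout */
#define PG_ERROR_W_BIT  1
#define PG_ERROR_U_MASK 0x4

int main(void)
{
    int is_store = 1;

    /* same construction as x86_cpu_tlb_fill() above */
    int error_code = (is_store << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;

    printf("user write fault -> error code %#x\n", error_code);   /* 0x6 */
    return 0;
}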


@@ -0,0 +1,6 @@
i386_user_ss.add(when: ['CONFIG_TCG', 'CONFIG_USER_ONLY'], if_true: files(
'excp_helper.c',
'misc_stubs.c',
'svm_stubs.c',
'seg_helper.c',
))


@@ -0,0 +1,75 @@
/*
* x86 misc helpers
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
g_assert_not_reached();
}
target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
g_assert_not_reached();
return 0;
}
void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
g_assert_not_reached();
}
target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
g_assert_not_reached();
return 0;
}
void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
g_assert_not_reached();
}
target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
g_assert_not_reached();
return 0;
}
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
g_assert_not_reached();
}
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
g_assert_not_reached();
}
void helper_wrmsr(CPUX86State *env)
{
g_assert_not_reached();
}
void helper_rdmsr(CPUX86State *env)
{
g_assert_not_reached();
}


@@ -0,0 +1,109 @@
/*
* x86 segmentation related helpers (user-mode code):
* TSS, interrupts, system calls, jumps and call/task gates, descriptors
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
#include "tcg/seg_helper.h"
#ifdef TARGET_X86_64
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
CPUState *cs = env_cpu(env);
cs->exception_index = EXCP_SYSCALL;
env->exception_is_int = 0;
env->exception_next_eip = env->eip + next_eip_addend;
cpu_loop_exit(cs);
}
#endif /* TARGET_X86_64 */
/*
* fake user mode interrupt. is_int is TRUE if coming from the int
* instruction. next_eip is the env->eip value AFTER the interrupt
* instruction. It is only relevant if is_int is TRUE or if intno
* is EXCP_SYSCALL.
*/
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
int error_code, target_ulong next_eip)
{
if (is_int) {
SegmentCache *dt;
target_ulong ptr;
int dpl, cpl, shift;
uint32_t e2;
dt = &env->idt;
if (env->hflags & HF_LMA_MASK) {
shift = 4;
} else {
shift = 3;
}
ptr = dt->base + (intno << shift);
e2 = cpu_ldl_kernel(env, ptr + 4);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
/* check privilege if software int */
if (dpl < cpl) {
raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
}
}
/* Since we emulate only user space, we cannot do more than
exiting the emulation with the suitable exception and error
code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
if (is_int || intno == EXCP_SYSCALL) {
env->eip = next_eip;
}
}
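
/*
 * Editor's note, not part of the patch: a worked example of the IDT
 * indexing above. For "int $0x80" with HF_LMA_MASK set, descriptors are
 * 16 bytes, so shift = 4 and the DPL bits are read from
 * dt->base + (0x80 << 4) + 4 = base + 0x804; in legacy protected mode
 * shift = 3 gives base + (0x80 << 3) + 4 = base + 0x404.
 */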

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
}

void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
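
The real-mode/VM86 branch above fills the segment descriptor cache straight
from the selector. A minimal standalone sketch of that arithmetic (editor's
illustration, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t selector = 0x1234;
    uint32_t base  = (uint32_t)selector << 4; /* real-mode base = selector * 16 */
    uint32_t limit = 0xffff;                  /* fixed 64 KiB real-mode limit */

    printf("selector %#x -> base %#x, limit %#x\n", selector, base, limit);
    return 0;
}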


@@ -0,0 +1,76 @@
/*
* x86 SVM helpers (user-mode)
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "tcg/helper-tcg.h"
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}
void helper_vmmcall(CPUX86State *env)
{
}
void helper_vmload(CPUX86State *env, int aflag)
{
}
void helper_vmsave(CPUX86State *env, int aflag)
{
}
void helper_stgi(CPUX86State *env)
{
}
void helper_clgi(CPUX86State *env)
{
}
void helper_skinit(CPUX86State *env)
{
}
void helper_invlpga(CPUX86State *env, int aflag)
{
}
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
uintptr_t retaddr)
{
assert(0);
}
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
uint64_t param)
{
}
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
uint64_t param, uintptr_t retaddr)
{
}
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
uint32_t next_eip_addend)
{
}


@@ -591,64 +591,3 @@ void event_notifier_set_handler(EventNotifier *e,
    aio_set_event_notifier(iohandler_ctx, e, false,
                           handler, NULL);
}

/* reaping of zombies. right now we're not passing the status to
   anyone, but it would be possible to add a callback. */
#ifndef _WIN32
typedef struct ChildProcessRecord {
    int pid;
    QLIST_ENTRY(ChildProcessRecord) next;
} ChildProcessRecord;

static QLIST_HEAD(, ChildProcessRecord) child_watches =
    QLIST_HEAD_INITIALIZER(child_watches);

static QEMUBH *sigchld_bh;

static void sigchld_handler(int signal)
{
    qemu_bh_schedule(sigchld_bh);
}

static void sigchld_bh_handler(void *opaque)
{
    ChildProcessRecord *rec, *next;

    QLIST_FOREACH_SAFE(rec, &child_watches, next, next) {
        if (waitpid(rec->pid, NULL, WNOHANG) == rec->pid) {
            QLIST_REMOVE(rec, next);
            g_free(rec);
        }
    }
}

static void qemu_init_child_watch(void)
{
    struct sigaction act;

    sigchld_bh = qemu_bh_new(sigchld_bh_handler, NULL);

    memset(&act, 0, sizeof(act));
    act.sa_handler = sigchld_handler;
    act.sa_flags = SA_NOCLDSTOP;
    sigaction(SIGCHLD, &act, NULL);
}

int qemu_add_child_watch(pid_t pid)
{
    ChildProcessRecord *rec;

    if (!sigchld_bh) {
        qemu_init_child_watch();
    }

    QLIST_FOREACH(rec, &child_watches, next) {
        if (rec->pid == pid) {
            return 1;
        }
    }

    rec = g_malloc0(sizeof(ChildProcessRecord));
    rec->pid = pid;
    QLIST_INSERT_HEAD(&child_watches, rec, next);
    return 0;
}
#endif


@@ -1056,7 +1056,8 @@ bool qemu_opts_absorb_qdict(QemuOpts *opts, QDict *qdict, Error **errp)
    while (entry != NULL) {
        next = qdict_next(qdict, entry);
        if (find_desc_by_name(opts->list->desc, entry->key)) {
        if (opts_accepts_any(opts->list) ||
            find_desc_by_name(opts->list->desc, entry->key)) {
            if (!qemu_opts_from_qdict_entry(opts, entry, errp)) {
                return false;
            }