Mirror of https://github.com/proxmox/mirror_qemu, synced 2025-10-07 04:23:51 +03:00
Compare commits
213 Commits
SHA1s of the 213 compared commits:

c95e38d33b c06aef082f 5b2fd6cf37 b831182728 fab36df7bd 25d0ca4fb0 4b2648356f 5133410f8b
5a92f023d9 701afca639 5c9f464531 ed7e167699 59de6e246f 66f14b70fe fbca2cbbef f5584a7d8d
a3e46300c5 48e0dfb837 502f15db55 49727560c7 169c593f78 5cf2cf1f9f 6f51114b0e 87ff608c6f
837148a31a 9ac76067ab 8d0b8fead4 8328d79621 5e7f6afe98 983a4a828c 88e79a2dfd cc5124d979
c6c0a1888f db8e86cc40 d8ad972b2d 179cc58e00 d19b4a4215 b88b9585d8 25f55508fc 150ebd076e
221c0e1426 100feda604 097c347136 a16eec9fb2 255422dc75 88b778e4c2 07c2a9c0fa 60f7b60429
f15258b196 f68a36b7c4 801b7e4390 30d90aebcd e301a77abb bcc9879c1c a753815aa8 cc9f53b3ec
fe8eb3187c 1f560fa276 a89c8b96a8 287303495c 30a4cc2723 b644416ec6 0f2dd05b9f 5450203719
91d789a891 3321ec125f 5f0083a95d 47c408b80e 6ede082daf 155856d890 baf28675da b7867c8262
bd8d9c618a de18cbdaf2 3a2d501916 b938418f0d 143352cd7d fd4ce7455f 28facf1598 fdeedb886a
31c6d1d654 59ee12a961 1e4c468ec7 814f91d679 e73f57b1b5 71452f87c9 791cbfeec2 6dbb538a71
7a04747125 23b0010786 adbbddf90b 663aca79f9 95b3854bf7 9285589334 af566ccb38 78385bc738
cc33ee45d6 a8c0d82f7b 2e42ba01f1 5dddba9f38 b6170717ea d8b9e0c8bc 0f1d63d824 2adbc3b1e5
fe3afc06fa e6d9dd102d 197cc86a12 809d5995c8 2990ba5471 19159a7f01 7771e35b39 ab6314506d
1e5839828d db2d4bcb8e 6831048324 f9f1d0906b e855a6ec51 8194d5827e ee7ce8a949 d1b867cca6
9fb45b0558 d6cca99ecd 6c2e2e4f77 a98097d3a9 de11111ee8 837ca790c6 d9ec18a0fc 4ade907b30
dec7785fab 1ad3fa152c d37260b9f0 73393af917 f5480c4d82 cc3a33400c 0b246f8e9e 3b86b92bfb
17f3a6221f 6b7fa3cbab b9b84b2d41 0215e8e872 7329cc1c19 d0cd94e2b6 cb6ed2f7f7 8a043309ad
8ef6104413 31a471430f 825af96d6a c2e6a00b5f f59caeca76 6970f5ba0e fda70be0c5 f7f97b9ad8
fb9e03529c b6fa8e42d1 1c2343cc61 97fec8f2c4 0e35c812b9 73b7a81107 4bff0a8287 ded5edee00
6bb4a8a47a 045fa84784 56270e5d3d 60da8301fe 8b479229ff d4919bbcc2 cae7dc1452 7385e00665
1d4fb5815c b822207513 2947da750e 60a7f5c8fe 566dac7127 8ae20123b6 6c24b6000b 987e90cfd2
b9f83298b9 3d6251f416 9832a670b3 df33ce9b6d eeee989f72 93d4107937 6356785daa 01bf87c8e3
25ec23ab3f 9dc6f05cc8 d5361580ac e8f5ca57e4 4448c345bc 4fbd5a5202 16cc9594d2 1efefd13ca
c2e0495e3c f64f1f8704 9f54fef2c0 5358980d33 6864f05cb1 0175121c6c e975434d62 e5e77f256f
645b87f650 86d7b08d71 5691fbf440 441106eebb 63188a00bb 7012e20b2d c8e381d672 34808d041c
01f6417f15 14a8213b75 c12eddbd48 880e82ed78 5980189e96
13 .b4-config

@@ -1,13 +0,0 @@
#
# Common b4 settings that can be used to send patches to QEMU upstream.
# https://b4.docs.kernel.org/
#

[b4]
send-series-to = qemu-devel@nongnu.org
send-auto-to-cmd = echo
send-auto-cc-cmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback
am-perpatch-check-cmd = scripts/checkpatch.pl -q --terse --no-summary --mailback -
prep-perpatch-check-cmd = scripts/checkpatch.pl -q --terse --no-summary --mailback -
searchmask = https://lore.kernel.org/qemu-devel/?x=m&t=1&q=%s
linkmask = https://lore.kernel.org/qemu-devel/%s
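The two get_maintainer/checkpatch hooks above are ordinary commands that b4 runs for each patch. A minimal Python sketch of what they amount to, run by hand from a QEMU checkout against a hypothetical patch file (the file name and the direct invocation are illustrative, not part of the config):

```python
import subprocess

PATCH = "0001-example.patch"  # hypothetical patch produced by git format-patch

# Auto-Cc list: the same get_maintainer.pl invocation configured as
# send-auto-cc-cmd, here simply given the patch file as an argument.
cc = subprocess.run(
    ["scripts/get_maintainer.pl", "--noroles", "--norolestats",
     "--nogit", "--nogit-fallback", PATCH],
    capture_output=True, text=True, check=True).stdout.splitlines()
print("Cc:", ", ".join(cc))

# Per-patch style check: checkpatch.pl reads the patch on stdin ("-"),
# exactly as am-perpatch-check-cmd / prep-perpatch-check-cmd specify.
with open(PATCH) as patch:
    result = subprocess.run(
        ["scripts/checkpatch.pl", "-q", "--terse", "--no-summary",
         "--mailback", "-"], stdin=patch)
print("checkpatch clean" if result.returncode == 0 else "checkpatch reported issues")
```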
@@ -47,16 +47,3 @@ emacs_mode = glsl
[*.json]
indent_style = space
emacs_mode = python

# by default follow QEMU's style
[*.pl]
indent_style = space
indent_size = 4
emacs_mode = perl

# but user kernel "style" for imported scripts
[scripts/{kernel-doc,get_maintainer.pl,checkpatch.pl}]
indent_style = tab
indent_size = 8
emacs_mode = perl
5 .gitattributes (vendored)

@@ -2,8 +2,3 @@
*.h.inc diff=c
*.m diff=objc
*.py diff=python
*.rs diff=rust
*.rs.inc diff=rust
Cargo.lock diff=toml merge=binary

*.patch -text -whitespace
@@ -24,10 +24,6 @@ variables:
# Each script line from will be in a collapsible section in the job output
# and show the duration of each line.
FF_SCRIPT_SECTIONS: 1
# The project has a fairly fat GIT repo so we try and avoid bringing in things
# we don't need. The --filter options avoid blobs and tree references we aren't going to use
# and we also avoid fetching tags.
GIT_FETCH_EXTRA_FLAGS: --filter=blob:none --filter=tree:0 --no-tags --prune --quiet

interruptible: true
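The GIT_FETCH_EXTRA_FLAGS above keep CI fetches small by skipping blobs, trees and tags. A sketch of the equivalent fetch run by hand (the "origin" remote name is illustrative; the server must support partial-clone filters):

```python
import subprocess

# Fetch refs only: blob and tree objects are downloaded lazily on demand,
# tags are skipped, and stale remote-tracking branches are pruned.
subprocess.check_call([
    "git", "fetch",
    "--filter=blob:none", "--filter=tree:0",
    "--no-tags", "--prune", "--quiet",
    "origin",
])
```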
@@ -45,10 +41,6 @@ variables:
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG'
when: never

# Scheduled runs on mainline don't get pipelines except for the special Coverity job
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
when: never

# Cirrus jobs can't run unless the creds / target repo are set
- if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
when: never

@@ -69,10 +61,14 @@ variables:
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: never

# Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: never

#############################################################
# Stage 2: fine tune execution of jobs in specific scenarios
# where the catch all logic is inappropriate
# where the catch all logic is inapprorpaite
#############################################################

# Optional jobs should not be run unless manually triggered

@@ -97,8 +93,8 @@ variables:
when: manual
allow_failure: true

# Functional jobs can be manually started in forks
- if: '$QEMU_JOB_FUNCTIONAL && $QEMU_CI_FUNCTIONAL != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
# Avocado jobs can be manually start in forks if $QEMU_CI_AVOCADO_TESTING is unset
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: manual
allow_failure: true

@@ -124,7 +120,7 @@ variables:
when: manual

# Jobs can run if any jobs they depend on were successful
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: on_success
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
@@ -2,29 +2,12 @@
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
cache:
|
||||
paths:
|
||||
- ccache
|
||||
key: "$CI_JOB_NAME"
|
||||
when: always
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- section_start setup "Pre-script setup"
|
||||
- JOBS=$(expr $(nproc) + 1)
|
||||
- cat /packages.txt
|
||||
- section_end setup
|
||||
script:
|
||||
- export CCACHE_BASEDIR="$(pwd)"
|
||||
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
|
||||
- export CCACHE_MAXSIZE="500M"
|
||||
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
|
||||
- du -sh .git
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ccache --zero-stats
|
||||
- section_start configure "Running configure"
|
||||
- ../configure --enable-werror --disable-docs --enable-fdt=system
|
||||
--disable-debug-info
|
||||
${TARGETS:+--target-list="$TARGETS"}
|
||||
$CONFIGURE_ARGS ||
|
||||
{ cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
@@ -32,17 +15,11 @@
|
||||
then
|
||||
pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
|
||||
fi || exit 1;
|
||||
- section_end configure
|
||||
- section_start build "Building QEMU"
|
||||
- $MAKE -j"$JOBS"
|
||||
- section_end build
|
||||
- section_start test "Running tests"
|
||||
- make -j"$JOBS"
|
||||
- if test -n "$MAKE_CHECK_ARGS";
|
||||
then
|
||||
$MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
|
||||
make -j"$JOBS" $MAKE_CHECK_ARGS ;
|
||||
fi
|
||||
- section_end test
|
||||
- ccache --show-stats
|
||||
|
||||
# We jump some hoops in common_test_job_template to avoid
|
||||
# rebuilding all the object files we skip in the artifacts
|
||||
@@ -56,30 +33,22 @@
|
||||
exclude:
|
||||
- build/**/*.p
|
||||
- build/**/*.a.p
|
||||
- build/**/*.fa.p
|
||||
- build/**/*.c.o
|
||||
- build/**/*.c.o.d
|
||||
- build/**/*.fa
|
||||
|
||||
.common_test_job_template:
|
||||
extends: .base_job_template
|
||||
stage: test
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- section_start buildenv "Setting up to run tests"
|
||||
- scripts/git-submodule.sh update roms/SLOF
|
||||
- build/pyvenv/bin/meson subprojects download $(cd build/subprojects && echo *)
|
||||
- meson subprojects download $(cd build/subprojects && echo *)
|
||||
- cd build
|
||||
- find . -type f -exec touch {} +
|
||||
# Avoid recompiling by hiding ninja with NINJA=":"
|
||||
# We also have to pre-cache the functional tests manually in this case
|
||||
- if [ "x${QEMU_TEST_CACHE_DIR}" != "x" ]; then
|
||||
$MAKE precache-functional ;
|
||||
fi
|
||||
- section_end buildenv
|
||||
- section_start test "Running tests"
|
||||
# doctests need all the compilation artifacts
|
||||
- $MAKE NINJA=":" MTESTARGS="--no-suite doc" $MAKE_CHECK_ARGS
|
||||
- section_end test
|
||||
- make NINJA=":" $MAKE_CHECK_ARGS
|
||||
|
||||
.native_test_job_template:
|
||||
extends: .common_test_job_template
|
||||
@@ -92,12 +61,12 @@
|
||||
reports:
|
||||
junit: build/meson-logs/testlog.junit.xml
|
||||
|
||||
.functional_test_job_template:
|
||||
.avocado_test_job_template:
|
||||
extends: .common_test_job_template
|
||||
cache:
|
||||
key: "${CI_JOB_NAME}-cache"
|
||||
paths:
|
||||
- ${CI_PROJECT_DIR}/functional-cache
|
||||
- ${CI_PROJECT_DIR}/avocado-cache
|
||||
policy: pull-push
|
||||
artifacts:
|
||||
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
|
||||
@@ -106,41 +75,21 @@
|
||||
paths:
|
||||
- build/tests/results/latest/results.xml
|
||||
- build/tests/results/latest/test-results
|
||||
- build/tests/functional/*/*/*.log
|
||||
reports:
|
||||
junit: build/tests/results/latest/results.xml
|
||||
before_script:
|
||||
- export QEMU_TEST_ALLOW_UNTRUSTED_CODE=1
|
||||
- export QEMU_TEST_CACHE_DIR=${CI_PROJECT_DIR}/functional-cache
|
||||
- mkdir -p ~/.config/avocado
|
||||
- echo "[datadir.paths]" > ~/.config/avocado/avocado.conf
|
||||
- echo "cache_dirs = ['${CI_PROJECT_DIR}/avocado-cache']"
|
||||
>> ~/.config/avocado/avocado.conf
|
||||
- echo -e '[job.output.testlogs]\nstatuses = ["FAIL", "INTERRUPT"]'
|
||||
>> ~/.config/avocado/avocado.conf
|
||||
- if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
|
||||
du -chs ${CI_PROJECT_DIR}/avocado-cache ;
|
||||
fi
|
||||
- export AVOCADO_ALLOW_UNTRUSTED_CODE=1
|
||||
after_script:
|
||||
- cd build
|
||||
- du -chs ${CI_PROJECT_DIR}/*-cache
|
||||
- du -chs ${CI_PROJECT_DIR}/avocado-cache
|
||||
variables:
|
||||
QEMU_JOB_FUNCTIONAL: 1
|
||||
|
||||
.wasm_build_job_template:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- section_start setup "Pre-script setup"
|
||||
- JOBS=$(expr $(nproc) + 1)
|
||||
- section_end setup
|
||||
script:
|
||||
- du -sh .git
|
||||
- mkdir build
|
||||
- cd build
|
||||
- section_start configure "Running configure"
|
||||
- emconfigure ../configure --disable-docs
|
||||
${TARGETS:+--target-list="$TARGETS"}
|
||||
$CONFIGURE_ARGS ||
|
||||
{ cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- if test -n "$LD_JOBS";
|
||||
then
|
||||
pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
|
||||
fi || exit 1;
|
||||
- section_end configure
|
||||
- section_start build "Building QEMU"
|
||||
- emmake make -j"$JOBS"
|
||||
- section_end build
|
||||
QEMU_JOB_AVOCADO: 1
|
||||
|
@@ -22,14 +22,14 @@ check-system-alpine:
|
||||
IMAGE: alpine
|
||||
MAKE_CHECK_ARGS: check-unit check-qtest
|
||||
|
||||
functional-system-alpine:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-alpine:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-alpine
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: alpine
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-system-ubuntu:
|
||||
extends:
|
||||
@@ -39,9 +39,10 @@ build-system-ubuntu:
|
||||
job: amd64-ubuntu2204-container
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
CONFIGURE_ARGS: --enable-docs --enable-rust
|
||||
TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu
|
||||
MAKE_CHECK_ARGS: check-build check-doc
|
||||
CONFIGURE_ARGS: --enable-docs
|
||||
TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
|
||||
microblazeel-softmmu mips64el-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-ubuntu:
|
||||
extends: .native_test_job_template
|
||||
@@ -52,14 +53,14 @@ check-system-ubuntu:
|
||||
IMAGE: ubuntu2204
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-ubuntu:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-ubuntu:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-ubuntu
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-system-debian:
|
||||
extends:
|
||||
@@ -68,10 +69,10 @@ build-system-debian:
|
||||
needs:
|
||||
job: amd64-debian-container
|
||||
variables:
|
||||
IMAGE: debian
|
||||
CONFIGURE_ARGS: --with-coroutine=sigaltstack --enable-rust
|
||||
IMAGE: debian-amd64
|
||||
CONFIGURE_ARGS: --with-coroutine=sigaltstack
|
||||
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
|
||||
sparc-softmmu xtensa-softmmu
|
||||
sparc-softmmu xtensaeb-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-debian:
|
||||
@@ -80,17 +81,17 @@ check-system-debian:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
IMAGE: debian-amd64
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-debian:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-debian:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
IMAGE: debian-amd64
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
crash-test-debian:
|
||||
extends: .native_test_job_template
|
||||
@@ -98,11 +99,11 @@ crash-test-debian:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
IMAGE: debian-amd64
|
||||
script:
|
||||
- cd build
|
||||
- make NINJA=":" check-venv
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
|
||||
- tests/venv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
|
||||
|
||||
build-system-fedora:
|
||||
extends:
|
||||
@@ -112,24 +113,10 @@ build-system-fedora:
|
||||
job: amd64-fedora-container
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs --enable-crypto-afalg --enable-rust
|
||||
TARGETS: microblaze-softmmu mips-softmmu
|
||||
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
|
||||
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
|
||||
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build check-doc
|
||||
|
||||
build-system-fedora-rust-nightly:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-fedora-rust-nightly-container
|
||||
variables:
|
||||
IMAGE: fedora-rust-nightly
|
||||
CONFIGURE_ARGS: --disable-docs --enable-rust --enable-strict-rust-lints
|
||||
TARGETS: aarch64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build check-doc
|
||||
|
||||
allow_failure: true
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-fedora:
|
||||
extends: .native_test_job_template
|
||||
@@ -140,14 +127,14 @@ check-system-fedora:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-fedora:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-fedora:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-fedora
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
crash-test-fedora:
|
||||
extends: .native_test_job_template
|
||||
@@ -159,115 +146,40 @@ crash-test-fedora:
|
||||
script:
|
||||
- cd build
|
||||
- make NINJA=":" check-venv
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
|
||||
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
|
||||
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
|
||||
|
||||
build-system-centos:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
job: amd64-centos8-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
IMAGE: centos8
|
||||
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server
|
||||
--enable-modules --enable-trace-backends=dtrace --enable-docs
|
||||
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
|
||||
x86_64-softmmu rx-softmmu sh4-softmmu
|
||||
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
# Previous QEMU release. Used for cross-version migration tests.
|
||||
build-previous-qemu:
|
||||
extends: .native_build_job_template
|
||||
artifacts:
|
||||
when: on_success
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
- build-previous/qemu-bundle
|
||||
- build-previous/qemu-system-aarch64
|
||||
- build-previous/qemu-system-x86_64
|
||||
- build-previous/tests/qtest/migration-test
|
||||
- build-previous/scripts
|
||||
needs:
|
||||
job: amd64-opensuse-leap-container
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
TARGETS: x86_64-softmmu aarch64-softmmu
|
||||
# Override the default flags as we need more to grab the old version
|
||||
GIT_FETCH_EXTRA_FLAGS: --prune --quiet
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
# Skip if this series contains the release bump commit. During the
|
||||
# release process there might be a window of commits when the
|
||||
# version tag is not yet present in the remote and git fetch would
|
||||
# fail.
|
||||
- if grep -q "\.0$" VERSION; then exit 0; fi
|
||||
- export QEMU_PREV_VERSION="$(sed 's/\([0-9.]*\)\.[0-9]*/v\1.0/' VERSION)"
|
||||
- git remote add upstream https://gitlab.com/qemu-project/qemu
|
||||
- git fetch upstream refs/tags/$QEMU_PREV_VERSION:refs/tags/$QEMU_PREV_VERSION
|
||||
- git checkout $QEMU_PREV_VERSION
|
||||
after_script:
|
||||
- mv build build-previous
|
||||
|
||||
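The build-previous-qemu job above derives the previous release tag from the VERSION file with grep and sed. A small Python sketch of the same computation, assuming a development-tree VERSION such as 9.1.50 (the file name is real, the example value is illustrative):

```python
import re

version = open("VERSION").read().strip()   # e.g. "9.1.50" on a development tree

if version.endswith(".0"):
    # Matches the "grep -q '\.0$' VERSION" guard: on a release bump commit the
    # matching upstream tag may not exist yet, so the job simply exits early.
    print("release version, nothing to compare against")
else:
    # Same rewrite as the sed expression in the job: 9.1.50 -> v9.1.0
    prev_tag = re.sub(r"^([0-9.]*)\.[0-9]*$", r"v\1.0", version)
    print(prev_tag)   # then: git fetch upstream refs/tags/<tag> && git checkout <tag>
```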
.migration-compat-common:
|
||||
extends: .common_test_job_template
|
||||
needs:
|
||||
- job: build-previous-qemu
|
||||
- job: build-system-opensuse
|
||||
# The old QEMU could have bugs unrelated to migration that are
|
||||
# already fixed in the current development branch, so this test
|
||||
# might fail.
|
||||
allow_failure: true
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
script:
|
||||
# Skip for round release numbers, this job is only relevant for
|
||||
# testing a development tree.
|
||||
- if grep -q "\.0$" VERSION; then exit 0; fi
|
||||
# Use the migration-tests from the older QEMU tree. This avoids
|
||||
# testing an old QEMU against new features/tests that it is not
|
||||
# compatible with.
|
||||
- cd build-previous
|
||||
# old to new
|
||||
- QTEST_QEMU_BINARY_SRC=./qemu-system-${TARGET}
|
||||
QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
|
||||
# new to old
|
||||
- QTEST_QEMU_BINARY_DST=./qemu-system-${TARGET}
|
||||
QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
|
||||
|
||||
# This job needs to be disabled until we can have an aarch64 CPU model that
|
||||
# will both (1) support both KVM and TCG, and (2) provide a stable ABI.
|
||||
# Currently only "-cpu max" can provide (1), however it doesn't guarantee
|
||||
# (2). Mark this test skipped until later.
|
||||
migration-compat-aarch64:
|
||||
extends: .migration-compat-common
|
||||
variables:
|
||||
TARGET: aarch64
|
||||
QEMU_JOB_SKIPPED: 1
|
||||
|
||||
migration-compat-x86_64:
|
||||
extends: .migration-compat-common
|
||||
variables:
|
||||
TARGET: x86_64
|
||||
|
||||
check-system-centos:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-centos
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
IMAGE: centos8
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-centos:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-centos:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-centos
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
IMAGE: centos8
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-system-opensuse:
|
||||
extends:
|
||||
@@ -289,44 +201,15 @@ check-system-opensuse:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-opensuse:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-opensuse:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-opensuse
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
#
|
||||
# Flaky tests. We don't run these by default and they are allow fail
|
||||
# but often the CI system is the only way to trigger the failures.
|
||||
#
|
||||
|
||||
build-system-flaky:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-debian-container
|
||||
variables:
|
||||
IMAGE: debian
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
TARGETS: aarch64-softmmu arm-softmmu mips64el-softmmu
|
||||
ppc64-softmmu rx-softmmu s390x-softmmu sh4-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
functional-system-flaky:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-flaky
|
||||
artifacts: true
|
||||
allow_failure: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
QEMU_TEST_FLAKY_TESTS: 1
|
||||
|
||||
# This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
|
||||
# the configure script. The container doesn't contain Xen headers so
|
||||
@@ -338,9 +221,9 @@ functional-system-flaky:
|
||||
build-tcg-disabled:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
job: amd64-centos8-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
IMAGE: centos8
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
@@ -353,13 +236,11 @@ build-tcg-disabled:
|
||||
- cd tests/qemu-iotests/
|
||||
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
|
||||
052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
|
||||
170 171 184 192 194 208 221 226 227 236 253 277 image-fleecing
|
||||
170 171 183 184 192 194 208 221 226 227 236 253 277 image-fleecing
|
||||
- ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
|
||||
124 132 139 142 144 145 151 152 155 157 165 194 196 200 202
|
||||
208 209 216 218 227 234 246 247 248 250 254 255 257 258
|
||||
260 261 262 263 264 270 272 273 277 279 image-fleecing
|
||||
- cd ../..
|
||||
- make distclean
|
||||
|
||||
build-user:
|
||||
extends: .native_build_job_template
|
||||
@@ -368,7 +249,6 @@ build-user:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system
|
||||
--target-list-exclude=alpha-linux-user,sh4-linux-user
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-static:
|
||||
@@ -378,18 +258,6 @@ build-user-static:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system --static
|
||||
--target-list-exclude=alpha-linux-user,sh4-linux-user
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# targets stuck on older compilers
|
||||
build-legacy:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-legacy-cross-container
|
||||
variables:
|
||||
IMAGE: debian-legacy-test-cross
|
||||
TARGETS: alpha-linux-user alpha-softmmu sh4-linux-user
|
||||
CONFIGURE_ARGS: --disable-tools
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-hexagon:
|
||||
@@ -402,9 +270,7 @@ build-user-hexagon:
|
||||
CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# Build the softmmu targets we have check-tcg tests and compilers in
|
||||
# our omnibus all-test-cross container. Those targets that haven't got
|
||||
# Debian cross compiler support need to use special containers.
|
||||
# Only build the softmmu targets we have check-tcg tests for
|
||||
build-some-softmmu:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
@@ -412,18 +278,7 @@ build-some-softmmu:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug
|
||||
TARGETS: arm-softmmu aarch64-softmmu i386-softmmu riscv64-softmmu
|
||||
s390x-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-loongarch64:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: loongarch-debian-cross-container
|
||||
variables:
|
||||
IMAGE: debian-loongarch-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug
|
||||
TARGETS: loongarch64-linux-user loongarch64-softmmu
|
||||
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# We build tricore in a very minimal tricore only container
|
||||
@@ -443,8 +298,8 @@ clang-system:
|
||||
job: amd64-fedora-container
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-ubsan
|
||||
--extra-cflags=-fno-sanitize-recover=undefined
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++
|
||||
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
|
||||
TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu s390x-softmmu
|
||||
MAKE_CHECK_ARGS: check-qtest check-tcg
|
||||
|
||||
@@ -455,9 +310,9 @@ clang-user:
|
||||
timeout: 70m
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system --enable-ubsan
|
||||
--target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
|
||||
--extra-cflags=-fno-sanitize-recover=undefined
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
|
||||
--target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
|
||||
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
|
||||
MAKE_CHECK_ARGS: check-unit check-tcg
|
||||
|
||||
# Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
|
||||
@@ -467,8 +322,8 @@ clang-user:
|
||||
# Since slirp callbacks are used in QEMU Timers, we cannot use libslirp with
|
||||
# CFI builds, and thus have to disable it here.
|
||||
#
|
||||
# Split in three sets of build/check/functional to limit the execution time
|
||||
# of each job
|
||||
# Split in three sets of build/check/avocado to limit the execution time of each
|
||||
# job
|
||||
build-cfi-aarch64:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
@@ -498,14 +353,14 @@ check-cfi-aarch64:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-aarch64:
|
||||
extends: .functional_test_job_template
|
||||
avocado-cfi-aarch64:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-aarch64
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-cfi-ppc64-s390x:
|
||||
extends:
|
||||
@@ -536,14 +391,14 @@ check-cfi-ppc64-s390x:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-ppc64-s390x:
|
||||
extends: .functional_test_job_template
|
||||
avocado-cfi-ppc64-s390x:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-ppc64-s390x
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-cfi-x86_64:
|
||||
extends:
|
||||
@@ -570,14 +425,14 @@ check-cfi-x86_64:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-x86_64:
|
||||
extends: .functional_test_job_template
|
||||
avocado-cfi-x86_64:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-x86_64
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
tsan-build:
|
||||
extends: .native_build_job_template
|
||||
@@ -588,9 +443,6 @@ tsan-build:
|
||||
CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++
|
||||
--enable-trace-backends=ust --disable-slirp
|
||||
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
|
||||
# Remove when we switch to a distro with clang >= 18
|
||||
# https://github.com/google/sanitizers/issues/1716
|
||||
MAKE: setarch -R make
|
||||
|
||||
# gcov is a GCC features
|
||||
gcov:
|
||||
@@ -632,15 +484,12 @@ build-oss-fuzz:
|
||||
- CC="clang" CXX="clang++" CFLAGS="-fsanitize=address"
|
||||
./scripts/oss-fuzz/build.sh
|
||||
- export ASAN_OPTIONS="fast_unwind_on_malloc=0"
|
||||
- failures=0
|
||||
- for fuzzer in $(find ./build-oss-fuzz/DEST_DIR/ -executable -type f
|
||||
| grep -v slirp); do
|
||||
grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ;
|
||||
echo Testing ${fuzzer} ... ;
|
||||
"${fuzzer}" -runs=1 -seed=1 || { echo "FAILED:"" ${fuzzer} exit code is $?"; failures=$(($failures+1)); };
|
||||
"${fuzzer}" -runs=1 -seed=1 || exit 1 ;
|
||||
done
|
||||
- echo "Number of failures:"" $failures"
|
||||
- test $failures = 0
|
||||
|
||||
build-tci:
|
||||
extends: .native_build_job_template
|
||||
@@ -649,10 +498,10 @@ build-tci:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
script:
|
||||
- TARGETS="aarch64 arm hppa m68k microblaze ppc64 s390x x86_64"
|
||||
- TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-tcg-interpreter --disable-kvm --disable-docs --disable-gtk --disable-vnc
|
||||
- ../configure --enable-tcg-interpreter --disable-docs --disable-gtk --disable-vnc
|
||||
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
|
||||
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
@@ -670,9 +519,9 @@ build-tci:
|
||||
build-without-defaults:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
job: amd64-centos8-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
IMAGE: centos8
|
||||
CONFIGURE_ARGS:
|
||||
--without-default-devices
|
||||
--without-default-features
|
||||
@@ -680,7 +529,8 @@ build-without-defaults:
|
||||
--disable-pie
|
||||
--disable-qom-cast-debug
|
||||
--disable-strip
|
||||
--target-list-exclude=aarch64-softmmu,microblaze-softmmu,mips64-softmmu,mipsel-softmmu,ppc64-softmmu,sh4el-softmmu,xtensa-softmmu,x86_64-softmmu
|
||||
TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
|
||||
sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
build-libvhost-user:
|
||||
@@ -706,7 +556,7 @@ build-tools-and-docs-debian:
|
||||
# when running on 'master' we use pre-existing container
|
||||
optional: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
IMAGE: debian-amd64
|
||||
MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
|
||||
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
|
||||
QEMU_JOB_PUBLISH: 1
|
||||
@@ -726,7 +576,7 @@ build-tools-and-docs-debian:
|
||||
# of what topic branch they're currently using
|
||||
pages:
|
||||
extends: .base_job_template
|
||||
image: $CI_REGISTRY_IMAGE/qemu/debian:$QEMU_CI_CONTAINER_TAG
|
||||
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:$QEMU_CI_CONTAINER_TAG
|
||||
stage: test
|
||||
needs:
|
||||
- job: build-tools-and-docs-debian
|
||||
@@ -734,10 +584,7 @@ pages:
|
||||
- mkdir -p public
|
||||
# HTML-ised source tree
|
||||
- make gtags
|
||||
# We unset variables to work around a bug in some htags versions
|
||||
# which causes it to fail when the environment is large
|
||||
- CI_COMMIT_MESSAGE= CI_COMMIT_TAG_MESSAGE= htags
|
||||
-anT --tree-view=filetree -m qemu_init
|
||||
- htags -anT --tree-view=filetree -m qemu_init
|
||||
-t "Welcome to the QEMU sourcecode"
|
||||
- mv HTML public/src
|
||||
# Project documentation
|
||||
@@ -749,49 +596,3 @@ pages:
|
||||
- public
|
||||
variables:
|
||||
QEMU_JOB_PUBLISH: 1
|
||||
|
||||
coverity:
|
||||
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
|
||||
stage: build
|
||||
allow_failure: true
|
||||
timeout: 3h
|
||||
needs:
|
||||
- job: amd64-fedora-container
|
||||
optional: true
|
||||
before_script:
|
||||
- dnf install -y curl wget
|
||||
script:
|
||||
# would be nice to cancel the job if over quota (https://gitlab.com/gitlab-org/gitlab/-/issues/256089)
|
||||
# for example:
|
||||
# curl --request POST --header "PRIVATE-TOKEN: $CI_JOB_TOKEN" "${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/jobs/${CI_JOB_ID}/cancel
|
||||
- 'scripts/coverity-scan/run-coverity-scan --check-upload-only || { exitcode=$?; if test $exitcode = 1; then
|
||||
exit 0;
|
||||
else
|
||||
exit $exitcode;
|
||||
fi; };
|
||||
scripts/coverity-scan/run-coverity-scan --update-tools-only > update-tools.log 2>&1 || { cat update-tools.log; exit 1; };
|
||||
scripts/coverity-scan/run-coverity-scan --no-update-tools'
|
||||
rules:
|
||||
- if: '$COVERITY_TOKEN == null'
|
||||
when: never
|
||||
- if: '$COVERITY_EMAIL == null'
|
||||
when: never
|
||||
# Never included on upstream pipelines, except for schedules
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
|
||||
when: on_success
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
|
||||
when: never
|
||||
# Forks don't get any pipeline unless QEMU_CI=1 or QEMU_CI=2 is set
|
||||
- if: '$QEMU_CI != "1" && $QEMU_CI != "2"'
|
||||
when: never
|
||||
# Always manual on forks even if $QEMU_CI == "2"
|
||||
- when: manual
|
||||
|
||||
build-wasm:
|
||||
extends: .wasm_build_job_template
|
||||
timeout: 2h
|
||||
needs:
|
||||
job: wasm-emsdk-cross-container
|
||||
variables:
|
||||
IMAGE: emsdk-wasm32-cross
|
||||
CONFIGURE_ARGS: --static --disable-tools --enable-debug --enable-tcg-interpreter
|
||||
|
@@ -19,9 +19,10 @@ cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)

print(f"adding upstream git repo @ {repourl}")
subprocess.check_call(["git", "remote", "add", "check-dco", repourl])
subprocess.check_call(["git", "fetch", "--refetch", "check-dco", "master"])
subprocess.check_call(["git", "fetch", "check-dco", "master"],
                      stdout=subprocess.DEVNULL,
                      stderr=subprocess.DEVNULL)

ancestor = subprocess.check_output(["git", "merge-base",
                                    "check-dco/master", "HEAD"],

@@ -78,10 +79,7 @@ of Origin 1.1 (DCO):

To indicate acceptance of the DCO every commit must have a tag

  Signed-off-by: YOUR NAME <EMAIL>

where "YOUR NAME" is your commonly known identity in the context
of the community.
  Signed-off-by: REAL NAME <EMAIL>

This can be achieved by passing the "-s" flag to the "git commit" command.
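check-dco.py uses the merge-base computed above to bound which commits it inspects; the rest of the script (not shown in this hunk) then checks each commit for the Signed-off-by tag described in the message above. A condensed sketch of that kind of check, not the script's exact code:

```python
import subprocess

# Condensed sketch (not the script's exact body): every commit between the
# common ancestor and HEAD must carry a Signed-off-by tag.
ancestor = subprocess.check_output(
    ["git", "merge-base", "check-dco/master", "HEAD"], text=True).strip()

missing = []
for sha in subprocess.check_output(
        ["git", "rev-list", f"{ancestor}..HEAD"], text=True).split():
    message = subprocess.check_output(
        ["git", "log", "-1", "--format=%B", sha], text=True)
    if "Signed-off-by:" not in message:
        missing.append(sha)

if missing:
    print("commits missing a Signed-off-by tag:", *missing, sep="\n  ")
    raise SystemExit(1)
```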
@@ -19,12 +19,13 @@ cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)

print(f"adding upstream git repo @ {repourl}")
# GitLab CI environment does not give us any direct info about the
# base for the user's branch. We thus need to figure out a common
# ancestor between the user's branch and current git master.
subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
subprocess.check_call(["git", "fetch", "--refetch", "check-patch", "master"])
subprocess.check_call(["git", "fetch", "check-patch", "master"],
                      stdout=subprocess.DEVNULL,
                      stderr=subprocess.DEVNULL)

ancestor = subprocess.check_output(["git", "merge-base",
                                    "check-patch/master", "HEAD"],
@@ -1,71 +0,0 @@
#!/usr/bin/env python3
#
# check-units.py: check the number of compilation units and identify
# those that are rebuilt multiple times
#
# Copyright (C) 2025 Linaro Ltd.
#
# SPDX-License-Identifier: GPL-2.0-or-later

from os import access, R_OK, path
from sys import exit
import json
import argparse
from pathlib import Path
from collections import Counter


def extract_build_units(cc_path):
    """
    Extract the build units and their counts from compile_commands.json file.

    Returns:
        Hash table of ["unit"] = count
    """

    j = json.load(open(cc_path, 'r'))
    files = [f['file'] for f in j]
    build_units = Counter(files)

    return build_units


def analyse_units(build_units, top_n):
    """
    Analyse the build units and report stats and the top_n rebuilds
    """

    print(f"Total source files: {len(build_units.keys())}")
    print(f"Total build units: {sum(build_units.values())}")

    # Create a sorted list by number of rebuilds
    sorted_build_units = sorted(build_units.items(),
                                key=lambda item: item[1],
                                reverse=True)

    print("Most rebuilt units:")
    for unit, count in sorted_build_units[:top_n]:
        print(f"  {unit} built {count} times")

    print("Least rebuilt units:")
    for unit, count in sorted_build_units[-10:]:
        print(f"  {unit} built {count} times")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="analyse number of build units in compile_commands.json")
    parser.add_argument("cc_path", type=Path, default=None,
                        help="Path to compile_commands.json")
    parser.add_argument("-n", type=int, default=20,
                        help="Dump the top <n> entries")

    args = parser.parse_args()

    if path.isfile(args.cc_path) and access(args.cc_path, R_OK):
        units = extract_build_units(args.cc_path)
        analyse_units(units, args.n)
        exit(0)
    else:
        print(f"{args.cc_path} doesn't exist or isn't readable")
        exit(1)
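For reference, a small usage sketch of the two helpers defined in the removed script, assuming a configured build tree at build/ (the path and the -n value are illustrative):

```python
# Illustrative use of the helpers above on a configured build tree; the CLI
# equivalent would be: check-units.py build/compile_commands.json -n 10
units = extract_build_units("build/compile_commands.json")
analyse_units(units, 10)
```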
@@ -13,48 +13,98 @@
|
||||
.cirrus_build_job:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:latest
|
||||
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
|
||||
needs: []
|
||||
allow_failure:
|
||||
exit_codes: 3
|
||||
# 20 mins larger than "timeout_in" in cirrus/build.yml
|
||||
# as there's often a 5-10 minute delay before Cirrus CI
|
||||
# actually starts the task
|
||||
timeout: 80m
|
||||
allow_failure: true
|
||||
script:
|
||||
- set -o allexport
|
||||
- source .gitlab-ci.d/cirrus/$NAME.vars
|
||||
- set +o allexport
|
||||
- cirrus-vars <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml
|
||||
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
|
||||
-e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
|
||||
-e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
|
||||
-e "s|[@]CIRRUS_VM_INSTANCE_TYPE@|$CIRRUS_VM_INSTANCE_TYPE|g"
|
||||
-e "s|[@]CIRRUS_VM_IMAGE_SELECTOR@|$CIRRUS_VM_IMAGE_SELECTOR|g"
|
||||
-e "s|[@]CIRRUS_VM_IMAGE_NAME@|$CIRRUS_VM_IMAGE_NAME|g"
|
||||
-e "s|[@]CIRRUS_VM_CPUS@|$CIRRUS_VM_CPUS|g"
|
||||
-e "s|[@]CIRRUS_VM_RAM@|$CIRRUS_VM_RAM|g"
|
||||
-e "s|[@]UPDATE_COMMAND@|$UPDATE_COMMAND|g"
|
||||
-e "s|[@]INSTALL_COMMAND@|$INSTALL_COMMAND|g"
|
||||
-e "s|[@]PATH@|$PATH_EXTRA${PATH_EXTRA:+:}\$PATH|g"
|
||||
-e "s|[@]PKG_CONFIG_PATH@|$PKG_CONFIG_PATH|g"
|
||||
-e "s|[@]PKGS@|$PKGS|g"
|
||||
-e "s|[@]MAKE@|$MAKE|g"
|
||||
-e "s|[@]PYTHON@|$PYTHON|g"
|
||||
-e "s|[@]PIP3@|$PIP3|g"
|
||||
-e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g"
|
||||
-e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
|
||||
-e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
|
||||
<.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml
|
||||
- cat .gitlab-ci.d/cirrus/$NAME.yml
|
||||
- cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
|
||||
variables:
|
||||
QEMU_JOB_CIRRUS: 1
|
||||
|
||||
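The .cirrus_build_job template above turns .gitlab-ci.d/cirrus/build.yml into a per-OS Cirrus config by substituting the @KEY@ placeholders, either via cirrus-vars or the long sed pipeline. A minimal Python sketch of that substitution, assuming the vars files use KEY='value' lines as shown later in this diff (the CI_* values would come from the GitLab job environment rather than the vars file):

```python
import re
from pathlib import Path

def load_vars(path):
    """Parse KEY='value' lines from a .gitlab-ci.d/cirrus/*.vars file."""
    variables = {}
    for line in Path(path).read_text().splitlines():
        if not line or line.startswith("#"):
            continue
        key, _, value = line.partition("=")
        variables[key.strip()] = value.strip().strip("'")
    return variables

def render(template_path, variables):
    """Replace every @KEY@ placeholder, mirroring the sed/cirrus-vars step."""
    text = Path(template_path).read_text()
    return re.sub(r"@([A-Z0-9_]+)@",
                  lambda m: variables.get(m.group(1), ""), text)

# Illustrative invocation for one of the jobs defined below:
# print(render(".gitlab-ci.d/cirrus/build.yml",
#              load_vars(".gitlab-ci.d/cirrus/freebsd-13.vars")))
```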
x64-freebsd-14-build:
|
||||
x64-freebsd-13-build:
|
||||
extends: .cirrus_build_job
|
||||
variables:
|
||||
NAME: freebsd-14
|
||||
NAME: freebsd-13
|
||||
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
|
||||
CIRRUS_VM_IMAGE_SELECTOR: image_family
|
||||
CIRRUS_VM_IMAGE_NAME: freebsd-14-2
|
||||
CIRRUS_VM_IMAGE_NAME: freebsd-13-1
|
||||
CIRRUS_VM_CPUS: 8
|
||||
CIRRUS_VM_RAM: 8G
|
||||
UPDATE_COMMAND: pkg update; pkg upgrade -y
|
||||
INSTALL_COMMAND: pkg install -y
|
||||
CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblaze-softmmu,mips64el-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4eb-softmmu,xtensa-softmmu
|
||||
TEST_TARGETS: check
|
||||
|
||||
aarch64-macos-build:
|
||||
aarch64-macos-12-base-build:
|
||||
extends: .cirrus_build_job
|
||||
variables:
|
||||
NAME: macos-14
|
||||
NAME: macos-12
|
||||
CIRRUS_VM_INSTANCE_TYPE: macos_instance
|
||||
CIRRUS_VM_IMAGE_SELECTOR: image
|
||||
CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-runner:sonoma
|
||||
CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-monterey-base:latest
|
||||
CIRRUS_VM_CPUS: 12
|
||||
CIRRUS_VM_RAM: 24G
|
||||
UPDATE_COMMAND: brew update
|
||||
INSTALL_COMMAND: brew install
|
||||
PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin
|
||||
PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig
|
||||
CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblazeel-softmmu,mips64-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4-softmmu,xtensaeb-softmmu
|
||||
TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
|
||||
|
||||
|
||||
# The following jobs run VM-based tests via KVM on a Linux-based Cirrus-CI job
|
||||
.cirrus_kvm_job:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
|
||||
needs: []
|
||||
timeout: 80m
|
||||
script:
|
||||
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
|
||||
-e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
|
||||
-e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
|
||||
-e "s|[@]NAME@|$NAME|g"
|
||||
-e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
|
||||
-e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
|
||||
<.gitlab-ci.d/cirrus/kvm-build.yml >.gitlab-ci.d/cirrus/$NAME.yml
|
||||
- cat .gitlab-ci.d/cirrus/$NAME.yml
|
||||
- cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
|
||||
variables:
|
||||
QEMU_JOB_CIRRUS: 1
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
|
||||
|
||||
x86-netbsd:
|
||||
extends: .cirrus_kvm_job
|
||||
variables:
|
||||
NAME: netbsd
|
||||
CONFIGURE_ARGS: --target-list=x86_64-softmmu,ppc64-softmmu,aarch64-softmmu
|
||||
TEST_TARGETS: check
|
||||
|
||||
x86-openbsd:
|
||||
extends: .cirrus_kvm_job
|
||||
variables:
|
||||
NAME: openbsd
|
||||
CONFIGURE_ARGS: --target-list=i386-softmmu,riscv64-softmmu,mips64-softmmu
|
||||
TEST_TARGETS: check
|
||||
|
@@ -8,7 +8,7 @@ env:
CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
CI_COMMIT_SHA: "@CI_COMMIT_SHA@"
PATH: "@PATH_EXTRA@:$PATH"
PATH: "@PATH@"
PKG_CONFIG_PATH: "@PKG_CONFIG_PATH@"
PYTHON: "@PYTHON@"
MAKE: "@MAKE@"

@@ -16,17 +16,15 @@ env:
TEST_TARGETS: "@TEST_TARGETS@"

build_task:
# A little shorter than GitLab timeout in ../cirrus.yml
timeout_in: 60m
install_script:
- @UPDATE_COMMAND@
- @INSTALL_COMMAND@ @PKGS@
- if test -n "@PYPI_PKGS@" ; then PYLIB=$(@PYTHON@ -c 'import sysconfig; print(sysconfig.get_path("stdlib"))'); rm -f $PYLIB/EXTERNALLY-MANAGED; @PIP3@ install @PYPI_PKGS@ ; fi
- if test -n "@PYPI_PKGS@" ; then @PIP3@ install @PYPI_PKGS@ ; fi
clone_script:
- git clone --depth 100 "$CI_REPOSITORY_URL" .
- git fetch origin "$CI_COMMIT_REF_NAME"
- git reset --hard "$CI_COMMIT_SHA"
step_script:
build_script:
- mkdir build
- cd build
- ../configure --enable-werror $CONFIGURE_ARGS
16
.gitlab-ci.d/cirrus/freebsd-13.vars
Normal file
16
.gitlab-ci.d/cirrus/freebsd-13.vars
Normal file
@@ -0,0 +1,16 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables freebsd-13 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/usr/local/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/usr/local/bin/gmake'
|
||||
NINJA='/usr/local/bin/ninja'
|
||||
PACKAGING_COMMAND='pkg'
|
||||
PIP3='/usr/local/bin/pip-3.8'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
|
||||
PYPI_PKGS=''
|
||||
PYTHON='/usr/local/bin/python3'
|
@@ -1,16 +0,0 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables freebsd-14 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/usr/local/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/usr/local/bin/gmake'
|
||||
NINJA='/usr/local/bin/ninja'
|
||||
PACKAGING_COMMAND='pkg'
|
||||
PIP3='/usr/local/bin/pip'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache4 cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk-vnc gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py311-numpy py311-pillow py311-pip py311-pyyaml py311-sphinx py311-sphinx_rtd_theme py311-tomli python3 rpm2cpio rust rust-bindgen-cli sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 vulkan-tools xorriso zstd'
|
||||
PYPI_PKGS=''
|
||||
PYTHON='/usr/local/bin/python3'
|
31 .gitlab-ci.d/cirrus/kvm-build.yml (Normal file)

@@ -0,0 +1,31 @@
container:
image: fedora:35
cpu: 4
memory: 8Gb
kvm: true

env:
CIRRUS_CLONE_DEPTH: 1
CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
CI_COMMIT_SHA: "@CI_COMMIT_SHA@"

@NAME@_task:
@NAME@_vm_cache:
folder: $HOME/.cache/qemu-vm
install_script:
- dnf update -y
- dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget meson
clone_script:
- git clone --depth 100 "$CI_REPOSITORY_URL" .
- git fetch origin "$CI_COMMIT_REF_NAME"
- git reset --hard "$CI_COMMIT_SHA"
build_script:
- if [ -f $HOME/.cache/qemu-vm/images/@NAME@.img ]; then
make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN)
EXTRA_CONFIGURE_OPTS="@CONFIGURE_ARGS@"
BUILD_TARGET="@TEST_TARGETS@" ;
else
make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN) BUILD_TARGET=help
EXTRA_CONFIGURE_OPTS="--disable-system --disable-user --disable-tools" ;
fi
16
.gitlab-ci.d/cirrus/macos-12.vars
Normal file
16
.gitlab-ci.d/cirrus/macos-12.vars
Normal file
@@ -0,0 +1,16 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables macos-12 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/opt/homebrew/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/opt/homebrew/bin/gmake'
|
||||
NINJA='/opt/homebrew/bin/ninja'
|
||||
PACKAGING_COMMAND='brew'
|
||||
PIP3='/opt/homebrew/bin/pip3'
|
||||
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 xorriso zlib zstd'
|
||||
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
|
||||
PYTHON='/opt/homebrew/bin/python3'
|
@@ -1,16 +0,0 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables macos-14 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/opt/homebrew/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/opt/homebrew/bin/gmake'
|
||||
NINJA='/opt/homebrew/bin/ninja'
|
||||
PACKAGING_COMMAND='brew'
|
||||
PIP3='/opt/homebrew/bin/pip3'
|
||||
PKGS='bash bc bindgen bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 gtk-vnc jemalloc jpeg-turbo json-c libcbor libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio rust sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 vulkan-tools xorriso zlib zstd'
|
||||
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
|
||||
PYTHON='/opt/homebrew/bin/python3'
|
@@ -1,10 +1,10 @@
include:
- local: '/.gitlab-ci.d/container-template.yml'

amd64-centos9-container:
amd64-centos8-container:
extends: .container_job_template
variables:
NAME: centos9
NAME: centos8

amd64-fedora-container:
extends: .container_job_template
@@ -1,3 +1,9 @@
|
||||
alpha-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-alpha-cross
|
||||
|
||||
amd64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -10,18 +16,18 @@ amd64-debian-user-cross-container:
|
||||
variables:
|
||||
NAME: debian-all-test-cross
|
||||
|
||||
amd64-debian-legacy-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-legacy-test-cross
|
||||
|
||||
arm64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-arm64-cross
|
||||
|
||||
armel-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-armel-cross
|
||||
|
||||
armhf-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -34,17 +40,23 @@ hexagon-cross-container:
|
||||
variables:
|
||||
NAME: debian-hexagon-cross
|
||||
|
||||
loongarch-debian-cross-container:
|
||||
hppa-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-loongarch-cross
|
||||
NAME: debian-hppa-cross
|
||||
|
||||
i686-debian-cross-container:
|
||||
m68k-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-i686-cross
|
||||
NAME: debian-m68k-cross
|
||||
|
||||
mips64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mips64-cross
|
||||
|
||||
mips64el-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
@@ -52,12 +64,24 @@ mips64el-debian-cross-container:
|
||||
variables:
|
||||
NAME: debian-mips64el-cross
|
||||
|
||||
mips-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mips-cross
|
||||
|
||||
mipsel-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mipsel-cross
|
||||
|
||||
powerpc-test-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-powerpc-test-cross
|
||||
|
||||
ppc64el-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -67,15 +91,36 @@ ppc64el-debian-cross-container:
|
||||
riscv64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
# as we are currently based on 'sid/unstable' we may break so...
|
||||
allow_failure: true
|
||||
variables:
|
||||
NAME: debian-riscv64-cross
|
||||
|
||||
# we can however build TCG tests using a non-sid base
|
||||
riscv64-debian-test-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-riscv64-test-cross
|
||||
|
||||
s390x-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-s390x-cross
|
||||
|
||||
sh4-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-sh4-cross
|
||||
|
||||
sparc64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-sparc64-cross
|
||||
|
||||
tricore-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -87,12 +132,22 @@ xtensa-debian-cross-container:
|
||||
variables:
|
||||
NAME: debian-xtensa-cross
|
||||
|
||||
cris-fedora-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-cris-cross
|
||||
|
||||
i386-fedora-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-i386-cross
|
||||
|
||||
win32-fedora-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-win32-cross
|
||||
|
||||
win64-fedora-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-win64-cross
|
||||
|
||||
wasm-emsdk-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: emsdk-wasm32-cross
|
||||
|
@@ -11,7 +11,7 @@ amd64-debian-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian
|
||||
NAME: debian-amd64
|
||||
|
||||
amd64-ubuntu2204-container:
|
||||
extends: .container_job_template
|
||||
@@ -27,9 +27,3 @@ python-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: python
|
||||
|
||||
amd64-fedora-rust-nightly-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-rust-nightly
|
||||
allow_failure: true
|
||||
|
@@ -2,51 +2,22 @@
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
cache:
|
||||
paths:
|
||||
- ccache
|
||||
key: "$CI_JOB_NAME"
|
||||
when: always
|
||||
timeout: 80m
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- section_start setup "Pre-script setup"
|
||||
- JOBS=$(expr $(nproc) + 1)
|
||||
- cat /packages.txt
|
||||
- section_end setup
|
||||
script:
|
||||
- export CCACHE_BASEDIR="$(pwd)"
|
||||
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
|
||||
- export CCACHE_MAXSIZE="500M"
|
||||
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ccache --zero-stats
|
||||
- section_start configure "Running configure"
|
||||
- ../configure --enable-werror --disable-docs --enable-fdt=system
|
||||
--disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
|
||||
--target-list-exclude="arm-softmmu
|
||||
--target-list-exclude="arm-softmmu cris-softmmu
|
||||
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
|
||||
mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
|
||||
sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
|
||||
- section_end configure
|
||||
- section_start build "Building QEMU"
|
||||
- make -j"$JOBS" all check-build
|
||||
- section_end build
|
||||
- section_start test "Running tests"
|
||||
- if test -n "$MAKE_CHECK_ARGS";
|
||||
then
|
||||
$MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
|
||||
fi
|
||||
- section_end test
|
||||
- section_start installer "Building the installer"
|
||||
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
|
||||
- if grep -q "EXESUF=.exe" config-host.mak;
|
||||
then make installer;
|
||||
version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
|
||||
mv -v qemu-setup*.exe qemu-setup-${version}.exe;
|
||||
fi
|
||||
- section_end installer
|
||||
- ccache --show-stats
|
||||
|
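The build template above drives ccache purely through environment variables, so the cache directory can be persisted by the job's cache: paths entry. A minimal local sketch of the same flow follows; the wrapper directory path is illustrative, since in the CI images it comes from $CCACHE_WRAPPERSDIR:

    export CCACHE_BASEDIR="$(pwd)"             # record paths relative to the checkout
    export CCACHE_DIR="$CCACHE_BASEDIR/ccache" # keep the cache inside the job workspace
    export CCACHE_MAXSIZE="500M"
    export PATH="/usr/lib/ccache:$PATH"        # compiler wrappers first on PATH (illustrative path)
    mkdir -p build && cd build
    ccache --zero-stats
    ../configure --enable-werror --disable-docs
    make -j"$(expr "$(nproc)" + 1)" all
    ccache --show-stats                        # hit/miss counts for just this build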
||||
# Job to cross-build specific accelerators.
|
||||
#
|
||||
@@ -57,68 +28,27 @@
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
timeout: 60m
|
||||
cache:
|
||||
paths:
|
||||
- ccache/
|
||||
key: "$CI_JOB_NAME"
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- JOBS=$(expr $(nproc) + 1)
|
||||
timeout: 30m
|
||||
script:
|
||||
- export CCACHE_BASEDIR="$(pwd)"
|
||||
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
|
||||
- export CCACHE_MAXSIZE="500M"
|
||||
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- section_start configure "Running configure"
|
||||
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
--disable-tools --enable-${ACCEL:-kvm} $EXTRA_CONFIGURE_OPTS
|
||||
- section_end configure
|
||||
- section_start build "Building QEMU"
|
||||
- make -j"$JOBS" all check-build
|
||||
- section_end build
|
||||
- section_start test "Running tests"
|
||||
- if test -n "$MAKE_CHECK_ARGS";
|
||||
then
|
||||
$MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
|
||||
fi
|
||||
- section_end test
|
||||
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
|
||||
|
||||
.cross_user_build_job:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
cache:
|
||||
paths:
|
||||
- ccache/
|
||||
key: "$CI_JOB_NAME"
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- JOBS=$(expr $(nproc) + 1)
|
||||
script:
|
||||
- export CCACHE_BASEDIR="$(pwd)"
|
||||
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
|
||||
- export CCACHE_MAXSIZE="500M"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- section_start configure "Running configure"
|
||||
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
--disable-system --target-list-exclude="aarch64_be-linux-user
|
||||
alpha-linux-user m68k-linux-user microblazeel-linux-user
|
||||
or1k-linux-user ppc-linux-user sparc-linux-user
|
||||
alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user
|
||||
nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
|
||||
xtensa-linux-user $CROSS_SKIP_TARGETS"
|
||||
- section_end configure
|
||||
- section_start build "Building QEMU"
|
||||
- make -j"$JOBS" all check-build
|
||||
- section_end build
|
||||
- section_start test "Running tests"
|
||||
- if test -n "$MAKE_CHECK_ARGS";
|
||||
then
|
||||
$MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
|
||||
fi
|
||||
- section_end test
|
||||
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
|
||||
|
||||
# We can still run some tests on some of our cross build jobs. They can add this
|
||||
# template to their extends to save the build logs and test results
|
||||
|
@@ -1,6 +1,13 @@
|
||||
include:
|
||||
- local: '/.gitlab-ci.d/crossbuild-template.yml'
|
||||
|
||||
cross-armel-user:
|
||||
extends: .cross_user_build_job
|
||||
needs:
|
||||
job: armel-debian-cross-container
|
||||
variables:
|
||||
IMAGE: debian-armel-cross
|
||||
|
||||
cross-armhf-user:
|
||||
extends: .cross_user_build_job
|
||||
needs:
|
||||
@@ -30,43 +37,28 @@ cross-arm64-kvm-only:
|
||||
IMAGE: debian-arm64-cross
|
||||
EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-features
|
||||
|
||||
cross-i686-system:
|
||||
extends:
|
||||
- .cross_system_build_job
|
||||
- .cross_test_artifacts
|
||||
needs:
|
||||
job: i686-debian-cross-container
|
||||
variables:
|
||||
IMAGE: debian-i686-cross
|
||||
EXTRA_CONFIGURE_OPTS: --disable-kvm
|
||||
MAKE_CHECK_ARGS: check-qtest
|
||||
|
||||
cross-i686-user:
|
||||
cross-i386-user:
|
||||
extends:
|
||||
- .cross_user_build_job
|
||||
- .cross_test_artifacts
|
||||
needs:
|
||||
job: i686-debian-cross-container
|
||||
job: i386-fedora-cross-container
|
||||
variables:
|
||||
IMAGE: debian-i686-cross
|
||||
IMAGE: fedora-i386-cross
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
cross-i686-tci:
|
||||
cross-i386-tci:
|
||||
extends:
|
||||
- .cross_accel_build_job
|
||||
- .cross_test_artifacts
|
||||
timeout: 60m
|
||||
needs:
|
||||
job: i686-debian-cross-container
|
||||
job: i386-fedora-cross-container
|
||||
variables:
|
||||
IMAGE: debian-i686-cross
|
||||
IMAGE: fedora-i386-cross
|
||||
ACCEL: tcg-interpreter
|
||||
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,arm-softmmu,arm-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins --disable-kvm
|
||||
# Force tests to run with reduced parallelism, to see whether this
|
||||
# reduces the flakiness of this CI job. The CI
|
||||
# environment by default shows us 8 CPUs and so we
|
||||
# would otherwise be using a parallelism of 9.
|
||||
MAKE_CHECK_ARGS: check check-tcg -j2
|
||||
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins
|
||||
MAKE_CHECK_ARGS: check check-tcg
|
||||
|
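The comment in the hunk above explains the -j2 override: the templates derive JOBS as nproc + 1, so a runner that reports 8 CPUs would otherwise run the TCG tests nine-way in parallel. A small sketch of the arithmetic, with illustrative numbers:

    cpus=$(nproc)              # the shared runners report 8 here
    jobs=$(expr "$cpus" + 1)   # default parallelism: 9
    # default invocation
    make -j"$jobs" check check-tcg
    # the reduced-parallelism variant appends -j2, and GNU make honours the last -j it sees
    make -j"$jobs" check check-tcg -j2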
||||
cross-mipsel-system:
|
||||
extends: .cross_system_build_job
|
||||
@@ -118,8 +110,12 @@ cross-ppc64el-kvm-only:
|
||||
IMAGE: debian-ppc64el-cross
|
||||
EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-devices
|
||||
|
||||
# The riscv64 cross-builds currently use a 'sid' container to get
|
||||
# compilers and libraries. Until something more stable is found we
|
||||
# allow_failure so as not to block CI.
|
||||
cross-riscv64-system:
|
||||
extends: .cross_system_build_job
|
||||
allow_failure: true
|
||||
needs:
|
||||
job: riscv64-debian-cross-container
|
||||
variables:
|
||||
@@ -127,6 +123,7 @@ cross-riscv64-system:
|
||||
|
||||
cross-riscv64-user:
|
||||
extends: .cross_user_build_job
|
||||
allow_failure: true
|
||||
needs:
|
||||
job: riscv64-debian-cross-container
|
||||
variables:
|
||||
@@ -162,15 +159,29 @@ cross-mips64el-kvm-only:
|
||||
IMAGE: debian-mips64el-cross
|
||||
EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu
|
||||
|
||||
cross-win32-system:
|
||||
extends: .cross_system_build_job
|
||||
needs:
|
||||
job: win32-fedora-cross-container
|
||||
variables:
|
||||
IMAGE: fedora-win32-cross
|
||||
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
|
||||
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
|
||||
microblazeel-softmmu mips64el-softmmu nios2-softmmu
|
||||
artifacts:
|
||||
when: on_success
|
||||
paths:
|
||||
- build/qemu-setup*.exe
|
||||
|
||||
cross-win64-system:
|
||||
extends: .cross_system_build_job
|
||||
needs:
|
||||
job: win64-fedora-cross-container
|
||||
variables:
|
||||
IMAGE: fedora-win64-cross
|
||||
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins
|
||||
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
|
||||
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu
|
||||
m68k-softmmu microblazeel-softmmu
|
||||
m68k-softmmu microblazeel-softmmu nios2-softmmu
|
||||
or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
|
||||
tricore-softmmu xtensaeb-softmmu
|
||||
artifacts:
|
||||
|
@@ -10,14 +10,13 @@
|
||||
# gitlab-runner. To avoid problems that gitlab-runner can cause while
|
||||
# reusing the GIT repository, let's enable the clone strategy, which
|
||||
# guarantees a fresh repository on each job run.
|
||||
variables:
|
||||
GIT_STRATEGY: clone
|
||||
|
||||
# All custom runners can extend this template to upload the testlog
|
||||
# data as an artifact and also feed the junit report
|
||||
.custom_runner_template:
|
||||
extends: .base_job_template
|
||||
variables:
|
||||
GIT_STRATEGY: clone
|
||||
GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
|
||||
artifacts:
|
||||
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
|
||||
expire_in: 7 days
|
||||
@@ -29,6 +28,7 @@
|
||||
junit: build/meson-logs/testlog.junit.xml
|
||||
|
||||
include:
|
||||
- local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml'
|
||||
- local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml'
|
||||
- local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml'
|
||||
- local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml'
|
||||
- local: '/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml'
|
||||
|
24
.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
# All centos-stream-8 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/stream/8/build-environment.yml task
|
||||
# "Installation of extra packages to build QEMU"
|
||||
|
||||
centos-stream-8-x86_64:
|
||||
extends: .custom_runner_template
|
||||
allow_failure: true
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- centos_stream_8
|
||||
- x86_64
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
- if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
|
||||
before_script:
|
||||
- JOBS=$(expr $(nproc) + 1)
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../scripts/ci/org.centos/stream/8/x86_64/configure
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make -j"$JOBS"
|
||||
- make NINJA=":" check check-avocado
|
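The custom-runner jobs here all reuse the same shell idiom after configure: on failure, print the configure and meson logs so the cause is visible in the job output, then exit non-zero so the job fails. The pattern in isolation:

    mkdir build && cd build
    ../configure \
        || { cat config.log meson-logs/meson-log.txt; exit 1; }   # dump logs, then fail
    make -j"$(nproc)"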
130
.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml
Normal file
@@ -0,0 +1,130 @@
|
||||
# All ubuntu-20.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 20.04/20.04"
|
||||
|
||||
ubuntu-20.04-s390x-all-linux-static:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
script:
|
||||
# --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763
|
||||
# --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync check-tcg
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-20.04-s390x-all:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
timeout: 75m
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-libssh
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-20.04-s390x-alldbg:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-debug --disable-libssh
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make clean
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-20.04-s390x-clang:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-20.04-s390x-tci:
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-libssh --enable-tcg-interpreter
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
|
||||
ubuntu-20.04-s390x-notcg:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-libssh --disable-tcg
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
@@ -1,5 +1,5 @@
|
||||
# All ubuntu-22.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
|
||||
# setup by the scripts/ci/setup/qemu/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 22.04"
|
||||
|
||||
ubuntu-22.04-aarch32-all:
|
||||
|
@@ -1,5 +1,5 @@
|
||||
# All ubuntu-22.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
|
||||
# setup by the scripts/ci/setup/qemu/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 22.04"
|
||||
|
||||
ubuntu-22.04-aarch64-all-linux-static:
|
||||
@@ -103,7 +103,7 @@ ubuntu-22.04-aarch64-clang:
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-ubsan
|
||||
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|
@@ -1,130 +0,0 @@
|
||||
# All ubuntu-22.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 22.04"
|
||||
|
||||
ubuntu-22.04-s390x-all-linux:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-debug --disable-system --disable-tools --disable-docs
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync check-tcg
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-22.04-s390x-all-system:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- s390x
|
||||
timeout: 75m
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-user
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-22.04-s390x-alldbg:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-debug
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make clean
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-22.04-s390x-clang:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --cc=clang --cxx=clang++ --enable-ubsan
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-22.04-s390x-tci:
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-tcg-interpreter
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
|
||||
ubuntu-22.04-s390x-notcg:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-tcg
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
@@ -24,10 +24,6 @@
|
||||
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i'
|
||||
when: manual
|
||||
|
||||
# Scheduled runs on mainline don't get pipelines except for the special Coverity job
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
|
||||
when: never
|
||||
|
||||
# Run if any files affecting the build output are touched
|
||||
- changes:
|
||||
- .gitlab-ci.d/opensbi.yml
|
||||
|
@@ -46,49 +46,3 @@ check-python-tox:
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
needs:
|
||||
job: python-container
|
||||
|
||||
check-rust-tools-nightly:
|
||||
extends: .base_job_template
|
||||
stage: test
|
||||
image: $CI_REGISTRY_IMAGE/qemu/fedora-rust-nightly:$QEMU_CI_CONTAINER_TAG
|
||||
script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- section_start test "Running Rust code checks"
|
||||
- cd build
|
||||
- pyvenv/bin/meson devenv -w ../rust ${CARGO-cargo} fmt --check
|
||||
- make clippy
|
||||
- make rustdoc
|
||||
- section_end test
|
||||
variables:
|
||||
GIT_DEPTH: 1
|
||||
allow_failure: true
|
||||
needs:
|
||||
- job: build-system-fedora-rust-nightly
|
||||
artifacts: true
|
||||
artifacts:
|
||||
when: on_success
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
- rust/target/doc
|
||||
|
||||
check-build-units:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/debian:$QEMU_CI_CONTAINER_TAG
|
||||
needs:
|
||||
job: amd64-debian-container
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- section_start setup "Install Tools"
|
||||
- apt install --assume-yes --no-install-recommends jq
|
||||
- section_end setup
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- section_start configure "Running configure"
|
||||
- ../configure
|
||||
- cd ..
|
||||
- section_end configure
|
||||
- section_start analyse "Analyse"
|
||||
- .gitlab-ci.d/check-units.py build/compile_commands.json
|
||||
- section_end analyse
|
||||
|
@@ -1,25 +1,21 @@
|
||||
msys2-64bit:
|
||||
.shared_msys2_builder:
|
||||
extends: .base_job_template
|
||||
tags:
|
||||
- saas-windows-medium-amd64
|
||||
- shared-windows
|
||||
- windows
|
||||
- windows-1809
|
||||
cache:
|
||||
key: "$CI_JOB_NAME"
|
||||
key: "${CI_JOB_NAME}-cache"
|
||||
paths:
|
||||
- msys64/var/cache
|
||||
- ccache
|
||||
when: always
|
||||
needs: []
|
||||
stage: build
|
||||
timeout: 100m
|
||||
timeout: 80m
|
||||
variables:
|
||||
# Select the "64 bit, gcc and MSVCRT" MSYS2 environment
|
||||
MSYSTEM: MINGW64
|
||||
# This feature doesn't (currently) work with PowerShell, it stops
|
||||
# the echo'ing of commands being run and doesn't show any timing
|
||||
FF_SCRIPT_SECTIONS: 0
|
||||
CONFIGURE_ARGS: --disable-system --enable-tools -Ddebug=false -Doptimization=0
|
||||
# The Windows git is a bit older so override the default
|
||||
GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
|
||||
artifacts:
|
||||
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
|
||||
expire_in: 7 days
|
||||
@@ -75,32 +71,61 @@ msys2-64bit:
|
||||
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
|
||||
bison diffutils flex
|
||||
git grep make sed
|
||||
mingw-w64-x86_64-binutils
|
||||
mingw-w64-x86_64-ccache
|
||||
mingw-w64-x86_64-curl
|
||||
mingw-w64-x86_64-gcc
|
||||
mingw-w64-x86_64-glib2
|
||||
mingw-w64-x86_64-libnfs
|
||||
mingw-w64-x86_64-libssh
|
||||
mingw-w64-x86_64-ninja
|
||||
mingw-w64-x86_64-pixman
|
||||
mingw-w64-x86_64-pkgconf
|
||||
mingw-w64-x86_64-python
|
||||
mingw-w64-x86_64-zstd"
|
||||
$MINGW_TARGET-capstone
|
||||
$MINGW_TARGET-curl
|
||||
$MINGW_TARGET-cyrus-sasl
|
||||
$MINGW_TARGET-dtc
|
||||
$MINGW_TARGET-gcc
|
||||
$MINGW_TARGET-glib2
|
||||
$MINGW_TARGET-gnutls
|
||||
$MINGW_TARGET-gtk3
|
||||
$MINGW_TARGET-libgcrypt
|
||||
$MINGW_TARGET-libjpeg-turbo
|
||||
$MINGW_TARGET-libnfs
|
||||
$MINGW_TARGET-libpng
|
||||
$MINGW_TARGET-libssh
|
||||
$MINGW_TARGET-libtasn1
|
||||
$MINGW_TARGET-libusb
|
||||
$MINGW_TARGET-lzo2
|
||||
$MINGW_TARGET-nettle
|
||||
$MINGW_TARGET-ninja
|
||||
$MINGW_TARGET-pixman
|
||||
$MINGW_TARGET-pkgconf
|
||||
$MINGW_TARGET-python
|
||||
$MINGW_TARGET-SDL2
|
||||
$MINGW_TARGET-SDL2_image
|
||||
$MINGW_TARGET-snappy
|
||||
$MINGW_TARGET-spice
|
||||
$MINGW_TARGET-usbredir
|
||||
$MINGW_TARGET-zstd "
|
||||
- Write-Output "Running build at $(Get-Date -Format u)"
|
||||
- $env:JOBS = $(.\msys64\usr\bin\bash -lc nproc)
|
||||
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
|
||||
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
|
||||
- $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
|
||||
- $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache"
|
||||
- $env:CCACHE_MAXSIZE = "500M"
|
||||
- $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode
|
||||
- $env:CC = "ccache gcc"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
|
||||
- ..\msys64\usr\bin\bash -lc "../configure $CONFIGURE_ARGS"
|
||||
- ..\msys64\usr\bin\bash -lc "make -j$env:JOBS"
|
||||
- ..\msys64\usr\bin\bash -lc "../configure --enable-fdt=system $CONFIGURE_ARGS"
|
||||
- ..\msys64\usr\bin\bash -lc "make"
|
||||
- ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
|
||||
- ..\msys64\usr\bin\bash -lc "ccache --show-stats"
|
||||
- Write-Output "Finished build at $(Get-Date -Format u)"
|
||||
|
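The shared MSYS2 builder above turns on ccache's depend mode with CCACHE_DEPEND=1 because, as its comment notes, cache misses are too expensive in the default preprocessor mode on these runners. A hedged sketch of an equivalent standalone setup; the compiler flags are illustrative, since depend mode needs dependency output (-MD/-MMD) on the compile line:

    export CCACHE_DIR="$PWD/ccache"
    export CCACHE_DEPEND=1        # hash via dependency files instead of preprocessing
    export CC="ccache gcc"
    $CC -MMD -c hello.c -o hello.o
    ccache --show-stats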
||||
msys2-64bit:
|
||||
extends: .shared_msys2_builder
|
||||
variables:
|
||||
MINGW_TARGET: mingw-w64-x86_64
|
||||
MSYSTEM: MINGW64
|
||||
# do not remove "--without-default-devices"!
|
||||
# commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices"
|
||||
# changed to compile QEMU with the --without-default-devices switch
|
||||
# for the msys2 64-bit job, due to the build could not complete within
# the project timeout.
|
||||
CONFIGURE_ARGS: --target-list=x86_64-softmmu --without-default-devices -Ddebug=false -Doptimization=0
|
||||
# qTests don't run successfully with "--without-default-devices",
|
||||
# so let's exclude the qtests from CI for now.
|
||||
TEST_ARGS: --no-suite qtest
|
||||
|
||||
msys2-32bit:
|
||||
extends: .shared_msys2_builder
|
||||
variables:
|
||||
MINGW_TARGET: mingw-w64-i686
|
||||
MSYSTEM: MINGW32
|
||||
CONFIGURE_ARGS: --target-list=ppc64-softmmu -Ddebug=false -Doptimization=0
|
||||
TEST_ARGS: --no-suite qtest
|
||||
|
36
.mailmap
@@ -30,45 +30,24 @@ malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
# Corrupted Author fields
|
||||
Aaron Larson <alarson@ddci.com> alarson@ddci.com
|
||||
Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber>
|
||||
fanwenjie <fanwj@mail.ustc.edu.cn> fanwj@mail.ustc.edu.cn <fanwj@mail.ustc.edu.cn>
|
||||
Jason Wang <jasowang@redhat.com> Jason Wang <jasowang>
|
||||
Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
|
||||
Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
|
||||
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
|
||||
Timothée Cocault <timothee.cocault@gmail.com> timothee.cocault@gmail.com <timothee.cocault@gmail.com>
|
||||
Stefan Weil <sw@weilnetz.de> <weil@mail.berlios.de>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@kiwi.(none)>
|
||||
|
||||
# There is also a:
|
||||
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
# for the cvs2svn initialization commit e63c3dc74bf.
|
||||
|
||||
# Next, translate a few commits where mailman rewrote the From: line due
|
||||
# to strict SPF and DMARC. Usually, our build process should be flagging
|
||||
# commits like these before maintainer merges; if you find the need to add
|
||||
# a line here, please also report a bug against the part of the build
|
||||
# process that let the mis-attribution slip through in the first place.
|
||||
#
|
||||
# If the mailing list munges your emails, use:
|
||||
# git config sendemail.from '"Your Name" <your.email@example.com>'
|
||||
# the use of "" in that line will differ from the typically unquoted
|
||||
# 'git config user.name', which in turn is sufficient for 'git send-email'
|
||||
# to add an extra From: line in the body of your email that takes
|
||||
# precedence over any munged From: in the mail's headers.
|
||||
# See https://lists.openembedded.org/g/openembedded-core/message/166515
|
||||
# and https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg06784.html
|
||||
# to strict SPF, although we prefer to avoid adding more entries like that.
|
||||
Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-trivial@nongnu.org>
|
||||
Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-block@nongnu.org>
|
||||
BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org>
|
||||
|
||||
# Next, replace old addresses by a more recent one.
|
||||
Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp> <akihiko.odaki@daynix.com>
|
||||
Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp> <akihiko.odaki@gmail.com>
|
||||
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
|
||||
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@imgtec.com>
|
||||
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
|
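The comment earlier in this hunk spells out the workaround for mailing lists that rewrite the From: header for SPF/DMARC: quote your name in sendemail.from so it differs from the unquoted user.name, which makes git send-email add an in-body From: line that survives the munging. As a concrete, hypothetical example:

    # one-time setup; note the quoted name
    git config sendemail.from '"Jane Developer" <jane@example.org>'
    # the in-body From: added to the patch mail takes precedence over a munged header
    git send-email --to=qemu-devel@nongnu.org 0001-example.patch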
||||
@@ -77,8 +56,6 @@ Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
|
||||
Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
|
||||
Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca>
|
||||
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
|
||||
Brian Cain <brian.cain@oss.qualcomm.com> <bcain@quicinc.com>
|
||||
Brian Cain <brian.cain@oss.qualcomm.com> <quic_bcain@quicinc.com>
|
||||
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
|
||||
Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
|
||||
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
|
||||
@@ -88,13 +65,8 @@ Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
|
||||
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
|
||||
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
|
||||
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
|
||||
Juan Quintela <quintela@trasno.org> <quintela@redhat.com>
|
||||
Leif Lindholm <leif.lindholm@oss.qualcomm.com> <quic_llindhol@quicinc.com>
|
||||
Leif Lindholm <leif.lindholm@oss.qualcomm.com> <leif.lindholm@linaro.org>
|
||||
Leif Lindholm <leif.lindholm@oss.qualcomm.com> <leif@nuviainc.com>
|
||||
Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
|
||||
Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
|
||||
Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
|
||||
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
|
||||
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
|
||||
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
|
||||
Paul Brook <paul@nowt.org> <paul@codesourcery.com>
|
||||
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
|
||||
@@ -105,9 +77,7 @@ Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org>
|
||||
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
|
||||
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
|
||||
Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com>
|
||||
Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
|
||||
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@weilnetz.de>
|
||||
Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
|
||||
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
|
||||
|
||||
|
@@ -5,19 +5,16 @@
|
||||
# Required
|
||||
version: 2
|
||||
|
||||
# Set the version of Python and other tools you might need
|
||||
build:
|
||||
os: ubuntu-22.04
|
||||
tools:
|
||||
python: "3.11"
|
||||
|
||||
# Build documentation in the docs/ directory with Sphinx
|
||||
sphinx:
|
||||
configuration: docs/conf.py
|
||||
|
||||
# We recommend specifying your dependencies to enable reproducible builds:
|
||||
# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/requirements.txt
|
||||
# We want all the document formats
|
||||
formats: all
|
||||
|
||||
# For consistency, we require that QEMU's Sphinx extensions
|
||||
# run with at least the same minimum version of Python that
|
||||
# we require for other Python in our codebase (our conf.py
|
||||
# enforces this, and some code needs it.)
|
||||
python:
|
||||
version: 3.6
|
||||
|
79
.travis.yml
@@ -1,5 +1,5 @@
|
||||
os: linux
|
||||
dist: jammy
|
||||
dist: focal
|
||||
language: c
|
||||
compiler:
|
||||
- gcc
|
||||
@@ -7,11 +7,13 @@ cache:
|
||||
# There is one cache per branch and compiler version.
|
||||
# characteristics of each job are used to identify the cache:
|
||||
# - OS name (currently only linux)
|
||||
# - OS distribution (e.g. "jammy" for Linux)
|
||||
# - OS distribution (for Linux, bionic or focal)
|
||||
# - Names and values of visible environment variables set in .travis.yml or Settings panel
|
||||
timeout: 1200
|
||||
ccache: true
|
||||
pip: true
|
||||
directories:
|
||||
- $HOME/avocado/data/cache
|
||||
|
||||
|
||||
# The channel name "irc.oftc.net#qemu" is encrypted against qemu/qemu
|
||||
@@ -32,8 +34,8 @@ env:
|
||||
- BASE_CONFIG="--disable-docs --disable-tools"
|
||||
- TEST_BUILD_CMD=""
|
||||
- TEST_CMD="make check V=1"
|
||||
# This is broadly a list of "mainline" system targets which have support across the major distros
|
||||
- MAIN_SYSTEM_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
|
||||
# This is broadly a list of "mainline" softmmu targets which have support across the major distros
|
||||
- MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
|
||||
- CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime"
|
||||
- CCACHE_MAXSIZE=1G
|
||||
- G_MESSAGES_DEBUG=error
|
||||
@@ -79,9 +81,45 @@ after_script:
|
||||
jobs:
|
||||
include:
|
||||
|
||||
- name: "[ppc64] Clang check-tcg"
|
||||
arch: ppc64le
|
||||
compiler: clang
|
||||
- name: "[aarch64] GCC check-tcg"
|
||||
arch: arm64
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
- libattr1-dev
|
||||
- libbrlapi-dev
|
||||
- libcacard-dev
|
||||
- libcap-ng-dev
|
||||
- libfdt-dev
|
||||
- libgcrypt20-dev
|
||||
- libgnutls28-dev
|
||||
- libgtk-3-dev
|
||||
- libiscsi-dev
|
||||
- liblttng-ust-dev
|
||||
- libncurses5-dev
|
||||
- libnfs-dev
|
||||
- libpixman-1-dev
|
||||
- libpng-dev
|
||||
- librados-dev
|
||||
- libsdl2-dev
|
||||
- libseccomp-dev
|
||||
- liburcu-dev
|
||||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers --enable-fdt=system
|
||||
--target-list=${MAIN_SOFTMMU_TARGETS} --cxx=/bin/false"
|
||||
- UNRELIABLE=true
|
||||
|
||||
- name: "[ppc64] GCC check-tcg"
|
||||
arch: ppc64le
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
@@ -107,7 +145,6 @@ jobs:
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
@@ -117,6 +154,7 @@ jobs:
|
||||
|
||||
- name: "[s390x] GCC check-tcg"
|
||||
arch: s390x
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
@@ -142,13 +180,13 @@ jobs:
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers
|
||||
--target-list=hppa-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
|
||||
- CONFIG="--disable-containers --enable-fdt=system
|
||||
--target-list=${MAIN_SOFTMMU_TARGETS},s390x-linux-user"
|
||||
- UNRELIABLE=true
|
||||
script:
|
||||
- BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
|
||||
- |
|
||||
@@ -159,9 +197,9 @@ jobs:
|
||||
$(exit $BUILD_RC);
|
||||
fi
|
||||
|
||||
- name: "[s390x] Clang (other-system)"
|
||||
- name: "[s390x] GCC (other-softmmu)"
|
||||
arch: s390x
|
||||
compiler: clang
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
@@ -182,16 +220,17 @@ jobs:
|
||||
- libsnappy-dev
|
||||
- libzstd-dev
|
||||
- nettle-dev
|
||||
- xfslibs-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
|
||||
--target-list=arm-softmmu,avr-softmmu,microblaze-softmmu,sh4eb-softmmu,sparc64-softmmu,xtensaeb-softmmu"
|
||||
- CONFIG="--disable-containers --enable-fdt=system --audio-drv-list=sdl
|
||||
--disable-user --target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
|
||||
|
||||
- name: "[s390x] GCC (user)"
|
||||
arch: s390x
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libgcrypt20-dev
|
||||
@@ -200,14 +239,13 @@ jobs:
|
||||
- ninja-build
|
||||
- flex
|
||||
- bison
|
||||
- python3-tomli
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers --disable-system"
|
||||
|
||||
- name: "[s390x] Clang (disable-tcg)"
|
||||
arch: s390x
|
||||
compiler: clang
|
||||
dist: focal
|
||||
compiler: clang-10
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
@@ -233,8 +271,9 @@ jobs:
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
- clang-10
|
||||
env:
|
||||
- TEST_CMD="make check-unit"
|
||||
- CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools
|
||||
--enable-fdt=system --host-cc=clang --cxx=clang++"
|
||||
- UNRELIABLE=true
|
||||
|
5
COPYING
@@ -2,7 +2,7 @@
|
||||
Version 2, June 1991
|
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
|
||||
<https://fsf.org/>
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
@@ -304,7 +304,8 @@ the "copyright" line and a pointer to where the full notice is found.
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, see <https://www.gnu.org/licenses/>.
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
|
@@ -2,7 +2,7 @@
|
||||
Version 2.1, February 1999
|
||||
|
||||
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
|
||||
<https://fsf.org/>
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
@@ -484,7 +484,8 @@ convey the exclusion of warranty; and each file should have at least the
|
||||
Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public
|
||||
License along with this library; if not, see <https://www.gnu.org/licenses/>.
|
||||
License along with this library; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
|
1
Kconfig
@@ -4,4 +4,3 @@ source accel/Kconfig
|
||||
source target/Kconfig
|
||||
source hw/Kconfig
|
||||
source semihosting/Kconfig
|
||||
source rust/Kconfig
|
||||
|
24
Kconfig.host
@@ -5,21 +5,12 @@
|
||||
config LINUX
|
||||
bool
|
||||
|
||||
config LIBCBOR
|
||||
bool
|
||||
|
||||
config GNUTLS
|
||||
bool
|
||||
|
||||
config OPENGL
|
||||
bool
|
||||
|
||||
config X11
|
||||
bool
|
||||
|
||||
config PIXMAN
|
||||
bool
|
||||
|
||||
config SPICE
|
||||
bool
|
||||
|
||||
@@ -29,9 +20,6 @@ config IVSHMEM
|
||||
config TPM
|
||||
bool
|
||||
|
||||
config FDT
|
||||
bool
|
||||
|
||||
config VHOST_USER
|
||||
bool
|
||||
|
||||
@@ -44,6 +32,9 @@ config VHOST_KERNEL
|
||||
config VIRTFS
|
||||
bool
|
||||
|
||||
config PVRDMA
|
||||
bool
|
||||
|
||||
config MULTIPROCESS_ALLOWED
|
||||
bool
|
||||
imply MULTIPROCESS
|
||||
@@ -55,12 +46,3 @@ config FUZZ
|
||||
config VFIO_USER_SERVER_ALLOWED
|
||||
bool
|
||||
imply VFIO_USER_SERVER
|
||||
|
||||
config HV_BALLOON_POSSIBLE
|
||||
bool
|
||||
|
||||
config HAVE_RUST
|
||||
bool
|
||||
|
||||
config MAC_PVG
|
||||
bool
|
||||
|
1288
MAINTAINERS
File diff suppressed because it is too large
57
Makefile
@@ -78,8 +78,7 @@ x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
endif
|
||||
|
||||
# 1. ensure config-host.mak is up-to-date
|
||||
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh \
|
||||
$(SRC_PATH)/pythondeps.toml $(SRC_PATH)/VERSION
|
||||
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/VERSION
|
||||
@echo config-host.mak is out-of-date, running configure
|
||||
@if test -f meson-private/coredata.dat; then \
|
||||
./config.status --skip-meson; \
|
||||
@@ -142,13 +141,8 @@ MAKE.n = $(findstring n,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
|
||||
NINJAFLAGS = \
|
||||
$(if $V,-v) \
|
||||
$(if $(MAKE.n), -n) \
|
||||
$(if $(MAKE.k), -k0) \
|
||||
$(filter-out -j, \
|
||||
$(or $(filter -l% -j%, $(MAKEFLAGS)), \
|
||||
$(if $(filter --jobserver-auth=%, $(MAKEFLAGS)),, -j1))) \
|
||||
NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \
|
||||
$(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \
|
||||
-d keepdepfile
|
||||
ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
|
||||
ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
|
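Both variants of NINJAFLAGS above translate the invoking make's own flags into ninja arguments: V=1 becomes -v, -n a dry run, -k becomes -k0, and -j/-l load limits are passed through; one variant falls back to -j1 only when make supplies no jobserver, the other whenever no explicit -j/-l was given. Roughly, for two illustrative invocations:

    # "make -j8 V=1 all" ends up running something like
    ninja -v -j8 -d keepdepfile all
    # "make -n check" becomes a serial dry run
    ninja -n -j1 -d keepdepfile check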
||||
@@ -170,6 +164,14 @@ ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_PLUGIN),y)
|
||||
.PHONY: plugins
|
||||
plugins:
|
||||
$(call quiet-command,\
|
||||
$(MAKE) $(SUBDIR_MAKEFLAGS) -C contrib/plugins V="$(V)", \
|
||||
"BUILD", "example plugins")
|
||||
endif # $(CONFIG_PLUGIN)
|
||||
|
||||
else # config-host.mak does not exist
|
||||
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
|
||||
$(error Please call configure before running make)
|
||||
@@ -182,15 +184,15 @@ include $(SRC_PATH)/tests/Makefile.include
|
||||
|
||||
all: recurse-all
|
||||
|
||||
SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS)))
|
||||
.PHONY: $(SUBDIR_RULES)
|
||||
$(SUBDIR_RULES):
|
||||
ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS)))
|
||||
.PHONY: $(ROMS_RULES)
|
||||
$(ROMS_RULES):
|
||||
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
|
||||
|
||||
.PHONY: recurse-all recurse-clean
|
||||
recurse-all: $(addsuffix /all, $(SUBDIRS))
|
||||
recurse-clean: $(addsuffix /clean, $(SUBDIRS))
|
||||
recurse-distclean: $(addsuffix /distclean, $(SUBDIRS))
|
||||
recurse-all: $(addsuffix /all, $(ROMS))
|
||||
recurse-clean: $(addsuffix /clean, $(ROMS))
|
||||
recurse-distclean: $(addsuffix /distclean, $(ROMS))
|
||||
|
||||
######################################################################
|
||||
|
||||
@@ -203,14 +205,13 @@ clean: recurse-clean
|
||||
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \
|
||||
-exec rm {} +
|
||||
rm -f TAGS cscope.* *~ */*~
|
||||
@$(MAKE) -Ctests/qemu-iotests clean
|
||||
|
||||
VERSION = $(shell cat $(SRC_PATH)/VERSION)
|
||||
|
||||
dist: qemu-$(VERSION).tar.xz
|
||||
dist: qemu-$(VERSION).tar.bz2
|
||||
|
||||
qemu-%.tar.xz:
|
||||
$(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.xz,%,$@)"
|
||||
qemu-%.tar.bz2:
|
||||
$(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)"
|
||||
|
||||
distclean: clean recurse-distclean
|
||||
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
|
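The dist target above resolves to a pattern rule, so the tarball name embeds the contents of the VERSION file and the actual packaging is delegated to scripts/make-release. Typical usage, with an illustrative version number:

    make dist                 # builds qemu-$(cat VERSION).tar.xz (or .tar.bz2 in the older variant)
    make qemu-8.2.0.tar.xz    # request a specific version string directly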
||||
@@ -227,7 +228,6 @@ distclean: clean recurse-distclean
|
||||
rm -Rf .sdk qemu-bundle
|
||||
|
||||
find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \
|
||||
-path "$(SRC_PATH)/.pc" -prune -o \
|
||||
-type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)
|
||||
|
||||
.PHONY: ctags
|
||||
@@ -286,13 +286,6 @@ include $(SRC_PATH)/tests/vm/Makefile.include
|
||||
print-help-run = printf " %-30s - %s\\n" "$1" "$2"
|
||||
print-help = @$(call print-help-run,$1,$2)
|
||||
|
||||
.PHONY: update-linux-vdso
|
||||
update-linux-vdso:
|
||||
@for m in $(SRC_PATH)/linux-user/*/Makefile.vdso; do \
|
||||
$(MAKE) $(SUBDIR_MAKEFLAGS) -C $$(dirname $$m) -f Makefile.vdso \
|
||||
SRC_PATH=$(SRC_PATH) BUILD_DIR=$(BUILD_DIR); \
|
||||
done
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo 'Generic targets:'
|
||||
@@ -303,14 +296,16 @@ help:
|
||||
$(call print-help,cscope,Generate cscope index)
|
||||
$(call print-help,sparse,Run sparse on the QEMU source)
|
||||
@echo ''
|
||||
ifeq ($(CONFIG_PLUGIN),y)
|
||||
@echo 'Plugin targets:'
|
||||
$(call print-help,plugins,Build the example TCG plugins)
|
||||
@echo ''
|
||||
endif
|
||||
@echo 'Cleaning targets:'
|
||||
$(call print-help,clean,Remove most generated files but keep the config)
|
||||
$(call print-help,distclean,Remove all generated files)
|
||||
$(call print-help,dist,Build a distributable tarball)
|
||||
@echo ''
|
||||
@echo 'Linux-user targets:'
|
||||
$(call print-help,update-linux-vdso,Build linux-user vdso images)
|
||||
@echo ''
|
||||
@echo 'Test targets:'
|
||||
$(call print-help,check,Run all tests (check-help for details))
|
||||
$(call print-help,bench,Run all benchmarks)
|
||||
@@ -321,7 +316,7 @@ help:
|
||||
@echo 'Documentation targets:'
|
||||
$(call print-help,html man,Build documentation in specified format)
|
||||
@echo ''
|
||||
ifneq ($(filter msi, $(ninja-targets)),)
|
||||
ifdef CONFIG_WIN32
|
||||
@echo 'Windows targets:'
|
||||
$(call print-help,installer,Build NSIS-based installer for QEMU)
|
||||
$(call print-help,msi,Build MSI-based installer for qemu-ga)
|
||||
|
@@ -82,7 +82,7 @@ guidelines set out in the `style section
|
||||
the Developers Guide.
|
||||
|
||||
Additional information on submitting patches can be found online via
|
||||
the QEMU website:
|
||||
the QEMU website
|
||||
|
||||
* `<https://wiki.qemu.org/Contribute/SubmitAPatch>`_
|
||||
* `<https://wiki.qemu.org/Contribute/TrivialPatches>`_
|
||||
@@ -102,7 +102,7 @@ requires a working 'git send-email' setup, and by default doesn't
|
||||
automate everything, so you may want to go through the above steps
|
||||
manually for once.
|
||||
|
||||
For installation instructions, please go to:
|
||||
For installation instructions, please go to
|
||||
|
||||
* `<https://github.com/stefanha/git-publish>`_
|
||||
|
||||
@@ -159,7 +159,7 @@ Contact
|
||||
=======
|
||||
|
||||
The QEMU community can be contacted in a number of ways, with the two
|
||||
main methods being email and IRC:
|
||||
main methods being email and IRC
|
||||
|
||||
* `<mailto:qemu-devel@nongnu.org>`_
|
||||
* `<https://lists.nongnu.org/mailman/listinfo/qemu-devel>`_
|
||||
|
@@ -4,6 +4,9 @@ config WHPX
|
||||
config NVMM
|
||||
bool
|
||||
|
||||
config HAX
|
||||
bool
|
||||
|
||||
config HVF
|
||||
bool
|
||||
|
||||
@@ -16,5 +19,3 @@ config KVM
|
||||
config XEN
|
||||
bool
|
||||
select FSDEV_9P if VIRTFS
|
||||
select PCI_EXPRESS_GENERIC_BRIDGE
|
||||
select XEN_BUS
|
||||
|
@@ -25,11 +25,10 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/lockcnt.h"
|
||||
#include "qemu/thread.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "system/accel-blocker.h"
|
||||
#include "sysemu/accel-blocker.h"
|
||||
|
||||
static QemuLockCnt accel_in_ioctl_lock;
|
||||
static QemuEvent accel_in_ioctl_event;
|
||||
@@ -42,7 +41,7 @@ void accel_blocker_init(void)
|
||||
|
||||
void accel_ioctl_begin(void)
|
||||
{
|
||||
if (likely(bql_locked())) {
|
||||
if (likely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -52,7 +51,7 @@ void accel_ioctl_begin(void)
|
||||
|
||||
void accel_ioctl_end(void)
|
||||
{
|
||||
if (likely(bql_locked())) {
|
||||
if (likely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -63,7 +62,7 @@ void accel_ioctl_end(void)
|
||||
|
||||
void accel_cpu_ioctl_begin(CPUState *cpu)
|
||||
{
|
||||
if (unlikely(bql_locked())) {
|
||||
if (unlikely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -73,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)
|
||||
|
||||
void accel_cpu_ioctl_end(CPUState *cpu)
|
||||
{
|
||||
if (unlikely(bql_locked())) {
|
||||
if (unlikely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -106,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
|
||||
* We allow to inhibit only when holding the BQL, so we can identify
|
||||
* when an inhibitor wants to issue an ioctl easily.
|
||||
*/
|
||||
g_assert(bql_locked());
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
|
||||
/* Block further invocations of the ioctls outside the BQL. */
|
||||
CPU_FOREACH(cpu) {
|
||||
|
@@ -4,16 +4,41 @@
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "qemu/target-info.h"
|
||||
#include "accel/accel-ops.h"
|
||||
#include "accel/accel-cpu.h"
|
||||
#include "accel/accel-cpu-ops.h"
|
||||
#include "accel-internal.h"
|
||||
|
||||
#include "cpu.h"
|
||||
#include "hw/core/accel-cpu.h"
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
#include "accel-softmmu.h"
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
static const TypeInfo accel_type = {
|
||||
.name = TYPE_ACCEL,
|
||||
.parent = TYPE_OBJECT,
|
||||
.class_size = sizeof(AccelClass),
|
||||
.instance_size = sizeof(AccelState),
|
||||
};
|
||||
|
||||
/* Lookup AccelClass from opt_name. Returns NULL if not found */
|
||||
AccelClass *accel_find(const char *opt_name)
|
||||
@@ -62,83 +87,69 @@ static void accel_init_cpu_interfaces(AccelClass *ac)
|
||||
const char *ac_name; /* AccelClass name */
|
||||
char *acc_name; /* AccelCPUClass name */
|
||||
ObjectClass *acc; /* AccelCPUClass */
|
||||
const char *cpu_resolving_type = target_cpu_type();
|
||||
|
||||
ac_name = object_class_get_name(OBJECT_CLASS(ac));
|
||||
g_assert(ac_name != NULL);
|
||||
|
||||
acc_name = g_strdup_printf("%s-%s", ac_name, cpu_resolving_type);
|
||||
acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE);
|
||||
acc = object_class_by_name(acc_name);
|
||||
g_free(acc_name);
|
||||
|
||||
if (acc) {
|
||||
object_class_foreach(accel_init_cpu_int_aux,
|
||||
cpu_resolving_type, false, acc);
|
||||
CPU_RESOLVING_TYPE, false, acc);
|
||||
}
|
||||
}
|
||||
|
||||
void accel_init_interfaces(AccelClass *ac)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
accel_init_ops_interfaces(ac);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
accel_init_cpu_interfaces(ac);
|
||||
}
|
||||
|
||||
void accel_cpu_instance_init(CPUState *cpu)
|
||||
{
|
||||
if (cpu->cc->accel_cpu && cpu->cc->accel_cpu->cpu_instance_init) {
|
||||
cpu->cc->accel_cpu->cpu_instance_init(cpu);
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) {
|
||||
cc->accel_cpu->cpu_instance_init(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
|
||||
bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
/* target specific realization */
|
||||
if (cpu->cc->accel_cpu
|
||||
&& cpu->cc->accel_cpu->cpu_target_realize
|
||||
&& !cpu->cc->accel_cpu->cpu_target_realize(cpu, errp)) {
|
||||
return false;
|
||||
if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
|
||||
return cc->accel_cpu->cpu_realizefn(cpu, errp);
|
||||
}
|
||||
|
||||
/* generic realization */
|
||||
if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void accel_cpu_common_unrealize(CPUState *cpu)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
|
||||
/* generic unrealization */
|
||||
if (acc->cpu_common_unrealize) {
|
||||
acc->cpu_common_unrealize(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
int accel_supported_gdbstub_sstep_flags(void)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
if (acc->gdbstub_supported_sstep_flags) {
|
||||
return acc->gdbstub_supported_sstep_flags(accel);
|
||||
return acc->gdbstub_supported_sstep_flags();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const TypeInfo accel_types[] = {
|
||||
{
|
||||
.name = TYPE_ACCEL,
|
||||
static const TypeInfo accel_cpu_type = {
|
||||
.name = TYPE_ACCEL_CPU,
|
||||
.parent = TYPE_OBJECT,
|
||||
.class_size = sizeof(AccelClass),
|
||||
.instance_size = sizeof(AccelState),
|
||||
.abstract = true,
|
||||
},
|
||||
.class_size = sizeof(AccelCPUClass),
|
||||
};
|
||||
|
||||
DEFINE_TYPES(accel_types)
|
||||
static void register_accel_types(void)
|
||||
{
|
||||
type_register_static(&accel_type);
|
||||
type_register_static(&accel_cpu_type);
|
||||
}
|
||||
|
||||
type_init(register_accel_types);
|
||||
|
@@ -1,17 +0,0 @@
/*
* QEMU accel internal functions
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/

#ifndef ACCEL_INTERNAL_H
#define ACCEL_INTERNAL_H

#include "qemu/accel.h"

void accel_init_ops_interfaces(AccelClass *ac);

#endif /* ACCEL_SYSTEM_H */
@@ -1,35 +0,0 @@
/*
* QMP commands related to accelerators
*
* Copyright (c) Linaro
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "qapi/type-helpers.h"
#include "qapi/qapi-commands-accelerator.h"
#include "accel/accel-ops.h"
#include "accel/accel-cpu-ops.h"
#include "hw/core/cpu.h"

HumanReadableText *qmp_x_accel_stats(Error **errp)
{
AccelState *accel = current_accel();
AccelClass *acc = ACCEL_GET_CLASS(accel);
g_autoptr(GString) buf = g_string_new("");

if (acc->get_stats) {
acc->get_stats(accel, buf);
}
if (acc->ops->get_vcpu_stats) {
CPUState *cpu;

CPU_FOREACH(cpu) {
acc->ops->get_vcpu_stats(cpu, buf);
}
}

return human_readable_text_from_str(buf);
}
106 accel/accel-softmmu.c Normal file
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* QEMU accel class, system emulation components
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "hw/boards.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "accel-softmmu.h"
|
||||
|
||||
int accel_init_machine(AccelState *accel, MachineState *ms)
|
||||
{
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
int ret;
|
||||
ms->accelerator = accel;
|
||||
*(acc->allowed) = true;
|
||||
ret = acc->init_machine(ms);
|
||||
if (ret < 0) {
|
||||
ms->accelerator = NULL;
|
||||
*(acc->allowed) = false;
|
||||
object_unref(OBJECT(accel));
|
||||
} else {
|
||||
object_set_accelerator_compat_props(acc->compat_props);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
AccelState *current_accel(void)
|
||||
{
|
||||
return current_machine->accelerator;
|
||||
}
|
||||
|
||||
void accel_setup_post(MachineState *ms)
|
||||
{
|
||||
AccelState *accel = ms->accelerator;
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
if (acc->setup_post) {
|
||||
acc->setup_post(ms, accel);
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize the arch-independent accel operation interfaces */
|
||||
void accel_init_ops_interfaces(AccelClass *ac)
|
||||
{
|
||||
const char *ac_name;
|
||||
char *ops_name;
|
||||
ObjectClass *oc;
|
||||
AccelOpsClass *ops;
|
||||
|
||||
ac_name = object_class_get_name(OBJECT_CLASS(ac));
|
||||
g_assert(ac_name != NULL);
|
||||
|
||||
ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
|
||||
ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name));
|
||||
oc = module_object_class_by_name(ops_name);
|
||||
if (!oc) {
|
||||
error_report("fatal: could not load module for type '%s'", ops_name);
|
||||
exit(1);
|
||||
}
|
||||
g_free(ops_name);
|
||||
ops = ACCEL_OPS_CLASS(oc);
|
||||
/*
|
||||
* all accelerators need to define ops, providing at least a mandatory
|
||||
* non-NULL create_vcpu_thread operation.
|
||||
*/
|
||||
g_assert(ops != NULL);
|
||||
if (ops->ops_init) {
|
||||
ops->ops_init(ops);
|
||||
}
|
||||
cpus_register_accel(ops);
|
||||
}
|
||||
|
||||
static const TypeInfo accel_ops_type_info = {
|
||||
.name = TYPE_ACCEL_OPS,
|
||||
.parent = TYPE_OBJECT,
|
||||
.abstract = true,
|
||||
.class_size = sizeof(AccelOpsClass),
|
||||
};
|
||||
|
||||
static void accel_softmmu_register_types(void)
|
||||
{
|
||||
type_register_static(&accel_ops_type_info);
|
||||
}
|
||||
type_init(accel_softmmu_register_types);
|
15 accel/accel-softmmu.h Normal file
@@ -0,0 +1,15 @@
/*
* QEMU System Emulation accel internal functions
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/

#ifndef ACCEL_SOFTMMU_H
#define ACCEL_SOFTMMU_H

void accel_init_ops_interfaces(AccelClass *ac);

#endif /* ACCEL_SOFTMMU_H */
@@ -1,125 +0,0 @@
|
||||
/*
|
||||
* QEMU accel class, system emulation components
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "qapi/qapi-commands-accelerator.h"
|
||||
#include "monitor/monitor.h"
|
||||
#include "hw/boards.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "accel/accel-ops.h"
|
||||
#include "accel/accel-cpu-ops.h"
|
||||
#include "system/cpus.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "accel-internal.h"
|
||||
|
||||
int accel_init_machine(AccelState *accel, MachineState *ms)
|
||||
{
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
int ret;
|
||||
ms->accelerator = accel;
|
||||
*(acc->allowed) = true;
|
||||
ret = acc->init_machine(accel, ms);
|
||||
if (ret < 0) {
|
||||
ms->accelerator = NULL;
|
||||
*(acc->allowed) = false;
|
||||
object_unref(OBJECT(accel));
|
||||
} else {
|
||||
object_set_accelerator_compat_props(acc->compat_props);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
AccelState *current_accel(void)
|
||||
{
|
||||
return current_machine->accelerator;
|
||||
}
|
||||
|
||||
void accel_setup_post(MachineState *ms)
|
||||
{
|
||||
AccelState *accel = ms->accelerator;
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
if (acc->setup_post) {
|
||||
acc->setup_post(accel);
|
||||
}
|
||||
}
|
||||
|
||||
void accel_pre_resume(MachineState *ms, bool step_pending)
|
||||
{
|
||||
AccelState *accel = ms->accelerator;
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
if (acc->pre_resume_vm) {
|
||||
acc->pre_resume_vm(accel, step_pending);
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize the arch-independent accel operation interfaces */
|
||||
void accel_init_ops_interfaces(AccelClass *ac)
|
||||
{
|
||||
const char *ac_name;
|
||||
char *ops_name;
|
||||
ObjectClass *oc;
|
||||
AccelOpsClass *ops;
|
||||
|
||||
ac_name = object_class_get_name(OBJECT_CLASS(ac));
|
||||
g_assert(ac_name != NULL);
|
||||
|
||||
ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
|
||||
oc = module_object_class_by_name(ops_name);
|
||||
if (!oc) {
|
||||
error_report("fatal: could not load module for type '%s'", ops_name);
|
||||
exit(1);
|
||||
}
|
||||
g_free(ops_name);
|
||||
/*
|
||||
* all accelerators need to define ops, providing at least a mandatory
|
||||
* non-NULL create_vcpu_thread operation.
|
||||
*/
|
||||
ops = ACCEL_OPS_CLASS(oc);
|
||||
ac->ops = ops;
|
||||
if (ops->ops_init) {
|
||||
ops->ops_init(ac);
|
||||
}
|
||||
cpus_register_accel(ops);
|
||||
}
|
||||
|
||||
static void accel_ops_class_init(ObjectClass *oc, const void *data)
|
||||
{
|
||||
monitor_register_hmp_info_hrt("accel", qmp_x_accel_stats);
|
||||
}
|
||||
|
||||
static const TypeInfo accel_ops_type_info = {
|
||||
.name = TYPE_ACCEL_OPS,
|
||||
.parent = TYPE_OBJECT,
|
||||
.abstract = true,
|
||||
.class_size = sizeof(AccelOpsClass),
|
||||
.class_init = accel_ops_class_init,
|
||||
};
|
||||
|
||||
static void accel_system_register_types(void)
|
||||
{
|
||||
type_register_static(&accel_ops_type_info);
|
||||
}
|
||||
type_init(accel_system_register_types);
|
@@ -1,41 +0,0 @@
|
||||
/*
|
||||
* QEMU accel class, components common to system emulation and user mode
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "accel/accel-cpu-target.h"
|
||||
|
||||
static const TypeInfo accel_cpu_type = {
|
||||
.name = TYPE_ACCEL_CPU,
|
||||
.parent = TYPE_OBJECT,
|
||||
.abstract = true,
|
||||
.class_size = sizeof(AccelCPUClass),
|
||||
};
|
||||
|
||||
static void register_accel_types(void)
|
||||
{
|
||||
type_register_static(&accel_cpu_type);
|
||||
}
|
||||
|
||||
type_init(register_accel_types);
|
@@ -9,12 +9,6 @@

#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "accel-internal.h"

void accel_init_ops_interfaces(AccelClass *ac)
{
/* nothing */
}

AccelState *current_accel(void)
{
@@ -13,11 +13,10 @@

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "system/cpus.h"
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
#include "accel/dummy-cpus.h"

static void *dummy_cpu_thread_fn(void *arg)
{
@@ -25,9 +24,10 @@ static void *dummy_cpu_thread_fn(void *arg)

rcu_register_thread();

bql_lock();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
current_cpu = cpu;

#ifndef _WIN32
@@ -43,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);

do {
bql_unlock();
qemu_mutex_unlock_iothread();
#ifndef _WIN32
do {
int sig;
@@ -56,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg)
#else
qemu_sem_wait(&cpu->sem);
#endif
bql_lock();
qemu_mutex_lock_iothread();
qemu_wait_io_event(cpu);
} while (!cpu->unplug);

bql_unlock();
qemu_mutex_unlock_iothread();
rcu_unregister_thread();
return NULL;
}
@@ -69,6 +69,9 @@ void dummy_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];

cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,
@@ -1,14 +0,0 @@
/*
* Dummy cpu thread code
*
* Copyright IBM, Corp. 2011
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#ifndef ACCEL_DUMMY_CPUS_H
#define ACCEL_DUMMY_CPUS_H

void dummy_start_vcpu_thread(CPUState *cpu);

#endif
@@ -48,20 +48,23 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/queue.h"
|
||||
#include "gdbstub/enums.h"
|
||||
#include "exec/cpu-common.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "accel/accel-cpu-ops.h"
|
||||
#include "system/cpus.h"
|
||||
#include "system/hvf.h"
|
||||
#include "system/hvf_int.h"
|
||||
#include <mach/mach_time.h>
|
||||
#include "exec/address-spaces.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/gdbstub.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/hvf_int.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "qemu/guest-random.h"
|
||||
|
||||
HVFState *hvf_state;
|
||||
|
||||
#ifdef __aarch64__
|
||||
#define HV_VM_DEFAULT NULL
|
||||
#endif
|
||||
|
||||
/* Memory slots */
|
||||
|
||||
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
|
||||
@@ -78,6 +81,127 @@ hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct mac_slot {
|
||||
int present;
|
||||
uint64_t size;
|
||||
uint64_t gpa_start;
|
||||
uint64_t gva;
|
||||
};
|
||||
|
||||
struct mac_slot mac_slots[32];
|
||||
|
||||
static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
|
||||
{
|
||||
struct mac_slot *macslot;
|
||||
hv_return_t ret;
|
||||
|
||||
macslot = &mac_slots[slot->slot_id];
|
||||
|
||||
if (macslot->present) {
|
||||
if (macslot->size != slot->size) {
|
||||
macslot->present = 0;
|
||||
ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
|
||||
assert_hvf_ok(ret);
|
||||
}
|
||||
}
|
||||
|
||||
if (!slot->size) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
macslot->present = 1;
|
||||
macslot->gpa_start = slot->start;
|
||||
macslot->size = slot->size;
|
||||
ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
|
||||
assert_hvf_ok(ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
|
||||
{
|
||||
hvf_slot *mem;
|
||||
MemoryRegion *area = section->mr;
|
||||
bool writable = !area->readonly && !area->rom_device;
|
||||
hv_memory_flags_t flags;
|
||||
uint64_t page_size = qemu_real_host_page_size();
|
||||
|
||||
if (!memory_region_is_ram(area)) {
|
||||
if (writable) {
|
||||
return;
|
||||
} else if (!memory_region_is_romd(area)) {
|
||||
/*
|
||||
* If the memory device is not in romd_mode, then we actually want
|
||||
* to remove the hvf memory slot so all accesses will trap.
|
||||
*/
|
||||
add = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
|
||||
!QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
|
||||
/* Not page aligned, so we can not map as RAM */
|
||||
add = false;
|
||||
}
|
||||
|
||||
mem = hvf_find_overlap_slot(
|
||||
section->offset_within_address_space,
|
||||
int128_get64(section->size));
|
||||
|
||||
if (mem && add) {
|
||||
if (mem->size == int128_get64(section->size) &&
|
||||
mem->start == section->offset_within_address_space &&
|
||||
mem->mem == (memory_region_get_ram_ptr(area) +
|
||||
section->offset_within_region)) {
|
||||
return; /* Same region was attempted to register, go away. */
|
||||
}
|
||||
}
|
||||
|
||||
/* Region needs to be reset. set the size to 0 and remap it. */
|
||||
if (mem) {
|
||||
mem->size = 0;
|
||||
if (do_hvf_set_memory(mem, 0)) {
|
||||
error_report("Failed to reset overlapping slot");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
if (!add) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (area->readonly ||
|
||||
(!memory_region_is_ram(area) && memory_region_is_romd(area))) {
|
||||
flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
|
||||
} else {
|
||||
flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
|
||||
}
|
||||
|
||||
/* Now make a new slot. */
|
||||
int x;
|
||||
|
||||
for (x = 0; x < hvf_state->num_slots; ++x) {
|
||||
mem = &hvf_state->slots[x];
|
||||
if (!mem->size) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (x == hvf_state->num_slots) {
|
||||
error_report("No free slots");
|
||||
abort();
|
||||
}
|
||||
|
||||
mem->size = int128_get64(section->size);
|
||||
mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
|
||||
mem->start = section->offset_within_address_space;
|
||||
mem->region = area;
|
||||
|
||||
if (do_hvf_set_memory(mem, flags)) {
|
||||
error_report("Error registering new memory slot");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
|
||||
{
|
||||
if (!cpu->vcpu_dirty) {
|
||||
@@ -115,16 +239,137 @@ static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
|
||||
}
|
||||
|
||||
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
|
||||
{
|
||||
hvf_slot *slot;
|
||||
|
||||
slot = hvf_find_overlap_slot(
|
||||
section->offset_within_address_space,
|
||||
int128_get64(section->size));
|
||||
|
||||
/* protect region against writes; begin tracking it */
|
||||
if (on) {
|
||||
slot->flags |= HVF_SLOT_LOG;
|
||||
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
|
||||
HV_MEMORY_READ | HV_MEMORY_EXEC);
|
||||
/* stop tracking region*/
|
||||
} else {
|
||||
slot->flags &= ~HVF_SLOT_LOG;
|
||||
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
|
||||
HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_log_start(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (old != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_log_stop(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (new != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 0);
|
||||
}
|
||||
|
||||
static void hvf_log_sync(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
/*
|
||||
* sync of dirty pages is handled elsewhere; just make sure we keep
|
||||
* tracking the region.
|
||||
*/
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_region_add(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, true);
|
||||
}
|
||||
|
||||
static void hvf_region_del(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, false);
|
||||
}
|
||||
|
||||
static MemoryListener hvf_memory_listener = {
|
||||
.name = "hvf",
|
||||
.priority = MEMORY_LISTENER_PRIORITY_ACCEL,
|
||||
.region_add = hvf_region_add,
|
||||
.region_del = hvf_region_del,
|
||||
.log_start = hvf_log_start,
|
||||
.log_stop = hvf_log_stop,
|
||||
.log_sync = hvf_log_sync,
|
||||
};
|
||||
|
||||
static void dummy_signal(int sig)
|
||||
{
|
||||
}
|
||||
|
||||
static void do_hvf_get_vcpu_exec_time(CPUState *cpu, run_on_cpu_data arg)
|
||||
bool hvf_allowed;
|
||||
|
||||
static int hvf_accel_init(MachineState *ms)
|
||||
{
|
||||
int r = hv_vcpu_get_exec_time(cpu->accel->fd, arg.host_ptr);
|
||||
assert_hvf_ok(r);
|
||||
int x;
|
||||
hv_return_t ret;
|
||||
HVFState *s;
|
||||
|
||||
ret = hv_vm_create(HV_VM_DEFAULT);
|
||||
assert_hvf_ok(ret);
|
||||
|
||||
s = g_new0(HVFState, 1);
|
||||
|
||||
s->num_slots = ARRAY_SIZE(s->slots);
|
||||
for (x = 0; x < s->num_slots; ++x) {
|
||||
s->slots[x].size = 0;
|
||||
s->slots[x].slot_id = x;
|
||||
}
|
||||
|
||||
QTAILQ_INIT(&s->hvf_sw_breakpoints);
|
||||
|
||||
hvf_state = s;
|
||||
memory_listener_register(&hvf_memory_listener, &address_space_memory);
|
||||
|
||||
return hvf_arch_init();
|
||||
}
|
||||
|
||||
static inline int hvf_gdbstub_sstep_flags(void)
|
||||
{
|
||||
return SSTEP_ENABLE | SSTEP_NOIRQ;
|
||||
}
|
||||
|
||||
static void hvf_accel_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelClass *ac = ACCEL_CLASS(oc);
|
||||
ac->name = "HVF";
|
||||
ac->init_machine = hvf_accel_init;
|
||||
ac->allowed = &hvf_allowed;
|
||||
ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
|
||||
}
|
||||
|
||||
static const TypeInfo hvf_accel_type = {
|
||||
.name = TYPE_HVF_ACCEL,
|
||||
.parent = TYPE_ACCEL,
|
||||
.class_init = hvf_accel_class_init,
|
||||
};
|
||||
|
||||
static void hvf_type_init(void)
|
||||
{
|
||||
type_register_static(&hvf_accel_type);
|
||||
}
|
||||
|
||||
type_init(hvf_type_init);
|
||||
|
||||
static void hvf_vcpu_destroy(CPUState *cpu)
|
||||
{
|
||||
hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
|
||||
@@ -155,10 +400,10 @@ static int hvf_init_vcpu(CPUState *cpu)
|
||||
r = hv_vcpu_create(&cpu->accel->fd,
|
||||
(hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
|
||||
#else
|
||||
r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT);
|
||||
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT);
|
||||
#endif
|
||||
cpu->vcpu_dirty = 1;
|
||||
assert_hvf_ok(r);
|
||||
cpu->vcpu_dirty = true;
|
||||
|
||||
cpu->accel->guest_debug_enabled = false;
|
||||
|
||||
@@ -179,10 +424,11 @@ static void *hvf_cpu_thread_fn(void *arg)
|
||||
|
||||
rcu_register_thread();
|
||||
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
|
||||
hvf_init_vcpu(cpu);
|
||||
@@ -203,7 +449,7 @@ static void *hvf_cpu_thread_fn(void *arg)
|
||||
|
||||
hvf_vcpu_destroy(cpu);
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
@@ -218,41 +464,17 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
|
||||
*/
|
||||
assert(hvf_enabled());
|
||||
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
}
|
||||
|
||||
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
|
||||
QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
|
||||
if (bp->pc == pc) {
|
||||
return bp;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int hvf_sw_breakpoints_active(CPUState *cpu)
|
||||
{
|
||||
return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
|
||||
}
|
||||
|
||||
static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg)
|
||||
{
|
||||
hvf_arch_update_guest_debug(cpu);
|
||||
}
|
||||
|
||||
int hvf_update_guest_debug(CPUState *cpu)
|
||||
{
|
||||
run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
static int hvf_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
int err;
|
||||
@@ -290,7 +512,7 @@ static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
static int hvf_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
int err;
|
||||
@@ -354,28 +576,12 @@ static void hvf_remove_all_breakpoints(CPUState *cpu)
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_get_vcpu_stats(CPUState *cpu, GString *buf)
|
||||
{
|
||||
uint64_t time_mach; /* units of mach_absolute_time() */
|
||||
|
||||
run_on_cpu(cpu, do_hvf_get_vcpu_exec_time, RUN_ON_CPU_HOST_PTR(&time_mach));
|
||||
|
||||
mach_timebase_info_data_t timebase;
|
||||
mach_timebase_info(&timebase);
|
||||
uint64_t time_ns = time_mach * timebase.numer / timebase.denom;
|
||||
|
||||
g_string_append_printf(buf, "HVF cumulative execution time: %llu.%.3llus\n",
|
||||
time_ns / 1000000000,
|
||||
(time_ns % 1000000000) / 1000000);
|
||||
}
|
||||
|
||||
static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
|
||||
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
|
||||
|
||||
ops->create_vcpu_thread = hvf_start_vcpu_thread;
|
||||
ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
|
||||
ops->handle_interrupt = generic_handle_interrupt;
|
||||
|
||||
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
|
||||
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
|
||||
@@ -387,10 +593,7 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
|
||||
ops->remove_all_breakpoints = hvf_remove_all_breakpoints;
|
||||
ops->update_guest_debug = hvf_update_guest_debug;
|
||||
ops->supports_guest_debug = hvf_arch_supports_guest_debug;
|
||||
|
||||
ops->get_vcpu_stats = hvf_get_vcpu_stats;
|
||||
};
|
||||
|
||||
static const TypeInfo hvf_accel_ops_type = {
|
||||
.name = ACCEL_OPS_NAME("hvf"),
|
||||
|
||||
@@ -398,10 +601,8 @@ static const TypeInfo hvf_accel_ops_type = {
|
||||
.class_init = hvf_accel_ops_class_init,
|
||||
.abstract = true,
|
||||
};
|
||||
|
||||
static void hvf_accel_ops_register_types(void)
|
||||
{
|
||||
type_register_static(&hvf_accel_ops_type);
|
||||
}
|
||||
|
||||
type_init(hvf_accel_ops_register_types);
|
||||
|
@@ -10,305 +10,66 @@
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "accel/accel-ops.h"
|
||||
#include "system/address-spaces.h"
|
||||
#include "system/memory.h"
|
||||
#include "system/hvf.h"
|
||||
#include "system/hvf_int.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "hw/boards.h"
|
||||
#include "trace.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/hvf_int.h"
|
||||
|
||||
bool hvf_allowed;
|
||||
|
||||
struct mac_slot {
|
||||
int present;
|
||||
uint64_t size;
|
||||
uint64_t gpa_start;
|
||||
uint64_t gva;
|
||||
};
|
||||
|
||||
struct mac_slot mac_slots[32];
|
||||
|
||||
const char *hvf_return_string(hv_return_t ret)
|
||||
{
|
||||
switch (ret) {
|
||||
case HV_SUCCESS: return "HV_SUCCESS";
|
||||
case HV_ERROR: return "HV_ERROR";
|
||||
case HV_BUSY: return "HV_BUSY";
|
||||
case HV_BAD_ARGUMENT: return "HV_BAD_ARGUMENT";
|
||||
case HV_NO_RESOURCES: return "HV_NO_RESOURCES";
|
||||
case HV_NO_DEVICE: return "HV_NO_DEVICE";
|
||||
case HV_UNSUPPORTED: return "HV_UNSUPPORTED";
|
||||
case HV_DENIED: return "HV_DENIED";
|
||||
default: return "[unknown hv_return value]";
|
||||
}
|
||||
}
|
||||
|
||||
void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
|
||||
const char *exp)
|
||||
void assert_hvf_ok(hv_return_t ret)
|
||||
{
|
||||
if (ret == HV_SUCCESS) {
|
||||
return;
|
||||
}
|
||||
|
||||
error_report("Error: %s = %s (0x%x, at %s:%u)",
|
||||
exp, hvf_return_string(ret), ret, file, line);
|
||||
|
||||
abort();
|
||||
}
|
||||
|
||||
static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
|
||||
{
|
||||
struct mac_slot *macslot;
|
||||
hv_return_t ret;
|
||||
|
||||
macslot = &mac_slots[slot->slot_id];
|
||||
|
||||
if (macslot->present) {
|
||||
if (macslot->size != slot->size) {
|
||||
macslot->present = 0;
|
||||
trace_hvf_vm_unmap(macslot->gpa_start, macslot->size);
|
||||
ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
|
||||
assert_hvf_ok(ret);
|
||||
}
|
||||
}
|
||||
|
||||
if (!slot->size) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
macslot->present = 1;
|
||||
macslot->gpa_start = slot->start;
|
||||
macslot->size = slot->size;
|
||||
trace_hvf_vm_map(slot->start, slot->size, slot->mem, flags,
|
||||
flags & HV_MEMORY_READ ? 'R' : '-',
|
||||
flags & HV_MEMORY_WRITE ? 'W' : '-',
|
||||
flags & HV_MEMORY_EXEC ? 'X' : '-');
|
||||
ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
|
||||
assert_hvf_ok(ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
|
||||
{
|
||||
hvf_slot *mem;
|
||||
MemoryRegion *area = section->mr;
|
||||
bool writable = !area->readonly && !area->rom_device;
|
||||
hv_memory_flags_t flags;
|
||||
uint64_t page_size = qemu_real_host_page_size();
|
||||
|
||||
if (!memory_region_is_ram(area)) {
|
||||
if (writable) {
|
||||
return;
|
||||
} else if (!memory_region_is_romd(area)) {
|
||||
/*
|
||||
* If the memory device is not in romd_mode, then we actually want
|
||||
* to remove the hvf memory slot so all accesses will trap.
|
||||
*/
|
||||
add = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
|
||||
!QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
|
||||
/* Not page aligned, so we can not map as RAM */
|
||||
add = false;
|
||||
}
|
||||
|
||||
mem = hvf_find_overlap_slot(
|
||||
section->offset_within_address_space,
|
||||
int128_get64(section->size));
|
||||
|
||||
if (mem && add) {
|
||||
if (mem->size == int128_get64(section->size) &&
|
||||
mem->start == section->offset_within_address_space &&
|
||||
mem->mem == (memory_region_get_ram_ptr(area) +
|
||||
section->offset_within_region)) {
|
||||
return; /* Same region was attempted to register, go away. */
|
||||
}
|
||||
}
|
||||
|
||||
/* Region needs to be reset. set the size to 0 and remap it. */
|
||||
if (mem) {
|
||||
mem->size = 0;
|
||||
if (do_hvf_set_memory(mem, 0)) {
|
||||
error_report("Failed to reset overlapping slot");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
if (!add) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (area->readonly ||
|
||||
(!memory_region_is_ram(area) && memory_region_is_romd(area))) {
|
||||
flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
|
||||
} else {
|
||||
flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
|
||||
}
|
||||
|
||||
/* Now make a new slot. */
|
||||
int x;
|
||||
|
||||
for (x = 0; x < hvf_state->num_slots; ++x) {
|
||||
mem = &hvf_state->slots[x];
|
||||
if (!mem->size) {
|
||||
switch (ret) {
|
||||
case HV_ERROR:
|
||||
error_report("Error: HV_ERROR");
|
||||
break;
|
||||
}
|
||||
case HV_BUSY:
|
||||
error_report("Error: HV_BUSY");
|
||||
break;
|
||||
case HV_BAD_ARGUMENT:
|
||||
error_report("Error: HV_BAD_ARGUMENT");
|
||||
break;
|
||||
case HV_NO_RESOURCES:
|
||||
error_report("Error: HV_NO_RESOURCES");
|
||||
break;
|
||||
case HV_NO_DEVICE:
|
||||
error_report("Error: HV_NO_DEVICE");
|
||||
break;
|
||||
case HV_UNSUPPORTED:
|
||||
error_report("Error: HV_UNSUPPORTED");
|
||||
break;
|
||||
#if defined(MAC_OS_VERSION_11_0) && \
|
||||
MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
|
||||
case HV_DENIED:
|
||||
error_report("Error: HV_DENIED");
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
error_report("Unknown Error");
|
||||
}
|
||||
|
||||
if (x == hvf_state->num_slots) {
|
||||
error_report("No free slots");
|
||||
abort();
|
||||
}
|
||||
|
||||
mem->size = int128_get64(section->size);
|
||||
mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
|
||||
mem->start = section->offset_within_address_space;
|
||||
mem->region = area;
|
||||
|
||||
if (do_hvf_set_memory(mem, flags)) {
|
||||
error_report("Error registering new memory slot");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
|
||||
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, target_ulong pc)
|
||||
{
|
||||
hvf_slot *slot;
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
|
||||
slot = hvf_find_overlap_slot(
|
||||
section->offset_within_address_space,
|
||||
int128_get64(section->size));
|
||||
|
||||
/* protect region against writes; begin tracking it */
|
||||
if (on) {
|
||||
slot->flags |= HVF_SLOT_LOG;
|
||||
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
|
||||
HV_MEMORY_READ | HV_MEMORY_EXEC);
|
||||
/* stop tracking region*/
|
||||
} else {
|
||||
slot->flags &= ~HVF_SLOT_LOG;
|
||||
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
|
||||
HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_log_start(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (old != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_log_stop(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (new != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 0);
|
||||
}
|
||||
|
||||
static void hvf_log_sync(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
/*
|
||||
* sync of dirty pages is handled elsewhere; just make sure we keep
|
||||
* tracking the region.
|
||||
*/
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_region_add(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, true);
|
||||
}
|
||||
|
||||
static void hvf_region_del(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, false);
|
||||
}
|
||||
|
||||
static MemoryListener hvf_memory_listener = {
|
||||
.name = "hvf",
|
||||
.priority = MEMORY_LISTENER_PRIORITY_ACCEL,
|
||||
.region_add = hvf_region_add,
|
||||
.region_del = hvf_region_del,
|
||||
.log_start = hvf_log_start,
|
||||
.log_stop = hvf_log_stop,
|
||||
.log_sync = hvf_log_sync,
|
||||
};
|
||||
|
||||
static int hvf_accel_init(AccelState *as, MachineState *ms)
|
||||
{
|
||||
int x;
|
||||
hv_return_t ret;
|
||||
HVFState *s = HVF_STATE(as);
|
||||
int pa_range = 36;
|
||||
MachineClass *mc = MACHINE_GET_CLASS(ms);
|
||||
|
||||
if (mc->hvf_get_physical_address_range) {
|
||||
pa_range = mc->hvf_get_physical_address_range(ms);
|
||||
if (pa_range < 0) {
|
||||
return -EINVAL;
|
||||
QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
|
||||
if (bp->pc == pc) {
|
||||
return bp;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hvf_arch_vm_create(ms, (uint32_t)pa_range);
|
||||
if (ret == HV_DENIED) {
|
||||
error_report("Could not access HVF. Is the executable signed"
|
||||
" with com.apple.security.hypervisor entitlement?");
|
||||
exit(1);
|
||||
}
|
||||
assert_hvf_ok(ret);
|
||||
|
||||
s->num_slots = ARRAY_SIZE(s->slots);
|
||||
for (x = 0; x < s->num_slots; ++x) {
|
||||
s->slots[x].size = 0;
|
||||
s->slots[x].slot_id = x;
|
||||
}
|
||||
|
||||
QTAILQ_INIT(&s->hvf_sw_breakpoints);
|
||||
|
||||
hvf_state = s;
|
||||
memory_listener_register(&hvf_memory_listener, &address_space_memory);
|
||||
|
||||
return hvf_arch_init();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int hvf_gdbstub_sstep_flags(AccelState *as)
|
||||
int hvf_sw_breakpoints_active(CPUState *cpu)
|
||||
{
|
||||
return SSTEP_ENABLE | SSTEP_NOIRQ;
|
||||
return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
|
||||
}
|
||||
|
||||
static void hvf_accel_class_init(ObjectClass *oc, const void *data)
|
||||
int hvf_update_guest_debug(CPUState *cpu)
|
||||
{
|
||||
AccelClass *ac = ACCEL_CLASS(oc);
|
||||
ac->name = "HVF";
|
||||
ac->init_machine = hvf_accel_init;
|
||||
ac->allowed = &hvf_allowed;
|
||||
ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
|
||||
hvf_arch_update_guest_debug(cpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const TypeInfo hvf_accel_type = {
|
||||
.name = TYPE_HVF_ACCEL,
|
||||
.parent = TYPE_ACCEL,
|
||||
.instance_size = sizeof(HVFState),
|
||||
.class_init = hvf_accel_class_init,
|
||||
};
|
||||
|
||||
static void hvf_type_init(void)
|
||||
{
|
||||
type_register_static(&hvf_accel_type);
|
||||
}
|
||||
|
||||
type_init(hvf_type_init);
|
||||
|
@@ -1,7 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# See docs/devel/tracing.rst for syntax documentation.

# hvf-accel-ops.c
hvf_vm_map(uint64_t paddr, uint64_t size, void *vaddr, uint8_t flags, const char r, const char w, const char e) "paddr:0x%016"PRIx64" size:0x%08"PRIx64" vaddr:%p flags:0x%02x/%c%c%c"
hvf_vm_unmap(uint64_t paddr, uint64_t size) "paddr:0x%016"PRIx64" size:0x%08"PRIx64
@@ -1,2 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "trace/trace-accel_hvf.h"
@@ -16,11 +16,10 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "accel/accel-cpu-ops.h"
|
||||
#include "system/kvm.h"
|
||||
#include "system/kvm_int.h"
|
||||
#include "system/runstate.h"
|
||||
#include "system/cpus.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sysemu/kvm_int.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "qapi/error.h"
|
||||
|
||||
@@ -34,9 +33,10 @@ static void *kvm_vcpu_thread_fn(void *arg)
|
||||
|
||||
rcu_register_thread();
|
||||
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
|
||||
r = kvm_init_vcpu(cpu, &error_fatal);
|
||||
@@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
|
||||
|
||||
kvm_destroy_vcpu(cpu);
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
@@ -67,6 +67,9 @@ static void kvm_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, kvm_vcpu_thread_fn,
|
||||
@@ -80,17 +83,17 @@ static bool kvm_vcpu_thread_is_idle(CPUState *cpu)
|
||||
|
||||
static bool kvm_cpus_are_resettable(void)
|
||||
{
|
||||
return !kvm_enabled() || !kvm_state->guest_state_protected;
|
||||
return !kvm_enabled() || kvm_cpu_check_are_resettable();
|
||||
}
|
||||
|
||||
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
|
||||
#ifdef KVM_CAP_SET_GUEST_DEBUG
|
||||
static int kvm_update_guest_debug_ops(CPUState *cpu)
|
||||
{
|
||||
return kvm_update_guest_debug(cpu, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data)
|
||||
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
|
||||
|
||||
@@ -101,9 +104,8 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data)
|
||||
ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
|
||||
ops->synchronize_state = kvm_cpu_synchronize_state;
|
||||
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
|
||||
ops->handle_interrupt = generic_handle_interrupt;
|
||||
|
||||
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
|
||||
#ifdef KVM_CAP_SET_GUEST_DEBUG
|
||||
ops->update_guest_debug = kvm_update_guest_debug_ops;
|
||||
ops->supports_guest_debug = kvm_supports_guest_debug;
|
||||
ops->insert_breakpoint = kvm_insert_breakpoint;
|
||||
|
1376 accel/kvm/kvm-all.c (file diff suppressed because it is too large)
@@ -10,6 +10,8 @@
#ifndef KVM_CPUS_H
#define KVM_CPUS_H

#include "sysemu/cpus.h"

int kvm_init_vcpu(CPUState *cpu, Error **errp);
int kvm_cpu_exec(CPUState *cpu);
void kvm_destroy_vcpu(CPUState *cpu);
@@ -20,4 +22,5 @@ bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);

#endif /* KVM_CPUS_H */
@@ -1,25 +1,21 @@
|
||||
# See docs/devel/tracing.rst for syntax documentation.
|
||||
|
||||
# kvm-all.c
|
||||
kvm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p"
|
||||
kvm_vm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p"
|
||||
kvm_vcpu_ioctl(int cpu_index, unsigned long type, void *arg) "cpu_index %d, type 0x%lx, arg %p"
|
||||
kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
|
||||
kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p"
|
||||
kvm_vcpu_ioctl(int cpu_index, int type, void *arg) "cpu_index %d, type 0x%x, arg %p"
|
||||
kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d"
|
||||
kvm_device_ioctl(int fd, unsigned long type, void *arg) "dev fd %d, type 0x%lx, arg %p"
|
||||
kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
|
||||
kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
|
||||
kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
|
||||
kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id, int kvm_fd) "index: %d, id: %lu, kvm fd: %d"
|
||||
kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_unpark_vcpu(unsigned long arch_cpu_id, const char *msg) "id: %lu %s"
|
||||
kvm_irqchip_commit_routes(void) ""
|
||||
kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
|
||||
kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
|
||||
kvm_irqchip_release_virq(int virq) "virq %d"
|
||||
kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
|
||||
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
|
||||
kvm_set_user_memory(uint16_t as, uint16_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint32_t fd, uint64_t fd_offset, int ret) "AddrSpace#%d Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " guest_memfd=%d" " guest_memfd_offset=0x%" PRIx64 " ret=%d"
|
||||
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
|
||||
kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
|
||||
kvm_resample_fd_notify(int gsi) "gsi %d"
|
||||
kvm_dirty_ring_full(int id) "vcpu %d"
|
||||
@@ -29,11 +25,4 @@ kvm_dirty_ring_reaper(const char *s) "%s"
|
||||
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
|
||||
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
|
||||
kvm_dirty_ring_flush(int finished) "%d"
|
||||
kvm_failed_get_vcpu_mmap_size(void) ""
|
||||
kvm_cpu_exec(void) ""
|
||||
kvm_interrupt_exit_request(void) ""
|
||||
kvm_io_window_exit(void) ""
|
||||
kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32
|
||||
kvm_convert_memory(uint64_t start, uint64_t size, const char *msg) "start 0x%" PRIx64 " size 0x%" PRIx64 " %s"
|
||||
kvm_memory_fault(uint64_t start, uint64_t size, uint64_t flags) "start 0x%" PRIx64 " size 0x%" PRIx64 " flags 0x%" PRIx64
|
||||
kvm_slots_grow(unsigned int old, unsigned int new) "%u -> %u"
|
||||
|
||||
|
@@ -1,6 +1,5 @@
common_ss.add(files('accel-common.c'))
specific_ss.add(files('accel-target.c'))
system_ss.add(files('accel-system.c', 'accel-blocker.c', 'accel-qmp.c'))
specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
system_ss.add(files('accel-softmmu.c'))
user_ss.add(files('accel-user.c'))

subdir('tcg')
@@ -18,33 +18,18 @@
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/accel.h"
#include "accel/accel-ops.h"
#include "accel/accel-cpu-ops.h"
#include "system/qtest.h"
#include "system/cpus.h"
#include "sysemu/qtest.h"
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
#include "accel/dummy-cpus.h"

static int64_t qtest_clock_counter;

static int64_t qtest_get_virtual_clock(void)
{
return qatomic_read_i64(&qtest_clock_counter);
}

static void qtest_set_virtual_clock(int64_t count)
{
qatomic_set_i64(&qtest_clock_counter, count);
}

static int qtest_init_accel(AccelState *as, MachineState *ms)
static int qtest_init_accel(MachineState *ms)
{
return 0;
}

static void qtest_accel_class_init(ObjectClass *oc, const void *data)
static void qtest_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "QTest";
@@ -61,14 +46,12 @@ static const TypeInfo qtest_accel_type = {
};
module_obj(TYPE_QTEST_ACCEL);

static void qtest_accel_ops_class_init(ObjectClass *oc, const void *data)
static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

ops->create_vcpu_thread = dummy_start_vcpu_thread;
ops->get_virtual_clock = qtest_get_virtual_clock;
ops->set_virtual_clock = qtest_set_virtual_clock;
ops->handle_interrupt = generic_handle_interrupt;
};

static const TypeInfo qtest_accel_ops_type = {
24 accel/stubs/hax-stub.c Normal file
@@ -0,0 +1,24 @@
/*
* QEMU HAXM support
*
* Copyright (c) 2015, Intel Corporation
*
* Copyright 2016 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* See the COPYING file in the top-level directory.
*
*/

#include "qemu/osdep.h"
#include "sysemu/hax.h"

bool hax_allowed;

int hax_sync_vcpus(void)
{
return 0;
}
@@ -1,12 +0,0 @@
/*
* HVF stubs for QEMU
*
* Copyright (c) Linaro
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "qemu/osdep.h"
#include "system/hvf.h"

bool hvf_allowed;
@@ -11,29 +11,42 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "system/kvm.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "hw/pci/msi.h"
|
||||
|
||||
KVMState *kvm_state;
|
||||
bool kvm_kernel_irqchip;
|
||||
bool kvm_async_interrupts_allowed;
|
||||
bool kvm_eventfds_allowed;
|
||||
bool kvm_irqfds_allowed;
|
||||
bool kvm_resamplefds_allowed;
|
||||
bool kvm_msi_via_irqfd_allowed;
|
||||
bool kvm_gsi_routing_allowed;
|
||||
bool kvm_gsi_direct_mapping;
|
||||
bool kvm_allowed;
|
||||
bool kvm_readonly_mem_allowed;
|
||||
bool kvm_ioeventfd_any_length_allowed;
|
||||
bool kvm_msi_use_devid;
|
||||
bool kvm_direct_msi_allowed;
|
||||
|
||||
void kvm_flush_coalesced_mmio_buffer(void)
|
||||
{
|
||||
}
|
||||
|
||||
void kvm_cpu_synchronize_state(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
bool kvm_has_sync_mmu(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_has_many_ioeventfds(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
|
||||
{
|
||||
return 1;
|
||||
@@ -79,6 +92,11 @@ void kvm_irqchip_change_notify(void)
|
||||
{
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
||||
EventNotifier *rn, int virq)
|
||||
{
|
||||
@@ -91,14 +109,14 @@ int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_max_memslots(void)
|
||||
bool kvm_has_free_slot(MachineState *ms)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_free_memslots(void)
|
||||
void kvm_init_cpu_signals(CPUState *cpu)
|
||||
{
|
||||
return 0;
|
||||
abort();
|
||||
}
|
||||
|
||||
bool kvm_arm_supports_user_irq(void)
|
||||
@@ -115,13 +133,3 @@ uint32_t kvm_dirty_ring_size(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool kvm_hwpoisoned_mem(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
@@ -1,9 +1,7 @@
system_stubs_ss = ss.source_set()
system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
system_stubs_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c'))
system_stubs_ss.add(when: 'CONFIG_NVMM', if_false: files('nvmm-stub.c'))
system_stubs_ss.add(when: 'CONFIG_WHPX', if_false: files('whpx-stub.c'))
sysemu_stubs_ss = ss.source_set()
sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))

specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss)
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: sysemu_stubs_ss)
@@ -1,12 +0,0 @@
/*
* NVMM stubs for QEMU
*
* Copyright (c) Linaro
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "qemu/osdep.h"
#include "system/nvmm.h"

bool nvmm_allowed;
@@ -11,7 +11,34 @@
*/

#include "qemu/osdep.h"
#include "exec/cpu-common.h"
#include "exec/tb-flush.h"
#include "exec/exec-all.h"

void tb_flush(CPUState *cpu)
{
}

void tlb_set_dirty(CPUState *cpu, vaddr vaddr)
{
}

void tcg_flush_jmp_cache(CPUState *cpu)
{
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
g_assert_not_reached();
}

void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
/* Handled by hardware accelerator. */
g_assert_not_reached();
}

G_NORETURN void cpu_loop_exit(CPUState *cpu)
{
@@ -1,12 +0,0 @@
/*
* WHPX stubs for QEMU
*
* Copyright (c) Linaro
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "qemu/osdep.h"
#include "system/whpx.h"

bool whpx_allowed;
@@ -6,7 +6,7 @@
 */

#include "qemu/osdep.h"
#include "system/xen.h"
#include "sysemu/xen.h"
#include "qapi/qapi-commands-migration.h"

bool xen_allowed;
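The stub hunks above (kvm, nvmm, whpx, xen) and the stubs meson.build change all follow the same compile-time stub pattern: when an accelerator is configured out, the build pulls in a small translation unit that defines the same symbols with "not supported" behaviour, so common code can call them without #ifdef guards. A minimal, self-contained sketch of that idiom, using hypothetical CONFIG_FOO/foo_* names that are not taken from the diff:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* foo.c would be compiled only when CONFIG_FOO is set; foo-stub.c otherwise. */
#ifdef CONFIG_FOO
bool foo_allowed = true;
int foo_create_resource(unsigned size) { (void)size; return 0; /* real work */ }
#else
/* foo-stub.c: keeps the link happy and reports "not available". */
bool foo_allowed;
int foo_create_resource(unsigned size) { (void)size; return -ENOSYS; }
#endif

int main(void)
{
    /* Callers need no #ifdef: the stub already answers for the missing case. */
    if (!foo_allowed) {
        printf("foo backend not built in: %d\n", foo_create_resource(16));
    }
    return 0;
}
```

In the meson.build hunk this selection appears to be expressed with `if_false: files('foo-stub.c')`, which adds the stub source only when the corresponding CONFIG_* switch is off.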
@@ -14,20 +14,9 @@
|
||||
*/
|
||||
|
||||
static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
|
||||
uint64_t read_value_low,
|
||||
uint64_t read_value_high,
|
||||
uint64_t write_value_low,
|
||||
uint64_t write_value_high,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
read_value_low, read_value_high,
|
||||
oi, QEMU_PLUGIN_MEM_R);
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
write_value_low, write_value_high,
|
||||
oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -53,14 +53,6 @@
|
||||
# error unsupported data size
|
||||
#endif
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
# define VALUE_LOW(val) int128_getlo(val)
|
||||
# define VALUE_HIGH(val) int128_gethi(val)
|
||||
#else
|
||||
# define VALUE_LOW(val) val
|
||||
# define VALUE_HIGH(val) 0
|
||||
#endif
|
||||
|
||||
#if DATA_SIZE >= 4
|
||||
# define ABI_TYPE DATA_TYPE
|
||||
#else
|
||||
@@ -77,12 +69,11 @@
|
||||
# define END _le
|
||||
#endif
|
||||
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr,
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
||||
ABI_TYPE cmpv, ABI_TYPE newv,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
@@ -91,48 +82,32 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr,
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
#endif
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(newv),
|
||||
VALUE_HIGH(newv),
|
||||
oi);
|
||||
atomic_trace_rmw_post(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if DATA_SIZE < 16
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val,
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
ret = qatomic_xchg__nocheck(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(val),
|
||||
VALUE_HIGH(val),
|
||||
oi);
|
||||
atomic_trace_rmw_post(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define GEN_ATOMIC_HELPER(X) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
DATA_TYPE *haddr, ret; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
|
||||
ret = qatomic_##X(haddr, val); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(ret), \
|
||||
VALUE_HIGH(ret), \
|
||||
VALUE_LOW(val), \
|
||||
VALUE_HIGH(val), \
|
||||
oi); \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
@@ -156,11 +131,11 @@ GEN_ATOMIC_HELPER(xor_fetch)
|
||||
* of CF_PARALLEL's value, we'll trace just a read and a write.
|
||||
*/
|
||||
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
|
||||
smp_mb(); \
|
||||
cmp = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
@@ -168,12 +143,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
|
||||
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
|
||||
} while (cmp != old); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(old), \
|
||||
VALUE_HIGH(old), \
|
||||
VALUE_LOW(xval), \
|
||||
VALUE_HIGH(xval), \
|
||||
oi); \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
return RET; \
|
||||
}
|
||||
|
||||
@@ -202,12 +172,11 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
|
||||
# define END _be
|
||||
#endif
|
||||
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr,
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
||||
ABI_TYPE cmpv, ABI_TYPE newv,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
@@ -216,48 +185,32 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr,
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
#endif
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(newv),
|
||||
VALUE_HIGH(newv),
|
||||
oi);
|
||||
atomic_trace_rmw_post(env, addr, oi);
|
||||
return BSWAP(ret);
|
||||
}
|
||||
|
||||
#if DATA_SIZE < 16
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val,
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
|
||||
ABI_TYPE ret;
|
||||
|
||||
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(val),
|
||||
VALUE_HIGH(val),
|
||||
oi);
|
||||
atomic_trace_rmw_post(env, addr, oi);
|
||||
return BSWAP(ret);
|
||||
}
|
||||
|
||||
#define GEN_ATOMIC_HELPER(X) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
DATA_TYPE *haddr, ret; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
|
||||
ret = qatomic_##X(haddr, BSWAP(val)); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(ret), \
|
||||
VALUE_HIGH(ret), \
|
||||
VALUE_LOW(val), \
|
||||
VALUE_HIGH(val), \
|
||||
oi); \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
return BSWAP(ret); \
|
||||
}
|
||||
|
||||
@@ -278,11 +231,11 @@ GEN_ATOMIC_HELPER(xor_fetch)
|
||||
* of CF_PARALLEL's value, we'll trace just a read and a write.
|
||||
*/
|
||||
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
|
||||
smp_mb(); \
|
||||
ldn = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
@@ -290,12 +243,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
|
||||
ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
|
||||
} while (ldo != ldn); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(old), \
|
||||
VALUE_HIGH(old), \
|
||||
VALUE_LOW(xval), \
|
||||
VALUE_HIGH(xval), \
|
||||
oi); \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
return RET; \
|
||||
}
|
||||
|
||||
@@ -329,5 +277,3 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
|
||||
#undef SUFFIX
|
||||
#undef DATA_SIZE
|
||||
#undef SHIFT
|
||||
#undef VALUE_LOW
|
||||
#undef VALUE_HIGH
|
||||
|
@@ -1,41 +0,0 @@
|
||||
/*
|
||||
* Internal memory barrier helpers for QEMU (target agnostic)
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* SPDX-License-Identifier: LGPL-2.1-or-later
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_TCG_BACKEND_LDST_H
|
||||
#define ACCEL_TCG_BACKEND_LDST_H
|
||||
|
||||
#include "tcg-target-mo.h"
|
||||
|
||||
/**
|
||||
* tcg_req_mo:
|
||||
* @guest_mo: Guest default memory order
|
||||
* @type: TCGBar
|
||||
*
|
||||
* Filter @type to the barrier that is required for the guest
|
||||
* memory ordering vs the host memory ordering. A non-zero
|
||||
* result indicates that some barrier is required.
|
||||
*/
|
||||
#define tcg_req_mo(guest_mo, type) \
|
||||
((type) & guest_mo & ~TCG_TARGET_DEFAULT_MO)
|
||||
|
||||
/**
|
||||
* cpu_req_mo:
|
||||
* @cpu: CPUState
|
||||
* @type: TCGBar
|
||||
*
|
||||
* If tcg_req_mo indicates a barrier for @type is required
|
||||
* for the guest memory model, issue a host memory barrier.
|
||||
*/
|
||||
#define cpu_req_mo(cpu, type) \
|
||||
do { \
|
||||
if (tcg_req_mo(cpu->cc->tcg_ops->guest_default_memory_order, type)) { \
|
||||
smp_mb(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif
|
@@ -18,45 +18,14 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "exec/log.h"
|
||||
#include "system/tcg.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "qemu/plugin.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal.h"
|
||||
|
||||
bool tcg_allowed;
|
||||
|
||||
bool tcg_cflags_has(CPUState *cpu, uint32_t flags)
|
||||
{
|
||||
return cpu->tcg_cflags & flags;
|
||||
}
|
||||
|
||||
void tcg_cflags_set(CPUState *cpu, uint32_t flags)
|
||||
{
|
||||
cpu->tcg_cflags |= flags;
|
||||
}
|
||||
|
||||
uint32_t curr_cflags(CPUState *cpu)
|
||||
{
|
||||
uint32_t cflags = cpu->tcg_cflags;
|
||||
|
||||
/*
|
||||
* Record gdb single-step. We should be exiting the TB by raising
|
||||
* EXCP_DEBUG, but to simplify other tests, disable chaining too.
|
||||
*
|
||||
* For singlestep and -d nochain, suppress goto_tb so that
|
||||
* we can log -d cpu,exec after every TB.
|
||||
*/
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
|
||||
} else if (qatomic_read(&one_insn_per_tb)) {
|
||||
cflags |= CF_NO_GOTO_TB | 1;
|
||||
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
|
||||
cflags |= CF_NO_GOTO_TB;
|
||||
}
|
||||
|
||||
return cflags;
|
||||
}
|
||||
|
||||
/* exit the current TB, but without causing any exception to be raised */
|
||||
void cpu_loop_exit_noexc(CPUState *cpu)
|
||||
{
|
||||
@@ -67,7 +36,7 @@ void cpu_loop_exit_noexc(CPUState *cpu)
|
||||
void cpu_loop_exit(CPUState *cpu)
|
||||
{
|
||||
/* Undo the setting in cpu_tb_exec. */
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
/* Undo any setting in generated code. */
|
||||
qemu_plugin_disable_mem_helpers(cpu);
|
||||
siglongjmp(cpu->jmp_env, 1);
|
||||
|
@@ -21,30 +21,28 @@
|
||||
#include "qemu/qemu-print.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/type-helpers.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "accel/tcg/cpu-ops.h"
|
||||
#include "accel/tcg/helper-retaddr.h"
|
||||
#include "hw/core/tcg-cpu-ops.h"
|
||||
#include "trace.h"
|
||||
#include "disas/disas.h"
|
||||
#include "exec/cpu-common.h"
|
||||
#include "exec/cpu-interrupt.h"
|
||||
#include "exec/page-protection.h"
|
||||
#include "exec/mmap-lock.h"
|
||||
#include "exec/translation-block.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "qemu/atomic.h"
|
||||
#include "qemu/rcu.h"
|
||||
#include "exec/log.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "exec/icount.h"
|
||||
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
|
||||
#include "hw/i386/apic.h"
|
||||
#endif
|
||||
#include "sysemu/cpus.h"
|
||||
#include "exec/cpu-all.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "exec/replay-core.h"
|
||||
#include "system/tcg.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "exec/helper-proto-common.h"
|
||||
#include "tb-jmp-cache.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "tb-internal.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal.h"
|
||||
|
||||
/* -icount align implementation. */
|
||||
|
||||
@@ -75,7 +73,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
|
||||
return;
|
||||
}
|
||||
|
||||
cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
|
||||
cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
|
||||
sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
|
||||
sc->last_cpu_icount = cpu_icount;
|
||||
|
||||
@@ -126,7 +124,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
|
||||
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
|
||||
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
|
||||
sc->last_cpu_icount
|
||||
= cpu->icount_extra + cpu->neg.icount_decr.u16.low;
|
||||
= cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
|
||||
if (sc->diff_clk < max_delay) {
|
||||
max_delay = sc->diff_clk;
|
||||
}
|
||||
@@ -148,10 +146,35 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
|
||||
}
|
||||
#endif /* CONFIG USER ONLY */
|
||||
|
||||
uint32_t curr_cflags(CPUState *cpu)
|
||||
{
|
||||
uint32_t cflags = cpu->tcg_cflags;
|
||||
|
||||
/*
|
||||
* Record gdb single-step. We should be exiting the TB by raising
|
||||
* EXCP_DEBUG, but to simplify other tests, disable chaining too.
|
||||
*
|
||||
* For singlestep and -d nochain, suppress goto_tb so that
|
||||
* we can log -d cpu,exec after every TB.
|
||||
*/
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
|
||||
} else if (qatomic_read(&one_insn_per_tb)) {
|
||||
cflags |= CF_NO_GOTO_TB | 1;
|
||||
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
|
||||
cflags |= CF_NO_GOTO_TB;
|
||||
}
|
||||
|
||||
return cflags;
|
||||
}
|
||||
|
||||
struct tb_desc {
|
||||
TCGTBCPUState s;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
CPUArchState *env;
|
||||
tb_page_addr_t page_addr0;
|
||||
uint32_t flags;
|
||||
uint32_t cflags;
|
||||
};
|
||||
|
||||
static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
@@ -159,11 +182,11 @@ static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
const TranslationBlock *tb = p;
|
||||
const struct tb_desc *desc = d;
|
||||
|
||||
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->s.pc) &&
|
||||
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
|
||||
tb_page_addr0(tb) == desc->page_addr0 &&
|
||||
tb->cs_base == desc->s.cs_base &&
|
||||
tb->flags == desc->s.flags &&
|
||||
tb_cflags(tb) == desc->s.cflags) {
|
||||
tb->cs_base == desc->cs_base &&
|
||||
tb->flags == desc->flags &&
|
||||
tb_cflags(tb) == desc->cflags) {
|
||||
/* check next page if needed */
|
||||
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
|
||||
if (tb_phys_page1 == -1) {
|
||||
@@ -181,7 +204,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
* is different for the new TB. Therefore any exception raised
|
||||
* here by the faulting lookup is not premature.
|
||||
*/
|
||||
virt_page1 = TARGET_PAGE_ALIGN(desc->s.pc);
|
||||
virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
|
||||
phys_page1 = get_page_addr_code(desc->env, virt_page1);
|
||||
if (tb_phys_page1 == phys_page1) {
|
||||
return true;
|
||||
@@ -191,73 +214,81 @@ static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
return false;
|
||||
}
|
||||
|
||||
static TranslationBlock *tb_htable_lookup(CPUState *cpu, TCGTBCPUState s)
|
||||
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
|
||||
uint64_t cs_base, uint32_t flags,
|
||||
uint32_t cflags)
|
||||
{
|
||||
tb_page_addr_t phys_pc;
|
||||
struct tb_desc desc;
|
||||
uint32_t h;
|
||||
|
||||
desc.s = s;
|
||||
desc.env = cpu_env(cpu);
|
||||
phys_pc = get_page_addr_code(desc.env, s.pc);
|
||||
desc.env = cpu->env_ptr;
|
||||
desc.cs_base = cs_base;
|
||||
desc.flags = flags;
|
||||
desc.cflags = cflags;
|
||||
desc.pc = pc;
|
||||
phys_pc = get_page_addr_code(desc.env, pc);
|
||||
if (phys_pc == -1) {
|
||||
return NULL;
|
||||
}
|
||||
desc.page_addr0 = phys_pc;
|
||||
h = tb_hash_func(phys_pc, (s.cflags & CF_PCREL ? 0 : s.pc),
|
||||
s.flags, s.cs_base, s.cflags);
|
||||
h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
|
||||
flags, cs_base, cflags);
|
||||
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_lookup:
|
||||
* @cpu: CPU that will execute the returned translation block
|
||||
* @pc: guest PC
|
||||
* @cs_base: arch-specific value associated with translation block
|
||||
* @flags: arch-specific translation block flags
|
||||
* @cflags: CF_* flags
|
||||
*
|
||||
* Look up a translation block inside the QHT using @pc, @cs_base, @flags and
|
||||
* @cflags. Uses @cpu's tb_jmp_cache. Might cause an exception, so have a
|
||||
* longjmp destination ready.
|
||||
*
|
||||
* Returns: an existing translation block or NULL.
|
||||
*/
|
||||
static inline TranslationBlock *tb_lookup(CPUState *cpu, TCGTBCPUState s)
|
||||
/* Might cause an exception, so have a longjmp destination ready */
|
||||
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
|
||||
uint64_t cs_base, uint32_t flags,
|
||||
uint32_t cflags)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
CPUJumpCache *jc;
|
||||
uint32_t hash;
|
||||
|
||||
/* we should never be trying to look up an INVALID tb */
|
||||
tcg_debug_assert(!(s.cflags & CF_INVALID));
|
||||
tcg_debug_assert(!(cflags & CF_INVALID));
|
||||
|
||||
hash = tb_jmp_cache_hash_func(s.pc);
|
||||
hash = tb_jmp_cache_hash_func(pc);
|
||||
jc = cpu->tb_jmp_cache;
|
||||
|
||||
tb = qatomic_read(&jc->array[hash].tb);
|
||||
if (likely(tb &&
|
||||
jc->array[hash].pc == s.pc &&
|
||||
tb->cs_base == s.cs_base &&
|
||||
tb->flags == s.flags &&
|
||||
tb_cflags(tb) == s.cflags)) {
|
||||
goto hit;
|
||||
}
|
||||
if (cflags & CF_PCREL) {
|
||||
/* Use acquire to ensure current load of pc from jc. */
|
||||
tb = qatomic_load_acquire(&jc->array[hash].tb);
|
||||
|
||||
tb = tb_htable_lookup(cpu, s);
|
||||
if (likely(tb &&
|
||||
jc->array[hash].pc == pc &&
|
||||
tb->cs_base == cs_base &&
|
||||
tb->flags == flags &&
|
||||
tb_cflags(tb) == cflags)) {
|
||||
return tb;
|
||||
}
|
||||
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
|
||||
if (tb == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
jc->array[hash].pc = pc;
|
||||
/* Ensure pc is written first. */
|
||||
qatomic_store_release(&jc->array[hash].tb, tb);
|
||||
} else {
|
||||
/* Use rcu_read to ensure current load of pc from *tb. */
|
||||
tb = qatomic_rcu_read(&jc->array[hash].tb);
|
||||
|
||||
jc->array[hash].pc = s.pc;
|
||||
if (likely(tb &&
|
||||
tb->pc == pc &&
|
||||
tb->cs_base == cs_base &&
|
||||
tb->flags == flags &&
|
||||
tb_cflags(tb) == cflags)) {
|
||||
return tb;
|
||||
}
|
||||
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
|
||||
if (tb == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
/* Use the pc value already stored in tb->pc. */
|
||||
qatomic_set(&jc->array[hash].tb, tb);
|
||||
}
|
||||
|
||||
hit:
|
||||
/*
|
||||
* As long as tb is not NULL, the contents are consistent. Therefore,
|
||||
* the virtual PC has to match for non-CF_PCREL translations.
|
||||
*/
|
||||
assert((tb_cflags(tb) & CF_PCREL) || tb->pc == s.pc);
|
||||
return tb;
|
||||
}
|
||||
|
||||
@@ -274,11 +305,14 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
|
||||
FILE *logfile = qemu_log_trylock();
|
||||
if (logfile) {
|
||||
int flags = CPU_DUMP_CCOP;
|
||||
int flags = 0;
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
|
||||
flags |= CPU_DUMP_FPU;
|
||||
}
|
||||
#if defined(TARGET_I386)
|
||||
flags |= CPU_DUMP_CCOP;
|
||||
#endif
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
|
||||
flags |= CPU_DUMP_VPU;
|
||||
}
|
||||
@@ -322,9 +356,9 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
g_assert_not_reached();
|
||||
#else
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
assert(tcg_ops->debug_check_breakpoint);
|
||||
match_bp = tcg_ops->debug_check_breakpoint(cpu);
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
assert(cc->tcg_ops->debug_check_breakpoint);
|
||||
match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -350,7 +384,7 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
|
||||
* breakpoints are removed.
|
||||
*/
|
||||
if (match_page) {
|
||||
*cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | CF_BP_PAGE | 1;
|
||||
*cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@@ -374,45 +408,29 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
|
||||
{
|
||||
CPUState *cpu = env_cpu(env);
|
||||
TranslationBlock *tb;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
uint32_t flags, cflags;
|
||||
|
||||
/*
|
||||
* By definition we've just finished a TB, so I/O is OK.
|
||||
* Avoid the possibility of calling cpu_io_recompile() if
|
||||
* a page table walk triggered by tb_lookup() calling
|
||||
* probe_access_internal() happens to touch an MMIO device.
|
||||
* The next TB, if we chain to it, will clear the flag again.
|
||||
*/
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
|
||||
|
||||
TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
|
||||
s.cflags = curr_cflags(cpu);
|
||||
|
||||
if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
|
||||
cflags = curr_cflags(cpu);
|
||||
if (check_for_breakpoints(cpu, pc, &cflags)) {
|
||||
cpu_loop_exit(cpu);
|
||||
}
|
||||
|
||||
tb = tb_lookup(cpu, s);
|
||||
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
|
||||
if (tb == NULL) {
|
||||
return tcg_code_gen_epilogue;
|
||||
}
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
|
||||
log_cpu_exec(s.pc, cpu, tb);
|
||||
log_cpu_exec(pc, cpu, tb);
|
||||
}
|
||||
|
||||
return tb->tc.ptr;
|
||||
}
|
||||
|
||||
/* Return the current PC from CPU, which may be cached in TB. */
|
||||
static vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
|
||||
{
|
||||
if (tb_cflags(tb) & CF_PCREL) {
|
||||
return cpu->cc->get_pc(cpu);
|
||||
} else {
|
||||
return tb->pc;
|
||||
}
|
||||
}
|
||||
|
||||
/* Execute a TB, and fix up the CPU state afterwards if necessary */
|
||||
/*
|
||||
* Disable CFI checks.
|
||||
@@ -426,6 +444,7 @@ static vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
|
||||
static inline TranslationBlock * QEMU_DISABLE_CFI
|
||||
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
{
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
uintptr_t ret;
|
||||
TranslationBlock *last_tb;
|
||||
const void *tb_ptr = itb->tc.ptr;
|
||||
@@ -435,8 +454,8 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
}
|
||||
|
||||
qemu_thread_jit_execute();
|
||||
ret = tcg_qemu_tb_exec(cpu_env(cpu), tb_ptr);
|
||||
cpu->neg.can_do_io = true;
|
||||
ret = tcg_qemu_tb_exec(env, tb_ptr);
|
||||
cpu->can_do_io = 1;
|
||||
qemu_plugin_disable_mem_helpers(cpu);
|
||||
/*
|
||||
* TODO: Delay swapping back to the read-write region of the TB
|
||||
@@ -456,11 +475,10 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
* counter hit zero); we must restore the guest PC to the address
|
||||
* of the start of the TB.
|
||||
*/
|
||||
CPUClass *cc = cpu->cc;
|
||||
const TCGCPUOps *tcg_ops = cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (tcg_ops->synchronize_from_tb) {
|
||||
tcg_ops->synchronize_from_tb(cpu, last_tb);
|
||||
if (cc->tcg_ops->synchronize_from_tb) {
|
||||
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
|
||||
} else {
|
||||
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
|
||||
assert(cc->set_pc);
|
||||
@@ -492,19 +510,19 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
|
||||
static void cpu_exec_enter(CPUState *cpu)
|
||||
{
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (tcg_ops->cpu_exec_enter) {
|
||||
tcg_ops->cpu_exec_enter(cpu);
|
||||
if (cc->tcg_ops->cpu_exec_enter) {
|
||||
cc->tcg_ops->cpu_exec_enter(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void cpu_exec_exit(CPUState *cpu)
|
||||
{
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (tcg_ops->cpu_exec_exit) {
|
||||
tcg_ops->cpu_exec_exit(cpu);
|
||||
if (cc->tcg_ops->cpu_exec_exit) {
|
||||
cc->tcg_ops->cpu_exec_exit(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -539,15 +557,19 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
|
||||
tcg_ctx->gen_tb = NULL;
|
||||
}
|
||||
#endif
|
||||
if (bql_locked()) {
|
||||
bql_unlock();
|
||||
if (qemu_mutex_iothread_locked()) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
assert_no_pages_locked();
|
||||
}
|
||||
|
||||
void cpu_exec_step_atomic(CPUState *cpu)
|
||||
{
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
TranslationBlock *tb;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
uint32_t flags, cflags;
|
||||
int tb_exit;
|
||||
|
||||
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
|
||||
@@ -556,13 +578,13 @@ void cpu_exec_step_atomic(CPUState *cpu)
|
||||
g_assert(!cpu->running);
|
||||
cpu->running = true;
|
||||
|
||||
TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
|
||||
s.cflags = curr_cflags(cpu);
|
||||
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
|
||||
|
||||
cflags = curr_cflags(cpu);
|
||||
/* Execute in a serial context. */
|
||||
s.cflags &= ~CF_PARALLEL;
|
||||
cflags &= ~CF_PARALLEL;
|
||||
/* After 1 insn, return and release the exclusive lock. */
|
||||
s.cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
|
||||
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
|
||||
/*
|
||||
* No need to check_for_breakpoints here.
|
||||
* We only arrive in cpu_exec_step_atomic after beginning execution
|
||||
@@ -570,16 +592,16 @@ void cpu_exec_step_atomic(CPUState *cpu)
|
||||
* Any breakpoint for this insn will have been recognized earlier.
|
||||
*/
|
||||
|
||||
tb = tb_lookup(cpu, s);
|
||||
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
|
||||
if (tb == NULL) {
|
||||
mmap_lock();
|
||||
tb = tb_gen_code(cpu, s);
|
||||
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
|
||||
mmap_unlock();
|
||||
}
|
||||
|
||||
cpu_exec_enter(cpu);
|
||||
/* execute the generated code */
|
||||
trace_exec_tb(tb, s.pc);
|
||||
trace_exec_tb(tb, pc);
|
||||
cpu_tb_exec(cpu, tb, &tb_exit);
|
||||
cpu_exec_exit(cpu);
|
||||
} else {
|
||||
@@ -647,16 +669,23 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
|
||||
|
||||
out_unlock_next:
|
||||
qemu_spin_unlock(&tb_next->jmp_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
static inline bool cpu_handle_halt(CPUState *cpu)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
if (cpu->halted) {
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
bool leave_halt = tcg_ops->cpu_exec_halt(cpu);
|
||||
|
||||
if (!leave_halt) {
|
||||
#if defined(TARGET_I386)
|
||||
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
|
||||
X86CPU *x86_cpu = X86_CPU(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
apic_poll_irq(x86_cpu->apic_state);
|
||||
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
#endif /* TARGET_I386 */
|
||||
if (!cpu_has_work(cpu)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -669,7 +698,7 @@ static inline bool cpu_handle_halt(CPUState *cpu)
|
||||
|
||||
static inline void cpu_handle_debug_exception(CPUState *cpu)
|
||||
{
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
CPUWatchpoint *wp;
|
||||
|
||||
if (!cpu->watchpoint_hit) {
|
||||
@@ -678,8 +707,8 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
|
||||
}
|
||||
}
|
||||
|
||||
if (tcg_ops->debug_excp_handler) {
|
||||
tcg_ops->debug_excp_handler(cpu);
|
||||
if (cc->tcg_ops->debug_excp_handler) {
|
||||
cc->tcg_ops->debug_excp_handler(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -688,15 +717,14 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
if (cpu->exception_index < 0) {
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
if (replay_has_exception()
|
||||
&& cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
|
||||
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
|
||||
/* Execute just one insn to trigger exception pending in the log */
|
||||
cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
|
||||
| CF_NOIRQ | 1;
|
||||
| CF_LAST_IO | CF_NOIRQ | 1;
|
||||
}
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
if (cpu->exception_index >= EXCP_INTERRUPT) {
|
||||
/* exit request from the cpu execution loop */
|
||||
*ret = cpu->exception_index;
|
||||
@@ -705,27 +733,24 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
}
|
||||
cpu->exception_index = -1;
|
||||
return true;
|
||||
}
|
||||
|
||||
} else {
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
/*
|
||||
* If user mode only, we simulate a fake exception which will be
|
||||
* handled outside the cpu execution loop.
|
||||
*/
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
if (tcg_ops->fake_user_interrupt) {
|
||||
tcg_ops->fake_user_interrupt(cpu);
|
||||
}
|
||||
/* if user mode only, we simulate a fake exception
|
||||
which will be handled outside the cpu execution
|
||||
loop */
|
||||
#if defined(TARGET_I386)
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
cc->tcg_ops->fake_user_interrupt(cpu);
|
||||
#endif /* TARGET_I386 */
|
||||
*ret = cpu->exception_index;
|
||||
cpu->exception_index = -1;
|
||||
return true;
|
||||
#else
|
||||
if (replay_exception()) {
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
|
||||
bql_lock();
|
||||
tcg_ops->do_interrupt(cpu);
|
||||
bql_unlock();
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
cc->tcg_ops->do_interrupt(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
cpu->exception_index = -1;
|
||||
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
@@ -744,20 +769,26 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool icount_exit_request(CPUState *cpu)
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
/*
|
||||
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
|
||||
* "real" interrupt event later. It does not need to be recorded for
|
||||
* replay purposes.
|
||||
*/
|
||||
static inline bool need_replay_interrupt(int interrupt_request)
|
||||
{
|
||||
if (!icount_enabled()) {
|
||||
return false;
|
||||
}
|
||||
if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
|
||||
return false;
|
||||
}
|
||||
return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
|
||||
#if defined(TARGET_I386)
|
||||
return !(interrupt_request & CPU_INTERRUPT_POLL);
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
TranslationBlock **last_tb)
|
||||
@@ -776,11 +807,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
* Ensure zeroing happens before reading cpu->exit_request or
|
||||
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
|
||||
*/
|
||||
qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
|
||||
qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
|
||||
|
||||
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
|
||||
int interrupt_request;
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
interrupt_request = cpu->interrupt_request;
|
||||
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
|
||||
/* Mask out external interrupts for this step. */
|
||||
@@ -789,7 +820,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
|
||||
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
|
||||
cpu->exception_index = EXCP_DEBUG;
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
@@ -800,27 +831,38 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
|
||||
cpu->halted = 1;
|
||||
cpu->exception_index = EXCP_HLT;
|
||||
bql_unlock();
|
||||
return true;
|
||||
} else {
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
|
||||
if (interrupt_request & CPU_INTERRUPT_RESET) {
|
||||
replay_interrupt();
|
||||
tcg_ops->cpu_exec_reset(cpu);
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
#if defined(TARGET_I386)
|
||||
else if (interrupt_request & CPU_INTERRUPT_INIT) {
|
||||
X86CPU *x86_cpu = X86_CPU(cpu);
|
||||
CPUArchState *env = &x86_cpu->env;
|
||||
replay_interrupt();
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
|
||||
do_cpu_init(x86_cpu);
|
||||
cpu->exception_index = EXCP_HALTED;
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
else if (interrupt_request & CPU_INTERRUPT_RESET) {
|
||||
replay_interrupt();
|
||||
cpu_reset(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
#endif /* !TARGET_I386 */
|
||||
/* The target hook has 3 exit conditions:
|
||||
False when the interrupt isn't processed,
|
||||
True when it is, and we should restart on a new TB,
|
||||
and via longjmp via cpu_loop_exit. */
|
||||
else {
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
/*
|
||||
* The target hook has 3 exit conditions:
|
||||
* False when the interrupt isn't processed,
|
||||
* True when it is, and we should restart on a new TB,
|
||||
* and via longjmp via cpu_loop_exit.
|
||||
*/
|
||||
if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
|
||||
if (!tcg_ops->need_replay_interrupt ||
|
||||
tcg_ops->need_replay_interrupt(interrupt_request)) {
|
||||
if (cc->tcg_ops->cpu_exec_interrupt &&
|
||||
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
|
||||
if (need_replay_interrupt(interrupt_request)) {
|
||||
replay_interrupt();
|
||||
}
|
||||
/*
|
||||
@@ -830,7 +872,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
*/
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
cpu->exception_index = EXCP_DEBUG;
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
cpu->exception_index = -1;
|
||||
@@ -849,11 +891,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
}
|
||||
|
||||
/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
|
||||
/* Finally, check if we need to exit to the main loop. */
|
||||
if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
|
||||
if (unlikely(qatomic_read(&cpu->exit_request))
|
||||
|| (icount_enabled()
|
||||
&& (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
|
||||
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
|
||||
qatomic_set(&cpu->exit_request, 0);
|
||||
if (cpu->exception_index == -1) {
|
||||
cpu->exception_index = EXCP_INTERRUPT;
|
||||
@@ -868,6 +913,8 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
vaddr pc, TranslationBlock **last_tb,
|
||||
int *tb_exit)
|
||||
{
|
||||
int32_t insns_left;
|
||||
|
||||
trace_exec_tb(tb, pc);
|
||||
tb = cpu_tb_exec(cpu, tb, tb_exit);
|
||||
if (*tb_exit != TB_EXIT_REQUESTED) {
|
||||
@@ -876,7 +923,8 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
}
|
||||
|
||||
*last_tb = NULL;
|
||||
if (cpu_loop_exit_requested(cpu)) {
|
||||
insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
|
||||
if (insns_left < 0) {
|
||||
/* Something asked us to stop executing chained TBs; just
|
||||
* continue round the main loop. Whatever requested the exit
|
||||
* will also have set something else (eg exit_request or
|
||||
@@ -893,8 +941,8 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
/* Ensure global icount has gone forward */
|
||||
icount_update(cpu);
|
||||
/* Refill decrementer and continue execution. */
|
||||
int32_t insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu->neg.icount_decr.u16.low = insns_left;
|
||||
insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
|
||||
cpu->icount_extra = cpu->icount_budget - insns_left;
|
||||
|
||||
/*
|
||||
@@ -924,8 +972,11 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
|
||||
|
||||
while (!cpu_handle_interrupt(cpu, &last_tb)) {
|
||||
TranslationBlock *tb;
|
||||
TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
|
||||
s.cflags = cpu->cflags_next_tb;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
uint32_t flags, cflags;
|
||||
|
||||
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
|
||||
|
||||
/*
|
||||
* When requested, use an exact setting for cflags for the next
|
||||
@@ -934,34 +985,41 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
|
||||
* have CF_INVALID set, -1 is a convenient invalid value that
|
||||
* does not require tcg headers for cpu_common_reset.
|
||||
*/
|
||||
if (s.cflags == -1) {
|
||||
s.cflags = curr_cflags(cpu);
|
||||
cflags = cpu->cflags_next_tb;
|
||||
if (cflags == -1) {
|
||||
cflags = curr_cflags(cpu);
|
||||
} else {
|
||||
cpu->cflags_next_tb = -1;
|
||||
}
|
||||
|
||||
if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
|
||||
if (check_for_breakpoints(cpu, pc, &cflags)) {
|
||||
break;
|
||||
}
|
||||
|
||||
tb = tb_lookup(cpu, s);
|
||||
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
|
||||
if (tb == NULL) {
|
||||
CPUJumpCache *jc;
|
||||
uint32_t h;
|
||||
|
||||
mmap_lock();
|
||||
tb = tb_gen_code(cpu, s);
|
||||
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
|
||||
mmap_unlock();
|
||||
|
||||
/*
|
||||
* We add the TB in the virtual pc hash table
|
||||
* for the fast lookup
|
||||
*/
|
||||
h = tb_jmp_cache_hash_func(s.pc);
|
||||
h = tb_jmp_cache_hash_func(pc);
|
||||
jc = cpu->tb_jmp_cache;
|
||||
jc->array[h].pc = s.pc;
|
||||
if (cflags & CF_PCREL) {
|
||||
jc->array[h].pc = pc;
|
||||
/* Ensure pc is written first. */
|
||||
qatomic_store_release(&jc->array[h].tb, tb);
|
||||
} else {
|
||||
/* Use the pc value already stored in tb->pc. */
|
||||
qatomic_set(&jc->array[h].tb, tb);
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
/*
|
||||
@@ -979,7 +1037,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
|
||||
tb_add_jump(last_tb, tb_exit, tb);
|
||||
}
|
||||
|
||||
cpu_loop_exec_tb(cpu, tb, s.pc, &last_tb, &tb_exit);
|
||||
cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
|
||||
|
||||
/* Try to align the host and virtual clocks
|
||||
if the guest is in advance */
|
||||
@@ -1011,7 +1069,7 @@ int cpu_exec(CPUState *cpu)
|
||||
return EXCP_HALTED;
|
||||
}
|
||||
|
||||
RCU_READ_LOCK_GUARD();
|
||||
rcu_read_lock();
|
||||
cpu_exec_enter(cpu);
|
||||
|
||||
/*
|
||||
@@ -1025,26 +1083,18 @@ int cpu_exec(CPUState *cpu)
|
||||
ret = cpu_exec_setjmp(cpu, &sc);
|
||||
|
||||
cpu_exec_exit(cpu);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
{
|
||||
static bool tcg_target_initialized;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (!tcg_target_initialized) {
|
||||
/* Check mandatory TCGCPUOps handlers */
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
assert(tcg_ops->cpu_exec_halt);
|
||||
assert(tcg_ops->cpu_exec_interrupt);
|
||||
assert(tcg_ops->cpu_exec_reset);
|
||||
assert(tcg_ops->pointer_wrap);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
assert(tcg_ops->translate_code);
|
||||
assert(tcg_ops->get_tb_cpu_state);
|
||||
assert(tcg_ops->mmu_index);
|
||||
tcg_ops->initialize();
|
||||
cc->tcg_ops->initialize();
|
||||
tcg_target_initialized = true;
|
||||
}
|
||||
|
||||
@@ -1054,8 +1104,6 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
tcg_iommu_init_notifier_list(cpu);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
/* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* undo the initializations in reverse order */
|
||||
|
accel/tcg/cputlb.c (1649 lines changed; file diff suppressed because it is too large)
accel/tcg/debuginfo.c (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
/*
|
||||
* Debug information support.
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/lockable.h"
|
||||
|
||||
#include <elfutils/libdwfl.h>
|
||||
|
||||
#include "debuginfo.h"
|
||||
|
||||
static QemuMutex lock;
|
||||
static Dwfl *dwfl;
|
||||
static const Dwfl_Callbacks dwfl_callbacks = {
|
||||
.find_elf = NULL,
|
||||
.find_debuginfo = dwfl_standard_find_debuginfo,
|
||||
.section_address = NULL,
|
||||
.debuginfo_path = NULL,
|
||||
};
|
||||
|
||||
__attribute__((constructor))
|
||||
static void debuginfo_init(void)
|
||||
{
|
||||
qemu_mutex_init(&lock);
|
||||
}
|
||||
|
||||
void debuginfo_report_elf(const char *name, int fd, uint64_t bias)
|
||||
{
|
||||
QEMU_LOCK_GUARD(&lock);
|
||||
|
||||
if (dwfl) {
|
||||
dwfl_report_begin_add(dwfl);
|
||||
} else {
|
||||
dwfl = dwfl_begin(&dwfl_callbacks);
|
||||
}
|
||||
|
||||
if (dwfl) {
|
||||
dwfl_report_elf(dwfl, name, name, fd, bias, true);
|
||||
dwfl_report_end(dwfl, NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void debuginfo_lock(void)
|
||||
{
|
||||
qemu_mutex_lock(&lock);
|
||||
}
|
||||
|
||||
void debuginfo_query(struct debuginfo_query *q, size_t n)
|
||||
{
|
||||
const char *symbol, *file;
|
||||
Dwfl_Module *dwfl_module;
|
||||
Dwfl_Line *dwfl_line;
|
||||
GElf_Off dwfl_offset;
|
||||
GElf_Sym dwfl_sym;
|
||||
size_t i;
|
||||
int line;
|
||||
|
||||
if (!dwfl) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
dwfl_module = dwfl_addrmodule(dwfl, q[i].address);
|
||||
if (!dwfl_module) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (q[i].flags & DEBUGINFO_SYMBOL) {
|
||||
symbol = dwfl_module_addrinfo(dwfl_module, q[i].address,
|
||||
&dwfl_offset, &dwfl_sym,
|
||||
NULL, NULL, NULL);
|
||||
if (symbol) {
|
||||
q[i].symbol = symbol;
|
||||
q[i].offset = dwfl_offset;
|
||||
}
|
||||
}
|
||||
|
||||
if (q[i].flags & DEBUGINFO_LINE) {
|
||||
dwfl_line = dwfl_module_getsrc(dwfl_module, q[i].address);
|
||||
if (dwfl_line) {
|
||||
file = dwfl_lineinfo(dwfl_line, NULL, &line, 0, NULL, NULL);
|
||||
if (file) {
|
||||
q[i].file = file;
|
||||
q[i].line = line;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void debuginfo_unlock(void)
|
||||
{
|
||||
qemu_mutex_unlock(&lock);
|
||||
}
|
accel/tcg/debuginfo.h (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Debug information support.
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_TCG_DEBUGINFO_H
|
||||
#define ACCEL_TCG_DEBUGINFO_H
|
||||
|
||||
#include "qemu/bitops.h"
|
||||
|
||||
/*
|
||||
* Debuginfo describing a certain address.
|
||||
*/
|
||||
struct debuginfo_query {
|
||||
uint64_t address; /* Input: address. */
|
||||
int flags; /* Input: debuginfo subset. */
|
||||
const char *symbol; /* Symbol that the address is part of. */
|
||||
uint64_t offset; /* Offset from the symbol. */
|
||||
const char *file; /* Source file associated with the address. */
|
||||
int line; /* Line number in the source file. */
|
||||
};
|
||||
|
||||
/*
|
||||
* Debuginfo subsets.
|
||||
*/
|
||||
#define DEBUGINFO_SYMBOL BIT(1)
|
||||
#define DEBUGINFO_LINE BIT(2)
|
||||
|
||||
#if defined(CONFIG_TCG) && defined(CONFIG_LIBDW)
|
||||
/*
|
||||
* Load debuginfo for the specified guest ELF image.
|
||||
* Return true on success, false on failure.
|
||||
*/
|
||||
void debuginfo_report_elf(const char *name, int fd, uint64_t bias);
|
||||
|
||||
/*
|
||||
* Take the debuginfo lock.
|
||||
*/
|
||||
void debuginfo_lock(void);
|
||||
|
||||
/*
|
||||
* Fill each on N Qs with the debuginfo about Q->ADDRESS as specified by
|
||||
* Q->FLAGS:
|
||||
*
|
||||
* - DEBUGINFO_SYMBOL: update Q->SYMBOL and Q->OFFSET. If symbol debuginfo is
|
||||
* missing, then leave them as is.
|
||||
* - DEBUINFO_LINE: update Q->FILE and Q->LINE. If line debuginfo is missing,
|
||||
* then leave them as is.
|
||||
*
|
||||
* This function must be called under the debuginfo lock. The results can be
|
||||
* accessed only until the debuginfo lock is released.
|
||||
*/
|
||||
void debuginfo_query(struct debuginfo_query *q, size_t n);
|
||||
|
||||
/*
|
||||
* Release the debuginfo lock.
|
||||
*/
|
||||
void debuginfo_unlock(void);
|
||||
#else
|
||||
static inline void debuginfo_report_elf(const char *image_name, int image_fd,
|
||||
uint64_t load_bias)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void debuginfo_lock(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void debuginfo_query(struct debuginfo_query *q, size_t n)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void debuginfo_unlock(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
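For context on the new header above: a short, hypothetical caller of the debuginfo query API it declares (a sketch only, not part of the diff). It resolves one guest address to a symbol and source line, and keeps the result access inside debuginfo_lock()/debuginfo_unlock() because, per the header comment, the returned strings are only valid while the lock is held:

```c
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#include "debuginfo.h"

static void print_guest_location(uint64_t guest_addr)
{
    struct debuginfo_query q;

    memset(&q, 0, sizeof(q));
    q.address = guest_addr;
    q.flags = DEBUGINFO_SYMBOL | DEBUGINFO_LINE;

    debuginfo_lock();
    debuginfo_query(&q, 1);              /* fills symbol/offset/file/line if known */
    if (q.symbol) {
        printf("%s+0x%" PRIx64, q.symbol, q.offset);
    } else {
        printf("0x%" PRIx64, q.address);
    }
    if (q.file) {
        printf(" at %s:%d", q.file, q.line);
    }
    printf("\n");
    debuginfo_unlock();
}
```

With the non-LIBDW build, the inline no-op stubs at the bottom of the header leave the query untouched, so the caller simply falls back to printing the raw address.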
@@ -1,503 +0,0 @@
|
||||
/*
|
||||
* QEMU System Emulator
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "migration/vmstate.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "system/cpus.h"
|
||||
#include "system/qtest.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/option.h"
|
||||
#include "qemu/seqlock.h"
|
||||
#include "system/replay.h"
|
||||
#include "system/runstate.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "exec/icount.h"
|
||||
#include "system/cpu-timers-internal.h"
|
||||
|
||||
/*
|
||||
* ICOUNT: Instruction Counter
|
||||
*
|
||||
* this module is split off from cpu-timers because the icount part
|
||||
* is TCG-specific, and does not need to be built for other accels.
|
||||
*/
|
||||
static bool icount_sleep = true;
|
||||
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
|
||||
#define MAX_ICOUNT_SHIFT 10
|
||||
|
||||
bool icount_align_option;
|
||||
|
||||
/* Do not count executed instructions */
|
||||
ICountMode use_icount = ICOUNT_DISABLED;
|
||||
|
||||
static void icount_enable_precise(void)
|
||||
{
|
||||
/* Fixed conversion of insn to ns via "shift" option */
|
||||
use_icount = ICOUNT_PRECISE;
|
||||
}
|
||||
|
||||
static void icount_enable_adaptive(void)
|
||||
{
|
||||
/* Runtime adaptive algorithm to compute shift */
|
||||
use_icount = ICOUNT_ADAPTATIVE;
|
||||
}
|
||||
|
||||
/*
|
||||
* The current number of executed instructions is based on what we
|
||||
* originally budgeted minus the current state of the decrementing
|
||||
* icount counters in extra/u16.low.
|
||||
*/
|
||||
static int64_t icount_get_executed(CPUState *cpu)
|
||||
{
|
||||
return (cpu->icount_budget -
|
||||
(cpu->neg.icount_decr.u16.low + cpu->icount_extra));
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the global shared timer_state.qemu_icount to take into
|
||||
* account executed instructions. This is done by the TCG vCPU
|
||||
* thread so the main-loop can see time has moved forward.
|
||||
*/
|
||||
static void icount_update_locked(CPUState *cpu)
|
||||
{
|
||||
int64_t executed = icount_get_executed(cpu);
|
||||
cpu->icount_budget -= executed;
|
||||
|
||||
qatomic_set_i64(&timers_state.qemu_icount,
|
||||
timers_state.qemu_icount + executed);
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the global shared timer_state.qemu_icount to take into
|
||||
* account executed instructions. This is done by the TCG vCPU
|
||||
* thread so the main-loop can see time has moved forward.
|
||||
*/
|
||||
void icount_update(CPUState *cpu)
|
||||
{
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
icount_update_locked(cpu);
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
}
|
||||
|
||||
static int64_t icount_get_raw_locked(void)
|
||||
{
|
||||
CPUState *cpu = current_cpu;
|
||||
|
||||
if (cpu && cpu->running) {
|
||||
if (!cpu->neg.can_do_io) {
|
||||
error_report("Bad icount read");
|
||||
exit(1);
|
||||
}
|
||||
/* Take into account what has run */
|
||||
icount_update_locked(cpu);
|
||||
}
|
||||
/* The read is protected by the seqlock, but needs atomic64 to avoid UB */
|
||||
return qatomic_read_i64(&timers_state.qemu_icount);
|
||||
}
|
||||
|
||||
static int64_t icount_get_locked(void)
|
||||
{
|
||||
int64_t icount = icount_get_raw_locked();
|
||||
return qatomic_read_i64(&timers_state.qemu_icount_bias) +
|
||||
icount_to_ns(icount);
|
||||
}
|
||||
|
||||
int64_t icount_get_raw(void)
|
||||
{
|
||||
int64_t icount;
|
||||
unsigned start;
|
||||
|
||||
do {
|
||||
start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||
icount = icount_get_raw_locked();
|
||||
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
|
||||
|
||||
return icount;
|
||||
}
|
||||
|
||||
/* Return the virtual CPU time, based on the instruction counter. */
|
||||
int64_t icount_get(void)
|
||||
{
|
||||
int64_t icount;
|
||||
unsigned start;
|
||||
|
||||
do {
|
||||
start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||
icount = icount_get_locked();
|
||||
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
|
||||
|
||||
return icount;
|
||||
}
|
||||
|
||||
int64_t icount_to_ns(int64_t icount)
|
||||
{
|
||||
return icount << qatomic_read(&timers_state.icount_time_shift);
|
||||
}
|
||||
|
||||
/*
|
||||
* Correlation between real and virtual time is always going to be
|
||||
* fairly approximate, so ignore small variation.
|
||||
* When the guest is idle real and virtual time will be aligned in
|
||||
* the IO wait loop.
|
||||
*/
|
||||
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
|
||||
|
||||
static void icount_adjust(void)
|
||||
{
|
||||
int64_t cur_time;
|
||||
int64_t cur_icount;
|
||||
int64_t delta;
|
||||
|
||||
/* If the VM is not running, then do nothing. */
|
||||
if (!runstate_is_running()) {
|
||||
return;
|
||||
}
|
||||
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
cur_time = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
|
||||
cpu_get_clock_locked());
|
||||
cur_icount = icount_get_locked();
|
||||
|
||||
delta = cur_icount - cur_time;
|
||||
/* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
|
||||
if (delta > 0
|
||||
&& timers_state.last_delta + ICOUNT_WOBBLE < delta * 2
|
||||
&& timers_state.icount_time_shift > 0) {
|
||||
/* The guest is getting too far ahead. Slow time down. */
|
||||
qatomic_set(&timers_state.icount_time_shift,
|
||||
timers_state.icount_time_shift - 1);
|
||||
}
|
||||
if (delta < 0
|
||||
&& timers_state.last_delta - ICOUNT_WOBBLE > delta * 2
|
||||
&& timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
|
||||
/* The guest is getting too far behind. Speed time up. */
|
||||
qatomic_set(&timers_state.icount_time_shift,
|
||||
timers_state.icount_time_shift + 1);
|
||||
}
|
||||
timers_state.last_delta = delta;
|
||||
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||
cur_icount - (timers_state.qemu_icount
|
||||
<< timers_state.icount_time_shift));
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
}
|
||||
|
||||
static void icount_adjust_rt(void *opaque)
|
||||
{
|
||||
timer_mod(timers_state.icount_rt_timer,
|
||||
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
|
||||
icount_adjust();
|
||||
}
|
||||
|
||||
static void icount_adjust_vm(void *opaque)
|
||||
{
|
||||
timer_mod(timers_state.icount_vm_timer,
|
||||
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||
NANOSECONDS_PER_SECOND / 10);
|
||||
icount_adjust();
|
||||
}
|
||||
|
||||
int64_t icount_round(int64_t count)
|
||||
{
|
||||
int shift = qatomic_read(&timers_state.icount_time_shift);
|
||||
return (count + (1 << shift) - 1) >> shift;
|
||||
}
|
||||
|
||||
static void icount_warp_rt(void)
|
||||
{
|
||||
unsigned seq;
|
||||
int64_t warp_start;
|
||||
|
||||
/*
|
||||
* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
|
||||
* changes from -1 to another value, so the race here is okay.
|
||||
*/
|
||||
do {
|
||||
seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||
warp_start = timers_state.vm_clock_warp_start;
|
||||
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
|
||||
|
||||
if (warp_start == -1) {
|
||||
return;
|
||||
}
|
||||
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
if (runstate_is_running()) {
|
||||
int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
|
||||
cpu_get_clock_locked());
|
||||
int64_t warp_delta;
|
||||
|
||||
warp_delta = clock - timers_state.vm_clock_warp_start;
|
||||
if (icount_enabled() == ICOUNT_ADAPTATIVE) {
|
||||
/*
|
||||
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far
|
||||
* ahead of real time (it might already be ahead so careful not
|
||||
* to go backwards).
|
||||
*/
|
||||
int64_t cur_icount = icount_get_locked();
|
||||
int64_t delta = clock - cur_icount;
|
||||
|
||||
if (delta < 0) {
|
||||
delta = 0;
|
||||
}
|
||||
warp_delta = MIN(warp_delta, delta);
|
||||
}
|
||||
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||
timers_state.qemu_icount_bias + warp_delta);
|
||||
}
|
||||
timers_state.vm_clock_warp_start = -1;
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
|
||||
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
}
|
||||
|
||||
static void icount_timer_cb(void *opaque)
|
||||
{
|
||||
/*
|
||||
* No need for a checkpoint because the timer already synchronizes
|
||||
* with CHECKPOINT_CLOCK_VIRTUAL_RT.
|
||||
*/
|
||||
icount_warp_rt();
|
||||
}
|
||||
|
||||
void icount_start_warp_timer(void)
|
||||
{
|
||||
int64_t clock;
|
||||
int64_t deadline;
|
||||
|
||||
assert(icount_enabled());
|
||||
|
||||
/*
|
||||
* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
|
||||
* do not fire, so computing the deadline does not make sense.
|
||||
*/
|
||||
if (!runstate_is_running()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (replay_mode != REPLAY_MODE_PLAY) {
|
||||
if (!all_cpu_threads_idle()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (qtest_enabled()) {
|
||||
/* When testing, qtest commands advance icount. */
|
||||
return;
|
||||
}
|
||||
|
||||
replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
|
||||
} else {
|
||||
/* warp clock deterministically in record/replay mode */
|
||||
if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
|
||||
/*
|
||||
* vCPU is sleeping and warp can't be started.
|
||||
* It is probably a race condition: notification sent
|
||||
* to vCPU was processed in advance and vCPU went to sleep.
|
||||
* Therefore we have to wake it up for doing something.
|
||||
*/
|
||||
if (replay_has_event()) {
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* We want to use the earliest deadline from ALL vm_clocks */
|
||||
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
|
||||
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
|
||||
~QEMU_TIMER_ATTR_EXTERNAL);
|
||||
if (deadline < 0) {
|
||||
if (!icount_sleep) {
|
||||
warn_report_once("icount sleep disabled and no active timers");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (deadline > 0) {
|
||||
/*
|
||||
* Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
|
||||
* sleep. Otherwise, the CPU might be waiting for a future timer
|
||||
* interrupt to wake it up, but the interrupt never comes because
|
||||
* the vCPU isn't running any insns and thus doesn't advance the
|
||||
* QEMU_CLOCK_VIRTUAL.
|
||||
*/
|
||||
if (!icount_sleep) {
|
||||
/*
|
||||
* We never let VCPUs sleep in no sleep icount mode.
|
||||
* If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
|
||||
* to the next QEMU_CLOCK_VIRTUAL event and notify it.
|
||||
* It is useful when we want a deterministic execution time,
|
||||
* isolated from host latencies.
|
||||
*/
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||
timers_state.qemu_icount_bias + deadline);
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
} else {
|
||||
/*
|
||||
* We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
|
||||
* "real" time, (related to the time left until the next event) has
|
||||
* passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
|
||||
* This avoids that the warps are visible externally; for example,
|
||||
* you will not be sending network packets continuously instead of
|
||||
* every 100ms.
|
||||
*/
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
if (timers_state.vm_clock_warp_start == -1
|
||||
|| timers_state.vm_clock_warp_start > clock) {
|
||||
timers_state.vm_clock_warp_start = clock;
|
||||
}
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
timer_mod_anticipate(timers_state.icount_warp_timer,
|
||||
clock + deadline);
|
||||
}
|
||||
} else if (deadline == 0) {
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
}
|
||||
|
||||
void icount_account_warp_timer(void)
|
||||
{
|
||||
if (!icount_sleep) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
|
||||
* do not fire, so computing the deadline does not make sense.
|
||||
*/
|
||||
if (!runstate_is_running()) {
|
||||
return;
|
||||
}
|
||||
|
||||
replay_async_events();
|
||||
|
||||
/* warp clock deterministically in record/replay mode */
|
||||
if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
|
||||
return;
|
||||
}
|
||||
|
||||
timer_del(timers_state.icount_warp_timer);
|
||||
icount_warp_rt();
|
||||
}
|
||||
|
||||
bool icount_configure(QemuOpts *opts, Error **errp)
|
||||
{
|
||||
const char *option = qemu_opt_get(opts, "shift");
|
||||
bool sleep = qemu_opt_get_bool(opts, "sleep", true);
|
||||
bool align = qemu_opt_get_bool(opts, "align", false);
|
||||
long time_shift = -1;
|
||||
|
||||
if (!option) {
|
||||
if (qemu_opt_get(opts, "align") != NULL) {
|
||||
error_setg(errp, "Please specify shift option when using align");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
if (align && !sleep) {
|
||||
error_setg(errp, "align=on and sleep=off are incompatible");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (strcmp(option, "auto") != 0) {
|
||||
if (qemu_strtol(option, NULL, 0, &time_shift) < 0
|
||||
|| time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
|
||||
error_setg(errp, "icount: Invalid shift value");
|
||||
return false;
|
||||
}
|
||||
} else if (icount_align_option) {
|
||||
error_setg(errp, "shift=auto and align=on are incompatible");
|
||||
return false;
|
||||
} else if (!icount_sleep) {
|
||||
error_setg(errp, "shift=auto and sleep=off are incompatible");
|
||||
return false;
|
||||
}
|
||||
|
||||
icount_sleep = sleep;
|
||||
if (icount_sleep) {
|
||||
timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
|
||||
icount_timer_cb, NULL);
|
||||
}
|
||||
|
||||
icount_align_option = align;
|
||||
|
||||
if (time_shift >= 0) {
|
||||
timers_state.icount_time_shift = time_shift;
|
||||
icount_enable_precise();
|
||||
return true;
|
||||
}
|
||||
|
||||
icount_enable_adaptive();
|
||||
|
||||
/*
|
||||
* 125MIPS seems a reasonable initial guess at the guest speed.
|
||||
* It will be corrected fairly quickly anyway.
|
||||
*/
|
||||
timers_state.icount_time_shift = 3;
|
||||
|
||||
/*
|
||||
* Have both realtime and virtual time triggers for speed adjustment.
|
||||
* The realtime trigger catches emulated time passing too slowly,
|
||||
* the virtual time trigger catches emulated time passing too fast.
|
||||
* Realtime triggers occur even when idle, so use them less frequently
|
||||
* than VM triggers.
|
||||
*/
|
||||
timers_state.vm_clock_warp_start = -1;
|
||||
timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
|
||||
icount_adjust_rt, NULL);
|
||||
timer_mod(timers_state.icount_rt_timer,
|
||||
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
|
||||
timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
|
||||
icount_adjust_vm, NULL);
|
||||
timer_mod(timers_state.icount_vm_timer,
|
||||
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||
NANOSECONDS_PER_SECOND / 10);
|
||||
return true;
|
||||
}
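
/*
 * Worked example of the shift/speed relation configured above
 * (illustration only): with icount_time_shift = 3 each guest instruction
 * is charged 2^3 = 8 ns of virtual time, i.e. 10^9 / 8 = 125,000,000
 * instructions per virtual second -- the "125MIPS" initial guess. A
 * user-supplied fixed shift works the same way; e.g. "-icount shift=10"
 * charges 1024 ns per instruction, roughly 1 MIPS.
 */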

void icount_notify_exit(void)
{
    assert(icount_enabled());

    if (current_cpu) {
        qemu_cpu_kick(current_cpu);
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
@@ -1,144 +0,0 @@
|
||||
/*
|
||||
* Internal execution defines for qemu (target agnostic)
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* SPDX-License-Identifier: LGPL-2.1-or-later
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_TCG_INTERNAL_COMMON_H
|
||||
#define ACCEL_TCG_INTERNAL_COMMON_H
|
||||
|
||||
#include "exec/cpu-common.h"
|
||||
#include "exec/translation-block.h"
|
||||
#include "exec/mmap-lock.h"
|
||||
#include "accel/tcg/tb-cpu-state.h"
|
||||
|
||||
extern int64_t max_delay;
|
||||
extern int64_t max_advance;
|
||||
|
||||
extern bool one_insn_per_tb;
|
||||
|
||||
extern bool icount_align_option;
|
||||
|
||||
/*
|
||||
* Return true if CS is not running in parallel with other cpus, either
|
||||
* because there are no other cpus or we are within an exclusive context.
|
||||
*/
|
||||
static inline bool cpu_in_serial_context(CPUState *cs)
|
||||
{
|
||||
return !tcg_cflags_has(cs, CF_PARALLEL) || cpu_in_exclusive_context(cs);
|
||||
}
|
||||
|
||||
/**
|
||||
* cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
|
||||
* @cs: CPUState pointer
|
||||
*
|
||||
* The memory callbacks are installed if a plugin has instrumented an
|
||||
* instruction for memory. This can be useful to know if you want to
|
||||
* force a slow path for a series of memory accesses.
|
||||
*/
|
||||
static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
|
||||
{
|
||||
#ifdef CONFIG_PLUGIN
|
||||
return !!cpu->neg.plugin_mem_cbs;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s);
|
||||
void page_init(void);
|
||||
void tb_htable_init(void);
|
||||
void tb_reset_jump(TranslationBlock *tb, int n);
|
||||
TranslationBlock *tb_link_page(TranslationBlock *tb);
|
||||
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
uintptr_t host_pc);
|
||||
|
||||
/**
|
||||
* tlb_init - initialize a CPU's TLB
|
||||
* @cpu: CPU whose TLB should be initialized
|
||||
*/
|
||||
void tlb_init(CPUState *cpu);
|
||||
/**
|
||||
* tlb_destroy - destroy a CPU's TLB
|
||||
* @cpu: CPU whose TLB should be destroyed
|
||||
*/
|
||||
void tlb_destroy(CPUState *cpu);
|
||||
|
||||
bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
|
||||
void tcg_exec_unrealizefn(CPUState *cpu);
|
||||
|
||||
/* current cflags for hashing/comparison */
|
||||
uint32_t curr_cflags(CPUState *cpu);
|
||||
|
||||
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
|
||||
|
||||
/**
|
||||
* get_page_addr_code_hostp()
|
||||
* @env: CPUArchState
|
||||
* @addr: guest virtual address of guest code
|
||||
*
|
||||
* See get_page_addr_code() (full-system version) for documentation on the
|
||||
* return value.
|
||||
*
|
||||
* Sets *@hostp (when @hostp is non-NULL) as follows.
|
||||
* If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
|
||||
* to the host address where @addr's content is kept.
|
||||
*
|
||||
* Note: this function can trigger an exception.
|
||||
*/
|
||||
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
|
||||
void **hostp);
|
||||
|
||||
/**
|
||||
* get_page_addr_code()
|
||||
* @env: CPUArchState
|
||||
* @addr: guest virtual address of guest code
|
||||
*
|
||||
* If we cannot translate and execute from the entire RAM page, or if
|
||||
* the region is not backed by RAM, returns -1. Otherwise, returns the
|
||||
* ram_addr_t corresponding to the guest code at @addr.
|
||||
*
|
||||
* Note: this function can trigger an exception.
|
||||
*/
|
||||
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
|
||||
vaddr addr)
|
||||
{
|
||||
return get_page_addr_code_hostp(env, addr, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* Access to the various translations structures need to be serialised
|
||||
* via locks for consistency. In user-mode emulation access to the
|
||||
* memory related structures are protected with mmap_lock.
|
||||
* In !user-mode we use per-page locks.
|
||||
*/
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
|
||||
#else
|
||||
#define assert_memory_lock()
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
|
||||
void assert_no_pages_locked(void);
|
||||
#else
|
||||
static inline void assert_no_pages_locked(void) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
static inline void page_table_config_init(void) { }
|
||||
#else
|
||||
void page_table_config_init(void);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
|
||||
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
|
||||
|
||||
void tcg_get_stats(AccelState *accel, GString *buf);
|
||||
|
||||
#endif
|
accel/tcg/internal.h (141 lines, new file)
@@ -0,0 +1,141 @@
|
||||
/*
|
||||
* Internal execution defines for qemu
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* SPDX-License-Identifier: LGPL-2.1-or-later
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_TCG_INTERNAL_H
|
||||
#define ACCEL_TCG_INTERNAL_H
|
||||
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/translate-all.h"
|
||||
|
||||
/*
|
||||
* Access to the various translations structures need to be serialised
|
||||
* via locks for consistency. In user-mode emulation access to the
|
||||
* memory related structures are protected with mmap_lock.
|
||||
* In !user-mode we use per-page locks.
|
||||
*/
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
|
||||
#else
|
||||
#define assert_memory_lock()
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
|
||||
void assert_no_pages_locked(void);
|
||||
#else
|
||||
static inline void assert_no_pages_locked(void) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
static inline void page_table_config_init(void) { }
|
||||
#else
|
||||
void page_table_config_init(void);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
/*
|
||||
* For user-only, page_protect sets the page read-only.
|
||||
* Since most execution is already on read-only pages, and we'd need to
|
||||
* account for other TBs on the same page, defer undoing any page protection
|
||||
* until we receive the write fault.
|
||||
*/
|
||||
static inline void tb_lock_page0(tb_page_addr_t p0)
|
||||
{
|
||||
page_protect(p0);
|
||||
}
|
||||
|
||||
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
|
||||
{
|
||||
page_protect(p1);
|
||||
}
|
||||
|
||||
static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
|
||||
static inline void tb_unlock_pages(TranslationBlock *tb) { }
|
||||
#else
|
||||
void tb_lock_page0(tb_page_addr_t);
|
||||
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
|
||||
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
|
||||
void tb_unlock_pages(TranslationBlock *);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
|
||||
unsigned size,
|
||||
uintptr_t retaddr);
|
||||
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
#endif /* CONFIG_SOFTMMU */
|
||||
|
||||
TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
|
||||
uint64_t cs_base, uint32_t flags,
|
||||
int cflags);
|
||||
void page_init(void);
|
||||
void tb_htable_init(void);
|
||||
void tb_reset_jump(TranslationBlock *tb, int n);
|
||||
TranslationBlock *tb_link_page(TranslationBlock *tb);
|
||||
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
|
||||
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
uintptr_t host_pc);
|
||||
|
||||
/* Return the current PC from CPU, which may be cached in TB. */
|
||||
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
|
||||
{
|
||||
if (tb_cflags(tb) & CF_PCREL) {
|
||||
return cpu->cc->get_pc(cpu);
|
||||
} else {
|
||||
return tb->pc;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true if CS is not running in parallel with other cpus, either
|
||||
* because there are no other cpus or we are within an exclusive context.
|
||||
*/
|
||||
static inline bool cpu_in_serial_context(CPUState *cs)
|
||||
{
|
||||
return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
|
||||
}
|
||||
|
||||
extern int64_t max_delay;
|
||||
extern int64_t max_advance;
|
||||
|
||||
extern bool one_insn_per_tb;
|
||||
|
||||
/**
|
||||
* tcg_req_mo:
|
||||
* @type: TCGBar
|
||||
*
|
||||
* Filter @type to the barrier that is required for the guest
|
||||
* memory ordering vs the host memory ordering. A non-zero
|
||||
* result indicates that some barrier is required.
|
||||
*
|
||||
* If TCG_GUEST_DEFAULT_MO is not defined, assume that the
|
||||
* guest requires strict ordering.
|
||||
*
|
||||
* This is a macro so that it's constant even without optimization.
|
||||
*/
|
||||
#ifdef TCG_GUEST_DEFAULT_MO
|
||||
# define tcg_req_mo(type) \
|
||||
((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
|
||||
#else
|
||||
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* cpu_req_mo:
|
||||
* @type: TCGBar
|
||||
*
|
||||
* If tcg_req_mo indicates a barrier for @type is required
|
||||
* for the guest memory model, issue a host memory barrier.
|
||||
*/
|
||||
#define cpu_req_mo(type) \
|
||||
do { \
|
||||
if (tcg_req_mo(type)) { \
|
||||
smp_mb(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif /* ACCEL_TCG_INTERNAL_H */
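
/*
 * Minimal self-contained illustration of the tcg_req_mo() filtering idea
 * above, using stand-in masks (the real TCG_MO_* bits and
 * TCG_TARGET_DEFAULT_MO live in the TCG headers):
 */
#include <stdio.h>

#define GUEST_MO 0x3   /* pretend the guest arch requires these barriers */
#define HOST_MO  0x1   /* pretend the host already provides this subset  */

#define req_mo(type) ((type) & GUEST_MO & ~HOST_MO)

int main(void)
{
    /*
     * Only bits the guest needs but the host does not guarantee remain;
     * a non-zero result is where cpu_req_mo() would issue smp_mb().
     */
    printf("%#x\n", req_mo(0x3));   /* prints 0x2 */
    return 0;
}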
|
@@ -9,8 +9,8 @@
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "host/load-extract-al16-al8.h.inc"
|
||||
#include "host/store-insert-al16.h.inc"
|
||||
#include "host/load-extract-al16-al8.h"
|
||||
#include "host/store-insert-al16.h"
|
||||
|
||||
#ifdef CONFIG_ATOMIC64
|
||||
# define HAVE_al8 true
|
||||
@@ -26,7 +26,7 @@
|
||||
* If the operation must be split into two operations to be
|
||||
* examined separately for atomicity, return -lg2.
|
||||
*/
|
||||
static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
|
||||
static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
|
||||
{
|
||||
MemOp atom = memop & MO_ATOM_MASK;
|
||||
MemOp size = memop & MO_SIZE;
|
||||
@@ -76,7 +76,7 @@ static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
|
||||
/*
|
||||
* Examine the alignment of p to determine if there are subobjects
|
||||
* that must be aligned. Note that we only really need ctz4() --
|
||||
* any more significant bits are discarded by the immediately
|
||||
* any more sigificant bits are discarded by the immediately
|
||||
* following comparison.
|
||||
*/
|
||||
tmp = ctz32(p);
|
||||
@@ -93,7 +93,7 @@ static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
|
||||
* host atomicity in order to avoid racing. This reduction
|
||||
* avoids looping with cpu_loop_exit_atomic.
|
||||
*/
|
||||
if (cpu_in_serial_context(cpu)) {
|
||||
if (cpu_in_serial_context(env_cpu(env))) {
|
||||
return MO_8;
|
||||
}
|
||||
return atmax;
|
||||
@@ -139,14 +139,14 @@ static inline uint64_t load_atomic8(void *pv)
|
||||
|
||||
/**
|
||||
* load_atomic8_or_exit:
|
||||
* @cpu: generic cpu state
|
||||
* @env: cpu context
|
||||
* @ra: host unwind address
|
||||
* @pv: host address
|
||||
*
|
||||
* Atomically load 8 aligned bytes from @pv.
|
||||
* If this is not possible, longjmp out to restart serially.
|
||||
*/
|
||||
static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
|
||||
static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
|
||||
{
|
||||
if (HAVE_al8) {
|
||||
return load_atomic8(pv);
|
||||
@@ -168,20 +168,19 @@ static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
|
||||
#endif
|
||||
|
||||
/* Ultimate fallback: re-execute in serial context. */
|
||||
trace_load_atom8_or_exit_fallback(ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
/**
|
||||
* load_atomic16_or_exit:
|
||||
* @cpu: generic cpu state
|
||||
* @env: cpu context
|
||||
* @ra: host unwind address
|
||||
* @pv: host address
|
||||
*
|
||||
* Atomically load 16 aligned bytes from @pv.
|
||||
* If this is not possible, longjmp out to restart serially.
|
||||
*/
|
||||
static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
|
||||
static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
|
||||
{
|
||||
Int128 *p = __builtin_assume_aligned(pv, 16);
|
||||
|
||||
@@ -213,8 +212,7 @@ static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
|
||||
}
|
||||
|
||||
/* Ultimate fallback: re-execute in serial context. */
|
||||
trace_load_atom16_or_exit_fallback(ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -265,7 +263,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)
|
||||
|
||||
/**
|
||||
* load_atom_extract_al8_or_exit:
|
||||
* @cpu: generic cpu state
|
||||
* @env: cpu context
|
||||
* @ra: host unwind address
|
||||
* @pv: host address
|
||||
* @s: object size in bytes, @s <= 4.
|
||||
@@ -275,7 +273,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)
|
||||
* 8-byte load and extract.
|
||||
* The value is returned in the low bits of a uint32_t.
|
||||
*/
|
||||
static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, int s)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -283,12 +281,12 @@ static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;
|
||||
|
||||
pv = (void *)(pi & ~7);
|
||||
return load_atomic8_or_exit(cpu, ra, pv) >> shr;
|
||||
return load_atomic8_or_exit(env, ra, pv) >> shr;
|
||||
}
|
||||
|
||||
/**
|
||||
* load_atom_extract_al16_or_exit:
|
||||
* @cpu: generic cpu state
|
||||
* @env: cpu context
|
||||
* @ra: host unwind address
|
||||
* @p: host address
|
||||
* @s: object size in bytes, @s <= 8.
|
||||
@@ -301,7 +299,7 @@ static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* If this is not possible, longjmp out to restart serially.
|
||||
*/
|
||||
static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, int s)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -314,7 +312,7 @@ static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
* Provoke SIGBUS if possible otherwise.
|
||||
*/
|
||||
pv = (void *)(pi & ~7);
|
||||
r = load_atomic16_or_exit(cpu, ra, pv);
|
||||
r = load_atomic16_or_exit(env, ra, pv);
|
||||
|
||||
r = int128_urshift(r, shr);
|
||||
return int128_getlo(r);
|
||||
@@ -396,7 +394,7 @@ static inline uint64_t load_atom_8_by_8_or_4(void *pv)
|
||||
*
|
||||
* Load 2 bytes from @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -412,7 +410,7 @@ static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
}
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
return lduw_he_p(pv);
|
||||
@@ -423,9 +421,9 @@ static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
return load_atomic4(pv - 1) >> 8;
|
||||
}
|
||||
if ((pi & 15) != 7) {
|
||||
return load_atom_extract_al8_or_exit(cpu, ra, pv, 2);
|
||||
return load_atom_extract_al8_or_exit(env, ra, pv, 2);
|
||||
}
|
||||
return load_atom_extract_al16_or_exit(cpu, ra, pv, 2);
|
||||
return load_atom_extract_al16_or_exit(env, ra, pv, 2);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -438,7 +436,7 @@ static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Load 4 bytes from @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -454,7 +452,7 @@ static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
}
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
case MO_16:
|
||||
@@ -468,9 +466,9 @@ static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
return load_atom_extract_al4x2(pv);
|
||||
case MO_32:
|
||||
if (!(pi & 4)) {
|
||||
return load_atom_extract_al8_or_exit(cpu, ra, pv, 4);
|
||||
return load_atom_extract_al8_or_exit(env, ra, pv, 4);
|
||||
}
|
||||
return load_atom_extract_al16_or_exit(cpu, ra, pv, 4);
|
||||
return load_atom_extract_al16_or_exit(env, ra, pv, 4);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -483,7 +481,7 @@ static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Load 8 bytes from @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -500,12 +498,12 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
return load_atom_extract_al16_or_al8(pv, 8);
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
if (atmax == MO_64) {
|
||||
if (!HAVE_al8 && (pi & 7) == 0) {
|
||||
load_atomic8_or_exit(cpu, ra, pv);
|
||||
load_atomic8_or_exit(env, ra, pv);
|
||||
}
|
||||
return load_atom_extract_al16_or_exit(cpu, ra, pv, 8);
|
||||
return load_atom_extract_al16_or_exit(env, ra, pv, 8);
|
||||
}
|
||||
if (HAVE_al8_fast) {
|
||||
return load_atom_extract_al8x2(pv);
|
||||
@@ -521,8 +519,7 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
if (HAVE_al8) {
|
||||
return load_atom_extract_al8x2(pv);
|
||||
}
|
||||
trace_load_atom8_fallback(memop, ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -535,7 +532,7 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Load 16 bytes from @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -551,7 +548,7 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
return atomic16_read_ro(pv);
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
memcpy(&r, pv, 16);
|
||||
@@ -566,22 +563,20 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
break;
|
||||
case MO_64:
|
||||
if (!HAVE_al8) {
|
||||
trace_load_atom16_fallback(memop, ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
a = load_atomic8(pv);
|
||||
b = load_atomic8(pv + 8);
|
||||
break;
|
||||
case -MO_64:
|
||||
if (!HAVE_al8) {
|
||||
trace_load_atom16_fallback(memop, ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
a = load_atom_extract_al8x2(pv);
|
||||
b = load_atom_extract_al8x2(pv + 8);
|
||||
break;
|
||||
case MO_128:
|
||||
return load_atomic16_or_exit(cpu, ra, pv);
|
||||
return load_atomic16_or_exit(env, ra, pv);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -830,7 +825,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
|
||||
int sh = o * 8;
|
||||
Int128 m, v;
|
||||
|
||||
qemu_build_assert(HAVE_CMPXCHG128);
|
||||
qemu_build_assert(HAVE_ATOMIC128_RW);
|
||||
|
||||
/* Like MAKE_64BIT_MASK(0, sz), but larger. */
|
||||
if (sz <= 64) {
|
||||
@@ -862,7 +857,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
|
||||
*
|
||||
* Store 2 bytes to @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
static void store_atom_2(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop, uint16_t val)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -873,7 +868,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
if (atmax == MO_8) {
|
||||
stw_he_p(pv, val);
|
||||
return;
|
||||
@@ -892,7 +887,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
} else if ((pi & 15) == 7) {
|
||||
if (HAVE_CMPXCHG128) {
|
||||
if (HAVE_ATOMIC128_RW) {
|
||||
Int128 v = int128_lshift(int128_make64(val), 56);
|
||||
Int128 m = int128_lshift(int128_make64(0xffff), 56);
|
||||
store_atom_insert_al16(pv - 7, v, m);
|
||||
@@ -902,8 +897,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
trace_store_atom2_fallback(memop, ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -914,7 +908,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Store 4 bytes to @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static void store_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
static void store_atom_4(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop, uint32_t val)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -925,7 +919,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
stl_he_p(pv, val);
|
||||
@@ -962,13 +956,12 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (HAVE_CMPXCHG128) {
|
||||
if (HAVE_ATOMIC128_RW) {
|
||||
store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val)));
|
||||
return;
|
||||
}
|
||||
}
|
||||
trace_store_atom4_fallback(memop, ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -982,7 +975,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Store 8 bytes to @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
static void store_atom_8(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop, uint64_t val)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -993,7 +986,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
stq_he_p(pv, val);
|
||||
@@ -1028,7 +1021,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
}
|
||||
break;
|
||||
case MO_64:
|
||||
if (HAVE_CMPXCHG128) {
|
||||
if (HAVE_ATOMIC128_RW) {
|
||||
store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val)));
|
||||
return;
|
||||
}
|
||||
@@ -1036,8 +1029,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
trace_store_atom8_fallback(memop, ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1048,7 +1040,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Store 16 bytes to @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static void store_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
static void store_atom_16(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop, Int128 val)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -1060,7 +1052,7 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
|
||||
a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
|
||||
b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);
|
||||
@@ -1084,7 +1076,7 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
}
|
||||
break;
|
||||
case -MO_64:
|
||||
if (HAVE_CMPXCHG128) {
|
||||
if (HAVE_ATOMIC128_RW) {
|
||||
uint64_t val_le;
|
||||
int s2 = pi & 15;
|
||||
int s1 = 16 - s2;
|
||||
@@ -1111,10 +1103,13 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
}
|
||||
break;
|
||||
case MO_128:
|
||||
if (HAVE_ATOMIC128_RW) {
|
||||
atomic16_set(pv, val);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
trace_store_atom16_fallback(memop, ra);
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
@@ -8,238 +8,300 @@
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
/*
|
||||
* Load helpers for tcg-ldst.h
|
||||
*/
|
||||
|
||||
tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
|
||||
return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
|
||||
return cpu_ldb_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
|
||||
return cpu_ldw_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
|
||||
}
|
||||
|
||||
/*
|
||||
* Provide signed versions of the load routines as well. We can of course
|
||||
* avoid this for 64-bit data, or for 32-bit data on 32-bit host.
|
||||
*/
|
||||
|
||||
tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
|
||||
return cpu_ldl_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
|
||||
return cpu_ldq_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
|
||||
return cpu_ldw_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
|
||||
return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
|
||||
}
|
||||
|
||||
Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
|
||||
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
return helper_ld16_mmu(env, addr, oi, GETPC());
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
|
||||
return cpu_ldl_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
/*
|
||||
* Store helpers for tcg-ldst.h
|
||||
*/
|
||||
|
||||
void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
|
||||
do_st1_mmu(env_cpu(env), addr, val, oi, ra);
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
|
||||
return cpu_ldq_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
|
||||
cpu_stb_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
|
||||
cpu_stw_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
|
||||
cpu_stl_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
|
||||
cpu_stq_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
|
||||
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
helper_st16_mmu(env, addr, val, oi, GETPC());
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
|
||||
cpu_stw_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
/*
|
||||
* Load helpers for cpu_ldst.h
|
||||
*/
|
||||
|
||||
static void plugin_load_cb(CPUArchState *env, vaddr addr,
|
||||
uint64_t value_low,
|
||||
uint64_t value_high,
|
||||
MemOpIdx oi)
|
||||
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
value_low, value_high,
|
||||
oi, QEMU_PLUGIN_MEM_R);
|
||||
}
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
|
||||
cpu_stl_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
uint8_t cpu_ldb_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra)
|
||||
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
uint8_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
|
||||
ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
|
||||
cpu_stq_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
uint16_t cpu_ldw_mmu(CPUArchState *env, vaddr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint16_t ret;
|
||||
/*--------------------------*/
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_mmu(CPUArchState *env, vaddr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
uint32_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
return (int8_t)cpu_ldub_data_ra(env, addr, ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_mmu(CPUArchState *env, vaddr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
uint64_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
Int128 ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
|
||||
plugin_load_cb(env, addr, int128_getlo(ret), int128_gethi(ret), oi);
|
||||
return ret;
|
||||
return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
|
||||
}
|
||||
|
||||
/*
|
||||
* Store helpers for cpu_ldst.h
|
||||
*/
|
||||
|
||||
static void plugin_store_cb(CPUArchState *env, vaddr addr,
|
||||
uint64_t value_low,
|
||||
uint64_t value_high,
|
||||
MemOpIdx oi)
|
||||
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
value_low, value_high,
|
||||
oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stb_mmu(CPUArchState *env, vaddr addr, uint8_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
helper_stb_mmu(env, addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stw_mmu(CPUArchState *env, vaddr addr, uint16_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stl_mmu(CPUArchState *env, vaddr addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
|
||||
}
|
||||
|
||||
void cpu_stq_mmu(CPUArchState *env, vaddr addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, int128_getlo(val), int128_gethi(val), oi);
|
||||
return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t val, uintptr_t ra)
|
||||
{
|
||||
cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t val, uintptr_t ra)
|
||||
{
|
||||
cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
/*--------------------------*/
|
||||
|
||||
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldub_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return (int8_t)cpu_ldub_data(env, addr);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_lduw_be_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return (int16_t)cpu_lduw_be_data(env, addr);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldl_be_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldq_be_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_lduw_le_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return (int16_t)cpu_lduw_le_data(env, addr);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldl_le_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldq_le_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stb_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stw_be_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stl_be_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
|
||||
{
|
||||
cpu_stq_be_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stw_le_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stl_le_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
|
||||
{
|
||||
cpu_stq_le_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
@@ -1,39 +1,29 @@
|
||||
if not have_tcg
|
||||
subdir_done()
|
||||
endif
|
||||
|
||||
tcg_ss = ss.source_set()
|
||||
|
||||
tcg_ss.add(files(
|
||||
'cpu-exec.c',
|
||||
'cpu-exec-common.c',
|
||||
'tcg-runtime.c',
|
||||
'tcg-runtime-gvec.c',
|
||||
'tb-maint.c',
|
||||
'tcg-all.c',
|
||||
'tcg-stats.c',
|
||||
'cpu-exec-common.c',
|
||||
'cpu-exec.c',
|
||||
'tb-maint.c',
|
||||
'tcg-runtime-gvec.c',
|
||||
'tcg-runtime.c',
|
||||
'translate-all.c',
|
||||
'translator.c',
|
||||
))
|
||||
if get_option('plugins')
|
||||
tcg_ss.add(files('plugin-gen.c'))
|
||||
endif
|
||||
tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
|
||||
tcg_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
|
||||
tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')])
|
||||
tcg_ss.add(when: libdw, if_true: files('debuginfo.c'))
|
||||
tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c'))
|
||||
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
|
||||
|
||||
user_ss.add_all(tcg_ss)
|
||||
system_ss.add_all(tcg_ss)
|
||||
|
||||
user_ss.add(files(
|
||||
'user-exec.c',
|
||||
'user-exec-stub.c',
|
||||
))
|
||||
|
||||
system_ss.add(files(
|
||||
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
|
||||
'cputlb.c',
|
||||
'icount-common.c',
|
||||
'monitor.c',
|
||||
'tcg-accel-ops.c',
|
||||
'tcg-accel-ops-icount.c',
|
||||
'tcg-accel-ops-mttcg.c',
|
||||
'tcg-accel-ops-rr.c',
|
||||
'watchpoint.c',
|
||||
))
|
||||
|
||||
tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
|
||||
'tcg-accel-ops.c',
|
||||
'tcg-accel-ops-mttcg.c',
|
||||
'tcg-accel-ops-icount.c',
|
||||
'tcg-accel-ops-rr.c',
|
||||
))
|
||||
|
@@ -7,13 +7,48 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/type-helpers.h"
|
||||
#include "qapi/qapi-commands-machine.h"
|
||||
#include "monitor/monitor.h"
|
||||
#include "system/tcg.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal.h"
|
||||
|
||||
|
||||
static void dump_drift_info(GString *buf)
|
||||
{
|
||||
if (!icount_enabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
|
||||
(cpu_get_clock() - icount_get()) / SCALE_MS);
|
||||
if (icount_align_option) {
|
||||
g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
|
||||
-max_delay / SCALE_MS);
|
||||
g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
|
||||
max_advance / SCALE_MS);
|
||||
} else {
|
||||
g_string_append_printf(buf, "Max guest delay NA\n");
|
||||
g_string_append_printf(buf, "Max guest advance NA\n");
|
||||
}
|
||||
}
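
/*
 * Hypothetical example of the lines this adds to "info jit" output when
 * icount is enabled with align=on (values purely illustrative):
 *
 *     Host - Guest clock -12 ms
 *     Max guest delay 35 ms
 *     Max guest advance 5 ms
 */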
|
||||
|
||||
static void dump_accel_info(GString *buf)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
|
||||
"one-insn-per-tb",
|
||||
&error_fatal);
|
||||
|
||||
g_string_append_printf(buf, "Accelerator settings:\n");
|
||||
g_string_append_printf(buf, "one-insn-per-tb: %s\n\n",
|
||||
one_insn_per_tb ? "on" : "off");
|
||||
}
|
||||
|
||||
HumanReadableText *qmp_x_query_jit(Error **errp)
|
||||
{
|
||||
@@ -24,7 +59,24 @@ HumanReadableText *qmp_x_query_jit(Error **errp)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tcg_dump_stats(buf);
|
||||
dump_accel_info(buf);
|
||||
dump_exec_info(buf);
|
||||
dump_drift_info(buf);
|
||||
|
||||
return human_readable_text_from_str(buf);
|
||||
}
|
||||
|
||||
HumanReadableText *qmp_x_query_opcount(Error **errp)
|
||||
{
|
||||
g_autoptr(GString) buf = g_string_new("");
|
||||
|
||||
if (!tcg_enabled()) {
|
||||
error_setg(errp,
|
||||
"Opcode count information is only available with accel=tcg");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tcg_dump_op_count(buf);
|
||||
|
||||
return human_readable_text_from_str(buf);
|
||||
}
|
||||
@@ -32,6 +84,7 @@ HumanReadableText *qmp_x_query_jit(Error **errp)
|
||||
static void hmp_tcg_register(void)
|
||||
{
|
||||
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
|
||||
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
|
||||
}
|
||||
|
||||
type_init(hmp_tcg_register);
|
||||
|
accel/tcg/perf.c (386 lines, new file)
@@ -0,0 +1,386 @@
|
||||
/*
|
||||
* Linux perf perf-<pid>.map and jit-<pid>.dump integration.
|
||||
*
|
||||
* The jitdump spec can be found at [1].
|
||||
*
|
||||
* [1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/tools/perf/Documentation/jitdump-specification.txt
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "elf.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "qemu/timer.h"
|
||||
#include "tcg/tcg.h"
|
||||
|
||||
#include "debuginfo.h"
|
||||
#include "perf.h"
|
||||
|
||||
static FILE *safe_fopen_w(const char *path)
|
||||
{
|
||||
int saved_errno;
|
||||
FILE *f;
|
||||
int fd;
|
||||
|
||||
/* Delete the old file, if any. */
|
||||
unlink(path);
|
||||
|
||||
/* Avoid symlink attacks by using O_CREAT | O_EXCL. */
|
||||
fd = open(path, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
|
||||
if (fd == -1) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Convert fd to FILE*. */
|
||||
f = fdopen(fd, "w");
|
||||
if (f == NULL) {
|
||||
saved_errno = errno;
|
||||
close(fd);
|
||||
errno = saved_errno;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return f;
|
||||
}
|
||||
|
||||
static FILE *perfmap;
|
||||
|
||||
void perf_enable_perfmap(void)
|
||||
{
|
||||
char map_file[32];
|
||||
|
||||
snprintf(map_file, sizeof(map_file), "/tmp/perf-%d.map", getpid());
|
||||
perfmap = safe_fopen_w(map_file);
|
||||
if (perfmap == NULL) {
|
||||
warn_report("Could not open %s: %s, proceeding without perfmap",
|
||||
map_file, strerror(errno));
|
||||
}
|
||||
}
|
||||
|
||||
/* Get PC and size of code JITed for guest instruction #INSN. */
|
||||
static void get_host_pc_size(uintptr_t *host_pc, uint16_t *host_size,
|
||||
const void *start, size_t insn)
|
||||
{
|
||||
uint16_t start_off = insn ? tcg_ctx->gen_insn_end_off[insn - 1] : 0;
|
||||
|
||||
if (host_pc) {
|
||||
*host_pc = (uintptr_t)start + start_off;
|
||||
}
|
||||
if (host_size) {
|
||||
*host_size = tcg_ctx->gen_insn_end_off[insn] - start_off;
|
||||
}
|
||||
}
|
||||
|
||||
static const char *pretty_symbol(const struct debuginfo_query *q, size_t *len)
|
||||
{
|
||||
static __thread char buf[64];
|
||||
int tmp;
|
||||
|
||||
if (!q->symbol) {
|
||||
tmp = snprintf(buf, sizeof(buf), "guest-0x%"PRIx64, q->address);
|
||||
if (len) {
|
||||
*len = MIN(tmp + 1, sizeof(buf));
|
||||
}
|
||||
return buf;
|
||||
}
|
||||
|
||||
if (!q->offset) {
|
||||
if (len) {
|
||||
*len = strlen(q->symbol) + 1;
|
||||
}
|
||||
return q->symbol;
|
||||
}
|
||||
|
||||
tmp = snprintf(buf, sizeof(buf), "%s+0x%"PRIx64, q->symbol, q->offset);
|
||||
if (len) {
|
||||
*len = MIN(tmp + 1, sizeof(buf));
|
||||
}
|
||||
return buf;
|
||||
}
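
/*
 * Illustrative results (hypothetical values): with no symbol information
 * the entry becomes "guest-0x400080"; with symbol "memcpy" at offset 0x10
 * it becomes "memcpy+0x10"; with a zero offset it is just "memcpy".
 */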
|
||||
|
||||
static void write_perfmap_entry(const void *start, size_t insn,
|
||||
const struct debuginfo_query *q)
|
||||
{
|
||||
uint16_t host_size;
|
||||
uintptr_t host_pc;
|
||||
|
||||
get_host_pc_size(&host_pc, &host_size, start, insn);
|
||||
fprintf(perfmap, "%"PRIxPTR" %"PRIx16" %s\n",
|
||||
host_pc, host_size, pretty_symbol(q, NULL));
|
||||
}
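
/*
 * For illustration, an entry emitted by the fprintf() above would look
 * like (address, size and symbol purely hypothetical):
 *
 *     7f2a4c001040 1c guest-0x400080
 *
 * i.e. host code address, size of the JITed code in bytes, and the pretty
 * symbol name -- the "START SIZE symbol" format perf expects in
 * /tmp/perf-<pid>.map.
 */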
|
||||
|
||||
static FILE *jitdump;
static size_t perf_marker_size;
static void *perf_marker = MAP_FAILED;

#define JITHEADER_MAGIC 0x4A695444
#define JITHEADER_VERSION 1

struct jitheader {
    uint32_t magic;
    uint32_t version;
    uint32_t total_size;
    uint32_t elf_mach;
    uint32_t pad1;
    uint32_t pid;
    uint64_t timestamp;
    uint64_t flags;
};

enum jit_record_type {
    JIT_CODE_LOAD = 0,
    JIT_CODE_DEBUG_INFO = 2,
};

struct jr_prefix {
    uint32_t id;
    uint32_t total_size;
    uint64_t timestamp;
};

struct jr_code_load {
    struct jr_prefix p;

    uint32_t pid;
    uint32_t tid;
    uint64_t vma;
    uint64_t code_addr;
    uint64_t code_size;
    uint64_t code_index;
};

struct debug_entry {
    uint64_t addr;
    int lineno;
    int discrim;
    const char name[];
};

struct jr_code_debug_info {
    struct jr_prefix p;

    uint64_t code_addr;
    uint64_t nr_entry;
    struct debug_entry entries[];
};

static uint32_t get_e_machine(void)
{
    Elf64_Ehdr elf_header;
    FILE *exe;
    size_t n;

    QEMU_BUILD_BUG_ON(offsetof(Elf32_Ehdr, e_machine) !=
                      offsetof(Elf64_Ehdr, e_machine));

    exe = fopen("/proc/self/exe", "r");
    if (exe == NULL) {
        return EM_NONE;
    }

    n = fread(&elf_header, sizeof(elf_header), 1, exe);
    fclose(exe);
    if (n != 1) {
        return EM_NONE;
    }

    return elf_header.e_machine;
}

void perf_enable_jitdump(void)
{
    struct jitheader header;
    char jitdump_file[32];

    if (!use_rt_clock) {
        warn_report("CLOCK_MONOTONIC is not available, proceeding without jitdump");
        return;
    }

    snprintf(jitdump_file, sizeof(jitdump_file), "jit-%d.dump", getpid());
    jitdump = safe_fopen_w(jitdump_file);
    if (jitdump == NULL) {
        warn_report("Could not open %s: %s, proceeding without jitdump",
                    jitdump_file, strerror(errno));
        return;
    }

    /*
     * `perf inject` will see that the mapped file name in the corresponding
     * PERF_RECORD_MMAP or PERF_RECORD_MMAP2 event is of the form jit-%d.dump
     * and will process it as a jitdump file.
     */
    perf_marker_size = qemu_real_host_page_size();
    perf_marker = mmap(NULL, perf_marker_size, PROT_READ | PROT_EXEC,
                       MAP_PRIVATE, fileno(jitdump), 0);
    if (perf_marker == MAP_FAILED) {
        warn_report("Could not map %s: %s, proceeding without jitdump",
                    jitdump_file, strerror(errno));
        fclose(jitdump);
        jitdump = NULL;
        return;
    }

    header.magic = JITHEADER_MAGIC;
    header.version = JITHEADER_VERSION;
    header.total_size = sizeof(header);
    header.elf_mach = get_e_machine();
    header.pad1 = 0;
    header.pid = getpid();
    header.timestamp = get_clock();
    header.flags = 0;
    fwrite(&header, sizeof(header), 1, jitdump);
}

void perf_report_prologue(const void *start, size_t size)
{
    if (perfmap) {
        fprintf(perfmap, "%"PRIxPTR" %zx tcg-prologue-buffer\n",
                (uintptr_t)start, size);
    }
}

/* Write a JIT_CODE_DEBUG_INFO jitdump entry. */
static void write_jr_code_debug_info(const void *start,
                                     const struct debuginfo_query *q,
                                     size_t icount)
{
    struct jr_code_debug_info rec;
    struct debug_entry ent;
    uintptr_t host_pc;
    int insn;

    /* Write the header. */
    rec.p.id = JIT_CODE_DEBUG_INFO;
    rec.p.total_size = sizeof(rec) + sizeof(ent) + 1;
    rec.p.timestamp = get_clock();
    rec.code_addr = (uintptr_t)start;
    rec.nr_entry = 1;
    for (insn = 0; insn < icount; insn++) {
        if (q[insn].file) {
            rec.p.total_size += sizeof(ent) + strlen(q[insn].file) + 1;
            rec.nr_entry++;
        }
    }
    fwrite(&rec, sizeof(rec), 1, jitdump);

    /* Write the main debug entries. */
    for (insn = 0; insn < icount; insn++) {
        if (q[insn].file) {
            get_host_pc_size(&host_pc, NULL, start, insn);
            ent.addr = host_pc;
            ent.lineno = q[insn].line;
            ent.discrim = 0;
            fwrite(&ent, sizeof(ent), 1, jitdump);
            fwrite(q[insn].file, strlen(q[insn].file) + 1, 1, jitdump);
        }
    }

    /* Write the trailing debug_entry. */
    ent.addr = (uintptr_t)start + tcg_ctx->gen_insn_end_off[icount - 1];
    ent.lineno = 0;
    ent.discrim = 0;
    fwrite(&ent, sizeof(ent), 1, jitdump);
    fwrite("", 1, 1, jitdump);
}

/* Write a JIT_CODE_LOAD jitdump entry. */
static void write_jr_code_load(const void *start, uint16_t host_size,
                               const struct debuginfo_query *q)
{
    static uint64_t code_index;
    struct jr_code_load rec;
    const char *symbol;
    size_t symbol_size;

    symbol = pretty_symbol(q, &symbol_size);
    rec.p.id = JIT_CODE_LOAD;
    rec.p.total_size = sizeof(rec) + symbol_size + host_size;
    rec.p.timestamp = get_clock();
    rec.pid = getpid();
    rec.tid = qemu_get_thread_id();
    rec.vma = (uintptr_t)start;
    rec.code_addr = (uintptr_t)start;
    rec.code_size = host_size;
    rec.code_index = code_index++;
    fwrite(&rec, sizeof(rec), 1, jitdump);
    fwrite(symbol, symbol_size, 1, jitdump);
    fwrite(start, host_size, 1, jitdump);
}

void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
                      const void *start)
{
    struct debuginfo_query *q;
    size_t insn, start_words;
    uint64_t *gen_insn_data;

    if (!perfmap && !jitdump) {
        return;
    }

    q = g_try_malloc0_n(tb->icount, sizeof(*q));
    if (!q) {
        return;
    }

    debuginfo_lock();

    /* Query debuginfo for each guest instruction. */
    gen_insn_data = tcg_ctx->gen_insn_data;
    start_words = tcg_ctx->insn_start_words;

    for (insn = 0; insn < tb->icount; insn++) {
        /* FIXME: This replicates the restore_state_to_opc() logic. */
        q[insn].address = gen_insn_data[insn * start_words + 0];
        if (tb_cflags(tb) & CF_PCREL) {
            q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
        } else {
#if defined(TARGET_I386)
            q[insn].address -= tb->cs_base;
#endif
        }
        q[insn].flags = DEBUGINFO_SYMBOL | (jitdump ? DEBUGINFO_LINE : 0);
    }
    debuginfo_query(q, tb->icount);

    /* Emit perfmap entries if needed. */
    if (perfmap) {
        flockfile(perfmap);
        for (insn = 0; insn < tb->icount; insn++) {
            write_perfmap_entry(start, insn, &q[insn]);
        }
        funlockfile(perfmap);
    }

    /* Emit jitdump entries if needed. */
    if (jitdump) {
        flockfile(jitdump);
        write_jr_code_debug_info(start, q, tb->icount);
        write_jr_code_load(start, tcg_ctx->gen_insn_end_off[tb->icount - 1],
                           q);
        funlockfile(jitdump);
    }

    debuginfo_unlock();
    g_free(q);
}

void perf_exit(void)
{
    if (perfmap) {
        fclose(perfmap);
        perfmap = NULL;
    }

    if (perf_marker != MAP_FAILED) {
        munmap(perf_marker, perf_marker_size);
        perf_marker = MAP_FAILED;
    }

    if (jitdump) {
        fclose(jitdump);
        jitdump = NULL;
    }
}
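To make the on-disk layout concrete, here is a small standalone sketch that writes only the jitdump file header described by struct jitheader above. It mirrors the fields and magic from perf.c; the output file name, the EM_X86_64 value and the clock handling are illustrative assumptions, and the real consumer of such a file is `perf inject`.

/* Standalone sketch of the header written by perf_enable_jitdump(). */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct jitheader {
    uint32_t magic;      /* 0x4A695444 ("JiTD") */
    uint32_t version;    /* 1 */
    uint32_t total_size; /* sizeof(struct jitheader) */
    uint32_t elf_mach;   /* e_machine of the generating binary */
    uint32_t pad1;
    uint32_t pid;
    uint64_t timestamp;  /* CLOCK_MONOTONIC timestamp */
    uint64_t flags;
};

int main(void)
{
    struct timespec ts;
    struct jitheader h = {
        .magic = 0x4A695444,
        .version = 1,
        .total_size = sizeof(h),
        .elf_mach = 62,          /* EM_X86_64, as an example */
        .pid = (uint32_t)getpid(),
    };
    FILE *f = fopen("jit-demo.dump", "wb");   /* made-up file name */

    if (!f) {
        return 1;
    }
    clock_gettime(CLOCK_MONOTONIC, &ts);
    h.timestamp = (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    fwrite(&h, sizeof(h), 1, f);
    fclose(f);
    return 0;
}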
49 accel/tcg/perf.h Normal file
@@ -0,0 +1,49 @@
/*
 * Linux perf perf-<pid>.map and jit-<pid>.dump integration.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef ACCEL_TCG_PERF_H
#define ACCEL_TCG_PERF_H

#if defined(CONFIG_TCG) && defined(CONFIG_LINUX)
/* Start writing perf-<pid>.map. */
void perf_enable_perfmap(void);

/* Start writing jit-<pid>.dump. */
void perf_enable_jitdump(void);

/* Add information about TCG prologue to profiler maps. */
void perf_report_prologue(const void *start, size_t size);

/* Add information about JITted guest code to profiler maps. */
void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
                      const void *start);

/* Stop writing perf-<pid>.map and/or jit-<pid>.dump. */
void perf_exit(void);
#else
static inline void perf_enable_perfmap(void)
{
}

static inline void perf_enable_jitdump(void)
{
}

static inline void perf_report_prologue(const void *start, size_t size)
{
}

static inline void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
                                    const void *start)
{
}

static inline void perf_exit(void)
{
}
#endif

#endif
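The #else branch turns every entry point into an empty static inline stub, so code that calls perf_enable_perfmap() and friends compiles unchanged on builds without CONFIG_TCG or CONFIG_LINUX. A standalone illustration of that conditional-stub pattern, with made-up names rather than QEMU's, is sketched below.

#include <stdio.h>

#ifdef HAVE_FEATURE
void feature_start(void);      /* real implementation lives elsewhere */
#else
static inline void feature_start(void)
{
    /* no-op stub: callers need no #ifdef */
}
#endif

int main(void)
{
    feature_start();
    puts("runs with or without HAVE_FEATURE defined");
    return 0;
}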
File diff suppressed because it is too large
4 accel/tcg/plugin-helpers.h Normal file
@@ -0,0 +1,4 @@
#ifdef CONFIG_PLUGIN
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, i32, i64, ptr)
#endif
@@ -20,9 +20,8 @@
#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H

#include "exec/vaddr.h"
#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "exec/cpu-defs.h"
#include "exec/exec-all.h"
#include "qemu/xxhash.h"
#include "tb-jmp-cache.h"

@@ -1,55 +0,0 @@
/*
 * TranslationBlock internal declarations (target specific)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_TB_INTERNAL_TARGET_H
#define ACCEL_TCG_TB_INTERNAL_TARGET_H

#include "exec/translation-block.h"

/*
 * The true return address will often point to a host insn that is part of
 * the next translated guest insn. Adjust the address backward to point to
 * the middle of the call insn. Subtracting one would do the job except for
 * several compressed mode architectures (arm, mips) which set the low bit
 * to indicate the compressed mode; subtracting two works around that. It
 * is also the case that there are no host isas that contain a call insn
 * smaller than 4 bytes, so we don't worry about special-casing this.
 */
#define GETPC_ADJ 2

void tb_lock_page0(tb_page_addr_t);

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    tb_lock_page0(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t ram_addr,
                                   unsigned size, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
                                    uintptr_t pc);

#endif
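The GETPC_ADJ comment above is the reasoning behind QEMU's return-address handling in helpers: the raw host return address points at (or into) the next translated insn, so it is moved back by two bytes before being used to look up the faulting TB. A trivial standalone sketch of that adjustment with a made-up address follows; the real lookup goes through GETPC() and tcg_tb_lookup(), not this helper.

#include <stdint.h>
#include <stdio.h>

#define GETPC_ADJ 2

/* Illustrative only: nudge a host return address back into the call insn. */
static uintptr_t adjust_retaddr(uintptr_t ra)
{
    return ra - GETPC_ADJ;
}

int main(void)
{
    uintptr_t ra = 0x55e0d3401234;        /* made-up return address */

    printf("%#lx -> %#lx\n", (unsigned long)ra,
           (unsigned long)adjust_retaddr(ra));
    return 0;
}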
@@ -9,25 +9,20 @@
#ifndef ACCEL_TCG_TB_JMP_CACHE_H
#define ACCEL_TCG_TB_JMP_CACHE_H

#include "qemu/rcu.h"
#include "exec/cpu-common.h"

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/*
* Invalidated in parallel; all accesses to 'tb' must be atomic.
* A valid entry is read/written by a single CPU, therefore there is
* no need for qatomic_rcu_read() and pc is always consistent with a
* non-NULL value of 'tb'. Strictly speaking pc is only needed for
* CF_PCREL, but it's used always for simplicity.
* Accessed in parallel; all accesses to 'tb' must be atomic.
* For CF_PCREL, accesses to 'pc' must be protected by a
* load_acquire/store_release to 'tb'.
*/
typedef struct CPUJumpCache {
struct CPUJumpCache {
struct rcu_head rcu;
struct {
TranslationBlock *tb;
vaddr pc;
} array[TB_JMP_CACHE_SIZE];
} CPUJumpCache;
};

#endif /* ACCEL_TCG_TB_JMP_CACHE_H */

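The new comment describes the CF_PCREL publication protocol for a jump-cache entry: the writer stores pc first and publishes tb with a store-release, the reader load-acquires tb and only then trusts pc. A hedged standalone sketch of that protocol using C11 atomics follows; QEMU itself uses its qatomic_* wrappers and the real CPUJumpCache type, so the names and types here are stand-ins.

#include <stdatomic.h>
#include <stdint.h>

/* Simplified stand-ins for the real QEMU types (illustrative only). */
typedef uint64_t vaddr;
typedef struct TranslationBlock TranslationBlock;

struct jc_entry {
    _Atomic(TranslationBlock *) tb;
    vaddr pc;
};

/* Writer: publish pc before making tb visible (store-release). */
void jc_entry_set(struct jc_entry *e, TranslationBlock *tb, vaddr pc)
{
    e->pc = pc;
    atomic_store_explicit(&e->tb, tb, memory_order_release);
}

/* Reader: load-acquire tb; if non-NULL, pc is guaranteed to match it. */
TranslationBlock *jc_entry_get(struct jc_entry *e, vaddr *pc)
{
    TranslationBlock *tb = atomic_load_explicit(&e->tb, memory_order_acquire);

    if (tb) {
        *pc = e->pc;
    }
    return tb;
}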
@@ -1,5 +1,5 @@
/*
* Translation Block Maintenance
* Translation Block Maintaince
*
* Copyright (c) 2003 Fabrice Bellard
*
@@ -22,21 +22,14 @@
#include "qemu/qtree.h"
#include "exec/cputlb.h"
#include "exec/log.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-internal.h"
#include "system/tcg.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif
#include "internal.h"

/* List iterators for lists of tagged pointers in TranslationBlock. */
@@ -157,7 +150,11 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
/*
* In system mode we want L1_MAP to be based on ram offsets.
*/
#define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
@@ -210,12 +207,13 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
{
PageDesc *pd;
void **lp;
int i;

/* Level 1. Always allocated. */
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

/* Level 2..N-1. */
for (int i = v_l2_levels; i > 0; i--) {
for (i = v_l2_levels; i > 0; i--) {
void **p = qatomic_rcu_read(lp);

if (p == NULL) {
@@ -714,7 +712,7 @@ static void tb_record(TranslationBlock *tb)
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr0 >> TARGET_PAGE_BITS;

assert(paddr0 != -1);
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
@@ -746,7 +744,7 @@ static void tb_remove(TranslationBlock *tb)
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr0 >> TARGET_PAGE_BITS;

assert(paddr0 != -1);
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
@@ -1006,8 +1004,7 @@ TranslationBlock *tb_link_page(TranslationBlock *tb)
* Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running.
*/
void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
tb_page_addr_t last)
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
{
TranslationBlock *tb;
PageForEachNext n;
@@ -1024,22 +1021,23 @@ void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
* Called with mmap_lock held for user-mode emulation
* NOTE: this function must not be called while a TB is running.
*/
static void tb_invalidate_phys_page(tb_page_addr_t addr)
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
tb_page_addr_t start, last;

start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
tb_invalidate_phys_range(NULL, start, last);
tb_invalidate_phys_range(start, last);
}

/*
* Called with mmap_lock held. If pc is not 0 then it indicates the
* host PC of the faulting store instruction that caused this invalidate.
* Returns true if the caller needs to abort execution of the current TB.
* Returns true if the caller needs to abort execution of the current
* TB (because it was modified by this store and the guest CPU has
* precise-SMC semantics).
*/
bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
uintptr_t pc)
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
{
TranslationBlock *current_tb;
bool current_tb_modified;
@@ -1051,7 +1049,10 @@ bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
* Without precise smc semantics, or when outside of a TB,
* we can skip to invalidate.
*/
if (!pc || !cpu || !cpu->cc->tcg_ops->precise_smc) {
#ifndef TARGET_HAS_PRECISE_SMC
pc = 0;
#endif
if (!pc) {
tb_invalidate_phys_page(addr);
return false;
}
@@ -1074,14 +1075,16 @@ bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
* the CPU state.
*/
current_tb_modified = true;
cpu_restore_state_from_tb(cpu, current_tb, pc);
cpu_restore_state_from_tb(current_cpu, current_tb, pc);
}
tb_phys_invalidate__locked(tb);
}

if (current_tb_modified) {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
CPUState *cpu = current_cpu;
cpu->cflags_next_tb =
1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
return true;
}
return false;
@@ -1090,28 +1093,23 @@ bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
/*
* @p must be non-NULL.
* Call with all @pages locked.
* (@cpu, @retaddr) may be (NULL, 0) outside of a cpu context,
* in which case precise_smc need not be detected.
*/
static void
tb_invalidate_phys_page_range__locked(CPUState *cpu,
struct page_collection *pages,
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
PageDesc *p, tb_page_addr_t start,
tb_page_addr_t last,
uintptr_t retaddr)
{
TranslationBlock *tb;
PageForEachNext n;
#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
TranslationBlock *current_tb = NULL;
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */

/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);

if (retaddr && cpu && cpu->cc->tcg_ops->precise_smc) {
current_tb = tcg_tb_lookup(retaddr);
}

/*
* We remove all the TBs in the range [start, last].
* XXX: see if in some cases it could be faster to invalidate all the code
@@ -1129,7 +1127,8 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu,
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
}
if (!(tb_last < start || tb_start > last)) {
if (unlikely(current_tb == tb) &&
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/*
* If we are modifying the current TB, we must stop
@@ -1139,8 +1138,9 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu,
* restore the CPU state.
*/
current_tb_modified = true;
cpu_restore_state_from_tb(cpu, current_tb, retaddr);
cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate__locked(tb);
}
}
@@ -1150,13 +1150,38 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu,
tlb_unprotect_code(start);
}

if (unlikely(current_tb_modified)) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
current_cpu->cflags_next_tb =
1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
cpu_loop_exit_noexc(current_cpu);
}
#endif
}

/*
* Invalidate all TBs which intersect with the target physical
* address page @addr.
*/
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
struct page_collection *pages;
tb_page_addr_t start, last;
PageDesc *p;

p = page_find(addr >> TARGET_PAGE_BITS);
if (p == NULL) {
return;
}

start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
pages = page_collection_lock(start, last);
tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
page_collection_unlock(pages);
}

/*
@@ -1166,8 +1191,7 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu,
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
tb_page_addr_t last)
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
{
struct page_collection *pages;
tb_page_addr_t index, index_last;
@@ -1186,30 +1210,44 @@ void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
page_start = index << TARGET_PAGE_BITS;
page_last = page_start | ~TARGET_PAGE_MASK;
page_last = MIN(page_last, last);
tb_invalidate_phys_page_range__locked(cpu, pages, pd,
tb_invalidate_phys_page_range__locked(pages, pd,
page_start, page_last, 0);
}
page_collection_unlock(pages);
}

/*
* Call with all @pages in the range [@start, @start + len[ locked.
*/
static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
tb_page_addr_t start,
unsigned len, uintptr_t ra)
{
PageDesc *p;

p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
}

assert_page_locked(p);
tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
}

/*
* len must be <= 8 and start must be a multiple of len.
* Called via softmmu_template.h when code areas are written to with
* iothread mutex not held.
*/
void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t start,
unsigned len, uintptr_t ra)
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
unsigned size,
uintptr_t retaddr)
{
PageDesc *p = page_find(start >> TARGET_PAGE_BITS);
struct page_collection *pages;

if (p) {
ram_addr_t last = start + len - 1;
struct page_collection *pages = page_collection_lock(start, last);

tb_invalidate_phys_page_range__locked(cpu, pages, p,
start, last, ra);
pages = page_collection_lock(ram_addr, ram_addr + size - 1);
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
page_collection_unlock(pages);
}
}

#endif /* CONFIG_USER_ONLY */

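page_find_alloc() above walks a radix tree whose top level is indexed by the bits above v_l1_shift and whose lower levels each consume V_L2_BITS bits of the page index. The following standalone sketch only illustrates how a page index decomposes into per-level slots; the level count and shift are assumed values, since the real v_l1_size, v_l1_shift and v_l2_levels are computed at startup from L1_MAP_ADDR_SPACE_BITS and TARGET_PAGE_BITS.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

int main(void)
{
    /* Illustrative only: assume two lower levels below the L1 table. */
    int v_l2_levels = 2;
    int v_l1_shift = v_l2_levels * V_L2_BITS;   /* bits consumed below L1 */
    uint64_t index = 0x123456789;               /* made-up page index */

    printf("L1 slot: %" PRIu64 "\n", index >> v_l1_shift);
    for (int i = v_l2_levels; i > 0; i--) {
        printf("level %d slot: %" PRIu64 "\n", v_l2_levels - i + 2,
               (index >> ((i - 1) * V_L2_BITS)) & (V_L2_SIZE - 1));
    }
    return 0;
}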
@@ -24,11 +24,11 @@
*/

#include "qemu/osdep.h"
#include "system/replay.h"
#include "exec/icount.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "hw/core/cpu.h"
#include "exec/exec-all.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-icount.h"
@@ -111,24 +111,24 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
* each vCPU execution. However u16.high can be raised
* asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
*/
g_assert(cpu->neg.icount_decr.u16.low == 0);
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
g_assert(cpu->icount_extra == 0);

replay_mutex_lock();

cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
insns_left = MIN(0xffff, cpu->icount_budget);
cpu->neg.icount_decr.u16.low = insns_left;
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;

if (cpu->icount_budget == 0) {
/*
* We're called without the BQL, so must take it while
* We're called without the iothread lock, so must take it while
* we're calling timer handlers.
*/
bql_lock();
qemu_mutex_lock_iothread();
icount_notify_aio_contexts();
bql_unlock();
qemu_mutex_unlock_iothread();
}
}

@@ -138,7 +138,7 @@ void icount_process_data(CPUState *cpu)
icount_update(cpu);

/* Reset the counters */
cpu->neg.icount_decr.u16.low = 0;
cpu_neg(cpu)->icount_decr.u16.low = 0;
cpu->icount_extra = 0;
cpu->icount_budget = 0;

@@ -153,7 +153,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask)

tcg_handle_interrupt(cpu, mask);
if (qemu_cpu_is_self(cpu) &&
!cpu->neg.can_do_io
!cpu->can_do_io
&& (mask & ~old_mask) != 0) {
cpu_abort(cpu, "Raised interrupt while not in I/O function");
}

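icount_prepare_for_run() splits the per-slice instruction budget between the 16-bit decrementer (at most 0xffff instructions) and icount_extra, which is consumed on later refills. A standalone arithmetic sketch of that split, with an illustrative budget of 100000 instructions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    /* Illustrative budget; mirrors the split in icount_prepare_for_run(). */
    int64_t icount_budget = 100000;
    uint16_t insns_left = MIN(0xffff, icount_budget);
    int64_t icount_extra = icount_budget - insns_left;

    /* Prints: decr.u16.low = 65535, icount_extra = 34465 */
    printf("decr.u16.low = %u, icount_extra = %" PRId64 "\n",
           insns_left, icount_extra);
    return 0;
}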
@@ -24,14 +24,15 @@
*/

#include "qemu/osdep.h"
#include "system/tcg.h"
#include "system/replay.h"
#include "exec/icount.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg/startup.h"
#include "tcg/tcg.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"

@@ -75,11 +76,11 @@ static void *mttcg_cpu_thread_fn(void *arg)
rcu_add_force_rcu_notifier(&force_rcu.notifier);
tcg_register_thread();

bql_lock();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);

cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
cpu->can_do_io = 1;
current_cpu = cpu;
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);

@@ -90,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
do {
if (cpu_can_run(cpu)) {
int r;
bql_unlock();
r = tcg_cpu_exec(cpu);
bql_lock();
qemu_mutex_unlock_iothread();
r = tcg_cpus_exec(cpu);
qemu_mutex_lock_iothread();
switch (r) {
case EXCP_DEBUG:
cpu_handle_guest_debug(cpu);
@@ -104,20 +105,21 @@ static void *mttcg_cpu_thread_fn(void *arg)
*/
break;
case EXCP_ATOMIC:
bql_unlock();
qemu_mutex_unlock_iothread();
cpu_exec_step_atomic(cpu);
bql_lock();
qemu_mutex_lock_iothread();
default:
/* Ignore everything else? */
break;
}
}

qatomic_set_mb(&cpu->exit_request, 0);
qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));

tcg_cpu_destroy(cpu);
bql_unlock();
tcg_cpus_destroy(cpu);
qemu_mutex_unlock_iothread();
rcu_remove_force_rcu_notifier(&force_rcu.notifier);
rcu_unregister_thread();
return NULL;

@@ -135,6 +137,10 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
g_assert(tcg_enabled());
tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);

cpu->thread = g_new0(QemuThread, 1);
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);

/* create a thread per vCPU with TCG (MTTCG) */
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
cpu->cpu_index);

@@ -25,14 +25,14 @@

#include "qemu/osdep.h"
#include "qemu/lockable.h"
#include "system/tcg.h"
#include "system/replay.h"
#include "exec/icount.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/cpu-common.h"
#include "tcg/startup.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"
@@ -111,7 +111,7 @@ static void rr_wait_io_event(void)

while (all_cpu_threads_idle()) {
rr_stop_kick_timer();
qemu_cond_wait_bql(first_cpu->halt_cond);
qemu_cond_wait_iothread(first_cpu->halt_cond);
}

rr_start_kick_timer();
@@ -131,7 +131,7 @@ static void rr_deal_with_unplugged_cpus(void)

CPU_FOREACH(cpu) {
if (cpu->unplug && !cpu_can_run(cpu)) {
tcg_cpu_destroy(cpu);
tcg_cpus_destroy(cpu);
break;
}
}
@@ -188,17 +188,17 @@ static void *rr_cpu_thread_fn(void *arg)
rcu_add_force_rcu_notifier(&force_rcu);
tcg_register_thread();

bql_lock();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);

cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
cpu->can_do_io = 1;
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);

/* wait for initial kick-off after machine start */
while (first_cpu->stopped) {
qemu_cond_wait_bql(first_cpu->halt_cond);
qemu_cond_wait_iothread(first_cpu->halt_cond);

/* process any pending work */
CPU_FOREACH(cpu) {
@@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg)
/* Only used for icount_enabled() */
int64_t cpu_budget = 0;

bql_unlock();
qemu_mutex_unlock_iothread();
replay_mutex_lock();
bql_lock();
qemu_mutex_lock_iothread();

if (icount_enabled()) {
int cpu_count = rr_cpu_count();
@@ -254,23 +254,23 @@ static void *rr_cpu_thread_fn(void *arg)
if (cpu_can_run(cpu)) {
int r;

bql_unlock();
qemu_mutex_unlock_iothread();
if (icount_enabled()) {
icount_prepare_for_run(cpu, cpu_budget);
}
r = tcg_cpu_exec(cpu);
r = tcg_cpus_exec(cpu);
if (icount_enabled()) {
icount_process_data(cpu);
}
bql_lock();
qemu_mutex_lock_iothread();

if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
break;
} else if (r == EXCP_ATOMIC) {
bql_unlock();
qemu_mutex_unlock_iothread();
cpu_exec_step_atomic(cpu);
bql_lock();
qemu_mutex_lock_iothread();
break;
}
} else if (cpu->stop) {
@@ -302,7 +302,9 @@ static void *rr_cpu_thread_fn(void *arg)
rr_deal_with_unplugged_cpus();
}

g_assert_not_reached();
rcu_remove_force_rcu_notifier(&force_rcu);
rcu_unregister_thread();
return NULL;
}

void rr_start_vcpu_thread(CPUState *cpu)
@@ -315,25 +317,24 @@ void rr_start_vcpu_thread(CPUState *cpu)
tcg_cpu_init_cflags(cpu, false);

if (!single_tcg_cpu_thread) {
single_tcg_halt_cond = cpu->halt_cond;
single_tcg_cpu_thread = cpu->thread;
cpu->thread = g_new0(QemuThread, 1);
cpu->halt_cond = g_new0(QemuCond, 1);
qemu_cond_init(cpu->halt_cond);

/* share a single thread for all cpus with TCG */
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
qemu_thread_create(cpu->thread, thread_name,
rr_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);

single_tcg_halt_cond = cpu->halt_cond;
single_tcg_cpu_thread = cpu->thread;
} else {
/* we share the thread, dump spare data */
g_free(cpu->thread);
qemu_cond_destroy(cpu->halt_cond);
g_free(cpu->halt_cond);
/* we share the thread */
cpu->thread = single_tcg_cpu_thread;
cpu->halt_cond = single_tcg_halt_cond;

/* copy the stuff done at start of rr_cpu_thread_fn */
cpu->thread_id = first_cpu->thread_id;
cpu->neg.can_do_io = 1;
cpu->can_do_io = 1;
cpu->created = true;
}
}

@@ -26,22 +26,15 @@
*/

#include "qemu/osdep.h"
#include "accel/accel-ops.h"
#include "accel/accel-cpu-ops.h"
#include "system/tcg.h"
#include "system/replay.h"
#include "exec/icount.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qemu/timer.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "exec/translation-block.h"
#include "exec/watchpoint.h"
#include "gdbstub/enums.h"

#include "hw/core/cpu.h"
#include "exec/gdbstub.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
@@ -66,37 +59,29 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)

cflags |= parallel ? CF_PARALLEL : 0;
cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
tcg_cflags_set(cpu, cflags);
cpu->tcg_cflags |= cflags;
}

void tcg_cpu_destroy(CPUState *cpu)
void tcg_cpus_destroy(CPUState *cpu)
{
cpu_thread_signal_destroyed(cpu);
}

int tcg_cpu_exec(CPUState *cpu)
int tcg_cpus_exec(CPUState *cpu)
{
int ret;
assert(tcg_enabled());
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);

qatomic_set_mb(&cpu->exit_request, 0);

return ret;
}

static void tcg_cpu_reset_hold(CPUState *cpu)
{
tcg_flush_jmp_cache(cpu);

tlb_flush(cpu);
}

/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
g_assert(qemu_mutex_iothread_locked());

cpu->interrupt_request |= mask;

/*
@@ -106,7 +91,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
} else {
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
}

@@ -124,9 +109,10 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
[GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
};

CPUClass *cc = CPU_GET_CLASS(cpu);
int cputype = xlat[gdbtype];

if (cpu->cc->gdb_stop_before_watchpoint) {
if (cc->gdb_stop_before_watchpoint) {
cputype |= BP_STOP_BEFORE_ACCESS;
}
return cputype;
@@ -200,10 +186,8 @@ static inline void tcg_remove_all_breakpoints(CPUState *cpu)
cpu_watchpoint_remove_all(cpu, BP_GDB);
}

static void tcg_accel_ops_init(AccelClass *ac)
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
AccelOpsClass *ops = ac->ops;

if (qemu_tcg_mttcg_enabled()) {
ops->create_vcpu_thread = mttcg_start_vcpu_thread;
ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
@@ -221,14 +205,13 @@ static void tcg_accel_ops_init(AccelClass *ac)
}
}

ops->cpu_reset_hold = tcg_cpu_reset_hold;
ops->supports_guest_debug = tcg_supports_guest_debug;
ops->insert_breakpoint = tcg_insert_breakpoint;
ops->remove_breakpoint = tcg_remove_breakpoint;
ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}

static void tcg_accel_ops_class_init(ObjectClass *oc, const void *data)
static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

@@ -12,10 +12,10 @@
#ifndef TCG_ACCEL_OPS_H
#define TCG_ACCEL_OPS_H

#include "system/cpus.h"
#include "sysemu/cpus.h"

void tcg_cpu_destroy(CPUState *cpu);
int tcg_cpu_exec(CPUState *cpu);
void tcg_cpus_destroy(CPUState *cpu);
int tcg_cpus_exec(CPUState *cpu);
void tcg_handle_interrupt(CPUState *cpu, int mask);
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);

@@ -24,31 +24,26 @@
*/

#include "qemu/osdep.h"
#include "system/tcg.h"
#include "sysemu/tcg.h"
#include "exec/replay-core.h"
#include "exec/icount.h"
#include "tcg/startup.h"
#include "sysemu/cpu-timers.h"
#include "tcg/tcg.h"
#include "tcg/oversized-guest.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#include "qemu/target-info.h"
#ifndef CONFIG_USER_ONLY
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "accel/accel-ops.h"
#include "accel/accel-cpu-ops.h"
#include "accel/tcg/cpu-ops.h"
#include "internal-common.h"

#include "internal.h"

struct TCGState {
AccelState parent_obj;

OnOffAuto mttcg_enabled;
bool mttcg_enabled;
bool one_insn_per_tb;
int splitwx_enabled;
unsigned long tb_size;
@@ -60,40 +55,7 @@ typedef struct TCGState TCGState;
DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
TYPE_TCG_ACCEL)

#ifndef CONFIG_USER_ONLY
bool qemu_tcg_mttcg_enabled(void)
{
TCGState *s = TCG_STATE(current_accel());
return s->mttcg_enabled == ON_OFF_AUTO_ON;
}
#endif /* !CONFIG_USER_ONLY */

static void tcg_accel_instance_init(Object *obj)
{
TCGState *s = TCG_STATE(obj);

/* If debugging enabled, default "auto on", otherwise off. */
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
s->splitwx_enabled = -1;
#else
s->splitwx_enabled = 0;
#endif
}

bool one_insn_per_tb;

static int tcg_init_machine(AccelState *as, MachineState *ms)
{
TCGState *s = TCG_STATE(as);
unsigned max_threads = 1;

#ifndef CONFIG_USER_ONLY
CPUClass *cc = CPU_CLASS(object_class_by_name(target_cpu_type()));
bool mttcg_supported = cc->tcg_ops->mttcg_supported;

switch (s->mttcg_enabled) {
case ON_OFF_AUTO_AUTO:
/*
/*
* We default to false if we know other options have been enabled
* which are currently incompatible with MTTCG. Otherwise when each
* guest (target) has been updated to support:
@@ -105,43 +67,61 @@ static int tcg_init_machine(AccelState *as, MachineState *ms)
* there is one remaining limitation to check:
* - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
*/
if (mttcg_supported && !icount_enabled()) {
s->mttcg_enabled = ON_OFF_AUTO_ON;
max_threads = ms->smp.max_cpus;
} else {
s->mttcg_enabled = ON_OFF_AUTO_OFF;
}
break;
case ON_OFF_AUTO_ON:
if (!mttcg_supported) {
warn_report("Guest not yet converted to MTTCG - "
"you may get unexpected results");
}
max_threads = ms->smp.max_cpus;
break;
case ON_OFF_AUTO_OFF:
break;
default:
g_assert_not_reached();

static bool default_mttcg_enabled(void)
{
if (icount_enabled() || TCG_OVERSIZED_GUEST) {
return false;
}
#ifdef TARGET_SUPPORTS_MTTCG
# ifndef TCG_GUEST_DEFAULT_MO
# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
# endif
return true;
#else
return false;
#endif
}

static void tcg_accel_instance_init(Object *obj)
{
TCGState *s = TCG_STATE(obj);

s->mttcg_enabled = default_mttcg_enabled();

/* If debugging enabled, default "auto on", otherwise off. */
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
s->splitwx_enabled = -1;
#else
s->splitwx_enabled = 0;
#endif
}

bool mttcg_enabled;
bool one_insn_per_tb;

static int tcg_init_machine(MachineState *ms)
{
TCGState *s = TCG_STATE(current_accel());
#ifdef CONFIG_USER_ONLY
unsigned max_cpus = 1;
#else
unsigned max_cpus = ms->smp.max_cpus;
#endif

tcg_allowed = true;
mttcg_enabled = s->mttcg_enabled;

page_init();
tb_htable_init();
tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_threads);
tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);

#if defined(CONFIG_SOFTMMU)
/*
* There's no guest base to take into account, so go ahead and
* initialize the prologue now.
*/
tcg_prologue_init();
#endif

#ifdef CONFIG_USER_ONLY
qdev_create_fake_machine();
tcg_prologue_init(tcg_ctx);
#endif

return 0;
@@ -151,7 +131,7 @@ static char *tcg_get_thread(Object *obj, Error **errp)
{
TCGState *s = TCG_STATE(obj);

return g_strdup(s->mttcg_enabled == ON_OFF_AUTO_ON ? "multi" : "single");
return g_strdup(s->mttcg_enabled ? "multi" : "single");
}

static void tcg_set_thread(Object *obj, const char *value, Error **errp)
@@ -159,13 +139,19 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
TCGState *s = TCG_STATE(obj);

if (strcmp(value, "multi") == 0) {
if (icount_enabled()) {
if (TCG_OVERSIZED_GUEST) {
error_setg(errp, "No MTTCG when guest word size > hosts");
} else if (icount_enabled()) {
error_setg(errp, "No MTTCG when icount is enabled");
} else {
s->mttcg_enabled = ON_OFF_AUTO_ON;
#ifndef TARGET_SUPPORTS_MTTCG
warn_report("Guest not yet converted to MTTCG - "
"you may get unexpected results");
#endif
s->mttcg_enabled = true;
}
} else if (strcmp(value, "single") == 0) {
s->mttcg_enabled = ON_OFF_AUTO_OFF;
s->mttcg_enabled = false;
} else {
error_setg(errp, "Invalid 'thread' setting %s", value);
}
@@ -221,7 +207,7 @@ static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
qatomic_set(&one_insn_per_tb, value);
}

static int tcg_gdbstub_supported_sstep_flags(AccelState *as)
static int tcg_gdbstub_supported_sstep_flags(void)
{
/*
* In replay mode all events will come from the log and can't be
@@ -236,14 +222,11 @@ static int tcg_gdbstub_supported_sstep_flags(AccelState *as)
}
}

static void tcg_accel_class_init(ObjectClass *oc, const void *data)
static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "tcg";
ac->init_machine = tcg_init_machine;
ac->cpu_common_realize = tcg_exec_realizefn;
ac->cpu_common_unrealize = tcg_exec_unrealizefn;
ac->get_stats = tcg_get_stats;
ac->allowed = &tcg_allowed;
ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;

Some files were not shown because too many files have changed in this diff