mirror_qemu/scripts/meson-buildoptions.sh

# This file is generated by meson-buildoptions.py, do not edit!
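#
# This file is sourced by the top-level configure script and defines two
# helpers:
#   meson_options_help()   - print the configure --help text for the options
#                            that map onto meson build options
#   _meson_option_parse()  - translate one configure-style argument into the
#                            corresponding meson -D setting
# quote_sh and meson_option_build_array are called below but not defined here;
# they are expected to be provided by the script that sources this file.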
meson_options_help() {
printf "%s\n" ' --audio-drv-list=CHOICES Set audio driver list [default] (choices: alsa/co'
printf "%s\n" ' reaudio/default/dsound/jack/oss/pa/pipewire/sdl/s'
printf "%s\n" ' ndio)'
printf "%s\n" ' --bindir=VALUE Executable directory [bin]'
printf "%s\n" ' --block-drv-ro-whitelist=VALUE'
printf "%s\n" ' set block driver read-only whitelist (by default'
printf "%s\n" ' affects only QEMU, not tools like qemu-img)'
printf "%s\n" ' --block-drv-rw-whitelist=VALUE'
printf "%s\n" ' set block driver read-write whitelist (by default'
printf "%s\n" ' affects only QEMU, not tools like qemu-img)'
printf "%s\n" ' --datadir=VALUE Data file directory [share]'
printf "%s\n" ' --disable-coroutine-pool coroutine freelist (better performance)'
printf "%s\n" ' --disable-debug-info Enable debug symbols and other information'
printf "%s\n" ' --disable-hexagon-idef-parser'
printf "%s\n" ' use idef-parser to automatically generate TCG'
printf "%s\n" ' code for the Hexagon frontend'
printf "%s\n" ' --disable-install-blobs install provided firmware blobs'
printf "%s\n" ' --disable-qom-cast-debug cast debugging support'
printf "%s\n" ' --disable-relocatable toggle relocatable install'
printf "%s\n" ' --docdir=VALUE Base directory for documentation installation'
printf "%s\n" ' (can be empty) [share/doc]'
printf "%s\n" ' --enable-block-drv-whitelist-in-tools'
printf "%s\n" ' use block whitelist also in tools instead of only'
printf "%s\n" ' QEMU'
printf "%s\n" ' --enable-cfi Control-Flow Integrity (CFI)'
printf "%s\n" ' --enable-cfi-debug Verbose errors in case of CFI violation'
printf "%s\n" ' --enable-debug-graph-lock'
printf "%s\n" ' graph lock debugging support'
printf "%s\n" ' --enable-debug-mutex mutex debugging support'
printf "%s\n" ' --enable-debug-stack-usage'
printf "%s\n" ' measure coroutine stack usage'
printf "%s\n" ' --enable-debug-tcg TCG debugging'
printf "%s\n" ' --enable-fdt[=CHOICE] Whether and how to find the libfdt library'
printf "%s\n" ' (choices: auto/disabled/enabled/internal/system)'
printf "%s\n" ' --enable-fuzzing build fuzzing targets'
printf "%s\n" ' --enable-gcov Enable coverage tracking.'
printf "%s\n" ' --enable-lto Use link time optimization'
printf "%s\n" ' --enable-malloc=CHOICE choose memory allocator to use [system] (choices:'
printf "%s\n" ' jemalloc/system/tcmalloc)'
printf "%s\n" ' --enable-module-upgrades try to load modules from alternate paths for'
printf "%s\n" ' upgrades'
printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and'
printf "%s\n" ' getrandom()'
printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires'
printf "%s\n" ' clang/llvm and coroutine backend ucontext)'
printf "%s\n" ' --enable-sanitizers enable default sanitizers'
printf "%s\n" ' --enable-strip Strip targets on install'
printf "%s\n" ' --enable-tcg-interpreter TCG with bytecode interpreter (slow)'
printf "%s\n" ' --enable-trace-backends=CHOICES'
printf "%s\n" ' Set available tracing backends [log] (choices:'
printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)'
printf "%s\n" ' --enable-tsan enable thread sanitizer'
printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-'
printf "%s\n" ' firmware]'
printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler'
printf "%s\n" ' --includedir=VALUE Header file directory [include]'
printf "%s\n" ' --interp-prefix=VALUE where to find shared libraries etc., use %M for'
printf "%s\n" ' cpu name [/usr/gnemul/qemu-%M]'
printf "%s\n" ' --libdir=VALUE Library directory [system default]'
printf "%s\n" ' --libexecdir=VALUE Library executable directory [libexec]'
printf "%s\n" ' --localedir=VALUE Locale data directory [share/locale]'
printf "%s\n" ' --localstatedir=VALUE Localstate data directory [/var/local]'
printf "%s\n" ' --mandir=VALUE Manual page directory [share/man]'
printf "%s\n" ' --prefix=VALUE Installation prefix [/usr/local]'
printf "%s\n" ' --qemu-ga-distro=VALUE second path element in qemu-ga registry entries'
printf "%s\n" ' [Linux]'
printf "%s\n" ' --qemu-ga-manufacturer=VALUE'
printf "%s\n" ' "manufacturer" name for qemu-ga registry entries'
printf "%s\n" ' [QEMU]'
printf "%s\n" ' --qemu-ga-version=VALUE version number for qemu-ga installer'
printf "%s\n" ' --smbd=VALUE Path to smbd for slirp networking'
printf "%s\n" ' --sysconfdir=VALUE Sysconf data directory [etc]'
printf "%s\n" ' --tls-priority=VALUE Default TLS protocol/cipher priority string'
printf "%s\n" ' [NORMAL]'
printf "%s\n" ' --with-coroutine=CHOICE coroutine backend to use (choices:'
printf "%s\n" ' auto/sigaltstack/ucontext/windows)'
printf "%s\n" ' --with-pkgversion=VALUE use specified string as sub-version of the'
printf "%s\n" ' package'
printf "%s\n" ' --with-suffix=VALUE Suffix for QEMU data/modules/config directories'
printf "%s\n" ' (can be empty) [qemu]'
printf "%s\n" ' --with-trace-file=VALUE Trace file prefix for simple backend [trace]'
printf "%s\n" ''
printf "%s\n" 'Optional features, enabled with --enable-FEATURE and'
printf "%s\n" 'disabled with --disable-FEATURE, default is enabled if available'
printf "%s\n" '(unless built with --without-default-features):'
printf "%s\n" ''
printf "%s\n" ' af-xdp AF_XDP network backend support'
printf "%s\n" ' alsa ALSA sound support'
printf "%s\n" ' attr attr/xattr support'
printf "%s\n" ' auth-pam PAM access control'
printf "%s\n" ' avx2 AVX2 optimizations'
printf "%s\n" ' avx512bw AVX512BW optimizations'
printf "%s\n" ' avx512f AVX512F optimizations'
printf "%s\n" ' blkio libblkio block device driver'
printf "%s\n" ' bochs bochs image format support'
printf "%s\n" ' bpf eBPF support'
printf "%s\n" ' brlapi brlapi character device driver'
printf "%s\n" ' bzip2 bzip2 support for DMG images'
printf "%s\n" ' canokey CanoKey support'
printf "%s\n" ' cap-ng cap_ng support'
printf "%s\n" ' capstone Whether and how to find the capstone library'
printf "%s\n" ' cloop cloop image format support'
printf "%s\n" ' cocoa Cocoa user interface (macOS only)'
printf "%s\n" ' colo-proxy colo-proxy support'
printf "%s\n" ' coreaudio CoreAudio sound support'
printf "%s\n" ' crypto-afalg Linux AF_ALG crypto backend driver'
printf "%s\n" ' curl CURL block device driver'
printf "%s\n" ' curses curses UI'
printf "%s\n" ' dbus-display -display dbus support'
printf "%s\n" ' dmg dmg image format support'
printf "%s\n" ' docs Documentation build support'
printf "%s\n" ' dsound DirectSound sound support'
printf "%s\n" ' fuse FUSE block device export'
printf "%s\n" ' fuse-lseek SEEK_HOLE/SEEK_DATA support for FUSE exports'
printf "%s\n" ' gcrypt libgcrypt cryptography support'
printf "%s\n" ' gettext Localization of the GTK+ user interface'
printf "%s\n" ' gio use libgio for D-Bus support'
printf "%s\n" ' glusterfs Glusterfs block device driver'
printf "%s\n" ' gnutls GNUTLS cryptography support'
printf "%s\n" ' gtk GTK+ user interface'
printf "%s\n" ' gtk-clipboard clipboard support for the gtk UI (EXPERIMENTAL, MAY HANG)'
printf "%s\n" ' guest-agent Build QEMU Guest Agent'
printf "%s\n" ' guest-agent-msi Build MSI package for the QEMU Guest Agent'
printf "%s\n" ' hv-balloon hv-balloon driver (requires Glib 2.68+ GTree API)'
printf "%s\n" ' hvf HVF acceleration support'
printf "%s\n" ' iconv Font glyph conversion support'
printf "%s\n" ' jack JACK sound support'
printf "%s\n" ' keyring Linux keyring support'
printf "%s\n" ' kvm KVM acceleration support'
printf "%s\n" ' l2tpv3 l2tpv3 network backend support'
printf "%s\n" ' libdaxctl libdaxctl support'
printf "%s\n" ' libdw debuginfo support'
printf "%s\n" ' libiscsi libiscsi userspace initiator'
printf "%s\n" ' libkeyutils Linux keyutils support'
printf "%s\n" ' libnfs libnfs block device driver'
printf "%s\n" ' libpmem libpmem support'
printf "%s\n" ' libssh ssh block device support'
printf "%s\n" ' libudev Use libudev to enumerate host devices'
printf "%s\n" ' libusb libusb support for USB passthrough'
printf "%s\n" ' libvduse build VDUSE Library'
printf "%s\n" ' linux-aio Linux AIO support'
printf "%s\n" ' linux-io-uring Linux io_uring support'
printf "%s\n" ' live-block-migration'
printf "%s\n" ' block migration in the main migration stream'
printf "%s\n" ' lzfse lzfse support for DMG images'
printf "%s\n" ' lzo lzo compression support'
printf "%s\n" ' malloc-trim enable libc malloc_trim() for memory optimization'
printf "%s\n" ' membarrier membarrier system call (for Linux 4.14+ or Windows)'
printf "%s\n" ' modules modules support (non Windows)'
printf "%s\n" ' mpath Multipath persistent reservation passthrough'
printf "%s\n" ' multiprocess Out of process device emulation support'
printf "%s\n" ' netmap netmap network backend support'
printf "%s\n" ' nettle nettle cryptography support'
printf "%s\n" ' numa libnuma support'
printf "%s\n" ' nvmm NVMM acceleration support'
printf "%s\n" ' opengl OpenGL support'
printf "%s\n" ' oss OSS sound support'
printf "%s\n" ' pa PulseAudio sound support'
printf "%s\n" ' parallels parallels image format support'
printf "%s\n" ' pipewire PipeWire sound support'
printf "%s\n" ' pixman pixman support'
printf "%s\n" ' plugins TCG plugins via shared library loading'
printf "%s\n" ' png PNG support with libpng'
printf "%s\n" ' pvrdma Enable PVRDMA support'
printf "%s\n" ' qcow1 qcow1 image format support'
printf "%s\n" ' qed qed image format support'
printf "%s\n" ' qga-vss build QGA VSS support (broken with MinGW)'
printf "%s\n" ' rbd Ceph block device driver'
printf "%s\n" ' rdma Enable RDMA-based migration'
printf "%s\n" ' replication replication support'
printf "%s\n" ' rutabaga-gfx rutabaga_gfx support'
printf "%s\n" ' sdl SDL user interface'
printf "%s\n" ' sdl-image SDL Image support for icons'
printf "%s\n" ' seccomp seccomp support'
printf "%s\n" ' selinux SELinux support in qemu-nbd'
printf "%s\n" ' slirp libslirp user mode network backend support'
printf "%s\n" ' slirp-smbd use smbd (at path --smbd=*) in slirp networking'
printf "%s\n" ' smartcard CA smartcard emulation support'
printf "%s\n" ' snappy snappy compression support'
printf "%s\n" ' sndio sndio sound support'
printf "%s\n" ' sparse sparse checker'
printf "%s\n" ' spice Spice server support'
printf "%s\n" ' spice-protocol Spice protocol support'
printf "%s\n" ' stack-protector compiler-provided stack protection'
printf "%s\n" ' tcg TCG support'
printf "%s\n" ' tools build support utilities that come with QEMU'
printf "%s\n" ' tpm TPM support'
printf "%s\n" ' u2f U2F emulation support'
printf "%s\n" ' usb-redir libusbredir support'
printf "%s\n" ' vde vde network backend support'
printf "%s\n" ' vdi vdi image format support'
printf "%s\n" ' vduse-blk-export'
printf "%s\n" ' VDUSE block export support'
printf "%s\n" ' vfio-user-server'
printf "%s\n" ' vfio-user server support'
printf "%s\n" ' vhdx vhdx image format support'
printf "%s\n" ' vhost-crypto vhost-user crypto backend support'
printf "%s\n" ' vhost-kernel vhost kernel backend support'
printf "%s\n" ' vhost-net vhost-net kernel acceleration support'
printf "%s\n" ' vhost-user vhost-user backend support'
printf "%s\n" ' vhost-user-blk-server'
printf "%s\n" ' build vhost-user-blk server'
printf "%s\n" ' vhost-vdpa vhost-vdpa kernel backend support'
printf "%s\n" ' virglrenderer virgl rendering support'
printf "%s\n" ' virtfs virtio-9p support'
printf "%s\n" ' virtfs-proxy-helper'
printf "%s\n" ' virtio-9p proxy helper support'
printf "%s\n" ' vmdk vmdk image format support'
printf "%s\n" ' vmnet vmnet.framework network backend support'
printf "%s\n" ' vnc VNC server'
printf "%s\n" ' vnc-jpeg JPEG lossy compression for VNC server'
printf "%s\n" ' vnc-sasl SASL authentication for VNC server'
printf "%s\n" ' vpc vpc image format support'
printf "%s\n" ' vte vte support for the gtk UI'
printf "%s\n" ' vvfat vvfat image format support'
printf "%s\n" ' werror Treat warnings as errors'
printf "%s\n" ' whpx WHPX acceleration support'
printf "%s\n" ' xen Xen backend support'
printf "%s\n" ' xen-pci-passthrough'
printf "%s\n" ' Xen PCI passthrough support'
printf "%s\n" ' xkbcommon xkbcommon support'
printf "%s\n" ' zstd zstd compression support'
}
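# Translate one configure-style argument into a meson setting printed on
# stdout.  $1 is the full argument, $2 its value (if any).  Feature options
# map --enable-FOO/--disable-FOO to -Dfoo=enabled/disabled, boolean options
# to -Dfoo=true/false, and value options are emitted through quote_sh so the
# caller can safely re-evaluate the result.  Unrecognized arguments return
# status 1 so the caller can report them.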
_meson_option_parse() {
case $1 in
--enable-af-xdp) printf "%s" -Daf_xdp=enabled ;;
--disable-af-xdp) printf "%s" -Daf_xdp=disabled ;;
--enable-alsa) printf "%s" -Dalsa=enabled ;;
--disable-alsa) printf "%s" -Dalsa=disabled ;;
--enable-attr) printf "%s" -Dattr=enabled ;;
--disable-attr) printf "%s" -Dattr=disabled ;;
--audio-drv-list=*) quote_sh "-Daudio_drv_list=$2" ;;
--enable-auth-pam) printf "%s" -Dauth_pam=enabled ;;
--disable-auth-pam) printf "%s" -Dauth_pam=disabled ;;
--enable-avx2) printf "%s" -Davx2=enabled ;;
--disable-avx2) printf "%s" -Davx2=disabled ;;
--enable-avx512bw) printf "%s" -Davx512bw=enabled ;;
--disable-avx512bw) printf "%s" -Davx512bw=disabled ;;
--enable-avx512f) printf "%s" -Davx512f=enabled ;;
--disable-avx512f) printf "%s" -Davx512f=disabled ;;
--enable-gcov) printf "%s" -Db_coverage=true ;;
--disable-gcov) printf "%s" -Db_coverage=false ;;
--enable-lto) printf "%s" -Db_lto=true ;;
--disable-lto) printf "%s" -Db_lto=false ;;
--bindir=*) quote_sh "-Dbindir=$2" ;;
--enable-blkio) printf "%s" -Dblkio=enabled ;;
--disable-blkio) printf "%s" -Dblkio=disabled ;;
--block-drv-ro-whitelist=*) quote_sh "-Dblock_drv_ro_whitelist=$2" ;;
--block-drv-rw-whitelist=*) quote_sh "-Dblock_drv_rw_whitelist=$2" ;;
--enable-block-drv-whitelist-in-tools) printf "%s" -Dblock_drv_whitelist_in_tools=true ;;
--disable-block-drv-whitelist-in-tools) printf "%s" -Dblock_drv_whitelist_in_tools=false ;;
--enable-bochs) printf "%s" -Dbochs=enabled ;;
--disable-bochs) printf "%s" -Dbochs=disabled ;;
--enable-bpf) printf "%s" -Dbpf=enabled ;;
--disable-bpf) printf "%s" -Dbpf=disabled ;;
--enable-brlapi) printf "%s" -Dbrlapi=enabled ;;
--disable-brlapi) printf "%s" -Dbrlapi=disabled ;;
--enable-bzip2) printf "%s" -Dbzip2=enabled ;;
--disable-bzip2) printf "%s" -Dbzip2=disabled ;;
--enable-canokey) printf "%s" -Dcanokey=enabled ;;
--disable-canokey) printf "%s" -Dcanokey=disabled ;;
--enable-cap-ng) printf "%s" -Dcap_ng=enabled ;;
--disable-cap-ng) printf "%s" -Dcap_ng=disabled ;;
--enable-capstone) printf "%s" -Dcapstone=enabled ;;
--disable-capstone) printf "%s" -Dcapstone=disabled ;;
--enable-cfi) printf "%s" -Dcfi=true ;;
--disable-cfi) printf "%s" -Dcfi=false ;;
--enable-cfi-debug) printf "%s" -Dcfi_debug=true ;;
--disable-cfi-debug) printf "%s" -Dcfi_debug=false ;;
--enable-cloop) printf "%s" -Dcloop=enabled ;;
--disable-cloop) printf "%s" -Dcloop=disabled ;;
--enable-cocoa) printf "%s" -Dcocoa=enabled ;;
--disable-cocoa) printf "%s" -Dcocoa=disabled ;;
--enable-colo-proxy) printf "%s" -Dcolo_proxy=enabled ;;
--disable-colo-proxy) printf "%s" -Dcolo_proxy=disabled ;;
--enable-coreaudio) printf "%s" -Dcoreaudio=enabled ;;
--disable-coreaudio) printf "%s" -Dcoreaudio=disabled ;;
--with-coroutine=*) quote_sh "-Dcoroutine_backend=$2" ;;
--enable-coroutine-pool) printf "%s" -Dcoroutine_pool=true ;;
--disable-coroutine-pool) printf "%s" -Dcoroutine_pool=false ;;
--enable-crypto-afalg) printf "%s" -Dcrypto_afalg=enabled ;;
--disable-crypto-afalg) printf "%s" -Dcrypto_afalg=disabled ;;
--enable-curl) printf "%s" -Dcurl=enabled ;;
--disable-curl) printf "%s" -Dcurl=disabled ;;
--enable-curses) printf "%s" -Dcurses=enabled ;;
--disable-curses) printf "%s" -Dcurses=disabled ;;
--datadir=*) quote_sh "-Ddatadir=$2" ;;
--enable-dbus-display) printf "%s" -Ddbus_display=enabled ;;
--disable-dbus-display) printf "%s" -Ddbus_display=disabled ;;
--enable-debug-info) printf "%s" -Ddebug=true ;;
--disable-debug-info) printf "%s" -Ddebug=false ;;
--enable-debug-graph-lock) printf "%s" -Ddebug_graph_lock=true ;;
--disable-debug-graph-lock) printf "%s" -Ddebug_graph_lock=false ;;
--enable-debug-mutex) printf "%s" -Ddebug_mutex=true ;;
--disable-debug-mutex) printf "%s" -Ddebug_mutex=false ;;
--enable-debug-stack-usage) printf "%s" -Ddebug_stack_usage=true ;;
--disable-debug-stack-usage) printf "%s" -Ddebug_stack_usage=false ;;
--enable-debug-tcg) printf "%s" -Ddebug_tcg=true ;;
--disable-debug-tcg) printf "%s" -Ddebug_tcg=false ;;
--enable-dmg) printf "%s" -Ddmg=enabled ;;
--disable-dmg) printf "%s" -Ddmg=disabled ;;
--docdir=*) quote_sh "-Ddocdir=$2" ;;
--enable-docs) printf "%s" -Ddocs=enabled ;;
--disable-docs) printf "%s" -Ddocs=disabled ;;
--enable-dsound) printf "%s" -Ddsound=enabled ;;
--disable-dsound) printf "%s" -Ddsound=disabled ;;
--enable-fdt) printf "%s" -Dfdt=enabled ;;
--disable-fdt) printf "%s" -Dfdt=disabled ;;
--enable-fdt=*) quote_sh "-Dfdt=$2" ;;
--enable-fuse) printf "%s" -Dfuse=enabled ;;
--disable-fuse) printf "%s" -Dfuse=disabled ;;
--enable-fuse-lseek) printf "%s" -Dfuse_lseek=enabled ;;
--disable-fuse-lseek) printf "%s" -Dfuse_lseek=disabled ;;
--enable-fuzzing) printf "%s" -Dfuzzing=true ;;
--disable-fuzzing) printf "%s" -Dfuzzing=false ;;
--enable-gcrypt) printf "%s" -Dgcrypt=enabled ;;
--disable-gcrypt) printf "%s" -Dgcrypt=disabled ;;
--enable-gettext) printf "%s" -Dgettext=enabled ;;
--disable-gettext) printf "%s" -Dgettext=disabled ;;
--enable-gio) printf "%s" -Dgio=enabled ;;
--disable-gio) printf "%s" -Dgio=disabled ;;
--enable-glusterfs) printf "%s" -Dglusterfs=enabled ;;
--disable-glusterfs) printf "%s" -Dglusterfs=disabled ;;
--enable-gnutls) printf "%s" -Dgnutls=enabled ;;
--disable-gnutls) printf "%s" -Dgnutls=disabled ;;
--enable-gtk) printf "%s" -Dgtk=enabled ;;
--disable-gtk) printf "%s" -Dgtk=disabled ;;
--enable-gtk-clipboard) printf "%s" -Dgtk_clipboard=enabled ;;
--disable-gtk-clipboard) printf "%s" -Dgtk_clipboard=disabled ;;
--enable-guest-agent) printf "%s" -Dguest_agent=enabled ;;
--disable-guest-agent) printf "%s" -Dguest_agent=disabled ;;
--enable-guest-agent-msi) printf "%s" -Dguest_agent_msi=enabled ;;
--disable-guest-agent-msi) printf "%s" -Dguest_agent_msi=disabled ;;
--enable-hexagon-idef-parser) printf "%s" -Dhexagon_idef_parser=true ;;
--disable-hexagon-idef-parser) printf "%s" -Dhexagon_idef_parser=false ;;
--enable-hv-balloon) printf "%s" -Dhv_balloon=enabled ;;
--disable-hv-balloon) printf "%s" -Dhv_balloon=disabled ;;
--enable-hvf) printf "%s" -Dhvf=enabled ;;
--disable-hvf) printf "%s" -Dhvf=disabled ;;
--iasl=*) quote_sh "-Diasl=$2" ;;
--enable-iconv) printf "%s" -Diconv=enabled ;;
--disable-iconv) printf "%s" -Diconv=disabled ;;
--includedir=*) quote_sh "-Dincludedir=$2" ;;
--enable-install-blobs) printf "%s" -Dinstall_blobs=true ;;
--disable-install-blobs) printf "%s" -Dinstall_blobs=false ;;
--interp-prefix=*) quote_sh "-Dinterp_prefix=$2" ;;
--enable-jack) printf "%s" -Djack=enabled ;;
--disable-jack) printf "%s" -Djack=disabled ;;
--enable-keyring) printf "%s" -Dkeyring=enabled ;;
--disable-keyring) printf "%s" -Dkeyring=disabled ;;
--enable-kvm) printf "%s" -Dkvm=enabled ;;
--disable-kvm) printf "%s" -Dkvm=disabled ;;
--enable-l2tpv3) printf "%s" -Dl2tpv3=enabled ;;
--disable-l2tpv3) printf "%s" -Dl2tpv3=disabled ;;
--enable-libdaxctl) printf "%s" -Dlibdaxctl=enabled ;;
--disable-libdaxctl) printf "%s" -Dlibdaxctl=disabled ;;
--libdir=*) quote_sh "-Dlibdir=$2" ;;
--enable-libdw) printf "%s" -Dlibdw=enabled ;;
--disable-libdw) printf "%s" -Dlibdw=disabled ;;
--libexecdir=*) quote_sh "-Dlibexecdir=$2" ;;
--enable-libiscsi) printf "%s" -Dlibiscsi=enabled ;;
--disable-libiscsi) printf "%s" -Dlibiscsi=disabled ;;
--enable-libkeyutils) printf "%s" -Dlibkeyutils=enabled ;;
--disable-libkeyutils) printf "%s" -Dlibkeyutils=disabled ;;
--enable-libnfs) printf "%s" -Dlibnfs=enabled ;;
--disable-libnfs) printf "%s" -Dlibnfs=disabled ;;
--enable-libpmem) printf "%s" -Dlibpmem=enabled ;;
--disable-libpmem) printf "%s" -Dlibpmem=disabled ;;
--enable-libssh) printf "%s" -Dlibssh=enabled ;;
--disable-libssh) printf "%s" -Dlibssh=disabled ;;
--enable-libudev) printf "%s" -Dlibudev=enabled ;;
--disable-libudev) printf "%s" -Dlibudev=disabled ;;
--enable-libusb) printf "%s" -Dlibusb=enabled ;;
--disable-libusb) printf "%s" -Dlibusb=disabled ;;
--enable-libvduse) printf "%s" -Dlibvduse=enabled ;;
--disable-libvduse) printf "%s" -Dlibvduse=disabled ;;
--enable-linux-aio) printf "%s" -Dlinux_aio=enabled ;;
--disable-linux-aio) printf "%s" -Dlinux_aio=disabled ;;
--enable-linux-io-uring) printf "%s" -Dlinux_io_uring=enabled ;;
--disable-linux-io-uring) printf "%s" -Dlinux_io_uring=disabled ;;
--enable-live-block-migration) printf "%s" -Dlive_block_migration=enabled ;;
--disable-live-block-migration) printf "%s" -Dlive_block_migration=disabled ;;
--localedir=*) quote_sh "-Dlocaledir=$2" ;;
--localstatedir=*) quote_sh "-Dlocalstatedir=$2" ;;
--enable-lzfse) printf "%s" -Dlzfse=enabled ;;
--disable-lzfse) printf "%s" -Dlzfse=disabled ;;
--enable-lzo) printf "%s" -Dlzo=enabled ;;
--disable-lzo) printf "%s" -Dlzo=disabled ;;
--enable-malloc=*) quote_sh "-Dmalloc=$2" ;;
--enable-malloc-trim) printf "%s" -Dmalloc_trim=enabled ;;
--disable-malloc-trim) printf "%s" -Dmalloc_trim=disabled ;;
--mandir=*) quote_sh "-Dmandir=$2" ;;
--enable-membarrier) printf "%s" -Dmembarrier=enabled ;;
--disable-membarrier) printf "%s" -Dmembarrier=disabled ;;
--enable-module-upgrades) printf "%s" -Dmodule_upgrades=true ;;
--disable-module-upgrades) printf "%s" -Dmodule_upgrades=false ;;
--enable-modules) printf "%s" -Dmodules=enabled ;;
--disable-modules) printf "%s" -Dmodules=disabled ;;
--enable-mpath) printf "%s" -Dmpath=enabled ;;
--disable-mpath) printf "%s" -Dmpath=disabled ;;
--enable-multiprocess) printf "%s" -Dmultiprocess=enabled ;;
--disable-multiprocess) printf "%s" -Dmultiprocess=disabled ;;
--enable-netmap) printf "%s" -Dnetmap=enabled ;;
--disable-netmap) printf "%s" -Dnetmap=disabled ;;
--enable-nettle) printf "%s" -Dnettle=enabled ;;
--disable-nettle) printf "%s" -Dnettle=disabled ;;
--enable-numa) printf "%s" -Dnuma=enabled ;;
--disable-numa) printf "%s" -Dnuma=disabled ;;
--enable-nvmm) printf "%s" -Dnvmm=enabled ;;
--disable-nvmm) printf "%s" -Dnvmm=disabled ;;
--enable-opengl) printf "%s" -Dopengl=enabled ;;
--disable-opengl) printf "%s" -Dopengl=disabled ;;
--enable-oss) printf "%s" -Doss=enabled ;;
--disable-oss) printf "%s" -Doss=disabled ;;
--enable-pa) printf "%s" -Dpa=enabled ;;
--disable-pa) printf "%s" -Dpa=disabled ;;
--enable-parallels) printf "%s" -Dparallels=enabled ;;
--disable-parallels) printf "%s" -Dparallels=disabled ;;
--enable-pipewire) printf "%s" -Dpipewire=enabled ;;
--disable-pipewire) printf "%s" -Dpipewire=disabled ;;
--enable-pixman) printf "%s" -Dpixman=enabled ;;
--disable-pixman) printf "%s" -Dpixman=disabled ;;
--with-pkgversion=*) quote_sh "-Dpkgversion=$2" ;;
--enable-plugins) printf "%s" -Dplugins=true ;;
--disable-plugins) printf "%s" -Dplugins=false ;;
--enable-png) printf "%s" -Dpng=enabled ;;
--disable-png) printf "%s" -Dpng=disabled ;;
--prefix=*) quote_sh "-Dprefix=$2" ;;
--enable-pvrdma) printf "%s" -Dpvrdma=enabled ;;
--disable-pvrdma) printf "%s" -Dpvrdma=disabled ;;
--enable-qcow1) printf "%s" -Dqcow1=enabled ;;
--disable-qcow1) printf "%s" -Dqcow1=disabled ;;
--enable-qed) printf "%s" -Dqed=enabled ;;
--disable-qed) printf "%s" -Dqed=disabled ;;
--firmwarepath=*) quote_sh "-Dqemu_firmwarepath=$(meson_option_build_array $2)" ;;
--qemu-ga-distro=*) quote_sh "-Dqemu_ga_distro=$2" ;;
--qemu-ga-manufacturer=*) quote_sh "-Dqemu_ga_manufacturer=$2" ;;
--qemu-ga-version=*) quote_sh "-Dqemu_ga_version=$2" ;;
--with-suffix=*) quote_sh "-Dqemu_suffix=$2" ;;
--enable-qga-vss) printf "%s" -Dqga_vss=enabled ;;
--disable-qga-vss) printf "%s" -Dqga_vss=disabled ;;
--enable-qom-cast-debug) printf "%s" -Dqom_cast_debug=true ;;
--disable-qom-cast-debug) printf "%s" -Dqom_cast_debug=false ;;
--enable-rbd) printf "%s" -Drbd=enabled ;;
--disable-rbd) printf "%s" -Drbd=disabled ;;
--enable-rdma) printf "%s" -Drdma=enabled ;;
--disable-rdma) printf "%s" -Drdma=disabled ;;
--enable-relocatable) printf "%s" -Drelocatable=true ;;
--disable-relocatable) printf "%s" -Drelocatable=false ;;
--enable-replication) printf "%s" -Dreplication=enabled ;;
--disable-replication) printf "%s" -Dreplication=disabled ;;
--enable-rng-none) printf "%s" -Drng_none=true ;;
--disable-rng-none) printf "%s" -Drng_none=false ;;
--enable-rutabaga-gfx) printf "%s" -Drutabaga_gfx=enabled ;;
--disable-rutabaga-gfx) printf "%s" -Drutabaga_gfx=disabled ;;
--enable-safe-stack) printf "%s" -Dsafe_stack=true ;;
--disable-safe-stack) printf "%s" -Dsafe_stack=false ;;
--enable-sanitizers) printf "%s" -Dsanitizers=true ;;
--disable-sanitizers) printf "%s" -Dsanitizers=false ;;
--enable-sdl) printf "%s" -Dsdl=enabled ;;
--disable-sdl) printf "%s" -Dsdl=disabled ;;
--enable-sdl-image) printf "%s" -Dsdl_image=enabled ;;
--disable-sdl-image) printf "%s" -Dsdl_image=disabled ;;
--enable-seccomp) printf "%s" -Dseccomp=enabled ;;
--disable-seccomp) printf "%s" -Dseccomp=disabled ;;
--enable-selinux) printf "%s" -Dselinux=enabled ;;
--disable-selinux) printf "%s" -Dselinux=disabled ;;
--enable-slirp) printf "%s" -Dslirp=enabled ;;
--disable-slirp) printf "%s" -Dslirp=disabled ;;
--enable-slirp-smbd) printf "%s" -Dslirp_smbd=enabled ;;
--disable-slirp-smbd) printf "%s" -Dslirp_smbd=disabled ;;
--enable-smartcard) printf "%s" -Dsmartcard=enabled ;;
--disable-smartcard) printf "%s" -Dsmartcard=disabled ;;
--smbd=*) quote_sh "-Dsmbd=$2" ;;
--enable-snappy) printf "%s" -Dsnappy=enabled ;;
--disable-snappy) printf "%s" -Dsnappy=disabled ;;
--enable-sndio) printf "%s" -Dsndio=enabled ;;
--disable-sndio) printf "%s" -Dsndio=disabled ;;
--enable-sparse) printf "%s" -Dsparse=enabled ;;
--disable-sparse) printf "%s" -Dsparse=disabled ;;
--enable-spice) printf "%s" -Dspice=enabled ;;
--disable-spice) printf "%s" -Dspice=disabled ;;
--enable-spice-protocol) printf "%s" -Dspice_protocol=enabled ;;
--disable-spice-protocol) printf "%s" -Dspice_protocol=disabled ;;
--enable-stack-protector) printf "%s" -Dstack_protector=enabled ;;
--disable-stack-protector) printf "%s" -Dstack_protector=disabled ;;
--enable-strip) printf "%s" -Dstrip=true ;;
--disable-strip) printf "%s" -Dstrip=false ;;
--sysconfdir=*) quote_sh "-Dsysconfdir=$2" ;;
--enable-tcg) printf "%s" -Dtcg=enabled ;;
--disable-tcg) printf "%s" -Dtcg=disabled ;;
--enable-tcg-interpreter) printf "%s" -Dtcg_interpreter=true ;;
--disable-tcg-interpreter) printf "%s" -Dtcg_interpreter=false ;;
--tls-priority=*) quote_sh "-Dtls_priority=$2" ;;
--enable-tools) printf "%s" -Dtools=enabled ;;
--disable-tools) printf "%s" -Dtools=disabled ;;
--enable-tpm) printf "%s" -Dtpm=enabled ;;
--disable-tpm) printf "%s" -Dtpm=disabled ;;
--enable-trace-backends=*) quote_sh "-Dtrace_backends=$2" ;;
--with-trace-file=*) quote_sh "-Dtrace_file=$2" ;;
--enable-tsan) printf "%s" -Dtsan=true ;;
--disable-tsan) printf "%s" -Dtsan=false ;;
--enable-u2f) printf "%s" -Du2f=enabled ;;
--disable-u2f) printf "%s" -Du2f=disabled ;;
--enable-usb-redir) printf "%s" -Dusb_redir=enabled ;;
--disable-usb-redir) printf "%s" -Dusb_redir=disabled ;;
--enable-vde) printf "%s" -Dvde=enabled ;;
--disable-vde) printf "%s" -Dvde=disabled ;;
--enable-vdi) printf "%s" -Dvdi=enabled ;;
--disable-vdi) printf "%s" -Dvdi=disabled ;;
--enable-vduse-blk-export) printf "%s" -Dvduse_blk_export=enabled ;;
--disable-vduse-blk-export) printf "%s" -Dvduse_blk_export=disabled ;;
--enable-vfio-user-server) printf "%s" -Dvfio_user_server=enabled ;;
--disable-vfio-user-server) printf "%s" -Dvfio_user_server=disabled ;;
--enable-vhdx) printf "%s" -Dvhdx=enabled ;;
--disable-vhdx) printf "%s" -Dvhdx=disabled ;;
--enable-vhost-crypto) printf "%s" -Dvhost_crypto=enabled ;;
--disable-vhost-crypto) printf "%s" -Dvhost_crypto=disabled ;;
--enable-vhost-kernel) printf "%s" -Dvhost_kernel=enabled ;;
--disable-vhost-kernel) printf "%s" -Dvhost_kernel=disabled ;;
--enable-vhost-net) printf "%s" -Dvhost_net=enabled ;;
--disable-vhost-net) printf "%s" -Dvhost_net=disabled ;;
--enable-vhost-user) printf "%s" -Dvhost_user=enabled ;;
--disable-vhost-user) printf "%s" -Dvhost_user=disabled ;;
--enable-vhost-user-blk-server) printf "%s" -Dvhost_user_blk_server=enabled ;;
--disable-vhost-user-blk-server) printf "%s" -Dvhost_user_blk_server=disabled ;;
--enable-vhost-vdpa) printf "%s" -Dvhost_vdpa=enabled ;;
--disable-vhost-vdpa) printf "%s" -Dvhost_vdpa=disabled ;;
--enable-virglrenderer) printf "%s" -Dvirglrenderer=enabled ;;
--disable-virglrenderer) printf "%s" -Dvirglrenderer=disabled ;;
--enable-virtfs) printf "%s" -Dvirtfs=enabled ;;
--disable-virtfs) printf "%s" -Dvirtfs=disabled ;;
--enable-virtfs-proxy-helper) printf "%s" -Dvirtfs_proxy_helper=enabled ;;
--disable-virtfs-proxy-helper) printf "%s" -Dvirtfs_proxy_helper=disabled ;;
--enable-vmdk) printf "%s" -Dvmdk=enabled ;;
--disable-vmdk) printf "%s" -Dvmdk=disabled ;;
--enable-vmnet) printf "%s" -Dvmnet=enabled ;;
--disable-vmnet) printf "%s" -Dvmnet=disabled ;;
--enable-vnc) printf "%s" -Dvnc=enabled ;;
--disable-vnc) printf "%s" -Dvnc=disabled ;;
--enable-vnc-jpeg) printf "%s" -Dvnc_jpeg=enabled ;;
--disable-vnc-jpeg) printf "%s" -Dvnc_jpeg=disabled ;;
--enable-vnc-sasl) printf "%s" -Dvnc_sasl=enabled ;;
--disable-vnc-sasl) printf "%s" -Dvnc_sasl=disabled ;;
--enable-vpc) printf "%s" -Dvpc=enabled ;;
--disable-vpc) printf "%s" -Dvpc=disabled ;;
--enable-vte) printf "%s" -Dvte=enabled ;;
--disable-vte) printf "%s" -Dvte=disabled ;;
--enable-vvfat) printf "%s" -Dvvfat=enabled ;;
--disable-vvfat) printf "%s" -Dvvfat=disabled ;;
--enable-werror) printf "%s" -Dwerror=true ;;
--disable-werror) printf "%s" -Dwerror=false ;;
--enable-whpx) printf "%s" -Dwhpx=enabled ;;
--disable-whpx) printf "%s" -Dwhpx=disabled ;;
--enable-xen) printf "%s" -Dxen=enabled ;;
--disable-xen) printf "%s" -Dxen=disabled ;;
--enable-xen-pci-passthrough) printf "%s" -Dxen_pci_passthrough=enabled ;;
--disable-xen-pci-passthrough) printf "%s" -Dxen_pci_passthrough=disabled ;;
--enable-xkbcommon) printf "%s" -Dxkbcommon=enabled ;;
--disable-xkbcommon) printf "%s" -Dxkbcommon=disabled ;;
--enable-zstd) printf "%s" -Dzstd=enabled ;;
--disable-zstd) printf "%s" -Dzstd=disabled ;;
*) return 1 ;;
esac
}
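
# Illustrative usage only (a minimal sketch): in practice this file is sourced
# by configure, which supplies the real quote_sh and meson_option_build_array.
#
#   . scripts/meson-buildoptions.sh
#   quote_sh() { printf "'%s'" "$1"; }                # stand-in for testing
#   _meson_option_parse --enable-kvm ''               # prints -Dkvm=enabled
#   _meson_option_parse --prefix=/opt/qemu /opt/qemu  # prints '-Dprefix=/opt/qemu'
#   _meson_option_parse --no-such-flag '' || echo unknown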