Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* minor patches here and there
* MTTCG: lock-free TB lookup
* SCSI: bugfixes for MPTSAS, MegaSAS, LSI53c, vmw_pvscsi
* buffer_is_zero rewrite (except for one patch)
* chardev: qemu_chr_fe_write checks
* checkpatch improvement for markdown preformatted text
* default-configs cleanups
* atomics cleanups

# gpg: Signature made Tue 13 Sep 2016 18:14:30 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (58 commits)
  cutils: Add generic prefetch
  cutils: Add SSE4 version
  cutils: Add test for buffer_is_zero
  cutils: Remove ppc buffer zero checking
  cutils: Remove aarch64 buffer zero checking
  cutils: Rearrange buffer_is_zero acceleration
  cutils: Export only buffer_is_zero
  cutils: Remove SPLAT macro
  cutils: Move buffer_is_zero and subroutines to a new file
  ppc: do not redefine CPUPPCState
  x86/lapic: Load LAPIC state at post_load
  optionrom: do not rely on compiler's bswap optimization
  checkpatch: Fix whitespace checks for documentation code blocks
  atomics: Use __atomic_*_n() variant primitives
  atomics: Remove redundant barrier()'s
  kvm-all: drop kvm_setup_guest_memory
  i8257: Make device "i8257" unavailable with -device
  Revert "megasas: remove useless check for cmd->frame"
  char: convert qemu_chr_fe_write to qemu_chr_fe_write_all
  hw: replace most use of qemu_chr_fe_write with qemu_chr_fe_write_all
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

 Conflicts:
	cpus.c
	tests/Makefile.include
commit 8212ff86f4 (master)
Peter Maydell, 2016-09-15 10:24:22 +01:00
77 changed files with 885 additions and 643 deletions


@ -715,6 +715,10 @@ F: hw/misc/pc-testdev.c
F: hw/timer/hpet*
F: hw/timer/i8254*
F: hw/timer/mc146818rtc*
F: include/hw/i2c/pm_smbus.h
F: include/hw/timer/hpet.h
F: include/hw/timer/i8254*
F: include/hw/timer/mc146818rtc*
Machine core
M: Eduardo Habkost <ehabkost@redhat.com>
@ -831,7 +835,7 @@ T: git git://github.com/jasowang/qemu.git net
SCSI
M: Paolo Bonzini <pbonzini@redhat.com>
S: Supported
F: include/hw/scsi*
F: include/hw/scsi/*
F: hw/scsi/*
T: git git://github.com/bonzini/qemu.git scsi-next
@ -1257,6 +1261,11 @@ F: net/slirp.c
F: include/net/slirp.h
T: git git://git.kiszka.org/qemu.git queues/slirp
Stubs
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: stubs/
Tracing
M: Stefan Hajnoczi <stefanha@redhat.com>
S: Maintained


@ -41,7 +41,9 @@ static void rng_egd_request_entropy(RngBackend *b, RngRequest *req)
header[0] = 0x02;
header[1] = len;
qemu_chr_fe_write(s->chr, header, sizeof(header));
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, header, sizeof(header));
size -= len;
}

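The same qemu_chr_fe_write() to qemu_chr_fe_write_all() conversion repeats across most of the character-device hunks below, so the difference is worth spelling out once. Roughly (a sketch of the semantics, not the actual qemu-char.c implementation; the real write_all helper waits for the backend rather than spinning):

    /* qemu_chr_fe_write(): one non-blocking attempt, may accept fewer bytes
     * than asked or return a negative errno such as -EAGAIN.
     * qemu_chr_fe_write_all(): keeps going until the whole buffer has been
     * accepted, which can block the calling thread (hence the XXX comments
     * added next to each converted call site). */
    static int write_all_sketch(CharDriverState *s, const uint8_t *buf, int len)
    {
        int done = 0;
        while (done < len) {
            int ret = qemu_chr_fe_write(s, buf + done, len - done);
            if (ret < 0 && ret != -EAGAIN) {
                return ret;              /* hard error: give up */
            }
            if (ret > 0) {
                done += ret;             /* partial write: advance */
            }
            /* ret == 0 or -EAGAIN: backend not ready yet, retry */
        }
        return done;
    }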

@ -3907,7 +3907,7 @@ void qmp_x_blockdev_del(bool has_id, const char *id,
goto out;
}
if (!blk && !bs->monitor_list.tqe_prev) {
if (!blk && !QTAILQ_IN_USE(bs, monitor_list)) {
error_setg(errp, "Node %s is not owned by the monitor",
bs->node_name);
goto out;

configure

@ -1804,28 +1804,19 @@ fi
##########################################
# avx2 optimization requirement check
if test "$static" = "no" ; then
cat > $TMPC << EOF
cat > $TMPC << EOF
#pragma GCC push_options
#pragma GCC target("avx2")
#include <cpuid.h>
#include <immintrin.h>
static int bar(void *a) {
return _mm256_movemask_epi8(_mm256_cmpeq_epi8(*(__m256i *)a, (__m256i){0}));
__m256i x = *(__m256i *)a;
return _mm256_testz_si256(x, x);
}
static void *bar_ifunc(void) {return (void*) bar;}
int foo(void *a) __attribute__((ifunc("bar_ifunc")));
int main(int argc, char *argv[]) { return foo(argv[0]);}
int main(int argc, char *argv[]) { return bar(argv[0]); }
EOF
if compile_object "" ; then
if has readelf; then
if readelf --syms $TMPO 2>/dev/null |grep -q "IFUNC.*foo"; then
avx2_opt="yes"
fi
fi
fi
if compile_object "" ; then
avx2_opt="yes"
fi
#########################################

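The simplified probe only has to confirm that the compiler accepts the AVX2 pragma and intrinsics; the readelf/ifunc check is gone, since the rewritten buffer_is_zero (see the cutils commits in the shortlog) can pick its accelerator with an ordinary function pointer chosen at startup instead of an ifunc. Along the lines of the sketch below (illustrative names, not the actual util/bufferiszero.c symbols):

    /* Sketch of explicit runtime dispatch for buffer_is_zero().  Assumes a
     * compiler with __builtin_cpu_supports(); the real code may query cpuid
     * directly. */
    static bool (*buffer_accel)(const void *, size_t) = buffer_zero_base;

    static void init_buffer_accel(void)
    {
        if (__builtin_cpu_supports("avx2")) {
            buffer_accel = buffer_zero_avx2;
        } else if (__builtin_cpu_supports("sse4.1")) {
            buffer_accel = buffer_zero_sse4;
        }
    }

    bool buffer_is_zero(const void *buf, size_t len)
    {
        return len == 0 || buffer_accel(buf, len);
    }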

@ -241,7 +241,8 @@ static bool tb_cmp(const void *p, const void *d)
if (tb->pc == desc->pc &&
tb->page_addr[0] == desc->phys_page1 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags) {
tb->flags == desc->flags &&
!atomic_read(&tb->invalid)) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
return true;
@ -259,7 +260,7 @@ static bool tb_cmp(const void *p, const void *d)
return false;
}
static TranslationBlock *tb_find_physical(CPUState *cpu,
static TranslationBlock *tb_htable_lookup(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
uint32_t flags)
@ -278,72 +279,48 @@ static TranslationBlock *tb_find_physical(CPUState *cpu,
return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
static TranslationBlock *tb_find_slow(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
uint32_t flags)
{
TranslationBlock *tb;
tb = tb_find_physical(cpu, pc, cs_base, flags);
if (tb) {
goto found;
}
#ifdef CONFIG_USER_ONLY
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
* taken outside tb_lock. Since we're momentarily dropping
* tb_lock, there's a chance that our desired tb has been
* translated.
*/
tb_unlock();
mmap_lock();
tb_lock();
tb = tb_find_physical(cpu, pc, cs_base, flags);
if (tb) {
mmap_unlock();
goto found;
}
#endif
/* if no translated code available, then translate it now */
tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
#ifdef CONFIG_USER_ONLY
mmap_unlock();
#endif
found:
/* we add the TB in the virtual pc hash table */
cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
return tb;
}
static inline TranslationBlock *tb_find_fast(CPUState *cpu,
TranslationBlock **last_tb,
int tb_exit)
static inline TranslationBlock *tb_find(CPUState *cpu,
TranslationBlock *last_tb,
int tb_exit)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
bool have_tb_lock = false;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb_lock();
tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
tb = tb_find_slow(cpu, pc, cs_base, flags);
}
if (cpu->tb_flushed) {
/* Ensure that no TB jump will be modified as the
* translation buffer has been flushed.
*/
*last_tb = NULL;
cpu->tb_flushed = false;
tb = tb_htable_lookup(cpu, pc, cs_base, flags);
if (!tb) {
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
* taken outside tb_lock. As system emulation is currently
* single threaded the locks are NOPs.
*/
mmap_lock();
tb_lock();
have_tb_lock = true;
/* There's a chance that our desired tb has been translated while
* taking the locks so we check again inside the lock.
*/
tb = tb_htable_lookup(cpu, pc, cs_base, flags);
if (!tb) {
/* if no translated code available, then translate it now */
tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
}
mmap_unlock();
}
/* We add the TB in the virtual pc hash table for the fast lookup */
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
#ifndef CONFIG_USER_ONLY
/* We don't take care of direct jumps when address mapping changes in
@ -351,14 +328,25 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
* spanning two pages because the mapping for the second page can change.
*/
if (tb->page_addr[1] != -1) {
*last_tb = NULL;
last_tb = NULL;
}
#endif
/* See if we can patch the calling TB. */
if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
tb_add_jump(*last_tb, tb_exit, tb);
if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
if (!have_tb_lock) {
tb_lock();
have_tb_lock = true;
}
/* Check if translation buffer has been flushed */
if (cpu->tb_flushed) {
cpu->tb_flushed = false;
} else if (!tb->invalid) {
tb_add_jump(last_tb, tb_exit, tb);
}
}
if (have_tb_lock) {
tb_unlock();
}
tb_unlock();
return tb;
}
@ -437,8 +425,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
} else if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
TranslationBlock *last_tb = NULL; /* Avoid chaining TBs */
cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, &last_tb, 0), true);
cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
*ret = -1;
return true;
#endif
@ -618,10 +605,10 @@ int cpu_exec(CPUState *cpu)
break;
}
cpu->tb_flushed = false; /* reset before first TB lookup */
atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
for(;;) {
cpu_handle_interrupt(cpu, &last_tb);
tb = tb_find_fast(cpu, &last_tb, tb_exit);
tb = tb_find(cpu, last_tb, tb_exit);
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
/* Try to align the host and virtual clocks
if the guest is in advance */

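Condensing the rewritten lookup above: tb_find() now tries two lock-free steps before touching any lock, and only takes mmap_lock/tb_lock on the translate path, re-checking the hash table once the locks are held. Schematically (tb_matches() merely stands in for the inline pc/cs_base/flags comparison, it is not a real helper):

    /* Lock-free fast path, double-checked slow path (names from the hunk). */
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (!tb_matches(tb, pc, cs_base, flags)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);      /* lock-free QHT */
        if (!tb) {
            mmap_lock();
            tb_lock();
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);  /* re-check */
            if (!tb) {
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }
            mmap_unlock();
            /* tb_lock stays held for the later tb_add_jump() step */
        }
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }

The tb->invalid flag added to tb_cmp() and TranslationBlock is what prevents a concurrently invalidated block from being found or chained to.
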
cpus.c

@ -191,8 +191,12 @@ int64_t cpu_icount_to_ns(int64_t icount)
return icount << icount_time_shift;
}
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
/* return the time elapsed in VM between vm_start and vm_stop. Unless
* icount is active, cpu_get_ticks() uses units of the host CPU cycle
* counter.
*
* Caller must hold the BQL
*/
int64_t cpu_get_ticks(void)
{
int64_t ticks;
@ -219,17 +223,19 @@ int64_t cpu_get_ticks(void)
static int64_t cpu_get_clock_locked(void)
{
int64_t ticks;
int64_t time;
ticks = timers_state.cpu_clock_offset;
time = timers_state.cpu_clock_offset;
if (timers_state.cpu_ticks_enabled) {
ticks += get_clock();
time += get_clock();
}
return ticks;
return time;
}
/* return the host CPU monotonic time */
/* Return the monotonic time elapsed in VM, i.e.,
* the time between vm_start and vm_stop
*/
int64_t cpu_get_clock(void)
{
int64_t ti;

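The renamed local makes the intent of cpu_get_clock_locked() clearer: the stored offset only accumulates while the VM runs, which is why the result measures vm_start to vm_stop time. The enable/disable side is not in this hunk; as an assumption about the surrounding cpus.c code (the real functions are cpu_enable_ticks() and cpu_disable_ticks()), it looks roughly like:

    void clock_resume(void)                        /* vm_start path, illustrative name */
    {
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }

    void clock_freeze(void)                        /* vm_stop path, illustrative name */
    {
        timers_state.cpu_clock_offset = cpu_get_clock_locked();  /* frozen reading */
        timers_state.cpu_ticks_enabled = 0;
    }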

@ -3,7 +3,6 @@
include pci.mak
include usb.mak
CONFIG_VGA=y
CONFIG_ISA_MMIO=y
CONFIG_NAND=y
CONFIG_ECC=y
CONFIG_SERIAL=y


@ -30,14 +30,12 @@ CONFIG_I8257=y
CONFIG_IDE_ISA=y
CONFIG_IDE_PIIX=y
CONFIG_NE2000_ISA=y
CONFIG_PIIX_PCI=y
CONFIG_HPET=y
CONFIG_APPLESMC=y
CONFIG_I8259=y
CONFIG_PFLASH_CFI01=y
CONFIG_TPM_TIS=$(CONFIG_TPM)
CONFIG_MC146818RTC=y
CONFIG_PAM=y
CONFIG_PCI_PIIX=y
CONFIG_WDT_IB700=y
CONFIG_XEN_I386=$(CONFIG_XEN)


@ -3,7 +3,6 @@
include pci.mak
include sound.mak
include usb.mak
CONFIG_ISA_MMIO=y
CONFIG_ESCC=y
CONFIG_M48T59=y
CONFIG_SERIAL=y


@ -4,7 +4,6 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_VIRTIO_VGA=y
CONFIG_ISA_MMIO=y
CONFIG_ESCC=y
CONFIG_M48T59=y
CONFIG_SERIAL=y


@ -2,7 +2,6 @@
include pci.mak
include usb.mak
CONFIG_ISA_MMIO=y
CONFIG_M48T59=y
CONFIG_PTIMER=y
CONFIG_SERIAL=y


@ -30,14 +30,12 @@ CONFIG_I8257=y
CONFIG_IDE_ISA=y
CONFIG_IDE_PIIX=y
CONFIG_NE2000_ISA=y
CONFIG_PIIX_PCI=y
CONFIG_HPET=y
CONFIG_APPLESMC=y
CONFIG_I8259=y
CONFIG_PFLASH_CFI01=y
CONFIG_TPM_TIS=$(CONFIG_TPM)
CONFIG_MC146818RTC=y
CONFIG_PAM=y
CONFIG_PCI_PIIX=y
CONFIG_WDT_IB700=y
CONFIG_XEN_I386=$(CONFIG_XEN)


@ -37,7 +37,7 @@ do not matter; as soon as all previous critical sections have finished,
there cannot be any readers who hold references to the data structure,
and these can now be safely reclaimed (e.g., freed or unref'ed).
Here is a picutre:
Here is a picture:
thread 1 thread 2 thread 3
------------------- ------------------------ -------------------

exec.c

@ -617,7 +617,7 @@ void cpu_exec_exit(CPUState *cpu)
CPUClass *cc = CPU_GET_CLASS(cpu);
cpu_list_lock();
if (cpu->node.tqe_prev == NULL) {
if (!QTAILQ_IN_USE(cpu, node)) {
/* there is nothing to undo since cpu_exec_init() hasn't been called */
cpu_list_unlock();
return;
@ -626,7 +626,6 @@ void cpu_exec_exit(CPUState *cpu)
assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));
QTAILQ_REMOVE(&cpus, cpu, node);
cpu->node.tqe_prev = NULL;
cpu->cpu_index = UNASSIGNED_CPU_INDEX;
cpu_list_unlock();
@ -1622,10 +1621,8 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
/* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
if (kvm_enabled()) {
kvm_setup_guest_memory(new_block->host, new_block->max_length);
}
}
}


@ -402,7 +402,9 @@ static void put_buffer(GDBState *s, const uint8_t *buf, int len)
}
}
#else
qemu_chr_fe_write(s->chr, buf, len);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, buf, len);
#endif
}


@ -769,14 +769,16 @@ static void omap_sti_fifo_write(void *opaque, hwaddr addr,
if (ch == STI_TRACE_CONTROL_CHANNEL) {
/* Flush channel <i>value</i>. */
qemu_chr_fe_write(s->chr, (const uint8_t *) "\r", 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, (const uint8_t *) "\r", 1);
} else if (ch == STI_TRACE_CONSOLE_CHANNEL || 1) {
if (value == 0xc0 || value == 0xc3) {
/* Open channel <i>ch</i>. */
} else if (value == 0x00)
qemu_chr_fe_write(s->chr, (const uint8_t *) "\n", 1);
qemu_chr_fe_write_all(s->chr, (const uint8_t *) "\n", 1);
else
qemu_chr_fe_write(s->chr, &byte, 1);
qemu_chr_fe_write_all(s->chr, &byte, 1);
}
}


@ -1903,7 +1903,9 @@ static void pxa2xx_fir_write(void *opaque, hwaddr addr,
else
ch = ~value;
if (s->chr && s->enable && (s->control[0] & (1 << 3))) /* TXE */
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
break;
case ICSR0:
s->status[0] &= ~(value & 0x66);


@ -1108,7 +1108,9 @@ static void strongarm_uart_tx(void *opaque)
if (s->utcr3 & UTCR3_LBM) /* loopback */ {
strongarm_uart_receive(s, &s->tx_fifo[s->tx_start], 1);
} else if (s->chr) {
qemu_chr_fe_write(s->chr, &s->tx_fifo[s->tx_start], 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &s->tx_fifo[s->tx_start], 1);
}
s->tx_start = (s->tx_start + 1) % 8;


@ -169,7 +169,9 @@ static void bcm2835_aux_write(void *opaque, hwaddr offset, uint64_t value,
/* "DLAB bit set means access baudrate register" is NYI */
ch = value;
if (s->chr) {
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
}
break;


@ -60,7 +60,9 @@ static void debugcon_ioport_write(void *opaque, hwaddr addr, uint64_t val,
printf(" [debugcon: write addr=0x%04" HWADDR_PRIx " val=0x%02" PRIx64 "]\n", addr, val);
#endif
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
}


@ -77,6 +77,8 @@ static void digic_uart_write(void *opaque, hwaddr addr, uint64_t value,
switch (addr) {
case R_TX:
if (s->chr) {
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
}
break;


@ -557,7 +557,9 @@ static void escc_mem_write(void *opaque, hwaddr addr,
s->tx = val;
if (s->wregs[W_TXCTRL2] & TXCTRL2_TXEN) { // tx enabled
if (s->chr)
qemu_chr_fe_write(s->chr, &s->tx, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &s->tx, 1);
else if (s->type == kbd && !s->disabled) {
handle_kbd_command(s, val);
}


@ -126,7 +126,9 @@ ser_write(void *opaque, hwaddr addr,
switch (addr)
{
case RW_DOUT:
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
s->regs[R_INTR] |= 3;
s->pending_tx = 1;
s->regs[addr] = value;


@ -387,7 +387,9 @@ static void exynos4210_uart_write(void *opaque, hwaddr offset,
s->reg[I_(UTRSTAT)] &= ~(UTRSTAT_TRANSMITTER_EMPTY |
UTRSTAT_Tx_BUFFER_EMPTY);
ch = (uint8_t)val;
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
#if DEBUG_Tx_DATA
fprintf(stderr, "%c", ch);
#endif


@ -203,7 +203,9 @@ static void grlib_apbuart_write(void *opaque, hwaddr addr,
/* Transmit when character device available and transmitter enabled */
if ((uart->chr) && (uart->control & UART_TRANSMIT_ENABLE)) {
c = value & 0xFF;
qemu_chr_fe_write(uart->chr, &c, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(uart->chr, &c, 1);
/* Generate interrupt */
if (uart->control & UART_TRANSMIT_INTERRUPT) {
qemu_irq_pulse(uart->irq);


@ -182,7 +182,9 @@ static void imx_serial_write(void *opaque, hwaddr offset,
ch = value;
if (s->ucr2 & UCR2_TXEN) {
if (s->chr) {
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
}
s->usr1 &= ~USR1_TRDY;
imx_update(s);


@ -360,7 +360,9 @@ static void io_write(IPackDevice *ip, uint8_t addr, uint16_t val)
DPRINTF("Write THR%c (0x%x)\n", channel + 'a', reg);
if (ch->dev) {
uint8_t thr = reg;
qemu_chr_fe_write(ch->dev, &thr, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(ch->dev, &thr, 1);
}
} else {
DPRINTF("Write THR%c (0x%x), Tx disabled\n", channel + 'a', reg);


@ -76,6 +76,8 @@ void lm32_juart_set_jtx(DeviceState *d, uint32_t jtx)
s->jtx = jtx;
if (s->chr) {
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
}
}


@ -178,6 +178,8 @@ static void uart_write(void *opaque, hwaddr addr,
switch (addr) {
case R_RXTX:
if (s->chr) {
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
}
break;


@ -114,7 +114,9 @@ static void mcf_uart_do_tx(mcf_uart_state *s)
{
if (s->tx_enabled && (s->sr & MCF_UART_TxEMP) == 0) {
if (s->chr)
qemu_chr_fe_write(s->chr, (unsigned char *)&s->tb, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, (unsigned char *)&s->tb, 1);
s->sr |= MCF_UART_TxEMP;
}
if (s->tx_enabled) {


@ -129,7 +129,9 @@ parallel_ioport_write_sw(void *opaque, uint32_t addr, uint32_t val)
if (val & PARA_CTR_STROBE) {
s->status &= ~PARA_STS_BUSY;
if ((s->control & PARA_CTR_STROBE) == 0)
qemu_chr_fe_write(s->chr, &s->dataw, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &s->dataw, 1);
} else {
if (s->control & PARA_CTR_INTEN) {
s->irq_pending = 1;


@ -146,7 +146,9 @@ static void pl011_write(void *opaque, hwaddr offset,
/* ??? Check if transmitter is enabled. */
ch = value;
if (s->chr)
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
s->int_level |= PL011_INT_TX;
pl011_update(s);
break;


@ -89,7 +89,9 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
scon->buf[scon->length] = *buf;
scon->length += 1;
if (scon->echo) {
qemu_chr_fe_write(scon->chr, buf, size);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(scon->chr, buf, size);
}
}
@ -191,9 +193,6 @@ static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
*/
static int write_console_data(SCLPEvent *event, const uint8_t *buf, int len)
{
int ret = 0;
const uint8_t *buf_offset;
SCLPConsoleLM *scon = SCLPLM_CONSOLE(event);
if (!scon->chr) {
@ -201,21 +200,9 @@ static int write_console_data(SCLPEvent *event, const uint8_t *buf, int len)
return len;
}
buf_offset = buf;
while (len > 0) {
ret = qemu_chr_fe_write(scon->chr, buf, len);
if (ret == 0) {
/* a pty doesn't seem to be connected - no error */
len = 0;
} else if (ret == -EAGAIN || (ret > 0 && ret < len)) {
len -= ret;
buf_offset += ret;
} else {
len = 0;
}
}
return ret;
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
return qemu_chr_fe_write_all(scon->chr, buf, len);
}
static int process_mdb(SCLPEvent *event, MDBO *mdbo)


@ -168,6 +168,8 @@ static ssize_t write_console_data(SCLPEvent *event, const uint8_t *buf,
return len;
}
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
return qemu_chr_fe_write_all(scon->chr, buf, len);
}


@ -111,7 +111,9 @@ static void sh_serial_write(void *opaque, hwaddr offs,
case 0x0c: /* FTDR / TDR */
if (s->chr) {
ch = val;
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
}
s->dr = val;
s->flags &= ~SH_SERIAL_FLAG_TDE;


@ -60,8 +60,9 @@ void vty_putchars(VIOsPAPRDevice *sdev, uint8_t *buf, int len)
{
VIOsPAPRVTYDevice *dev = VIO_SPAPR_VTY_DEVICE(sdev);
/* FIXME: should check the qemu_chr_fe_write() return value */
qemu_chr_fe_write(dev->chardev, buf, len);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(dev->chardev, buf, len);
}
static void spapr_vty_realize(VIOsPAPRDevice *sdev, Error **errp)


@ -153,6 +153,8 @@ static void stm32f2xx_usart_write(void *opaque, hwaddr addr,
if (value < 0xF000) {
ch = value;
if (s->chr) {
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
}
s->usart_sr |= USART_SR_TC;


@ -68,6 +68,27 @@ static ssize_t flush_buf(VirtIOSerialPort *port,
*/
if (ret < 0)
ret = 0;
/* XXX we should be queuing data to send later for the
* console devices too rather than silently dropping
* console data on EAGAIN. The Linux virtio-console
* hvc driver though does sends with spinlocks held,
* so if we enable throttling that'll stall the entire
* guest kernel, not merely the process writing to the
* console.
*
* While we could queue data for later write without
* enabling throttling, this would result in the guest
* being able to trigger arbitrary memory usage in QEMU
* buffering data for later writes.
*
* So fixing this problem likely requires fixing the
* Linux virtio-console hvc driver to not hold spinlocks
* while writing, and instead merely block the process
* that's writing. QEMU would then need some way to detect
* if the guest had the fixed driver too, before we can
* use throttling on host side.
*/
if (!k->is_console) {
virtio_serial_throttle_port(port, true);
if (!vcon->watch) {


@ -144,7 +144,9 @@ uart_write(void *opaque, hwaddr addr,
case R_TX:
if (s->chr)
qemu_chr_fe_write(s->chr, &ch, 1);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->chr, &ch, 1);
s->regs[addr] = value;


@ -600,6 +600,8 @@ static void i8257_class_init(ObjectClass *klass, void *data)
idc->release_DREQ = i8257_dma_release_DREQ;
idc->schedule = i8257_dma_schedule;
idc->register_channel = i8257_dma_register_channel;
/* Reason: needs to be wired up by isa_bus_dma() to work */
dc->cannot_instantiate_with_device_add_yet = true;
}
static const TypeInfo i8257_info = {


@ -28,9 +28,8 @@ static inline uint32_t kvm_apic_get_reg(struct kvm_lapic_state *kapic,
return *((uint32_t *)(kapic->regs + (reg_id << 4)));
}
void kvm_put_apic_state(DeviceState *dev, struct kvm_lapic_state *kapic)
static void kvm_put_apic_state(APICCommonState *s, struct kvm_lapic_state *kapic)
{
APICCommonState *s = APIC_COMMON(dev);
int i;
memset(kapic, 0, sizeof(*kapic));
@ -125,6 +124,27 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
}
}
static void kvm_apic_put(void *data)
{
APICCommonState *s = data;
struct kvm_lapic_state kapic;
int ret;
kvm_put_apic_state(s, &kapic);
ret = kvm_vcpu_ioctl(CPU(s->cpu), KVM_SET_LAPIC, &kapic);
if (ret < 0) {
fprintf(stderr, "KVM_SET_LAPIC failed: %s\n", strerror(ret));
abort();
}
}
static void kvm_apic_post_load(APICCommonState *s)
{
fprintf(stderr, "%s: Yeh\n", __func__);
run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
}
static void do_inject_external_nmi(void *data)
{
APICCommonState *s = data;
@ -178,6 +198,8 @@ static void kvm_apic_reset(APICCommonState *s)
{
/* Not used by KVM, which uses the CPU mp_state instead. */
s->wait_for_sipi = 0;
run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
}
static void kvm_apic_realize(DeviceState *dev, Error **errp)
@ -206,6 +228,7 @@ static void kvm_apic_class_init(ObjectClass *klass, void *data)
k->set_base = kvm_apic_set_base;
k->set_tpr = kvm_apic_set_tpr;
k->get_tpr = kvm_apic_get_tpr;
k->post_load = kvm_apic_post_load;
k->enable_tpr_reporting = kvm_apic_enable_tpr_reporting;
k->vapic_base_update = kvm_apic_vapic_base_update;
k->external_nmi = kvm_apic_external_nmi;


@ -100,12 +100,16 @@ ipmb_checksum(const unsigned char *data, int size, unsigned char start)
static void continue_send(IPMIBmcExtern *ibe)
{
int ret;
if (ibe->outlen == 0) {
goto check_reset;
}
send:
ibe->outpos += qemu_chr_fe_write(ibe->chr, ibe->outbuf + ibe->outpos,
ibe->outlen - ibe->outpos);
ret = qemu_chr_fe_write(ibe->chr, ibe->outbuf + ibe->outpos,
ibe->outlen - ibe->outpos);
if (ret > 0) {
ibe->outpos += ret;
}
if (ibe->outpos < ibe->outlen) {
/* Not fully transmitted, try again in a 10ms */
timer_mod_ns(ibe->extern_timer,


@ -19,6 +19,7 @@
#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
//#define DEBUG_LSI
//#define DEBUG_LSI_REG
@ -34,6 +35,21 @@ do { fprintf(stderr, "lsi_scsi: error: " fmt , ## __VA_ARGS__); exit(1);} while
do { fprintf(stderr, "lsi_scsi: error: " fmt , ## __VA_ARGS__);} while (0)
#endif
static const char *names[] = {
"SCNTL0", "SCNTL1", "SCNTL2", "SCNTL3", "SCID", "SXFER", "SDID", "GPREG",
"SFBR", "SOCL", "SSID", "SBCL", "DSTAT", "SSTAT0", "SSTAT1", "SSTAT2",
"DSA0", "DSA1", "DSA2", "DSA3", "ISTAT", "0x15", "0x16", "0x17",
"CTEST0", "CTEST1", "CTEST2", "CTEST3", "TEMP0", "TEMP1", "TEMP2", "TEMP3",
"DFIFO", "CTEST4", "CTEST5", "CTEST6", "DBC0", "DBC1", "DBC2", "DCMD",
"DNAD0", "DNAD1", "DNAD2", "DNAD3", "DSP0", "DSP1", "DSP2", "DSP3",
"DSPS0", "DSPS1", "DSPS2", "DSPS3", "SCRATCHA0", "SCRATCHA1", "SCRATCHA2", "SCRATCHA3",
"DMODE", "DIEN", "SBR", "DCNTL", "ADDER0", "ADDER1", "ADDER2", "ADDER3",
"SIEN0", "SIEN1", "SIST0", "SIST1", "SLPAR", "0x45", "MACNTL", "GPCNTL",
"STIME0", "STIME1", "RESPID", "0x4b", "STEST0", "STEST1", "STEST2", "STEST3",
"SIDL", "0x51", "0x52", "0x53", "SODL", "0x55", "0x56", "0x57",
"SBDL", "0x59", "0x5a", "0x5b", "SCRATCHB0", "SCRATCHB1", "SCRATCHB2", "SCRATCHB3",
};
#define LSI_MAX_DEVS 7
#define LSI_SCNTL0_TRG 0x01
@ -194,6 +210,7 @@ typedef struct {
MemoryRegion mmio_io;
MemoryRegion ram_io;
MemoryRegion io_io;
AddressSpace pci_io_as;
int carry; /* ??? Should this be an a visible register somewhere? */
int status;
@ -309,7 +326,7 @@ static void lsi_soft_reset(LSIState *s)
s->istat0 = 0;
s->istat1 = 0;
s->dcmd = 0x40;
s->dstat = LSI_DSTAT_DFE;
s->dstat = 0;
s->dien = 0;
s->sist0 = 0;
s->sist1 = 0;
@ -391,6 +408,30 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val);
static void lsi_execute_script(LSIState *s);
static void lsi_reselect(LSIState *s, lsi_request *p);
static inline int lsi_mem_read(LSIState *s, dma_addr_t addr,
void *buf, dma_addr_t len)
{
if (s->dmode & LSI_DMODE_SIOM) {
address_space_read(&s->pci_io_as, addr, MEMTXATTRS_UNSPECIFIED,
buf, len);
return 0;
} else {
return pci_dma_read(PCI_DEVICE(s), addr, buf, len);
}
}
static inline int lsi_mem_write(LSIState *s, dma_addr_t addr,
const void *buf, dma_addr_t len)
{
if (s->dmode & LSI_DMODE_DIOM) {
address_space_write(&s->pci_io_as, addr, MEMTXATTRS_UNSPECIFIED,
buf, len);
return 0;
} else {
return pci_dma_write(PCI_DEVICE(s), addr, buf, len);
}
}
static inline uint32_t read_dword(LSIState *s, uint32_t addr)
{
uint32_t buf;
@ -534,7 +575,6 @@ static void lsi_bad_selection(LSIState *s, uint32_t id)
/* Initiate a SCSI layer data transfer. */
static void lsi_do_dma(LSIState *s, int out)
{
PCIDevice *pci_dev;
uint32_t count;
dma_addr_t addr;
SCSIDevice *dev;
@ -546,7 +586,6 @@ static void lsi_do_dma(LSIState *s, int out)
return;
}
pci_dev = PCI_DEVICE(s);
dev = s->current->req->dev;
assert(dev);
@ -572,9 +611,9 @@ static void lsi_do_dma(LSIState *s, int out)
}
/* ??? Set SFBR to first data byte. */
if (out) {
pci_dma_read(pci_dev, addr, s->current->dma_buf, count);
lsi_mem_read(s, addr, s->current->dma_buf, count);
} else {
pci_dma_write(pci_dev, addr, s->current->dma_buf, count);
lsi_mem_write(s, addr, s->current->dma_buf, count);
}
s->current->dma_len -= count;
if (s->current->dma_len == 0) {
@ -1006,15 +1045,14 @@ bad:
#define LSI_BUF_SIZE 4096
static void lsi_memcpy(LSIState *s, uint32_t dest, uint32_t src, int count)
{
PCIDevice *d = PCI_DEVICE(s);
int n;
uint8_t buf[LSI_BUF_SIZE];
DPRINTF("memcpy dest 0x%08x src 0x%08x count %d\n", dest, src, count);
while (count) {
n = (count > LSI_BUF_SIZE) ? LSI_BUF_SIZE : count;
pci_dma_read(d, src, buf, n);
pci_dma_write(d, dest, buf, n);
lsi_mem_read(s, src, buf, n);
lsi_mem_write(s, dest, buf, n);
src += n;
dest += n;
count -= n;
@ -1480,155 +1518,200 @@ again:
static uint8_t lsi_reg_readb(LSIState *s, int offset)
{
uint8_t tmp;
uint8_t ret;
#define CASE_GET_REG24(name, addr) \
case addr: return s->name & 0xff; \
case addr + 1: return (s->name >> 8) & 0xff; \
case addr + 2: return (s->name >> 16) & 0xff;
case addr: ret = s->name & 0xff; break; \
case addr + 1: ret = (s->name >> 8) & 0xff; break; \
case addr + 2: ret = (s->name >> 16) & 0xff; break;
#define CASE_GET_REG32(name, addr) \
case addr: return s->name & 0xff; \
case addr + 1: return (s->name >> 8) & 0xff; \
case addr + 2: return (s->name >> 16) & 0xff; \
case addr + 3: return (s->name >> 24) & 0xff;
case addr: ret = s->name & 0xff; break; \
case addr + 1: ret = (s->name >> 8) & 0xff; break; \
case addr + 2: ret = (s->name >> 16) & 0xff; break; \
case addr + 3: ret = (s->name >> 24) & 0xff; break;
#ifdef DEBUG_LSI_REG
DPRINTF("Read reg %x\n", offset);
#endif
switch (offset) {
case 0x00: /* SCNTL0 */
return s->scntl0;
ret = s->scntl0;
break;
case 0x01: /* SCNTL1 */
return s->scntl1;
ret = s->scntl1;
break;
case 0x02: /* SCNTL2 */
return s->scntl2;
ret = s->scntl2;
break;
case 0x03: /* SCNTL3 */
return s->scntl3;
ret = s->scntl3;
break;
case 0x04: /* SCID */
return s->scid;
ret = s->scid;
break;
case 0x05: /* SXFER */
return s->sxfer;
ret = s->sxfer;
break;
case 0x06: /* SDID */
return s->sdid;
ret = s->sdid;
break;
case 0x07: /* GPREG0 */
return 0x7f;
ret = 0x7f;
break;
case 0x08: /* Revision ID */
return 0x00;
ret = 0x00;
break;
case 0x09: /* SOCL */
return s->socl;
ret = s->socl;
break;
case 0xa: /* SSID */
return s->ssid;
ret = s->ssid;
break;
case 0xb: /* SBCL */
/* ??? This is not correct. However it's (hopefully) only
used for diagnostics, so should be ok. */
return 0;
ret = 0;
break;
case 0xc: /* DSTAT */
tmp = s->dstat | LSI_DSTAT_DFE;
ret = s->dstat | LSI_DSTAT_DFE;
if ((s->istat0 & LSI_ISTAT0_INTF) == 0)
s->dstat = 0;
lsi_update_irq(s);
return tmp;
break;
case 0x0d: /* SSTAT0 */
return s->sstat0;
ret = s->sstat0;
break;
case 0x0e: /* SSTAT1 */
return s->sstat1;
ret = s->sstat1;
break;
case 0x0f: /* SSTAT2 */
return s->scntl1 & LSI_SCNTL1_CON ? 0 : 2;
ret = s->scntl1 & LSI_SCNTL1_CON ? 0 : 2;
break;
CASE_GET_REG32(dsa, 0x10)
case 0x14: /* ISTAT0 */
return s->istat0;
ret = s->istat0;
break;
case 0x15: /* ISTAT1 */
return s->istat1;
ret = s->istat1;
break;
case 0x16: /* MBOX0 */
return s->mbox0;
ret = s->mbox0;
break;
case 0x17: /* MBOX1 */
return s->mbox1;
ret = s->mbox1;
break;
case 0x18: /* CTEST0 */
return 0xff;
ret = 0xff;
break;
case 0x19: /* CTEST1 */
return 0;
ret = 0;
break;
case 0x1a: /* CTEST2 */
tmp = s->ctest2 | LSI_CTEST2_DACK | LSI_CTEST2_CM;
ret = s->ctest2 | LSI_CTEST2_DACK | LSI_CTEST2_CM;
if (s->istat0 & LSI_ISTAT0_SIGP) {
s->istat0 &= ~LSI_ISTAT0_SIGP;
tmp |= LSI_CTEST2_SIGP;
ret |= LSI_CTEST2_SIGP;
}
return tmp;
break;
case 0x1b: /* CTEST3 */
return s->ctest3;
ret = s->ctest3;
break;
CASE_GET_REG32(temp, 0x1c)
case 0x20: /* DFIFO */
return 0;
ret = 0;
break;
case 0x21: /* CTEST4 */
return s->ctest4;
ret = s->ctest4;
break;
case 0x22: /* CTEST5 */
return s->ctest5;
ret = s->ctest5;
break;
case 0x23: /* CTEST6 */
return 0;
ret = 0;
break;
CASE_GET_REG24(dbc, 0x24)
case 0x27: /* DCMD */
return s->dcmd;
ret = s->dcmd;
break;
CASE_GET_REG32(dnad, 0x28)
CASE_GET_REG32(dsp, 0x2c)
CASE_GET_REG32(dsps, 0x30)
CASE_GET_REG32(scratch[0], 0x34)
case 0x38: /* DMODE */
return s->dmode;
ret = s->dmode;
break;
case 0x39: /* DIEN */
return s->dien;
ret = s->dien;
break;
case 0x3a: /* SBR */
return s->sbr;
ret = s->sbr;
break;
case 0x3b: /* DCNTL */
return s->dcntl;
ret = s->dcntl;
break;
/* ADDER Output (Debug of relative jump address) */
CASE_GET_REG32(adder, 0x3c)
case 0x40: /* SIEN0 */
return s->sien0;
ret = s->sien0;
break;
case 0x41: /* SIEN1 */
return s->sien1;
ret = s->sien1;
break;
case 0x42: /* SIST0 */
tmp = s->sist0;
ret = s->sist0;
s->sist0 = 0;
lsi_update_irq(s);
return tmp;
break;
case 0x43: /* SIST1 */
tmp = s->sist1;
ret = s->sist1;
s->sist1 = 0;
lsi_update_irq(s);
return tmp;
break;
case 0x46: /* MACNTL */
return 0x0f;
ret = 0x0f;
break;
case 0x47: /* GPCNTL0 */
return 0x0f;
ret = 0x0f;
break;
case 0x48: /* STIME0 */
return s->stime0;
ret = s->stime0;
break;
case 0x4a: /* RESPID0 */
return s->respid0;
ret = s->respid0;
break;
case 0x4b: /* RESPID1 */
return s->respid1;
ret = s->respid1;
break;
case 0x4d: /* STEST1 */
return s->stest1;
ret = s->stest1;
break;
case 0x4e: /* STEST2 */
return s->stest2;
ret = s->stest2;
break;
case 0x4f: /* STEST3 */
return s->stest3;
ret = s->stest3;
break;
case 0x50: /* SIDL */
/* This is needed by the linux drivers. We currently only update it
during the MSG IN phase. */
return s->sidl;
ret = s->sidl;
break;
case 0x52: /* STEST4 */
return 0xe0;
ret = 0xe0;
break;
case 0x56: /* CCNTL0 */
return s->ccntl0;
ret = s->ccntl0;
break;
case 0x57: /* CCNTL1 */
return s->ccntl1;
ret = s->ccntl1;
break;
case 0x58: /* SBDL */
/* Some drivers peek at the data bus during the MSG IN phase. */
if ((s->sstat1 & PHASE_MASK) == PHASE_MI)
return s->msg[0];
return 0;
ret = 0;
break;
case 0x59: /* SBDL high */
return 0;
ret = 0;
break;
CASE_GET_REG32(mmrs, 0xa0)
CASE_GET_REG32(mmws, 0xa4)
CASE_GET_REG32(sfs, 0xa8)
@ -1643,18 +1726,34 @@ static uint8_t lsi_reg_readb(LSIState *s, int offset)
CASE_GET_REG32(ia, 0xd4)
CASE_GET_REG32(sbc, 0xd8)
CASE_GET_REG32(csbc, 0xdc)
}
if (offset >= 0x5c && offset < 0xa0) {
case 0x5c ... 0x9f:
{
int n;
int shift;
n = (offset - 0x58) >> 2;
shift = (offset & 3) * 8;
return (s->scratch[n] >> shift) & 0xff;
ret = (s->scratch[n] >> shift) & 0xff;
break;
}
default:
{
qemu_log_mask(LOG_GUEST_ERROR,
"lsi_scsi: invalid read from reg %s %x\n",
offset < ARRAY_SIZE(names) ? names[offset] : "???",
offset);
ret = 0xff;
break;
}
}
BADF("readb 0x%x\n", offset);
exit(1);
#undef CASE_GET_REG24
#undef CASE_GET_REG32
#ifdef DEBUG_LSI_REG
DPRINTF("Read reg %s %x = %02x\n",
offset < ARRAY_SIZE(names) ? names[offset] : "???", offset, ret);
#endif
return ret;
}
static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
@ -1671,7 +1770,8 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
case addr + 3: s->name &= 0x00ffffff; s->name |= val << 24; break;
#ifdef DEBUG_LSI_REG
DPRINTF("Write reg %x = %02x\n", offset, val);
DPRINTF("Write reg %s %x = %02x\n",
offset < ARRAY_SIZE(names) ? names[offset] : "???", offset, val);
#endif
switch (offset) {
case 0x00: /* SCNTL0 */
@ -1799,9 +1899,6 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
CASE_SET_REG32(dsps, 0x30)
CASE_SET_REG32(scratch[0], 0x34)
case 0x38: /* DMODE */
if (val & (LSI_DMODE_SIOM | LSI_DMODE_DIOM)) {
BADF("IO mappings not implemented\n");
}
s->dmode = val;
break;
case 0x39: /* DIEN */
@ -1886,7 +1983,10 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
shift = (offset & 3) * 8;
s->scratch[n] = deposit32(s->scratch[n], shift, 8, val);
} else {
BADF("Unhandled writeb 0x%x = 0x%x\n", offset, val);
qemu_log_mask(LOG_GUEST_ERROR,
"lsi_scsi: invalid write to reg %s %x (0x%02x)\n",
offset < ARRAY_SIZE(names) ? names[offset] : "???",
offset, val);
}
}
#undef CASE_SET_REG24
@ -2108,6 +2208,8 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s,
"lsi-io", 256);
address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io");
pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_io);
pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio_io);
pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_io);
@ -2119,6 +2221,13 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
}
}
static void lsi_scsi_unrealize(DeviceState *dev, Error **errp)
{
LSIState *s = LSI53C895A(dev);
address_space_destroy(&s->pci_io_as);
}
static void lsi_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@ -2129,6 +2238,7 @@ static void lsi_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_LSI_53C895A;
k->class_id = PCI_CLASS_STORAGE_SCSI;
k->subsystem_id = 0x1000;
dc->unrealize = lsi_scsi_unrealize;
dc->reset = lsi_scsi_reset;
dc->vmsd = &vmstate_lsi_scsi;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);

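Two things worth noting about this conversion: stray register accesses no longer BADF()+exit(1) but go through qemu_log_mask(LOG_GUEST_ERROR, ...), so they are only reported when guest-error logging is enabled (e.g. -d guest_errors) and the guest keeps running with a harmless value; and DMA is steered through the new pci_io_as AddressSpace whenever the DMODE SIOM/DIOM bits select I/O space. The error path, in isolation:

    default:             /* unknown register: report it, don't kill the VM */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "lsi_scsi: invalid read from reg %s %x\n",
                      offset < ARRAY_SIZE(names) ? names[offset] : "???",
                      offset);
        ret = 0xff;      /* open-bus style default value */
        break;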

@ -1981,7 +1981,11 @@ static void megasas_handle_frame(MegasasState *s, uint64_t frame_addr,
break;
}
if (frame_status != MFI_STAT_INVALID_STATUS) {
cmd->frame->header.cmd_status = frame_status;
if (cmd->frame) {
cmd->frame->header.cmd_status = frame_status;
} else {
megasas_frame_set_cmd_status(s, frame_addr, frame_status);
}
megasas_unmap_frame(s, cmd);
megasas_complete_frame(s, cmd->context);
}


@ -158,7 +158,7 @@ static size_t mptsas_config_pack(uint8_t **data, const char *fmt, ...)
va_end(ap);
if (data) {
assert(ret < 256 && (ret % 4) == 0);
assert(ret / 4 < 256 && (ret % 4) == 0);
stb_p(*data + 1, ret / 4);
}
return ret;
@ -203,7 +203,7 @@ size_t mptsas_config_manufacturing_1(MPTSASState *s, uint8_t **data, int address
{
/* VPD - all zeros */
return MPTSAS_CONFIG_PACK(1, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
"s256");
"*s256");
}
static
@ -328,7 +328,7 @@ size_t mptsas_config_ioc_0(MPTSASState *s, uint8_t **data, int address)
return MPTSAS_CONFIG_PACK(0, MPI_CONFIG_PAGETYPE_IOC, 0x01,
"*l*lwwb*b*b*blww",
pcic->vendor_id, pcic->device_id, pcic->revision,
pcic->subsystem_vendor_id,
pcic->class_id, pcic->subsystem_vendor_id,
pcic->subsystem_id);
}


@ -599,8 +599,8 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
}
l = strlen(s->serial);
if (l > 20) {
l = 20;
if (l > 36) {
l = 36;
}
DPRINTF("Inquiry EVPD[Serial number] "


@ -40,6 +40,8 @@
#define PVSCSI_MAX_DEVS (64)
#define PVSCSI_MSIX_NUM_VECTORS (1)
#define PVSCSI_MAX_SG_ELEM 2048
#define PVSCSI_MAX_CMD_DATA_WORDS \
(sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
@ -152,7 +154,7 @@ pvscsi_log2(uint32_t input)
return log;
}
static int
static void
pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
int i;
@ -160,10 +162,6 @@ pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
uint32_t req_ring_size, cmp_ring_size;
m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;
if ((ri->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
|| (ri->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)) {
return -1;
}
req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
txr_len_log2 = pvscsi_log2(req_ring_size - 1);
@ -195,8 +193,6 @@ pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
/* Flush ring state page changes */
smp_wmb();
return 0;
}
static int
@ -634,17 +630,16 @@ pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d,
static void
pvscsi_convert_sglist(PVSCSIRequest *r)
{
int chunk_size;
uint32_t chunk_size, elmcnt = 0;
uint64_t data_length = r->req.dataLen;
PVSCSISGState sg = r->sg;
while (data_length) {
while (!sg.resid) {
while (data_length && elmcnt < PVSCSI_MAX_SG_ELEM) {
while (!sg.resid && elmcnt++ < PVSCSI_MAX_SG_ELEM) {
pvscsi_get_next_sg_elem(&sg);
trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr,
r->sg.resid);
}
assert(data_length > 0);
chunk_size = MIN((unsigned) data_length, sg.resid);
chunk_size = MIN(data_length, sg.resid);
if (chunk_size) {
qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size);
}
@ -746,7 +741,7 @@ pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc)
trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages);
for (i = 0; i < rc->cmpRingNumPages; i++) {
trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->reqRingPPNs[i]);
trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->cmpRingPPNs[i]);
}
}
@ -779,11 +774,16 @@ pvscsi_on_cmd_setup_rings(PVSCSIState *s)
trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS");
pvscsi_dbg_dump_tx_rings_config(rc);
if (pvscsi_ring_init_data(&s->rings, rc) < 0) {
if (!rc->reqRingNumPages
|| rc->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
|| !rc->cmpRingNumPages
|| rc->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) {
return PVSCSI_COMMAND_PROCESSING_FAILED;
}
pvscsi_dbg_dump_tx_rings_config(rc);
pvscsi_ring_init_data(&s->rings, rc);
s->rings_info_valid = TRUE;
return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}


@ -75,8 +75,11 @@ static void ccid_card_vscard_send_msg(PassthruState *s,
scr_msg_header.type = htonl(type);
scr_msg_header.reader_id = htonl(reader_id);
scr_msg_header.length = htonl(length);
qemu_chr_fe_write(s->cs, (uint8_t *)&scr_msg_header, sizeof(VSCMsgHeader));
qemu_chr_fe_write(s->cs, payload, length);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->cs, (uint8_t *)&scr_msg_header,
sizeof(VSCMsgHeader));
qemu_chr_fe_write_all(s->cs, payload, length);
}
static void ccid_card_vscard_send_apdu(PassthruState *s,


@ -366,7 +366,9 @@ static void usb_serial_handle_data(USBDevice *dev, USBPacket *p)
goto fail;
for (i = 0; i < p->iov.niov; i++) {
iov = p->iov.iov + i;
qemu_chr_fe_write(s->cs, iov->iov_base, iov->iov_len);
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s->cs, iov->iov_base, iov->iov_len);
}
p->actual_length = p->iov.size;
break;


@ -225,6 +225,8 @@ struct TranslationBlock {
#define CF_USE_ICOUNT 0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */
uint16_t invalid;
void *tc_ptr; /* pointer to the translated code */
uint8_t *tc_search; /* pointer to search data */
/* original tb when cflags has CF_NOCACHE */


@ -11,8 +11,7 @@
#define PPC_FDT_H
#include "qemu/error-report.h"
typedef struct CPUPPCState CPUPPCState;
#include "target-ppc/cpu-qom.h"
#define _FDT(exp) \
do { \


@ -72,16 +72,16 @@
* Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
*/
#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })
/* Most compilers currently treat consume and acquire the same, but really
* no processors except Alpha need a barrier here. Leave it in if
* using Thread Sanitizer to avoid warnings, otherwise optimize it away.
*/
#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#else
@ -96,15 +96,12 @@
#define atomic_read(ptr) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof_strip_qual(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
_val; \
__atomic_load_n(ptr, __ATOMIC_RELAXED); \
})
#define atomic_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
__atomic_store_n(ptr, i, __ATOMIC_RELAXED); \
} while(0)
/* See above: most compilers currently treat consume and acquire the
@ -129,8 +126,7 @@
#define atomic_rcu_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
/* atomic_mb_read/set semantics map Java volatile variables. They are
@ -153,9 +149,8 @@
#define atomic_mb_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
smp_wmb(); \
__atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
__atomic_store_n(ptr, i, __ATOMIC_RELAXED); \
smp_mb(); \
} while(0)
#else
@ -169,8 +164,7 @@
#define atomic_mb_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
__atomic_store_n(ptr, i, __ATOMIC_SEQ_CST); \
} while(0)
#endif
@ -179,17 +173,15 @@
#define atomic_xchg(ptr, i) ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof_strip_qual(*ptr) _new = (i), _old; \
__atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
_old; \
__atomic_exchange_n(ptr, i, __ATOMIC_SEQ_CST); \
})
/* Returns the eventual value, failed or not */
#define atomic_cmpxchg(ptr, old, new) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof_strip_qual(*ptr) _old = (old), _new = (new); \
__atomic_compare_exchange(ptr, &_old, &_new, false, \
typeof_strip_qual(*ptr) _old = (old); \
__atomic_compare_exchange_n(ptr, &_old, new, false, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
_old; \
})

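The atomics cleanup is mostly mechanical: the _n ("by value") builtins return or take the value directly, so the temporaries required by the generic __atomic_load/__atomic_store/__atomic_exchange/__atomic_compare_exchange forms can be dropped. For a single load the two spellings compare as follows (sketch):

    /* old, generic form: needs a temporary of the right (unqualified) type */
    typeof_strip_qual(*ptr) val;
    __atomic_load(ptr, &val, __ATOMIC_RELAXED);

    /* new, value-returning form used by atomic_read() above */
    val = __atomic_load_n(ptr, __ATOMIC_RELAXED);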

@ -168,9 +168,8 @@ int64_t qemu_strtosz_suffix_unit(const char *nptr, char **end,
/* used to print char* safely */
#define STR_OR_NULL(str) ((str) ? (str) : "null")
bool can_use_buffer_find_nonzero_offset(const void *buf, size_t len);
size_t buffer_find_nonzero_offset(const void *buf, size_t len);
bool buffer_is_zero(const void *buf, size_t len);
bool test_buffer_is_zero_next_accel(void);
/*
* Implementation of ULEB128 (http://en.wikipedia.org/wiki/LEB128)

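can_use_buffer_find_nonzero_offset()/buffer_find_nonzero_offset() are gone; callers just use buffer_is_zero() (see the migration hunks further down), and test_buffer_is_zero_next_accel() exists so a unit test can step through every accelerator built in. A plausible test loop (a sketch; the actual tests/test-bufferiszero.c may differ):

    /* Exercise each available accelerator against the same buffer. */
    do {
        g_assert(buffer_is_zero(buf, len));          /* buf is all zeroes */
        buf[len - 1] = 1;
        g_assert(!buffer_is_zero(buf, len));         /* and a non-zero case */
        buf[len - 1] = 0;
    } while (test_buffer_is_zero_next_accel());      /* false once exhausted */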

@ -407,6 +407,7 @@ struct { \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
(elm)->field.tqe_prev = NULL; \
} while (/*CONSTCOND*/0)
#define QTAILQ_FOREACH(var, head, field) \
@ -430,6 +431,7 @@ struct { \
#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define QTAILQ_FIRST(head) ((head)->tqh_first)
#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_prev != NULL)
#define QTAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))

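QTAILQ_REMOVE now resets tqe_prev, which is what makes the new QTAILQ_IN_USE() a reliable "is this element on a queue" test and lets the blockdev.c, exec.c and net/filter.c hunks drop their open-coded tqe_prev checks. In caller terms (illustrative element and field names):

    /* before: poke at the tailq implementation detail directly */
    if (elm->entry.tqe_prev != NULL) {
        QTAILQ_REMOVE(&some_list, elm, entry);
    }

    /* after: removal clears tqe_prev, so membership is a documented query */
    if (QTAILQ_IN_USE(elm, entry)) {
        QTAILQ_REMOVE(&some_list, elm, entry);
    }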

@ -22,23 +22,20 @@
* @QEMU_CLOCK_REALTIME: Real time clock
*
* The real time clock should be used only for stuff which does not
* change the virtual machine state, as it is run even if the virtual
* machine is stopped. The real time clock has a frequency of 1000
* Hz.
* change the virtual machine state, as it runs even if the virtual
* machine is stopped.
*
* @QEMU_CLOCK_VIRTUAL: virtual clock
*
* The virtual clock is only run during the emulation. It is stopped
* when the virtual machine is stopped. Virtual timers use a high
* precision clock, usually cpu cycles (use ticks_per_sec).
* The virtual clock only runs during the emulation. It stops
* when the virtual machine is stopped.
*
* @QEMU_CLOCK_HOST: host clock
*
* The host clock should be use for device models that emulate accurate
* The host clock should be used for device models that emulate accurate
* real time sources. It will continue to run when the virtual machine
* is suspended, and it will reflect system time changes the host may
* undergo (e.g. due to NTP). The host clock has the same precision as
* the virtual clock.
* undergo (e.g. due to NTP).
*
* @QEMU_CLOCK_VIRTUAL_RT: realtime clock used for icount warp
*
@ -76,10 +73,6 @@ struct QEMUTimer {
extern QEMUTimerListGroup main_loop_tlg;
/*
* QEMUClockType
*/
/*
* qemu_clock_get_ns;
* @type: the clock type

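In practice the distinction described above shows up when reading the clocks with qemu_clock_get_ns(): QEMU_CLOCK_VIRTUAL stops while the VM is stopped, while QEMU_CLOCK_REALTIME and QEMU_CLOCK_HOST keep running (and HOST additionally follows host time changes such as NTP). A small usage sketch:

    /* Guest-visible duration: does not advance across vm_stop/vm_start. */
    int64_t t0 = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    /* ... guest executes ... */
    int64_t guest_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - t0;

    /* Wall-clock style source for a device that models a real-time clock. */
    int64_t host_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);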

@ -221,7 +221,6 @@ int kvm_destroy_vcpu(CPUState *cpu);
#ifdef NEED_CPU_H
#include "cpu.h"
void kvm_setup_guest_memory(void *start, size_t size);
void kvm_flush_coalesced_mmio_buffer(void);
int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
@ -372,7 +371,6 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
void kvm_put_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
struct kvm_guest_debug;


@ -2148,6 +2148,7 @@ void kvm_device_access(int fd, int group, uint64_t attr,
}
}
/* Return 1 on success, 0 on failure */
int kvm_has_sync_mmu(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
@ -2190,20 +2191,6 @@ int kvm_has_intx_set_mask(void)
return kvm_state->intx_set_mask;
}
void kvm_setup_guest_memory(void *start, size_t size)
{
if (!kvm_has_sync_mmu()) {
int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);
if (ret) {
perror("qemu_madvise");
fprintf(stderr,
"Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
exit(1);
}
}
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
target_ulong pc)


@ -73,10 +73,6 @@ int kvm_has_many_ioeventfds(void)
return 0;
}
void kvm_setup_guest_memory(void *start, size_t size)
{
}
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
return -ENOSYS;


@ -73,7 +73,7 @@ static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
return buffer_find_nonzero_offset(p, size) == size;
return buffer_is_zero(p, size);
}
/* struct contains XBZRLE cache and a static page


@ -1934,10 +1934,7 @@ retry:
* memset() + madvise() the entire chunk without RDMA.
*/
if (can_use_buffer_find_nonzero_offset((void *)(uintptr_t)sge.addr,
length)
&& buffer_find_nonzero_offset((void *)(uintptr_t)sge.addr,
length) == length) {
if (buffer_is_zero((void *)(uintptr_t)sge.addr, length)) {
RDMACompress comp = {
.offset = current_addr,
.value = 0,


@ -239,7 +239,7 @@ static void netfilter_finalize(Object *obj)
}
if (nf->netdev && !QTAILQ_EMPTY(&nf->netdev->filters) &&
nf->next.tqe_prev) {
QTAILQ_IN_USE(nf, next)) {
QTAILQ_REMOVE(&nf->netdev->filters, nf, next);
}
g_free(nf->netdev_id);



@ -122,24 +122,14 @@ static inline void writel_es(uint16_t offset, uint32_t val)
static inline uint32_t bswap32(uint32_t x)
{
return
((x & 0x000000ffU) << 24) |
((x & 0x0000ff00U) << 8) |
((x & 0x00ff0000U) >> 8) |
((x & 0xff000000U) >> 24);
asm("bswapl %0" : "=r" (x) : "0" (x));
return x;
}
static inline uint64_t bswap64(uint64_t x)
{
return
((x & 0x00000000000000ffULL) << 56) |
((x & 0x000000000000ff00ULL) << 40) |
((x & 0x0000000000ff0000ULL) << 24) |
((x & 0x00000000ff000000ULL) << 8) |
((x & 0x000000ff00000000ULL) >> 8) |
((x & 0x0000ff0000000000ULL) >> 24) |
((x & 0x00ff000000000000ULL) >> 40) |
((x & 0xff00000000000000ULL) >> 56);
asm("bswapl %%eax; bswapl %%edx; xchg %%eax, %%edx" : "=A" (x) : "0" (x));
return x;
}
static inline uint64_t cpu_to_be64(uint64_t x)


@ -39,6 +39,7 @@
#include "io/channel-file.h"
#include "io/channel-tls.h"
#include "sysemu/replay.h"
#include "qemu/help_option.h"
#include <zlib.h>
@ -440,7 +441,9 @@ void qemu_chr_fe_printf(CharDriverState *s, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, sizeof(buf), fmt, ap);
qemu_chr_fe_write(s, (uint8_t *)buf, strlen(buf));
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(s, (uint8_t *)buf, strlen(buf));
va_end(ap);
}
@ -556,7 +559,9 @@ static int mux_chr_write(CharDriverState *chr, const uint8_t *buf, int len)
(secs / 60) % 60,
secs % 60,
(int)(ti % 1000));
qemu_chr_fe_write(d->drv, (uint8_t *)buf1, strlen(buf1));
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(d->drv, (uint8_t *)buf1, strlen(buf1));
d->linestart = 0;
}
ret += qemu_chr_fe_write(d->drv, buf+i, 1);
@ -594,13 +599,15 @@ static void mux_print_help(CharDriverState *chr)
"\n\rEscape-Char set to Ascii: 0x%02x\n\r\n\r",
term_escape_char);
}
qemu_chr_fe_write(chr, (uint8_t *)cbuf, strlen(cbuf));
/* XXX this blocks entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(chr, (uint8_t *)cbuf, strlen(cbuf));
for (i = 0; mux_help[i] != NULL; i++) {
for (j=0; mux_help[i][j] != '\0'; j++) {
if (mux_help[i][j] == '%')
qemu_chr_fe_write(chr, (uint8_t *)ebuf, strlen(ebuf));
qemu_chr_fe_write_all(chr, (uint8_t *)ebuf, strlen(ebuf));
else
qemu_chr_fe_write(chr, (uint8_t *)&mux_help[i][j], 1);
qemu_chr_fe_write_all(chr, (uint8_t *)&mux_help[i][j], 1);
}
}
}
@ -625,7 +632,7 @@ static int mux_proc_byte(CharDriverState *chr, MuxDriver *d, int ch)
case 'x':
{
const char *term = "QEMU: Terminated\n\r";
qemu_chr_fe_write(chr, (uint8_t *)term, strlen(term));
qemu_chr_fe_write_all(chr, (uint8_t *)term, strlen(term));
exit(0);
break;
}
@ -3879,16 +3886,26 @@ CharDriverState *qemu_chr_new_from_opts(QemuOpts *opts,
const char *id = qemu_opts_id(opts);
char *bid = NULL;
if (id == NULL) {
error_setg(errp, "chardev: no id specified");
goto err;
}
if (qemu_opt_get(opts, "backend") == NULL) {
error_setg(errp, "chardev: \"%s\" missing backend",
qemu_opts_id(opts));
goto err;
}
if (is_help_option(qemu_opt_get(opts, "backend"))) {
fprintf(stderr, "Available chardev backend types:\n");
for (i = backends; i; i = i->next) {
cd = i->data;
fprintf(stderr, "%s\n", cd->name);
}
exit(!is_help_option(qemu_opt_get(opts, "backend")));
}
if (id == NULL) {
error_setg(errp, "chardev: no id specified");
goto err;
}
for (i = backends; i; i = i->next) {
cd = i->data;


@ -982,13 +982,14 @@ DEF("nographic", 0, QEMU_OPTION_nographic,
STEXI
@item -nographic
@findex -nographic
Normally, QEMU uses SDL to display the VGA output. With this option,
you can totally disable graphical output so that QEMU is a simple
command line application. The emulated serial port is redirected on
the console and muxed with the monitor (unless redirected elsewhere
explicitly). Therefore, you can still use QEMU to debug a Linux kernel
with a serial console. Use @key{C-a h} for help on switching between
the console and monitor.
Normally, if QEMU is compiled with graphical window support, it displays
output such as guest graphics, guest console, and the QEMU monitor in a
window. With this option, you can totally disable graphical output so
that QEMU is a simple command line application. The emulated serial port
is redirected to the console and muxed with the monitor (unless
redirected elsewhere explicitly). Therefore, you can still use QEMU to
debug a Linux kernel with a serial console. Use @key{C-a h} for help on
switching between the console and monitor.
ETEXI
DEF("curses", 0, QEMU_OPTION_curses,
@ -997,9 +998,11 @@ DEF("curses", 0, QEMU_OPTION_curses,
STEXI
@item -curses
@findex -curses
Normally, QEMU uses SDL to display the VGA output. With this option,
QEMU can display the VGA output when in text mode using a
curses/ncurses interface. Nothing is displayed in graphical mode.
Normally, if QEMU is compiled with graphical window support, it displays
output such as guest graphics, guest console, and the QEMU monitor in a
window. With this option, QEMU can display the VGA output when in text
mode using a curses/ncurses interface. Nothing is displayed in graphical
mode.
ETEXI
DEF("no-frame", 0, QEMU_OPTION_no_frame,
@ -1243,13 +1246,14 @@ DEF("vnc", HAS_ARG, QEMU_OPTION_vnc ,
STEXI
@item -vnc @var{display}[,@var{option}[,@var{option}[,...]]]
@findex -vnc
Normally, QEMU uses SDL to display the VGA output. With this option,
you can have QEMU listen on VNC display @var{display} and redirect the VGA
display over the VNC session. It is very useful to enable the usb
tablet device when using this option (option @option{-usbdevice
tablet}). When using the VNC display, you must use the @option{-k}
parameter to set the keyboard layout if you are not using en-us. Valid
syntax for the @var{display} is
Normally, if QEMU is compiled with graphical window support, it displays
output such as guest graphics, guest console, and the QEMU monitor in a
window. With this option, you can have QEMU listen on VNC display
@var{display} and redirect the VGA display over the VNC session. It is
very useful to enable the usb tablet device when using this option
(option @option{-usbdevice tablet}). When using the VNC display, you
must use the @option{-k} parameter to set the keyboard layout if you are
not using en-us. Valid syntax for the @var{display} is
@table @option
@ -2148,6 +2152,7 @@ The general form of a character device option is:
ETEXI
DEF("chardev", HAS_ARG, QEMU_OPTION_chardev,
"-chardev help\n"
"-chardev null,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n"
"-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4][,ipv6][,nodelay][,reconnect=seconds]\n"
" [,server][,nowait][,telnet][,reconnect=seconds][,mux=on|off]\n"
@ -2213,6 +2218,8 @@ Backend is one of:
@option{spiceport}.
The specific backend will determine the applicable options.
Use "-chardev help" to print all available chardev backend types.
All devices must have an id, which can be any string up to 127 characters long.
It is used to uniquely identify this device in other command line directives.

View File

@ -51,7 +51,7 @@ process-archive-undefs = $(filter-out %.a %.mo,$1) \
$(call undefined-symbols,$(filter %.mo,$1)))) \
$(filter %.a,$1)
extract-libs = $(strip $(foreach o,$1,$($o-libs)))
extract-libs = $(strip $(foreach o,$(filter-out %.mo,$1),$($o-libs)))
expand-objs = $(strip $(sort $(filter %.o,$1)) \
$(foreach o,$(filter %.mo,$1),$($o-objs)) \
$(filter-out %.o %.mo,$1))

View File

@ -1320,6 +1320,16 @@ sub process {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
ERROR("DOS line endings\n" . $herevet);
} elsif ($realfile =~ /^docs\/.+\.txt/ ||
$realfile =~ /^docs\/.+\.md/) {
if ($rawline =~ /^\+\s+$/ && $rawline !~ /^\+ {4}$/) {
# TODO: properly check we're in a code block
# (surrounding text is 4-column aligned)
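# i.e. an added line containing only whitespace must be either
# completely empty or exactly four spaces deep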
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
ERROR("code blocks in documentation should have " .
"empty lines with exactly 4 columns of " .
"whitespace\n" . $herevet);
}
} elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
ERROR("trailing whitespace\n" . $herevet);

View File

@ -79,6 +79,7 @@ class Misc(object):
def show(self):
print self.name
value = msr().read(self.msr, 0)
print ' Hex: 0x%x' % (value)
def first_bit(key):
if type(key) is tuple:
return key[0]
@ -172,6 +173,7 @@ controls = [
16: 'RDSEED exiting',
18: 'EPT-violation #VE',
20: 'Enable XSAVES/XRSTORS',
25: 'TSC scaling',
},
cap_msr = MSR_IA32_VMX_PROCBASED_CTLS2,
),

View File

@ -1072,7 +1072,9 @@ int slirp_add_exec(Slirp *slirp, int do_pty, const void *args,
ssize_t slirp_send(struct socket *so, const void *buf, size_t len, int flags)
{
if (so->s == -1 && so->extra) {
qemu_chr_fe_write(so->extra, buf, len);
/* XXX this blocks the entire thread. Rewrite to use
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(so->extra, buf, len);
return len;
}

View File

@ -2416,19 +2416,6 @@ static int kvm_get_apic(X86CPU *cpu)
return 0;
}
static int kvm_put_apic(X86CPU *cpu)
{
DeviceState *apic = cpu->apic_state;
struct kvm_lapic_state kapic;
if (apic && kvm_irqchip_in_kernel()) {
kvm_put_apic_state(apic, &kapic);
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
}
return 0;
}
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
CPUState *cs = CPU(cpu);
@ -2670,10 +2657,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (ret < 0) {
return ret;
}
ret = kvm_put_apic(x86_cpu);
if (ret < 0) {
return ret;
}
}
ret = kvm_put_tscdeadline_msr(x86_cpu);

View File

@ -113,6 +113,8 @@ check-unit-y += tests/test-crypto-block$(EXESUF)
gcov-files-test-logging-y = tests/test-logging.c
check-unit-y += tests/test-logging$(EXESUF)
check-unit-$(CONFIG_REPLICATION) += tests/test-replication$(EXESUF)
check-unit-y += tests/test-bufferiszero$(EXESUF)
gcov-files-check-bufferiszero-y = util/bufferiszero.c
check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh
@ -485,6 +487,7 @@ tests/test-qdist$(EXESUF): tests/test-qdist.o $(test-util-obj-y)
tests/test-qht$(EXESUF): tests/test-qht.o $(test-util-obj-y)
tests/test-qht-par$(EXESUF): tests/test-qht-par.o tests/qht-bench$(EXESUF) $(test-util-obj-y)
tests/qht-bench$(EXESUF): tests/qht-bench.o $(test-util-obj-y)
tests/test-bufferiszero$(EXESUF): tests/test-bufferiszero.o $(test-util-obj-y)
tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
hw/core/qdev.o hw/core/qdev-properties.o hw/core/hotplug.o\

tests/test-bufferiszero.c Normal file
View File

@ -0,0 +1,78 @@
/*
* QEMU buffer_is_zero test
*
* Copyright (c) 2016 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "qemu/cutils.h"
static char buffer[8 * 1024 * 1024];
static void test_1(void)
{
size_t s, a, o;
/* Basic positive test. */
g_assert(buffer_is_zero(buffer, sizeof(buffer)));
/* Basic negative test. */
buffer[sizeof(buffer) - 1] = 1;
g_assert(!buffer_is_zero(buffer, sizeof(buffer)));
buffer[sizeof(buffer) - 1] = 0;
/* Positive tests for size and alignment. */
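/* A nonzero byte sits just before and just after the window
 * [buffer + a, buffer + a + s), so a read that strays one byte past
 * either end of the window would make the assertion below fail. */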
for (a = 1; a <= 64; a++) {
for (s = 1; s < 1024; s++) {
buffer[a - 1] = 1;
buffer[a + s] = 1;
g_assert(buffer_is_zero(buffer + a, s));
buffer[a - 1] = 0;
buffer[a + s] = 0;
}
}
/* Negative tests for size, alignment, and the offset of the marker. */
for (a = 1; a <= 64; a++) {
for (s = 1; s < 1024; s++) {
for (o = 0; o < s; ++o) {
buffer[a + o] = 1;
g_assert(!buffer_is_zero(buffer + a, s));
buffer[a + o] = 0;
}
}
}
}
static void test_2(void)
{
if (g_test_perf()) {
test_1();
} else {
do {
test_1();
} while (test_buffer_is_zero_next_accel());
}
}
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
g_test_add_func("/cutils/bufferiszero", test_2);
return g_test_run();
}

View File

@ -773,6 +773,7 @@ static TranslationBlock *tb_alloc(target_ulong pc)
tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
tb->pc = pc;
tb->cflags = 0;
tb->invalid = false;
return tb;
}
@ -848,13 +849,17 @@ void tb_flush(CPUState *cpu)
> tcg_ctx.code_gen_buffer_size) {
cpu_abort(cpu, "Internal error: code buffer overflow\n");
}
tcg_ctx.tb_ctx.nb_tbs = 0;
CPU_FOREACH(cpu) {
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
cpu->tb_flushed = true;
int i;
for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
atomic_set(&cpu->tb_jmp_cache[i], NULL);
}
atomic_mb_set(&cpu->tb_flushed, true);
}
tcg_ctx.tb_ctx.nb_tbs = 0;
qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
page_flush_tb();
@ -990,6 +995,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
uint32_t h;
tb_page_addr_t phys_pc;
atomic_set(&tb->invalid, true);
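/* Setting tb->invalid first lets concurrent lock-free lookups notice
 * that this TB is stale before it disappears from the hash table. */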
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
h = tb_hash_func(phys_pc, tb->pc, tb->flags);
@ -1010,8 +1017,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
if (cpu->tb_jmp_cache[h] == tb) {
cpu->tb_jmp_cache[h] = NULL;
if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
atomic_set(&cpu->tb_jmp_cache[h], NULL);
}
}
@ -1124,10 +1131,6 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
{
uint32_t h;
/* add in the hash table */
h = tb_hash_func(phys_pc, tb->pc, tb->flags);
qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
/* add in the page list */
tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
if (phys_page2 != -1) {
@ -1136,6 +1139,10 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb->page_addr[1] = -1;
}
/* add in the hash table */
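/* (done after the page lists above, so a concurrent lock-free lookup
 * cannot find a TB that is only partially linked) */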
h = tb_hash_func(phys_pc, tb->pc, tb->flags);
qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
#ifdef DEBUG_TB_CHECK
tb_page_check();
#endif

View File

@ -1,4 +1,5 @@
util-obj-y = osdep.o cutils.o unicode.o qemu-timer-common.o
util-obj-y += bufferiszero.o
util-obj-$(CONFIG_POSIX) += compatfd.o
util-obj-$(CONFIG_POSIX) += event_notifier-posix.o
util-obj-$(CONFIG_POSIX) += mmap-alloc.o

util/bufferiszero.c Normal file
View File

@ -0,0 +1,230 @@
/*
* Simple C functions to supplement the C library
*
* Copyright (c) 2006 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
/* vector definitions */
extern void link_error(void);
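/* link_error() is never defined anywhere; if the SIZE checks below are
 * not resolved at compile time, the surviving call turns into a
 * link-time failure instead of silently wrong code. */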
#define ACCEL_BUFFER_ZERO(NAME, SIZE, VECTYPE, NONZERO) \
static bool NAME(const void *buf, size_t len) \
{ \
const void *end = buf + len; \
do { \
const VECTYPE *p = buf; \
VECTYPE t; \
__builtin_prefetch(buf + SIZE); \
barrier(); \
if (SIZE == sizeof(VECTYPE) * 4) { \
t = (p[0] | p[1]) | (p[2] | p[3]); \
} else if (SIZE == sizeof(VECTYPE) * 8) { \
t = p[0] | p[1]; \
t |= p[2] | p[3]; \
t |= p[4] | p[5]; \
t |= p[6] | p[7]; \
} else { \
link_error(); \
} \
if (unlikely(NONZERO(t))) { \
return false; \
} \
buf += SIZE; \
} while (buf < end); \
return true; \
}
static bool
buffer_zero_int(const void *buf, size_t len)
{
if (unlikely(len < 8)) {
/* For a very small buffer, simply accumulate all the bytes. */
const unsigned char *p = buf;
const unsigned char *e = buf + len;
unsigned char t = 0;
do {
t |= *p++;
} while (p < e);
return t == 0;
} else {
/* Otherwise, use the unaligned memory access functions to
handle the beginning and end of the buffer, with a couple
of loops handling the middle aligned section. */
uint64_t t = ldq_he_p(buf);
const uint64_t *p = (uint64_t *)(((uintptr_t)buf + 8) & -8);
const uint64_t *e = (uint64_t *)(((uintptr_t)buf + len) & -8);
for (; p + 8 <= e; p += 8) {
__builtin_prefetch(p + 8);
if (t) {
return false;
}
t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
}
while (p < e) {
t |= *p++;
}
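/* Fold in the final 8 bytes, which may be unaligned and may overlap
 * bytes already accumulated above. */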
t |= ldq_he_p(buf + len - 8);
return t == 0;
}
}
#if defined(CONFIG_AVX2_OPT) || (defined(CONFIG_CPUID_H) && defined(__SSE2__))
#include <cpuid.h>
/* Do not use push_options pragmas unnecessarily, because clang
* does not support them.
*/
#ifndef __SSE2__
#pragma GCC push_options
#pragma GCC target("sse2")
#endif
#include <emmintrin.h>
#define SSE2_NONZERO(X) \
(_mm_movemask_epi8(_mm_cmpeq_epi8((X), _mm_setzero_si128())) != 0xFFFF)
ACCEL_BUFFER_ZERO(buffer_zero_sse2, 64, __m128i, SSE2_NONZERO)
#ifndef __SSE2__
#pragma GCC pop_options
#endif
#ifdef CONFIG_AVX2_OPT
#pragma GCC push_options
#pragma GCC target("sse4")
#include <smmintrin.h>
#define SSE4_NONZERO(X) !_mm_testz_si128((X), (X))
ACCEL_BUFFER_ZERO(buffer_zero_sse4, 64, __m128i, SSE4_NONZERO)
#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target("avx2")
#include <immintrin.h>
#define AVX2_NONZERO(X) !_mm256_testz_si256((X), (X))
ACCEL_BUFFER_ZERO(buffer_zero_avx2, 128, __m256i, AVX2_NONZERO)
#pragma GCC pop_options
#endif
#define CACHE_AVX2 2
#define CACHE_AVX1 4
#define CACHE_SSE4 8
#define CACHE_SSE2 16
static unsigned cpuid_cache;
static void __attribute__((constructor)) init_cpuid_cache(void)
{
int max = __get_cpuid_max(0, NULL);
int a, b, c, d;
unsigned cache = 0;
if (max >= 1) {
__cpuid(1, a, b, c, d);
if (d & bit_SSE2) {
cache |= CACHE_SSE2;
}
#ifdef CONFIG_AVX2_OPT
if (c & bit_SSE4_1) {
cache |= CACHE_SSE4;
}
/* We must check that AVX is not just available, but usable. */
if ((c & bit_OSXSAVE) && (c & bit_AVX)) {
__asm("xgetbv" : "=a"(a), "=d"(d) : "c"(0));
if ((a & 6) == 6) {
cache |= CACHE_AVX1;
if (max >= 7) {
__cpuid_count(7, 0, a, b, c, d);
if (b & bit_AVX2) {
cache |= CACHE_AVX2;
}
}
}
}
#endif
}
cpuid_cache = cache;
}
#define HAVE_NEXT_ACCEL
bool test_buffer_is_zero_next_accel(void)
{
/* If no bits set, we just tested buffer_zero_int, and there
are no more acceleration options to test. */
if (cpuid_cache == 0) {
return false;
}
/* Disable the accelerator we used before and select a new one. */
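/* (x &= x - 1 clears the lowest set bit; the fastest accelerator has
 * the lowest CACHE_* value, so it is the first to be dropped.) */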
cpuid_cache &= cpuid_cache - 1;
return true;
}
static bool select_accel_fn(const void *buf, size_t len)
{
uintptr_t ibuf = (uintptr_t)buf;
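/* Each accelerated helper requires len to be a multiple of the chunk it
 * unrolls over (its SIZE argument) and buf to be aligned to the vector
 * width; anything else falls back to the integer version. */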
#ifdef CONFIG_AVX2_OPT
if (len % 128 == 0 && ibuf % 32 == 0 && (cpuid_cache & CACHE_AVX2)) {
return buffer_zero_avx2(buf, len);
}
if (len % 64 == 0 && ibuf % 16 == 0 && (cpuid_cache & CACHE_SSE4)) {
return buffer_zero_sse4(buf, len);
}
#endif
if (len % 64 == 0 && ibuf % 16 == 0 && (cpuid_cache & CACHE_SSE2)) {
return buffer_zero_sse2(buf, len);
}
return buffer_zero_int(buf, len);
}
#else
#define select_accel_fn buffer_zero_int
#endif
#ifndef HAVE_NEXT_ACCEL
bool test_buffer_is_zero_next_accel(void)
{
return false;
}
#endif
/*
* Checks if a buffer is all zeroes
*/
bool buffer_is_zero(const void *buf, size_t len)
{
if (unlikely(len == 0)) {
return true;
}
/* Fetch the beginning of the buffer while we select the accelerator. */
__builtin_prefetch(buf);
/* Use an optimized zero check if possible. Note that this also
includes a check for an unrolled loop over 64-bit integers. */
return select_accel_fn(buf, len);
}

View File

@ -161,250 +161,6 @@ int qemu_fdatasync(int fd)
#endif
}
/* vector definitions */
#ifdef __ALTIVEC__
#include <altivec.h>
/* The altivec.h header says we're allowed to undef these for
* C++ compatibility. Here we don't care about C++, but we
* undef them anyway to avoid namespace pollution.
*/
#undef vector
#undef pixel
#undef bool
#define VECTYPE __vector unsigned char
#define SPLAT(p) vec_splat(vec_ld(0, p), 0)
#define ALL_EQ(v1, v2) vec_all_eq(v1, v2)
#define VEC_OR(v1, v2) ((v1) | (v2))
/* altivec.h may redefine the bool macro as vector type.
* Reset it to POSIX semantics. */
#define bool _Bool
#elif defined __SSE2__
#include <emmintrin.h>
#define VECTYPE __m128i
#define SPLAT(p) _mm_set1_epi8(*(p))
#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF)
#define VEC_OR(v1, v2) (_mm_or_si128(v1, v2))
#elif defined(__aarch64__)
#include "arm_neon.h"
#define VECTYPE uint64x2_t
#define ALL_EQ(v1, v2) \
((vgetq_lane_u64(v1, 0) == vgetq_lane_u64(v2, 0)) && \
(vgetq_lane_u64(v1, 1) == vgetq_lane_u64(v2, 1)))
#define VEC_OR(v1, v2) ((v1) | (v2))
#else
#define VECTYPE unsigned long
#define SPLAT(p) (*(p) * (~0UL / 255))
#define ALL_EQ(v1, v2) ((v1) == (v2))
#define VEC_OR(v1, v2) ((v1) | (v2))
#endif
#define BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR 8
static bool
can_use_buffer_find_nonzero_offset_inner(const void *buf, size_t len)
{
return (len % (BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR
* sizeof(VECTYPE)) == 0
&& ((uintptr_t) buf) % sizeof(VECTYPE) == 0);
}
/*
* Searches for an area with non-zero content in a buffer
*
* Attention! The len must be a multiple of
* BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR * sizeof(VECTYPE)
* and addr must be a multiple of sizeof(VECTYPE) due to
* restriction of optimizations in this function.
*
* can_use_buffer_find_nonzero_offset_inner() can be used to
* check these requirements.
*
* The return value is the offset of the non-zero area rounded
* down to a multiple of sizeof(VECTYPE) for the first
* BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR chunks and down to
* BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR * sizeof(VECTYPE)
* afterwards.
*
* If the buffer is all zero the return value is equal to len.
*/
static size_t buffer_find_nonzero_offset_inner(const void *buf, size_t len)
{
const VECTYPE *p = buf;
const VECTYPE zero = (VECTYPE){0};
size_t i;
assert(can_use_buffer_find_nonzero_offset_inner(buf, len));
if (!len) {
return 0;
}
for (i = 0; i < BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR; i++) {
if (!ALL_EQ(p[i], zero)) {
return i * sizeof(VECTYPE);
}
}
for (i = BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR;
i < len / sizeof(VECTYPE);
i += BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR) {
VECTYPE tmp0 = VEC_OR(p[i + 0], p[i + 1]);
VECTYPE tmp1 = VEC_OR(p[i + 2], p[i + 3]);
VECTYPE tmp2 = VEC_OR(p[i + 4], p[i + 5]);
VECTYPE tmp3 = VEC_OR(p[i + 6], p[i + 7]);
VECTYPE tmp01 = VEC_OR(tmp0, tmp1);
VECTYPE tmp23 = VEC_OR(tmp2, tmp3);
if (!ALL_EQ(VEC_OR(tmp01, tmp23), zero)) {
break;
}
}
return i * sizeof(VECTYPE);
}
#if defined CONFIG_AVX2_OPT
#pragma GCC push_options
#pragma GCC target("avx2")
#include <cpuid.h>
#include <immintrin.h>
#define AVX2_VECTYPE __m256i
#define AVX2_SPLAT(p) _mm256_set1_epi8(*(p))
#define AVX2_ALL_EQ(v1, v2) \
(_mm256_movemask_epi8(_mm256_cmpeq_epi8(v1, v2)) == 0xFFFFFFFF)
#define AVX2_VEC_OR(v1, v2) (_mm256_or_si256(v1, v2))
static bool
can_use_buffer_find_nonzero_offset_avx2(const void *buf, size_t len)
{
return (len % (BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR
* sizeof(AVX2_VECTYPE)) == 0
&& ((uintptr_t) buf) % sizeof(AVX2_VECTYPE) == 0);
}
static size_t buffer_find_nonzero_offset_avx2(const void *buf, size_t len)
{
const AVX2_VECTYPE *p = buf;
const AVX2_VECTYPE zero = (AVX2_VECTYPE){0};
size_t i;
assert(can_use_buffer_find_nonzero_offset_avx2(buf, len));
if (!len) {
return 0;
}
for (i = 0; i < BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR; i++) {
if (!AVX2_ALL_EQ(p[i], zero)) {
return i * sizeof(AVX2_VECTYPE);
}
}
for (i = BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR;
i < len / sizeof(AVX2_VECTYPE);
i += BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR) {
AVX2_VECTYPE tmp0 = AVX2_VEC_OR(p[i + 0], p[i + 1]);
AVX2_VECTYPE tmp1 = AVX2_VEC_OR(p[i + 2], p[i + 3]);
AVX2_VECTYPE tmp2 = AVX2_VEC_OR(p[i + 4], p[i + 5]);
AVX2_VECTYPE tmp3 = AVX2_VEC_OR(p[i + 6], p[i + 7]);
AVX2_VECTYPE tmp01 = AVX2_VEC_OR(tmp0, tmp1);
AVX2_VECTYPE tmp23 = AVX2_VEC_OR(tmp2, tmp3);
if (!AVX2_ALL_EQ(AVX2_VEC_OR(tmp01, tmp23), zero)) {
break;
}
}
return i * sizeof(AVX2_VECTYPE);
}
static bool avx2_support(void)
{
int a, b, c, d;
if (__get_cpuid_max(0, NULL) < 7) {
return false;
}
__cpuid_count(7, 0, a, b, c, d);
return b & bit_AVX2;
}
bool can_use_buffer_find_nonzero_offset(const void *buf, size_t len) \
__attribute__ ((ifunc("can_use_buffer_find_nonzero_offset_ifunc")));
size_t buffer_find_nonzero_offset(const void *buf, size_t len) \
__attribute__ ((ifunc("buffer_find_nonzero_offset_ifunc")));
static void *buffer_find_nonzero_offset_ifunc(void)
{
typeof(buffer_find_nonzero_offset) *func = (avx2_support()) ?
buffer_find_nonzero_offset_avx2 : buffer_find_nonzero_offset_inner;
return func;
}
static void *can_use_buffer_find_nonzero_offset_ifunc(void)
{
typeof(can_use_buffer_find_nonzero_offset) *func = (avx2_support()) ?
can_use_buffer_find_nonzero_offset_avx2 :
can_use_buffer_find_nonzero_offset_inner;
return func;
}
#pragma GCC pop_options
#else
bool can_use_buffer_find_nonzero_offset(const void *buf, size_t len)
{
return can_use_buffer_find_nonzero_offset_inner(buf, len);
}
size_t buffer_find_nonzero_offset(const void *buf, size_t len)
{
return buffer_find_nonzero_offset_inner(buf, len);
}
#endif
/*
* Checks if a buffer is all zeroes
*
* Attention! The len must be a multiple of 4 * sizeof(long) due to
* restriction of optimizations in this function.
*/
bool buffer_is_zero(const void *buf, size_t len)
{
/*
* Use long as the biggest available internal data type that fits into the
* CPU register and unroll the loop to smooth out the effect of memory
* latency.
*/
size_t i;
long d0, d1, d2, d3;
const long * const data = buf;
/* use vector optimized zero check if possible */
if (can_use_buffer_find_nonzero_offset(buf, len)) {
return buffer_find_nonzero_offset(buf, len) == len;
}
assert(len % (4 * sizeof(long)) == 0);
len /= sizeof(long);
for (i = 0; i < len; i += 4) {
d0 = data[i + 0];
d1 = data[i + 1];
d2 = data[i + 2];
d3 = data[i + 3];
if (d0 || d1 || d2 || d3) {
return false;
}
}
return true;
}
#ifndef _WIN32
/* Sets a specific flag */
int fcntl_setfl(int fd, int flag)

View File

@ -491,10 +491,10 @@ static int inet_dgram_saddr(InetSocketAddress *sraddr,
goto err;
}
if (0 != (rc = getaddrinfo(addr, port, &ai, &peer))) {
if ((rc = getaddrinfo(addr, port, &ai, &peer)) != 0) {
error_setg(errp, "address resolution failed for %s:%s: %s", addr, port,
gai_strerror(rc));
goto err;
goto err;
}
/* lookup local addr */
@ -517,7 +517,7 @@ static int inet_dgram_saddr(InetSocketAddress *sraddr,
port = "0";
}
if (0 != (rc = getaddrinfo(addr, port, &ai, &local))) {
if ((rc = getaddrinfo(addr, port, &ai, &local)) != 0) {
error_setg(errp, "address resolution failed for %s:%s: %s", addr, port,
gai_strerror(rc));
goto err;
@ -548,12 +548,16 @@ static int inet_dgram_saddr(InetSocketAddress *sraddr,
return sock;
err:
if (-1 != sock)
if (sock != -1) {
closesocket(sock);
if (local)
}
if (local) {
freeaddrinfo(local);
if (peer)
}
if (peer) {
freeaddrinfo(peer);
}
return -1;
}
@ -573,20 +577,20 @@ InetSocketAddress *inet_parse(const char *str, Error **errp)
if (str[0] == ':') {
/* no host given */
host[0] = '\0';
if (1 != sscanf(str, ":%32[^,]%n", port, &pos)) {
if (sscanf(str, ":%32[^,]%n", port, &pos) != 1) {
error_setg(errp, "error parsing port in address '%s'", str);
goto fail;
}
} else if (str[0] == '[') {
/* IPv6 addr */
if (2 != sscanf(str, "[%64[^]]]:%32[^,]%n", host, port, &pos)) {
if (sscanf(str, "[%64[^]]]:%32[^,]%n", host, port, &pos) != 2) {
error_setg(errp, "error parsing IPv6 address '%s'", str);
goto fail;
}
addr->ipv6 = addr->has_ipv6 = true;
} else {
/* hostname or IPv4 addr */
if (2 != sscanf(str, "%64[^:]:%32[^,]%n", host, port, &pos)) {
if (sscanf(str, "%64[^:]:%32[^,]%n", host, port, &pos) != 2) {
error_setg(errp, "error parsing address '%s'", str);
goto fail;
}
@ -816,8 +820,10 @@ int unix_listen(const char *str, char *ostr, int olen, Error **errp)
sock = unix_listen_saddr(saddr, true, errp);
if (sock != -1 && ostr)
if (sock != -1 && ostr) {
snprintf(ostr, olen, "%s%s", saddr->path, optstr ? optstr : "");
}
qapi_free_UnixSocketAddress(saddr);
return sock;
}