diff --git a/block/nbd-client.h b/block/nbd-client.h
index 8cdfc92e94..891ba44a20 100644
--- a/block/nbd-client.h
+++ b/block/nbd-client.h
@@ -30,8 +30,6 @@ typedef struct NBDClientSession {
 
     Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
     NBDReply reply;
-
-    bool is_unix;
 } NBDClientSession;
 
 NBDClientSession *nbd_get_client_session(BlockDriverState *bs);
diff --git a/block/nbd.c b/block/nbd.c
index f478f80b4a..1b832c2132 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -285,8 +285,6 @@ static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options, Error **errp)
         goto done;
     }
 
-    s->client.is_unix = saddr->type == SOCKET_ADDRESS_KIND_UNIX;
-
 done:
     QDECREF(addr);
     qobject_decref(crumpled_addr);
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index 7a6e771ed1..c3829e31b5 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -387,25 +387,6 @@ static bool apic_common_sipi_needed(void *opaque)
     return s->wait_for_sipi != 0;
 }
 
-static bool apic_irq_delivered_needed(void *opaque)
-{
-    APICCommonState *s = APIC_COMMON(opaque);
-    return s->cpu == X86_CPU(first_cpu) && apic_irq_delivered != 0;
-}
-
-static void apic_irq_delivered_pre_save(void *opaque)
-{
-    APICCommonState *s = APIC_COMMON(opaque);
-    s->apic_irq_delivered = apic_irq_delivered;
-}
-
-static int apic_irq_delivered_post_load(void *opaque, int version_id)
-{
-    APICCommonState *s = APIC_COMMON(opaque);
-    apic_irq_delivered = s->apic_irq_delivered;
-    return 0;
-}
-
 static const VMStateDescription vmstate_apic_common_sipi = {
     .name = "apic_sipi",
     .version_id = 1,
@@ -418,19 +399,6 @@ static const VMStateDescription vmstate_apic_common_sipi = {
     }
 };
 
-static const VMStateDescription vmstate_apic_irq_delivered = {
-    .name = "apic_irq_delivered",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = apic_irq_delivered_needed,
-    .pre_save = apic_irq_delivered_pre_save,
-    .post_load = apic_irq_delivered_post_load,
-    .fields = (VMStateField[]) {
-        VMSTATE_INT32(apic_irq_delivered, APICCommonState),
-        VMSTATE_END_OF_LIST()
-    }
-};
-
 static const VMStateDescription vmstate_apic_common = {
     .name = "apic",
     .version_id = 3,
@@ -465,7 +433,6 @@ static const VMStateDescription vmstate_apic_common = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_apic_common_sipi,
-        &vmstate_apic_irq_delivered,
         NULL
     }
 };
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index 2933119e7d..a55ff87c22 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -237,9 +237,8 @@ static void scsi_read_complete(void * opaque, int ret)
         assert(max_transfer);
         stl_be_p(&r->buf[8], max_transfer);
         /* Also take care of the opt xfer len. */
-        if (ldl_be_p(&r->buf[12]) > max_transfer) {
-            stl_be_p(&r->buf[12], max_transfer);
-        }
+        stl_be_p(&r->buf[12],
+                 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
     }
     scsi_req_data(&r->req, len);
     scsi_req_unref(&r->req);
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 74c95e0e60..944ea4eb53 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -52,28 +52,40 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
 static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
                                               VirtQueue *vq)
 {
-    VirtIOSCSI *s = (VirtIOSCSI *)vdev;
+    bool progress;
+    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
 
+    virtio_scsi_acquire(s);
     assert(s->ctx && s->dataplane_started);
-    return virtio_scsi_handle_cmd_vq(s, vq);
+    progress = virtio_scsi_handle_cmd_vq(s, vq);
+    virtio_scsi_release(s);
+    return progress;
 }
 
 static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
                                                VirtQueue *vq)
 {
+    bool progress;
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
 
+    virtio_scsi_acquire(s);
     assert(s->ctx && s->dataplane_started);
-    return virtio_scsi_handle_ctrl_vq(s, vq);
+    progress = virtio_scsi_handle_ctrl_vq(s, vq);
+    virtio_scsi_release(s);
+    return progress;
 }
 
 static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
                                                 VirtQueue *vq)
 {
+    bool progress;
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
 
+    virtio_scsi_acquire(s);
     assert(s->ctx && s->dataplane_started);
-    return virtio_scsi_handle_event_vq(s, vq);
+    progress = virtio_scsi_handle_event_vq(s, vq);
+    virtio_scsi_release(s);
+    return progress;
 }
 
 static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 1dbc4bced9..bd62d08251 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -422,31 +422,15 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
     }
 }
 
-static inline void virtio_scsi_acquire(VirtIOSCSI *s)
-{
-    if (s->ctx) {
-        aio_context_acquire(s->ctx);
-    }
-}
-
-static inline void virtio_scsi_release(VirtIOSCSI *s)
-{
-    if (s->ctx) {
-        aio_context_release(s->ctx);
-    }
-}
-
 bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
     VirtIOSCSIReq *req;
     bool progress = false;
 
-    virtio_scsi_acquire(s);
     while ((req = virtio_scsi_pop_req(s, vq))) {
         progress = true;
         virtio_scsi_handle_ctrl_req(s, req);
     }
-    virtio_scsi_release(s);
     return progress;
 }
 
@@ -460,7 +444,9 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
             return;
         }
     }
+    virtio_scsi_acquire(s);
     virtio_scsi_handle_ctrl_vq(s, vq);
+    virtio_scsi_release(s);
 }
 
 static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
@@ -604,7 +590,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
 
     QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
 
-    virtio_scsi_acquire(s);
     do {
         virtio_queue_set_notification(vq, 0);
 
@@ -632,7 +617,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
     QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
         virtio_scsi_handle_cmd_req_submit(s, req);
     }
-    virtio_scsi_release(s);
     return progress;
 }
 
@@ -647,7 +631,9 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
             return;
         }
     }
+    virtio_scsi_acquire(s);
     virtio_scsi_handle_cmd_vq(s, vq);
+    virtio_scsi_release(s);
 }
 
 static void virtio_scsi_get_config(VirtIODevice *vdev,
@@ -723,12 +709,10 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
         return;
     }
 
-    virtio_scsi_acquire(s);
-
     req = virtio_scsi_pop_req(s, vs->event_vq);
     if (!req) {
         s->events_dropped = true;
-        goto out;
+        return;
     }
 
     if (s->events_dropped) {
@@ -738,7 +722,7 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
 
     if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
         virtio_scsi_bad_req(req);
-        goto out;
+        return;
     }
 
     evt = &req->resp.event;
@@ -758,19 +742,14 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
         evt->lun[3] = dev->lun & 0xFF;
     }
     virtio_scsi_complete_req(req);
-out:
-    virtio_scsi_release(s);
 }
 
 bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
-    virtio_scsi_acquire(s);
     if (s->events_dropped) {
         virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
-        virtio_scsi_release(s);
         return true;
     }
-    virtio_scsi_release(s);
     return false;
 }
 
@@ -784,7 +763,9 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
             return;
         }
     }
+    virtio_scsi_acquire(s);
     virtio_scsi_handle_event_vq(s, vq);
+    virtio_scsi_release(s);
 }
 
 static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
@@ -794,8 +775,10 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
         dev->type != TYPE_ROM) {
+        virtio_scsi_acquire(s);
         virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                                sense.asc | (sense.ascq << 8));
+        virtio_scsi_release(s);
     }
 }
 
@@ -817,9 +800,11 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     }
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
+        virtio_scsi_acquire(s);
         virtio_scsi_push_event(s, sd,
                                VIRTIO_SCSI_T_TRANSPORT_RESET,
                                VIRTIO_SCSI_EVT_RESET_RESCAN);
+        virtio_scsi_release(s);
     }
 }
 
@@ -831,9 +816,11 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     SCSIDevice *sd = SCSI_DEVICE(dev);
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
+        virtio_scsi_acquire(s);
         virtio_scsi_push_event(s, sd,
                                VIRTIO_SCSI_T_TRANSPORT_RESET,
                                VIRTIO_SCSI_EVT_RESET_REMOVED);
+        virtio_scsi_release(s);
     }
 
     qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h
index 20ad28c95b..1209eb483a 100644
--- a/include/hw/i386/apic_internal.h
+++ b/include/hw/i386/apic_internal.h
@@ -189,8 +189,6 @@ struct APICCommonState {
     DeviceState *vapic;
     hwaddr vapic_paddr; /* note: persistence via kvmvapic */
    bool legacy_instance_id;
-
-    int apic_irq_delivered; /* for saving static variable */
 };
 
 typedef struct VAPICState {
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index f536f77e68..8ae0acaa1f 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -121,6 +121,20 @@ typedef struct VirtIOSCSIReq {
     } req;
 } VirtIOSCSIReq;
 
+static inline void virtio_scsi_acquire(VirtIOSCSI *s)
+{
+    if (s->ctx) {
+        aio_context_acquire(s->ctx);
+    }
+}
+
+static inline void virtio_scsi_release(VirtIOSCSI *s)
+{
+    if (s->ctx) {
+        aio_context_release(s->ctx);
+    }
+}
+
 void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
                                 VirtIOHandleOutput ctrl, VirtIOHandleOutput evt,
                                 VirtIOHandleOutput cmd);
diff --git a/include/qemu/thread-win32.h b/include/qemu/thread-win32.h
index 5fb6541ae9..4c4a261cf4 100644
--- a/include/qemu/thread-win32.h
+++ b/include/qemu/thread-win32.h
@@ -4,8 +4,7 @@
 #include <windows.h>
 
 struct QemuMutex {
-    CRITICAL_SECTION lock;
-    LONG owner;
+    SRWLOCK lock;
 };
 
 typedef struct QemuRecMutex QemuRecMutex;
@@ -19,9 +18,7 @@ int qemu_rec_mutex_trylock(QemuRecMutex *mutex);
 void qemu_rec_mutex_unlock(QemuRecMutex *mutex);
 
 struct QemuCond {
-    LONG waiters, target;
-    HANDLE sema;
-    HANDLE continue_event;
+    CONDITION_VARIABLE var;
 };
 
 struct QemuSemaphore {
diff --git a/memory.c b/memory.c
index 64b0a605ef..4c95aaf39c 100644
--- a/memory.c
+++ b/memory.c
@@ -906,12 +906,6 @@ void memory_region_transaction_begin(void)
     ++memory_region_transaction_depth;
 }
 
-static void memory_region_clear_pending(void)
-{
-    memory_region_update_pending = false;
-    ioeventfd_update_pending = false;
-}
-
 void memory_region_transaction_commit(void)
 {
     AddressSpace *as;
@@ -927,14 +921,14 @@ void memory_region_transaction_commit(void)
             QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                 address_space_update_topology(as);
             }
-
+            memory_region_update_pending = false;
             MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
         } else if (ioeventfd_update_pending) {
             QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                 address_space_update_ioeventfds(as);
             }
+            ioeventfd_update_pending = false;
         }
-        memory_region_clear_pending();
     }
 }
 
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 72c1b03a2a..1d1372fb43 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -4418,6 +4418,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
     s->vex_l = 0;
     s->vex_v = 0;
 next_byte:
+    /* x86 has an upper limit of 15 bytes for an instruction. Since we
+     * do not want to decode and generate IR for an illegal
+     * instruction, the following check limits the instruction size to
+     * 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */
+    if (s->pc - pc_start > 14) {
+        goto illegal_op;
+    }
     b = cpu_ldub_code(env, s->pc);
     s->pc++;
     /* Collect prefixes. */
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index 3fe6089c3e..4d9189e9ef 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -55,7 +55,7 @@
 #include "qemu/error-report.h"
 #endif
 
-#define MAX_MEM_PREALLOC_THREAD_COUNT (MIN(sysconf(_SC_NPROCESSORS_ONLN), 16))
+#define MAX_MEM_PREALLOC_THREAD_COUNT 16
 
 struct MemsetThread {
     char *addr;
@@ -381,6 +381,18 @@ static void *do_touch_pages(void *arg)
     return NULL;
 }
 
+static inline int get_memset_num_threads(int smp_cpus)
+{
+    long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
+    int ret = 1;
+
+    if (host_procs > 0) {
+        ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus);
+    }
+    /* In case sysconf() fails, we fall back to single threaded */
+    return ret;
+}
+
 static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                             int smp_cpus)
 {
@@ -389,7 +401,7 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
     int i = 0;
 
     memset_thread_failed = false;
-    memset_num_threads = MIN(smp_cpus, MAX_MEM_PREALLOC_THREAD_COUNT);
+    memset_num_threads = get_memset_num_threads(smp_cpus);
     memset_thread = g_new0(MemsetThread, memset_num_threads);
     numpages_per_thread = (numpages / memset_num_threads);
     size_per_thread = (hpagesize * numpages_per_thread);
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index 29c3e4dd85..59befd5202 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -10,6 +10,11 @@
  * See the COPYING file in the top-level directory.
  *
  */
+
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x0600
+#endif
+
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "qemu/thread.h"
@@ -39,44 +44,30 @@ static void error_exit(int err, const char *msg)
 
 void qemu_mutex_init(QemuMutex *mutex)
 {
-    mutex->owner = 0;
-    InitializeCriticalSection(&mutex->lock);
+    InitializeSRWLock(&mutex->lock);
 }
 
 void qemu_mutex_destroy(QemuMutex *mutex)
 {
-    assert(mutex->owner == 0);
-    DeleteCriticalSection(&mutex->lock);
+    InitializeSRWLock(&mutex->lock);
 }
 
 void qemu_mutex_lock(QemuMutex *mutex)
 {
-    EnterCriticalSection(&mutex->lock);
-
-    /* Win32 CRITICAL_SECTIONs are recursive. Assert that we're not
-     * using them as such.
-     */
-    assert(mutex->owner == 0);
-    mutex->owner = GetCurrentThreadId();
+    AcquireSRWLockExclusive(&mutex->lock);
 }
 
 int qemu_mutex_trylock(QemuMutex *mutex)
 {
     int owned;
 
-    owned = TryEnterCriticalSection(&mutex->lock);
-    if (owned) {
-        assert(mutex->owner == 0);
-        mutex->owner = GetCurrentThreadId();
-    }
+    owned = TryAcquireSRWLockExclusive(&mutex->lock);
     return !owned;
 }
 
 void qemu_mutex_unlock(QemuMutex *mutex)
 {
-    assert(mutex->owner == GetCurrentThreadId());
-    mutex->owner = 0;
-    LeaveCriticalSection(&mutex->lock);
+    ReleaseSRWLockExclusive(&mutex->lock);
 }
 
 void qemu_rec_mutex_init(QemuRecMutex *mutex)
@@ -107,124 +98,27 @@ void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
 void qemu_cond_init(QemuCond *cond)
 {
     memset(cond, 0, sizeof(*cond));
-
-    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
-    if (!cond->sema) {
-        error_exit(GetLastError(), __func__);
-    }
-    cond->continue_event = CreateEvent(NULL,    /* security */
-                                       FALSE,   /* auto-reset */
-                                       FALSE,   /* not signaled */
-                                       NULL);   /* name */
-    if (!cond->continue_event) {
-        error_exit(GetLastError(), __func__);
-    }
+    InitializeConditionVariable(&cond->var);
 }
 
 void qemu_cond_destroy(QemuCond *cond)
 {
-    BOOL result;
-    result = CloseHandle(cond->continue_event);
-    if (!result) {
-        error_exit(GetLastError(), __func__);
-    }
-    cond->continue_event = 0;
-    result = CloseHandle(cond->sema);
-    if (!result) {
-        error_exit(GetLastError(), __func__);
-    }
-    cond->sema = 0;
+    InitializeConditionVariable(&cond->var);
 }
 
 void qemu_cond_signal(QemuCond *cond)
 {
-    DWORD result;
-
-    /*
-     * Signal only when there are waiters. cond->waiters is
-     * incremented by pthread_cond_wait under the external lock,
-     * so we are safe about that.
-     */
-    if (cond->waiters == 0) {
-        return;
-    }
-
-    /*
-     * Waiting threads decrement it outside the external lock, but
-     * only if another thread is executing pthread_cond_broadcast and
-     * has the mutex. So, it also cannot be decremented concurrently
-     * with this particular access.
-     */
-    cond->target = cond->waiters - 1;
-    result = SignalObjectAndWait(cond->sema, cond->continue_event,
-                                 INFINITE, FALSE);
-    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
-        error_exit(GetLastError(), __func__);
-    }
+    WakeConditionVariable(&cond->var);
 }
 
 void qemu_cond_broadcast(QemuCond *cond)
 {
-    BOOLEAN result;
-    /*
-     * As in pthread_cond_signal, access to cond->waiters and
-     * cond->target is locked via the external mutex.
-     */
-    if (cond->waiters == 0) {
-        return;
-    }
-
-    cond->target = 0;
-    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
-    if (!result) {
-        error_exit(GetLastError(), __func__);
-    }
-
-    /*
-     * At this point all waiters continue. Each one takes its
-     * slice of the semaphore. Now it's our turn to wait: Since
-     * the external mutex is held, no thread can leave cond_wait,
-     * yet. For this reason, we can be sure that no thread gets
-     * a chance to eat *more* than one slice. OTOH, it means
-     * that the last waiter must send us a wake-up.
-     */
-    WaitForSingleObject(cond->continue_event, INFINITE);
+    WakeAllConditionVariable(&cond->var);
 }
 
 void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
 {
-    /*
-     * This access is protected under the mutex.
-     */
-    cond->waiters++;
-
-    /*
-     * Unlock external mutex and wait for signal.
-     * NOTE: we've held mutex locked long enough to increment
-     * waiters count above, so there's no problem with
-     * leaving mutex unlocked before we wait on semaphore.
-     */
-    qemu_mutex_unlock(mutex);
-    WaitForSingleObject(cond->sema, INFINITE);
-
-    /* Now waiters must rendez-vous with the signaling thread and
-     * let it continue. For cond_broadcast this has heavy contention
-     * and triggers thundering herd. So goes life.
-     *
-     * Decrease waiters count. The mutex is not taken, so we have
-     * to do this atomically.
-     *
-     * All waiters contend for the mutex at the end of this function
-     * until the signaling thread relinquishes it. To ensure
-     * each waiter consumes exactly one slice of the semaphore,
-     * the signaling thread stops until it is told by the last
-     * waiter that it can go on.
-     */
-    if (InterlockedDecrement(&cond->waiters) == cond->target) {
-        SetEvent(cond->continue_event);
-    }
-
-    qemu_mutex_lock(mutex);
+    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
 }
 
 void qemu_sem_init(QemuSemaphore *sem, int init)