* MTTCG fix for win32

* virtio-scsi assertion failure
* mem-prealloc coverity fix
* x86 migration revert which requires more thought
* x86 instruction limit (avoids >2 page translation blocks)
* nbd dead code cleanup
* small memory.c logic fix
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQExBAABCAAbBQJY2Te4FBxwYm9uemluaUByZWRoYXQuY29tAAoJEL/70l94x66D
 GyIH/jMpl0w5cdW2hxzEba5alqALKx8fz8LMFy47lSndifyr74Nbk7fq9u89m9/6
 3dz92sOq4ixUt8+eWEHcy0lJqucrStdMWcA7LsSIioXfgbBN39e9NfJFshXKTSQU
 RSL3M5f5XvYHZqHWhk/GjzlkA2l+Dq2v7FM+DT4HISnP0fjcmGXEfadfUZi6KLao
 94xXGs73pTkln9jm8N1pwn3JuJ4+FbEatrvok01nmTbA7VrrBz0zVbTZjhWz7Tu/
 sqBuIBAnPNKhYZFhF8GnNrXUaIciCbw13QdT047JSfpdkSQ7IUfGt7mW48X0+q9z
 JCHTiTZ35d7/lqeMojgl9ANUDpk=
 =iED8
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* MTTCG fix for win32
* virtio-scsi assertion failure
* mem-prealloc coverity fix
* x86 migration revert which requires more thought
* x86 instruction limit (avoids >2 page translation blocks)
* nbd dead code cleanup
* small memory.c logic fix

# gpg: Signature made Mon 27 Mar 2017 17:03:04 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  scsi-generic: Fill in opt_xfer_len in INQUIRY reply if it is zero
  Revert "apic: save apic_delivered flag"
  nbd: drop unused NBDClientSession.is_unix field
  win32: replace custom mutex and condition variable with native primitives
  mem-prealloc: fix sysconf(_SC_NPROCESSORS_ONLN) failure case.
  tcg/i386: Check the size of instruction being translated
  virtio-scsi: Fix acquire/release in dataplane handlers
  virtio-scsi: Make virtio_scsi_acquire/release public
  clear pending status before calling memory commit

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
master
Peter Maydell 2017-03-27 17:34:50 +01:00
commit eb06c9e2d3
13 changed files with 86 additions and 209 deletions

View File

@@ -30,8 +30,6 @@ typedef struct NBDClientSession {
Coroutine *recv_coroutine[MAX_NBD_REQUESTS]; Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
NBDReply reply; NBDReply reply;
bool is_unix;
} NBDClientSession; } NBDClientSession;
NBDClientSession *nbd_get_client_session(BlockDriverState *bs); NBDClientSession *nbd_get_client_session(BlockDriverState *bs);

View File

@@ -285,8 +285,6 @@ static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options, Error **errp)
goto done; goto done;
} }
s->client.is_unix = saddr->type == SOCKET_ADDRESS_KIND_UNIX;
done: done:
QDECREF(addr); QDECREF(addr);
qobject_decref(crumpled_addr); qobject_decref(crumpled_addr);

View File

@@ -387,25 +387,6 @@ static bool apic_common_sipi_needed(void *opaque)
return s->wait_for_sipi != 0; return s->wait_for_sipi != 0;
} }
static bool apic_irq_delivered_needed(void *opaque)
{
APICCommonState *s = APIC_COMMON(opaque);
return s->cpu == X86_CPU(first_cpu) && apic_irq_delivered != 0;
}
static void apic_irq_delivered_pre_save(void *opaque)
{
APICCommonState *s = APIC_COMMON(opaque);
s->apic_irq_delivered = apic_irq_delivered;
}
static int apic_irq_delivered_post_load(void *opaque, int version_id)
{
APICCommonState *s = APIC_COMMON(opaque);
apic_irq_delivered = s->apic_irq_delivered;
return 0;
}
static const VMStateDescription vmstate_apic_common_sipi = { static const VMStateDescription vmstate_apic_common_sipi = {
.name = "apic_sipi", .name = "apic_sipi",
.version_id = 1, .version_id = 1,
@@ -418,19 +399,6 @@ static const VMStateDescription vmstate_apic_common_sipi = {
} }
}; };
static const VMStateDescription vmstate_apic_irq_delivered = {
.name = "apic_irq_delivered",
.version_id = 1,
.minimum_version_id = 1,
.needed = apic_irq_delivered_needed,
.pre_save = apic_irq_delivered_pre_save,
.post_load = apic_irq_delivered_post_load,
.fields = (VMStateField[]) {
VMSTATE_INT32(apic_irq_delivered, APICCommonState),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_apic_common = { static const VMStateDescription vmstate_apic_common = {
.name = "apic", .name = "apic",
.version_id = 3, .version_id = 3,
@@ -465,7 +433,6 @@ static const VMStateDescription vmstate_apic_common = {
}, },
.subsections = (const VMStateDescription*[]) { .subsections = (const VMStateDescription*[]) {
&vmstate_apic_common_sipi, &vmstate_apic_common_sipi,
&vmstate_apic_irq_delivered,
NULL NULL
} }
}; };

View File

@@ -237,9 +237,8 @@ static void scsi_read_complete(void * opaque, int ret)
assert(max_transfer); assert(max_transfer);
stl_be_p(&r->buf[8], max_transfer); stl_be_p(&r->buf[8], max_transfer);
/* Also take care of the opt xfer len. */ /* Also take care of the opt xfer len. */
if (ldl_be_p(&r->buf[12]) > max_transfer) { stl_be_p(&r->buf[12],
stl_be_p(&r->buf[12], max_transfer); MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
}
} }
scsi_req_data(&r->req, len); scsi_req_data(&r->req, len);
scsi_req_unref(&r->req); scsi_req_unref(&r->req);

View File

@@ -52,28 +52,40 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev, static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
VirtQueue *vq) VirtQueue *vq)
{ {
VirtIOSCSI *s = (VirtIOSCSI *)vdev; bool progress;
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
virtio_scsi_acquire(s);
assert(s->ctx && s->dataplane_started); assert(s->ctx && s->dataplane_started);
return virtio_scsi_handle_cmd_vq(s, vq); progress = virtio_scsi_handle_cmd_vq(s, vq);
virtio_scsi_release(s);
return progress;
} }
static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev, static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
VirtQueue *vq) VirtQueue *vq)
{ {
bool progress;
VirtIOSCSI *s = VIRTIO_SCSI(vdev); VirtIOSCSI *s = VIRTIO_SCSI(vdev);
virtio_scsi_acquire(s);
assert(s->ctx && s->dataplane_started); assert(s->ctx && s->dataplane_started);
return virtio_scsi_handle_ctrl_vq(s, vq); progress = virtio_scsi_handle_ctrl_vq(s, vq);
virtio_scsi_release(s);
return progress;
} }
static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev, static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
VirtQueue *vq) VirtQueue *vq)
{ {
bool progress;
VirtIOSCSI *s = VIRTIO_SCSI(vdev); VirtIOSCSI *s = VIRTIO_SCSI(vdev);
virtio_scsi_acquire(s);
assert(s->ctx && s->dataplane_started); assert(s->ctx && s->dataplane_started);
return virtio_scsi_handle_event_vq(s, vq); progress = virtio_scsi_handle_event_vq(s, vq);
virtio_scsi_release(s);
return progress;
} }
static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n, static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,

View File

@@ -422,31 +422,15 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
} }
} }
static inline void virtio_scsi_acquire(VirtIOSCSI *s)
{
if (s->ctx) {
aio_context_acquire(s->ctx);
}
}
static inline void virtio_scsi_release(VirtIOSCSI *s)
{
if (s->ctx) {
aio_context_release(s->ctx);
}
}
bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{ {
VirtIOSCSIReq *req; VirtIOSCSIReq *req;
bool progress = false; bool progress = false;
virtio_scsi_acquire(s);
while ((req = virtio_scsi_pop_req(s, vq))) { while ((req = virtio_scsi_pop_req(s, vq))) {
progress = true; progress = true;
virtio_scsi_handle_ctrl_req(s, req); virtio_scsi_handle_ctrl_req(s, req);
} }
virtio_scsi_release(s);
return progress; return progress;
} }
@@ -460,7 +444,9 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
return; return;
} }
} }
virtio_scsi_acquire(s);
virtio_scsi_handle_ctrl_vq(s, vq); virtio_scsi_handle_ctrl_vq(s, vq);
virtio_scsi_release(s);
} }
static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req) static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
@@ -604,7 +590,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs); QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
virtio_scsi_acquire(s);
do { do {
virtio_queue_set_notification(vq, 0); virtio_queue_set_notification(vq, 0);
@@ -632,7 +617,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
QTAILQ_FOREACH_SAFE(req, &reqs, next, next) { QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
virtio_scsi_handle_cmd_req_submit(s, req); virtio_scsi_handle_cmd_req_submit(s, req);
} }
virtio_scsi_release(s);
return progress; return progress;
} }
@@ -647,7 +631,9 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
return; return;
} }
} }
virtio_scsi_acquire(s);
virtio_scsi_handle_cmd_vq(s, vq); virtio_scsi_handle_cmd_vq(s, vq);
virtio_scsi_release(s);
} }
static void virtio_scsi_get_config(VirtIODevice *vdev, static void virtio_scsi_get_config(VirtIODevice *vdev,
@@ -723,12 +709,10 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
return; return;
} }
virtio_scsi_acquire(s);
req = virtio_scsi_pop_req(s, vs->event_vq); req = virtio_scsi_pop_req(s, vs->event_vq);
if (!req) { if (!req) {
s->events_dropped = true; s->events_dropped = true;
goto out; return;
} }
if (s->events_dropped) { if (s->events_dropped) {
@@ -738,7 +722,7 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) { if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
virtio_scsi_bad_req(req); virtio_scsi_bad_req(req);
goto out; return;
} }
evt = &req->resp.event; evt = &req->resp.event;
@@ -758,19 +742,14 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
evt->lun[3] = dev->lun & 0xFF; evt->lun[3] = dev->lun & 0xFF;
} }
virtio_scsi_complete_req(req); virtio_scsi_complete_req(req);
out:
virtio_scsi_release(s);
} }
bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{ {
virtio_scsi_acquire(s);
if (s->events_dropped) { if (s->events_dropped) {
virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
virtio_scsi_release(s);
return true; return true;
} }
virtio_scsi_release(s);
return false; return false;
} }
@@ -784,7 +763,9 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
return; return;
} }
} }
virtio_scsi_acquire(s);
virtio_scsi_handle_event_vq(s, vq); virtio_scsi_handle_event_vq(s, vq);
virtio_scsi_release(s);
} }
static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
@@ -794,8 +775,10 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) && if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
dev->type != TYPE_ROM) { dev->type != TYPE_ROM) {
virtio_scsi_acquire(s);
virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE, virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
sense.asc | (sense.ascq << 8)); sense.asc | (sense.ascq << 8));
virtio_scsi_release(s);
} }
} }
@@ -817,9 +800,11 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
} }
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
virtio_scsi_acquire(s);
virtio_scsi_push_event(s, sd, virtio_scsi_push_event(s, sd,
VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_T_TRANSPORT_RESET,
VIRTIO_SCSI_EVT_RESET_RESCAN); VIRTIO_SCSI_EVT_RESET_RESCAN);
virtio_scsi_release(s);
} }
} }
@@ -831,9 +816,11 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
SCSIDevice *sd = SCSI_DEVICE(dev); SCSIDevice *sd = SCSI_DEVICE(dev);
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
virtio_scsi_acquire(s);
virtio_scsi_push_event(s, sd, virtio_scsi_push_event(s, sd,
VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_T_TRANSPORT_RESET,
VIRTIO_SCSI_EVT_RESET_REMOVED); VIRTIO_SCSI_EVT_RESET_REMOVED);
virtio_scsi_release(s);
} }
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);

View File

@@ -189,8 +189,6 @@ struct APICCommonState {
DeviceState *vapic; DeviceState *vapic;
hwaddr vapic_paddr; /* note: persistence via kvmvapic */ hwaddr vapic_paddr; /* note: persistence via kvmvapic */
bool legacy_instance_id; bool legacy_instance_id;
int apic_irq_delivered; /* for saving static variable */
}; };
typedef struct VAPICState { typedef struct VAPICState {

View File

@@ -121,6 +121,20 @@ typedef struct VirtIOSCSIReq {
} req; } req;
} VirtIOSCSIReq; } VirtIOSCSIReq;
static inline void virtio_scsi_acquire(VirtIOSCSI *s)
{
if (s->ctx) {
aio_context_acquire(s->ctx);
}
}
static inline void virtio_scsi_release(VirtIOSCSI *s)
{
if (s->ctx) {
aio_context_release(s->ctx);
}
}
void virtio_scsi_common_realize(DeviceState *dev, Error **errp, void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
VirtIOHandleOutput ctrl, VirtIOHandleOutput evt, VirtIOHandleOutput ctrl, VirtIOHandleOutput evt,
VirtIOHandleOutput cmd); VirtIOHandleOutput cmd);

View File

@@ -4,8 +4,7 @@
#include <windows.h> #include <windows.h>
struct QemuMutex { struct QemuMutex {
CRITICAL_SECTION lock; SRWLOCK lock;
LONG owner;
}; };
typedef struct QemuRecMutex QemuRecMutex; typedef struct QemuRecMutex QemuRecMutex;
@@ -19,9 +18,7 @@ int qemu_rec_mutex_trylock(QemuRecMutex *mutex);
void qemu_rec_mutex_unlock(QemuRecMutex *mutex); void qemu_rec_mutex_unlock(QemuRecMutex *mutex);
struct QemuCond { struct QemuCond {
LONG waiters, target; CONDITION_VARIABLE var;
HANDLE sema;
HANDLE continue_event;
}; };
struct QemuSemaphore { struct QemuSemaphore {

View File

@@ -906,12 +906,6 @@ void memory_region_transaction_begin(void)
++memory_region_transaction_depth; ++memory_region_transaction_depth;
} }
static void memory_region_clear_pending(void)
{
memory_region_update_pending = false;
ioeventfd_update_pending = false;
}
void memory_region_transaction_commit(void) void memory_region_transaction_commit(void)
{ {
AddressSpace *as; AddressSpace *as;
@@ -927,14 +921,14 @@ void memory_region_transaction_commit(void)
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
address_space_update_topology(as); address_space_update_topology(as);
} }
memory_region_update_pending = false;
MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
} else if (ioeventfd_update_pending) { } else if (ioeventfd_update_pending) {
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
address_space_update_ioeventfds(as); address_space_update_ioeventfds(as);
} }
ioeventfd_update_pending = false;
} }
memory_region_clear_pending();
} }
} }

View File

@@ -4418,6 +4418,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
s->vex_l = 0; s->vex_l = 0;
s->vex_v = 0; s->vex_v = 0;
next_byte: next_byte:
/* x86 has an upper limit of 15 bytes for an instruction. Since we
* do not want to decode and generate IR for an illegal
* instruction, the following check limits the instruction size to
* 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */
if (s->pc - pc_start > 14) {
goto illegal_op;
}
b = cpu_ldub_code(env, s->pc); b = cpu_ldub_code(env, s->pc);
s->pc++; s->pc++;
/* Collect prefixes. */ /* Collect prefixes. */

View File

@@ -55,7 +55,7 @@
#include "qemu/error-report.h" #include "qemu/error-report.h"
#endif #endif
#define MAX_MEM_PREALLOC_THREAD_COUNT (MIN(sysconf(_SC_NPROCESSORS_ONLN), 16)) #define MAX_MEM_PREALLOC_THREAD_COUNT 16
struct MemsetThread { struct MemsetThread {
char *addr; char *addr;
@@ -381,6 +381,18 @@ static void *do_touch_pages(void *arg)
return NULL; return NULL;
} }
static inline int get_memset_num_threads(int smp_cpus)
{
long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
int ret = 1;
if (host_procs > 0) {
ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus);
}
/* In case sysconf() fails, we fall back to single threaded */
return ret;
}
static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages, static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
int smp_cpus) int smp_cpus)
{ {
@@ -389,7 +401,7 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
int i = 0; int i = 0;
memset_thread_failed = false; memset_thread_failed = false;
memset_num_threads = MIN(smp_cpus, MAX_MEM_PREALLOC_THREAD_COUNT); memset_num_threads = get_memset_num_threads(smp_cpus);
memset_thread = g_new0(MemsetThread, memset_num_threads); memset_thread = g_new0(MemsetThread, memset_num_threads);
numpages_per_thread = (numpages / memset_num_threads); numpages_per_thread = (numpages / memset_num_threads);
size_per_thread = (hpagesize * numpages_per_thread); size_per_thread = (hpagesize * numpages_per_thread);

View File

@@ -10,6 +10,11 @@
* See the COPYING file in the top-level directory. * See the COPYING file in the top-level directory.
* *
*/ */
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0600
#endif
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu-common.h" #include "qemu-common.h"
#include "qemu/thread.h" #include "qemu/thread.h"
@@ -39,44 +44,30 @@ static void error_exit(int err, const char *msg)
void qemu_mutex_init(QemuMutex *mutex) void qemu_mutex_init(QemuMutex *mutex)
{ {
mutex->owner = 0; InitializeSRWLock(&mutex->lock);
InitializeCriticalSection(&mutex->lock);
} }
void qemu_mutex_destroy(QemuMutex *mutex) void qemu_mutex_destroy(QemuMutex *mutex)
{ {
assert(mutex->owner == 0); InitializeSRWLock(&mutex->lock);
DeleteCriticalSection(&mutex->lock);
} }
void qemu_mutex_lock(QemuMutex *mutex) void qemu_mutex_lock(QemuMutex *mutex)
{ {
EnterCriticalSection(&mutex->lock); AcquireSRWLockExclusive(&mutex->lock);
/* Win32 CRITICAL_SECTIONs are recursive. Assert that we're not
* using them as such.
*/
assert(mutex->owner == 0);
mutex->owner = GetCurrentThreadId();
} }
int qemu_mutex_trylock(QemuMutex *mutex) int qemu_mutex_trylock(QemuMutex *mutex)
{ {
int owned; int owned;
owned = TryEnterCriticalSection(&mutex->lock); owned = TryAcquireSRWLockExclusive(&mutex->lock);
if (owned) {
assert(mutex->owner == 0);
mutex->owner = GetCurrentThreadId();
}
return !owned; return !owned;
} }
void qemu_mutex_unlock(QemuMutex *mutex) void qemu_mutex_unlock(QemuMutex *mutex)
{ {
assert(mutex->owner == GetCurrentThreadId()); ReleaseSRWLockExclusive(&mutex->lock);
mutex->owner = 0;
LeaveCriticalSection(&mutex->lock);
} }
void qemu_rec_mutex_init(QemuRecMutex *mutex) void qemu_rec_mutex_init(QemuRecMutex *mutex)
@@ -107,124 +98,27 @@ void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
void qemu_cond_init(QemuCond *cond) void qemu_cond_init(QemuCond *cond)
{ {
memset(cond, 0, sizeof(*cond)); memset(cond, 0, sizeof(*cond));
InitializeConditionVariable(&cond->var);
cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
if (!cond->sema) {
error_exit(GetLastError(), __func__);
}
cond->continue_event = CreateEvent(NULL, /* security */
FALSE, /* auto-reset */
FALSE, /* not signaled */
NULL); /* name */
if (!cond->continue_event) {
error_exit(GetLastError(), __func__);
}
} }
void qemu_cond_destroy(QemuCond *cond) void qemu_cond_destroy(QemuCond *cond)
{ {
BOOL result; InitializeConditionVariable(&cond->var);
result = CloseHandle(cond->continue_event);
if (!result) {
error_exit(GetLastError(), __func__);
}
cond->continue_event = 0;
result = CloseHandle(cond->sema);
if (!result) {
error_exit(GetLastError(), __func__);
}
cond->sema = 0;
} }
void qemu_cond_signal(QemuCond *cond) void qemu_cond_signal(QemuCond *cond)
{ {
DWORD result; WakeConditionVariable(&cond->var);
/*
* Signal only when there are waiters. cond->waiters is
* incremented by pthread_cond_wait under the external lock,
* so we are safe about that.
*/
if (cond->waiters == 0) {
return;
}
/*
* Waiting threads decrement it outside the external lock, but
* only if another thread is executing pthread_cond_broadcast and
* has the mutex. So, it also cannot be decremented concurrently
* with this particular access.
*/
cond->target = cond->waiters - 1;
result = SignalObjectAndWait(cond->sema, cond->continue_event,
INFINITE, FALSE);
if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
error_exit(GetLastError(), __func__);
}
} }
void qemu_cond_broadcast(QemuCond *cond) void qemu_cond_broadcast(QemuCond *cond)
{ {
BOOLEAN result; WakeAllConditionVariable(&cond->var);
/*
* As in pthread_cond_signal, access to cond->waiters and
* cond->target is locked via the external mutex.
*/
if (cond->waiters == 0) {
return;
}
cond->target = 0;
result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
if (!result) {
error_exit(GetLastError(), __func__);
}
/*
* At this point all waiters continue. Each one takes its
* slice of the semaphore. Now it's our turn to wait: Since
* the external mutex is held, no thread can leave cond_wait,
* yet. For this reason, we can be sure that no thread gets
* a chance to eat *more* than one slice. OTOH, it means
* that the last waiter must send us a wake-up.
*/
WaitForSingleObject(cond->continue_event, INFINITE);
} }
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{ {
/* SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
* This access is protected under the mutex.
*/
cond->waiters++;
/*
* Unlock external mutex and wait for signal.
* NOTE: we've held mutex locked long enough to increment
* waiters count above, so there's no problem with
* leaving mutex unlocked before we wait on semaphore.
*/
qemu_mutex_unlock(mutex);
WaitForSingleObject(cond->sema, INFINITE);
/* Now waiters must rendez-vous with the signaling thread and
* let it continue. For cond_broadcast this has heavy contention
* and triggers thundering herd. So goes life.
*
* Decrease waiters count. The mutex is not taken, so we have
* to do this atomically.
*
* All waiters contend for the mutex at the end of this function
* until the signaling thread relinquishes it. To ensure
* each waiter consumes exactly one slice of the semaphore,
* the signaling thread stops until it is told by the last
* waiter that it can go on.
*/
if (InterlockedDecrement(&cond->waiters) == cond->target) {
SetEvent(cond->continue_event);
}
qemu_mutex_lock(mutex);
} }
void qemu_sem_init(QemuSemaphore *sem, int init) void qemu_sem_init(QemuSemaphore *sem, int init)