block: make BDRV_POLL_WHILE() re-entrancy safe

Nested BDRV_POLL_WHILE() calls can occur.  Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.

Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).
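
To see why the counter nests where the bool did not: the inner waiter used to find the flag already set and trip the assertion, and on exit it would have cleared the flag out from under the outer waiter. A minimal standalone model (not QEMU code; all names here are invented for illustration):

    #include <assert.h>
    #include <stdio.h>

    static unsigned num_waiters;   /* plays the role of wait_->num_waiters */

    static void wait_enter(void)
    {
        /* Old scheme: assert(!need_kick); need_kick = true;
         * The inner of two nested waiters would trip that assertion. */
        num_waiters++;             /* new scheme: nesting just increments */
    }

    static void wait_exit(void)
    {
        assert(num_waiters > 0);
        num_waiters--;
    }

    int main(void)
    {
        wait_enter();              /* outer AIO_WAIT_WHILE() */
        wait_enter();              /* nested call: no assertion failure now */
        wait_exit();               /* inner caller completes first... */
        printf("waiters still registered: %u\n", num_waiters);
        wait_exit();               /* ...outer stays counted until it finishes */
        assert(num_waiters == 0);
        return 0;
    }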

Reported-by: "fuweiwei (C)" <fuweiwei2@huawei.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20180307124619.6218-1-stefanha@redhat.com
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
master
Stefan Hajnoczi 2018-03-07 12:46:19 +00:00
parent e4ae62b802
commit 7376eda7c2
2 changed files with 31 additions and 32 deletions

include/block/aio-wait.h

@@ -50,8 +50,8 @@
  * }
  */
 typedef struct {
-    /* Is the main loop waiting for a kick?  Accessed with atomic ops. */
-    bool need_kick;
+    /* Number of waiting AIO_WAIT_WHILE() callers.  Accessed with atomic ops. */
+    unsigned num_waiters;
 } AioWait;
 
 /**
@@ -71,35 +71,34 @@ typedef struct {
  * wait on conditions between two IOThreads since that could lead to deadlock,
  * go via the main loop instead.
  */
 #define AIO_WAIT_WHILE(wait, ctx, cond) ({                          \
     bool waited_ = false;                                           \
     bool busy_ = true;                                              \
     AioWait *wait_ = (wait);                                        \
     AioContext *ctx_ = (ctx);                                       \
     if (in_aio_context_home_thread(ctx_)) {                         \
         while ((cond) || busy_) {                                   \
             busy_ = aio_poll(ctx_, (cond));                         \
             waited_ |= !!(cond) | busy_;                            \
         }                                                           \
     } else {                                                        \
         assert(qemu_get_current_aio_context() ==                    \
                qemu_get_aio_context());                             \
-        assert(!wait_->need_kick);                                  \
-        /* Set wait_->need_kick before evaluating cond. */          \
-        atomic_mb_set(&wait_->need_kick, true);                     \
+        /* Increment wait_->num_waiters before evaluating cond. */  \
+        atomic_inc(&wait_->num_waiters);                            \
         while (busy_) {                                             \
             if ((cond)) {                                           \
                 waited_ = busy_ = true;                             \
                 aio_context_release(ctx_);                          \
                 aio_poll(qemu_get_aio_context(), true);             \
                 aio_context_acquire(ctx_);                          \
             } else {                                                \
                 busy_ = aio_poll(ctx_, false);                      \
                 waited_ |= busy_;                                   \
             }                                                       \
         }                                                           \
-        atomic_set(&wait_->need_kick, false);                       \
+        atomic_dec(&wait_->num_waiters);                            \
     }                                                               \
     waited_; })
 
 /**
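
For context, a typical caller of the macro looks something like the sketch below. The AioWait instance and the in_flight counter are invented for illustration; only the AIO_WAIT_WHILE() usage pattern itself comes from this header.

    #include "block/aio-wait.h"
    #include "qemu/atomic.h"

    static AioWait drain_wait;     /* hypothetical; num_waiters starts at 0 */
    static int in_flight;          /* hypothetical in-flight request counter */

    static void dev_drain(AioContext *ctx)
    {
        /* Poll for progress until no requests remain.  With num_waiters a
         * counter, this is safe even when invoked from inside another
         * AIO_WAIT_WHILE() on the same thread. */
        AIO_WAIT_WHILE(&drain_wait, ctx, atomic_read(&in_flight) > 0);
    }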

util/aio-wait.c

@@ -34,7 +34,7 @@ static void dummy_bh_cb(void *opaque)
 void aio_wait_kick(AioWait *wait)
 {
     /* The barrier (or an atomic op) is in the caller.  */
-    if (atomic_read(&wait->need_kick)) {
+    if (atomic_read(&wait->num_waiters)) {
         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
     }
 }
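
The completion side pairs with this check: progress is published with an atomic op, then the waiter is kicked so main-loop AIO_WAIT_WHILE() callers re-evaluate their condition. A sketch reusing the hypothetical names from above:

    /* Completion-side sketch (hypothetical names).  The atomic decrement
     * supplies the "barrier (or an atomic op) in the caller" that the
     * comment above refers to, so aio_wait_kick() can use a plain
     * atomic_read(). */
    static void dev_request_complete(AioWait *wait)
    {
        atomic_dec(&in_flight);    /* make progress visible first */
        aio_wait_kick(wait);       /* wake any main-loop AIO_WAIT_WHILE() */
    }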