async: Add an optional reentrancy guard to the BH API

Devices can pass their MemReentrancyGuard (from their DeviceState),
when creating new BHes. Then, the async API will toggle the guard
before/after calling the BH call-back. This prevents bh->mmio reentrancy
issues.

Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Message-Id: <20230427211013.2994127-3-alxndr@bu.edu>
[thuth: Fix "line over 90 characters" checkpatch.pl error]
Signed-off-by: Thomas Huth <thuth@redhat.com>
(cherry picked from commit 9c86c97f12)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
[mjt: minor context adjustment in include/block/aio.h and include/qemu/main-loop.h for 7.2]
Alexander Bulekov 2023-04-27 17:10:07 -04:00 committed by Michael Tokarev
parent c40ca2301c
commit 61dacb401b
7 changed files with 52 additions and 8 deletions

View File

@@ -61,6 +61,7 @@ There are several old APIs that use the main loop AioContext:
 * LEGACY qemu_aio_set_event_notifier() - monitor an event notifier
 * LEGACY timer_new_ms() - create a timer
 * LEGACY qemu_bh_new() - create a BH
+* LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard
 * LEGACY qemu_aio_wait() - run an event loop iteration

 Since they implicitly work on the main loop they cannot be used in code that
@@ -72,8 +73,14 @@ Instead, use the AioContext functions directly (see include/block/aio.h):
 * aio_set_event_notifier() - monitor an event notifier
 * aio_timer_new() - create a timer
 * aio_bh_new() - create a BH
+* aio_bh_new_guarded() - create a BH with a device re-entrancy guard
 * aio_poll() - run an event loop iteration

+The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard"
+argument, which is used to check for and prevent re-entrancy problems. For
+BHs associated with devices, the reentrancy-guard is contained in the
+corresponding DeviceState and named "mem_reentrancy_guard".
+
 The AioContext can be obtained from the IOThread using
 iothread_get_aio_context() or for the main loop using qemu_get_aio_context().
 Code that takes an AioContext argument works both in IOThreads or the main

View File

@@ -22,6 +22,8 @@
 #include "qemu/event_notifier.h"
 #include "qemu/thread.h"
 #include "qemu/timer.h"
+#include "hw/qdev-core.h"
+

 typedef struct BlockAIOCB BlockAIOCB;
 typedef void BlockCompletionFunc(void *opaque, int ret);
@@ -323,9 +325,11 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
  * is opaque and must be allocated prior to its use.
  *
  * @name: A human-readable identifier for debugging purposes.
+ * @reentrancy_guard: A guard set when entering a cb to prevent
+ * device-reentrancy issues
  */
 QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
-                        const char *name);
+                        const char *name, MemReentrancyGuard *reentrancy_guard);

 /**
  * aio_bh_new: Allocate a new bottom half structure
@@ -334,7 +338,17 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
  * string.
  */
 #define aio_bh_new(ctx, cb, opaque) \
-    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))
+    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)
+
+/**
+ * aio_bh_new_guarded: Allocate a new bottom half structure with a
+ * reentrancy_guard
+ *
+ * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
+ * string.
+ */
+#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
+    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)

 /**
  * aio_notify: Force processing of pending events.

View File

@@ -360,9 +360,12 @@ void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);

 void qemu_fd_register(int fd);

+#define qemu_bh_new_guarded(cb, opaque, guard) \
+    qemu_bh_new_full((cb), (opaque), (stringify(cb)), guard)
 #define qemu_bh_new(cb, opaque) \
-    qemu_bh_new_full((cb), (opaque), (stringify(cb)))
-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name);
+    qemu_bh_new_full((cb), (opaque), (stringify(cb)), NULL)
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
+                         MemReentrancyGuard *reentrancy_guard);
 void qemu_bh_schedule_idle(QEMUBH *bh);

 enum {

View File

@@ -107,7 +107,8 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask)
     return deadline;
 }

-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name)
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
+                         MemReentrancyGuard *reentrancy_guard)
 {
     QEMUBH *bh = g_new(QEMUBH, 1);

View File

@@ -64,6 +64,7 @@ struct QEMUBH {
     void *opaque;
     QSLIST_ENTRY(QEMUBH) next;
     unsigned flags;
+    MemReentrancyGuard *reentrancy_guard;
 };

 /* Called concurrently from any thread */
@@ -132,7 +133,7 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
 }

 QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
-                        const char *name)
+                        const char *name, MemReentrancyGuard *reentrancy_guard)
 {
     QEMUBH *bh;
     bh = g_new(QEMUBH, 1);
@@ -141,13 +142,28 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
         .cb = cb,
         .opaque = opaque,
         .name = name,
+        .reentrancy_guard = reentrancy_guard,
     };
     return bh;
 }

 void aio_bh_call(QEMUBH *bh)
 {
+    bool last_engaged_in_io = false;
+
+    if (bh->reentrancy_guard) {
+        last_engaged_in_io = bh->reentrancy_guard->engaged_in_io;
+        if (bh->reentrancy_guard->engaged_in_io) {
+            trace_reentrant_aio(bh->ctx, bh->name);
+        }
+        bh->reentrancy_guard->engaged_in_io = true;
+    }
+
     bh->cb(bh->opaque);
+
+    if (bh->reentrancy_guard) {
+        bh->reentrancy_guard->engaged_in_io = last_engaged_in_io;
+    }
 }

 /* Multiple occurrences of aio_bh_poll cannot be called concurrently. */

View File

@@ -619,9 +619,11 @@ void main_loop_wait(int nonblocking)

 /* Functions to operate on the main QEMU AioContext. */

-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name)
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
+                         MemReentrancyGuard *reentrancy_guard)
 {
-    return aio_bh_new_full(qemu_aio_context, cb, opaque, name);
+    return aio_bh_new_full(qemu_aio_context, cb, opaque, name,
+                           reentrancy_guard);
 }

 /*

View File

@@ -11,6 +11,7 @@ poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d"

 # async.c
 aio_co_schedule(void *ctx, void *co) "ctx %p co %p"
 aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"
+reentrant_aio(void *ctx, const char *name) "ctx %p name %s"

 # thread-pool.c
 thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"