graph-lock: TSA annotations for lock/unlock functions

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20221207131838.239125-15-kwolf@redhat.com>
Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Kevin Wolf 2022-12-07 14:18:34 +01:00
parent 3f35f82e04
commit 4002ffdc4f
2 changed files with 73 additions and 10 deletions

block/graph-lock.c

@@ -24,6 +24,9 @@
 #include "block/block.h"
 #include "block/block_int.h"
 
+/* Dummy lock object to use for Thread Safety Analysis (TSA) */
+BdrvGraphLock graph_lock;
+
 /* Protects the list of aiocontext and orphaned_reader_count */
 static QemuMutex aio_context_list_lock;

include/block/graph-lock.h

@@ -21,6 +21,7 @@
 #define GRAPH_LOCK_H
 
 #include "qemu/osdep.h"
+#include "qemu/clang-tsa.h"
 #include "qemu/coroutine.h"
@@ -57,6 +58,35 @@
  */
 typedef struct BdrvGraphRWlock BdrvGraphRWlock;
 
+/* Dummy lock object to use for Thread Safety Analysis (TSA) */
+typedef struct TSA_CAPABILITY("mutex") BdrvGraphLock {
+} BdrvGraphLock;
+
+extern BdrvGraphLock graph_lock;
+
+/*
+ * clang doesn't check consistency in locking annotations between forward
+ * declarations and the function definition. Having the annotation on the
+ * definition, but not the declaration in a header file, may give the reader
+ * a false sense of security because the condition actually remains unchecked
+ * for callers in other source files.
+ *
+ * Therefore, as a convention, for public functions, GRAPH_RDLOCK and
+ * GRAPH_WRLOCK annotations should be present only in the header file.
+ */
+#define GRAPH_WRLOCK TSA_REQUIRES(graph_lock)
+#define GRAPH_RDLOCK TSA_REQUIRES_SHARED(graph_lock)
+
+/*
+ * TSA annotations are not part of function types, so checks are defeated when
+ * using a function pointer. As a workaround, annotate function pointers with
+ * this macro that will require that the lock is at least taken while reading
+ * the pointer. In most cases this is equivalent to actually protecting the
+ * function call.
+ */
+#define GRAPH_RDLOCK_PTR TSA_GUARDED_BY(graph_lock)
+#define GRAPH_WRLOCK_PTR TSA_GUARDED_BY(graph_lock)
+
 /*
  * register_aiocontext:
  * Add AioContext @ctx to the list of AioContext.
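
To make the intended use of these new macros concrete, here is a hedged sketch (the function, struct and member names are invented for illustration and are not part of this patch): a public graph-accessing function carries GRAPH_RDLOCK on its header declaration, and a callback pointer that must only be followed under the lock is marked GRAPH_RDLOCK_PTR.

/* Sketch only: a public function whose callers must hold the reader lock */
int GRAPH_RDLOCK bdrv_example_count_children(BlockDriverState *bs);

/*
 * Sketch only: the pointer may only be read, and therefore the callback only
 * reached, while the graph lock is held at least shared.
 */
typedef struct ExampleDriverOps {
    int GRAPH_RDLOCK_PTR (*inspect)(BlockDriverState *bs);
} ExampleDriverOps;
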
@@ -85,14 +115,14 @@ void unregister_aiocontext(AioContext *ctx);
  * This function polls. Callers must not hold the lock of any AioContext other
  * than the current one.
  */
-void bdrv_graph_wrlock(void);
+void bdrv_graph_wrlock(void) TSA_ACQUIRE(graph_lock) TSA_NO_TSA;
 
 /*
  * bdrv_graph_wrunlock:
  * Write finished, reset global has_writer to 0 and restart
  * all readers that are waiting.
  */
-void bdrv_graph_wrunlock(void);
+void bdrv_graph_wrunlock(void) TSA_RELEASE(graph_lock) TSA_NO_TSA;
 
 /*
  * bdrv_graph_co_rdlock:
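
A minimal writer-side caller might look as follows (sketch only; the function name is invented). Because the declarations above are annotated with TSA_ACQUIRE()/TSA_RELEASE(), TSA treats the lock as held between the two calls, accepts GRAPH_WRLOCK callees there, and warns if the function can return without the matching wrunlock.

static void example_update_graph(void)
{
    bdrv_graph_wrlock();
    /* ... graph modifications requiring GRAPH_WRLOCK are accepted here ... */
    bdrv_graph_wrunlock();
}
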
@@ -116,7 +146,8 @@ void bdrv_graph_wrunlock(void);
  * loop) to take it and wait that the coroutine ends, so that
  * we always signal that a reader is running.
  */
-void coroutine_fn bdrv_graph_co_rdlock(void);
+void coroutine_fn TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_co_rdlock(void);
 
 /*
  * bdrv_graph_rdunlock:
@@ -124,7 +155,8 @@ void coroutine_fn bdrv_graph_co_rdlock(void);
  * If the writer is waiting for reads to finish (has_writer == 1), signal
  * the writer that we are done via aio_wait_kick() to let it continue.
  */
-void coroutine_fn bdrv_graph_co_rdunlock(void);
+void coroutine_fn TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_co_rdunlock(void);
 
 /*
  * bdrv_graph_rd{un}lock_main_loop:
@@ -132,8 +164,11 @@ void coroutine_fn bdrv_graph_co_rdunlock(void);
  * in the main loop. It is just asserting that we are not
  * in a coroutine and in GLOBAL_STATE_CODE.
  */
-void bdrv_graph_rdlock_main_loop(void);
-void bdrv_graph_rdunlock_main_loop(void);
+void TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_rdlock_main_loop(void);
+
+void TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_rdunlock_main_loop(void);
 
 /*
  * assert_bdrv_graph_readable:
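
The reader side works the same way with the shared variants (sketch only; the callee is the hypothetical GRAPH_RDLOCK function from the earlier sketch): coroutines use the co_ pair, while non-coroutine main-loop code uses the _main_loop pair, which merely asserts the conditions described above.

static int coroutine_fn example_co_reader(BlockDriverState *bs)
{
    int ret;

    bdrv_graph_co_rdlock();
    ret = bdrv_example_count_children(bs);  /* GRAPH_RDLOCK callee: accepted */
    bdrv_graph_co_rdunlock();

    return ret;
}

static int example_main_loop_reader(BlockDriverState *bs)
{
    int ret;

    bdrv_graph_rdlock_main_loop();
    ret = bdrv_example_count_children(bs);
    bdrv_graph_rdunlock_main_loop();

    return ret;
}
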
@@ -150,6 +185,17 @@ void assert_bdrv_graph_readable(void);
  */
 void assert_bdrv_graph_writable(void);
 
+/*
+ * Calling this function tells TSA that we know that the lock is effectively
+ * taken even though we cannot prove it (yet) with GRAPH_RDLOCK. This can be
+ * useful in intermediate stages of a conversion to using the GRAPH_RDLOCK
+ * macro.
+ */
+static inline void TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
+assume_graph_lock(void)
+{
+}
+
 typedef struct GraphLockable { } GraphLockable;
 
 /*
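
As a sketch of the intermediate conversion stage this helper targets (function names invented): a function that is in fact only reached with the reader lock held, but whose callers are not all annotated yet, can call assume_graph_lock() instead of being marked GRAPH_RDLOCK, and drop the call once the conversion is complete.

static int example_partially_converted(BlockDriverState *bs)
{
    /* TODO: annotate all callers, then replace this with GRAPH_RDLOCK */
    assume_graph_lock();
    return bdrv_example_count_children(bs);  /* hypothetical GRAPH_RDLOCK callee */
}
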
@@ -159,13 +205,21 @@ typedef struct GraphLockable { } GraphLockable;
  */
 #define GML_OBJ_() (&(GraphLockable) { })
 
-static inline GraphLockable *graph_lockable_auto_lock(GraphLockable *x)
+/*
+ * This is not marked as TSA_ACQUIRE() because TSA doesn't understand the
+ * cleanup attribute and would therefore complain that the graph is never
+ * unlocked. TSA_ASSERT() makes sure that the following calls know that we
+ * hold the lock while unlocking is left unchecked.
+ */
+static inline GraphLockable * TSA_ASSERT(graph_lock) TSA_NO_TSA
+graph_lockable_auto_lock(GraphLockable *x)
 {
     bdrv_graph_co_rdlock();
     return x;
 }
 
-static inline void graph_lockable_auto_unlock(GraphLockable *x)
+static inline void TSA_NO_TSA
+graph_lockable_auto_unlock(GraphLockable *x)
 {
     bdrv_graph_co_rdunlock();
 }
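
In practice these helpers are not called directly; assuming the GRAPH_RDLOCK_GUARD()/WITH_GRAPH_RDLOCK_GUARD() wrappers defined elsewhere in this header (not shown in this hunk), which pair graph_lockable_auto_lock() with an automatic-cleanup unlock, a guarded coroutine reader could be sketched as:

static int coroutine_fn example_co_guarded(BlockDriverState *bs)
{
    int ret;

    WITH_GRAPH_RDLOCK_GUARD() {
        /* TSA_ASSERT() above makes GRAPH_RDLOCK callees acceptable here */
        ret = bdrv_example_count_children(bs);
    }
    /* unlocked again via the cleanup attribute when the block is left */

    return ret;
}
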
@@ -195,14 +249,20 @@ typedef struct GraphLockableMainloop { } GraphLockableMainloop;
  */
 #define GMLML_OBJ_() (&(GraphLockableMainloop) { })
 
-static inline GraphLockableMainloop *
+/*
+ * This is not marked as TSA_ACQUIRE() because TSA doesn't understand the
+ * cleanup attribute and would therefore complain that the graph is never
+ * unlocked. TSA_ASSERT() makes sure that the following calls know that we
+ * hold the lock while unlocking is left unchecked.
+ */
+static inline GraphLockableMainloop * TSA_ASSERT(graph_lock) TSA_NO_TSA
 graph_lockable_auto_lock_mainloop(GraphLockableMainloop *x)
 {
     bdrv_graph_rdlock_main_loop();
     return x;
 }
 
-static inline void
+static inline void TSA_NO_TSA
 graph_lockable_auto_unlock_mainloop(GraphLockableMainloop *x)
 {
     bdrv_graph_rdunlock_main_loop();
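
The main-loop variant is meant to be used the same way, assuming the GRAPH_RDLOCK_GUARD_MAINLOOP() wrapper defined elsewhere in this header (not shown here), which holds the asserted reader lock until the end of the enclosing scope:

static void example_main_loop_guarded(BlockDriverState *bs)
{
    GRAPH_RDLOCK_GUARD_MAINLOOP();
    bdrv_example_count_children(bs);  /* hypothetical GRAPH_RDLOCK callee */
}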