Replace "iothread lock" with "BQL" in comments

The term "iothread lock" is obsolete. The APIs use Big QEMU Lock (BQL)
in their names. Update the code comments to use "BQL" instead of
"iothread lock".

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Paul Durrant <paul@xen.org>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Message-id: 20240102153529.486531-5-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit a4a411fbaf (master)
parent 7c754c787e
Stefan Hajnoczi, 2024-01-02 10:35:28 -05:00
21 changed files with 47 additions and 47 deletions
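
For reference, the "Big QEMU Lock" (BQL) API naming that the commit message refers to follows the pattern sketched below. This is an illustrative sketch only: bql_lock() appears in the hunks that follow, and bql_unlock() is assumed to be its counterpart in qemu/main-loop.h.

    bql_lock();      /* take the Big QEMU Lock (formerly the "iothread lock") */
    /* ... touch BQL-protected global state ... */
    bql_unlock();    /* release it again */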

@@ -1975,7 +1975,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
-* Context: iothread lock held
+* Context: BQL held
*
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
@@ -2521,7 +2521,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
-* Context: iothread lock held
+* Context: BQL held
*
* Store @size bytes at @addr, which is memory-mapped i/o.
* The bytes to store are extracted in little-endian order from @val_le;

@@ -123,7 +123,7 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
if (cpu->icount_budget == 0) {
/*
-* We're called without the iothread lock, so must take it while
+* We're called without the BQL, so must take it while
* we're calling timer handlers.
*/
bql_lock();

@@ -299,7 +299,7 @@ COREAUDIO_WRAPPER_FUNC(write, size_t, (HWVoiceOut *hw, void *buf, size_t size),
#undef COREAUDIO_WRAPPER_FUNC
/*
-* callback to feed audiooutput buffer. called without iothread lock.
+* callback to feed audiooutput buffer. called without BQL.
* allowed to lock "buf_mutex", but disallowed to have any other locks.
*/
static OSStatus audioDeviceIOProc(
@@ -538,7 +538,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core)
}
}
-/* called without iothread lock. */
+/* called without BQL. */
static OSStatus handle_voice_change(
AudioObjectID in_object_id,
UInt32 in_number_addresses,

@@ -19,7 +19,7 @@ Triggering reset
This section documents the APIs which "users" of a resettable object should use
to control it. All resettable control functions must be called while holding
-the iothread lock.
+the BQL.
You can apply a reset to an object using ``resettable_assert_reset()``. You need
to call ``resettable_release_reset()`` to release the object from reset. To
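
As a hedged sketch of the documented pattern (obj and the RESET_TYPE_COLD reset type are illustrative here, and the BQL must be held as the text above requires):

    bql_lock();                                      /* resettable API requires the BQL */
    resettable_assert_reset(obj, RESET_TYPE_COLD);   /* put the object into reset */
    resettable_release_reset(obj, RESET_TYPE_COLD);  /* let it leave reset again */
    bql_unlock();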

@@ -159,7 +159,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL)
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
-* section and does not hold the iothread lock, it must have other means of
+* section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*

@@ -58,7 +58,7 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
assert(qemu_in_coroutine() || !iothread);
/*
-* Skip unlocking/locking iothread lock when the IOThread is running
+* Skip unlocking/locking BQL when the IOThread is running
* in co-routine context. Co-routine context is asserted above
* for IOThread case.
* Also skip lock handling while in a co-routine in the main context.
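
The logic being described is roughly the following (an illustrative sketch, not the function's actual body):

    /* Drop and retake the BQL only outside coroutine context; a coroutine
     * must not block the main loop while the message is sent. */
    bool drop_bql = !qemu_in_coroutine();
    if (drop_bql) {
        bql_unlock();
    }
    /* ... blocking send of msg over the QIOChannel ... */
    if (drop_bql) {
        bql_lock();
    }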

@@ -92,7 +92,7 @@ RAMBlock *qemu_ram_block_by_name(const char *name);
*
* By the time this function returns, the returned pointer is not protected
* by RCU anymore. If the caller is not within an RCU critical section and
-* does not hold the iothread lock, it must have other means of protecting the
+* does not hold the BQL, it must have other means of protecting the
* pointer, such as a reference to the memory region that owns the RAMBlock.
*/
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,

@@ -1982,7 +1982,7 @@ int memory_region_get_fd(MemoryRegion *mr);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
-* section and does not hold the iothread lock, it must have other means of
+* section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -1999,7 +1999,7 @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
-* section and does not hold the iothread lock, it must have other means of
+* section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
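
The safe calling pattern these comments describe looks like this (a sketch; host_ptr is illustrative, RCU_READ_LOCK_GUARD() and memory_region_ref() are the usual QEMU helpers):

    RCU_READ_LOCK_GUARD();                 /* pointer is only protected in here */
    ram_addr_t offset;
    MemoryRegion *mr = memory_region_from_host(host_ptr, &offset);
    if (mr) {
        memory_region_ref(mr);             /* keep the region alive past the RCU section */
    }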

@@ -34,7 +34,7 @@ struct RAMBlock {
ram_addr_t max_length;
void (*resized)(const char*, uint64_t length, void *host);
uint32_t flags;
-/* Protected by iothread lock. */
+/* Protected by the BQL. */
char idstr[256];
/* RCU-enabled, writes protected by the ramlist lock */
QLIST_ENTRY(RAMBlock) next;

@@ -17,7 +17,7 @@
#include "hw/vmstate-if.h"
typedef struct SaveVMHandlers {
-/* This runs inside the iothread lock. */
+/* This runs inside the BQL. */
SaveStateHandler *save_state;
/*
@@ -30,7 +30,7 @@ typedef struct SaveVMHandlers {
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
-/* This runs both outside and inside the iothread lock. */
+/* This runs both outside and inside the BQL. */
bool (*is_active)(void *opaque);
bool (*has_postcopy)(void *opaque);
@@ -43,14 +43,14 @@ typedef struct SaveVMHandlers {
*/
bool (*is_active_iterate)(void *opaque);
-/* This runs outside the iothread lock in the migration case, and
+/* This runs outside the BQL in the migration case, and
* within the lock in the savevm case. The callback had better only
* use data that is local to the migration thread or protected
* by other locks.
*/
int (*save_live_iterate)(QEMUFile *f, void *opaque);
-/* This runs outside the iothread lock! */
+/* This runs outside the BQL! */
/* Note for save_live_pending:
* must_precopy:
* - must be migrated in precopy or in stopped state
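
Taken together, the annotations above suggest a handler table along these lines (the example_*() callbacks are hypothetical; the comments restate the locking rules from the declarations above):

    static SaveVMHandlers example_handlers = {
        .save_state        = example_save_state,  /* runs inside the BQL */
        .save_live_iterate = example_iterate,     /* outside the BQL when migrating */
        .is_active         = example_is_active,   /* runs both inside and outside */
    };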

@@ -464,7 +464,7 @@ static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
g_free(buf);
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
{
SaveBitmapState *dbms;
@@ -479,7 +479,7 @@ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
}
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
const char *bs_name, GHashTable *alias_map)
{
@@ -598,7 +598,7 @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
return 0;
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int init_dirty_bitmap_migration(DBMSaveState *s)
{
BlockDriverState *bs;
@@ -607,7 +607,7 @@ static int init_dirty_bitmap_migration(DBMSaveState *s)
BlockBackend *blk;
GHashTable *alias_map = NULL;
-/* Runs in the migration thread, but holds the iothread lock */
+/* Runs in the migration thread, but holds the BQL */
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
@@ -742,7 +742,7 @@ static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
return s->bulk_completed;
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
{

@@ -101,7 +101,7 @@ typedef struct BlkMigState {
int prev_progress;
int bulk_completed;
-/* Lock must be taken _inside_ the iothread lock. */
+/* Lock must be taken _inside_ the BQL. */
QemuMutex lock;
} BlkMigState;
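
The lock-ordering rule in the comment above translates into this calling pattern (a sketch; blk_mig_lock() is assumed to be the wrapper around block_mig_state.lock whose unlock side appears in the next hunk):

    bql_lock();        /* outer lock: the BQL first ... */
    blk_mig_lock();    /* ... then the inner block-migration lock */
    /* ... touch state guarded by block_mig_state.lock ... */
    blk_mig_unlock();
    bql_unlock();
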
@@ -117,7 +117,7 @@ static void blk_mig_unlock(void)
qemu_mutex_unlock(&block_mig_state.lock);
}
-/* Must run outside of the iothread lock during the bulk phase,
+/* Must run outside of the BQL during the bulk phase,
* or the VM will stall.
*/
@@ -327,7 +327,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
return (bmds->cur_sector >= total_sectors);
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int set_dirty_tracking(void)
{
@@ -354,7 +354,7 @@ fail:
return ret;
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static void unset_dirty_tracking(void)
{
@@ -505,7 +505,7 @@ static void blk_mig_reset_dirty_cursor(void)
}
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
int is_async)
@@ -587,7 +587,7 @@ error:
return ret;
}
-/* Called with iothread lock taken.
+/* Called with the BQL taken.
*
* return value:
* 0: too much data for max_downtime
@@ -649,7 +649,7 @@ static int flush_blks(QEMUFile *f)
return ret;
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int64_t get_remaining_dirty(void)
{
@@ -667,7 +667,7 @@ static int64_t get_remaining_dirty(void)
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static void block_migration_cleanup_bmds(void)
{
BlkMigDevState *bmds;
@@ -690,7 +690,7 @@ static void block_migration_cleanup_bmds(void)
}
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static void block_migration_cleanup(void *opaque)
{
BlkMigBlock *blk;
@@ -767,7 +767,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
}
ret = 0;
} else {
-/* Always called with iothread lock taken for
+/* Always called with the BQL taken for
* simplicity, block_save_complete also calls it.
*/
bql_lock();
@@ -795,7 +795,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
return (delta_bytes > 0);
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int block_save_complete(QEMUFile *f, void *opaque)
{

@@ -945,7 +945,7 @@ int coroutine_fn colo_incoming_co(void)
qemu_thread_join(&th);
bql_lock();
-/* We hold the global iothread lock, so it is safe here */
+/* We hold the global BQL, so it is safe here */
colo_release_ram_cache();
return 0;

@@ -2551,7 +2551,7 @@ fail:
/**
* migration_maybe_pause: Pause if required to by
-* migrate_pause_before_switchover called with the iothread locked
+* migrate_pause_before_switchover called with the BQL locked
* Returns: 0 on success
*/
static int migration_maybe_pause(MigrationState *s,

@@ -2395,7 +2395,7 @@ static void ram_save_cleanup(void *opaque)
/* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) {
-/* caller have hold iothread lock or is in a bh, so there is
+/* caller have hold BQL or is in a bh, so there is
* no writing race against the migration bitmap
*/
if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
@@ -3131,7 +3131,7 @@ out:
*
* Returns zero to indicate success or negative on error
*
-* Called with iothread lock
+* Called with the BQL
*
* @f: QEMUFile where to send the data
* @opaque: RAMState pointer

@@ -799,7 +799,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
abort();
found:
-/* It is safe to write mru_block outside the iothread lock. This
+/* It is safe to write mru_block outside the BQL. This
* is what happens:
*
* mru_block = xxx
@@ -1597,7 +1597,7 @@ int qemu_ram_get_fd(RAMBlock *rb)
return rb->fd;
}
-/* Called with iothread lock held. */
+/* Called with the BQL held. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
RAMBlock *block;
@@ -1625,7 +1625,7 @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
}
}
-/* Called with iothread lock held. */
+/* Called with the BQL held. */
void qemu_ram_unset_idstr(RAMBlock *block)
{
/* FIXME: arch_init.c assumes that this is not called throughout

@@ -5844,7 +5844,7 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
* Updates to VI and VF require us to update the status of
* virtual interrupts, which are the logical OR of these bits
* and the state of the input lines from the GIC. (This requires
-* that we have the iothread lock, which is done by marking the
+* that we have the BQL, which is done by marking the
* reginfo structs as ARM_CP_IO.)
* Note that if a write to HCR pends a VIRQ or VFIQ it is never
* possible for it to be taken immediately, because VIRQ and

@@ -940,7 +940,7 @@ static inline const char *aarch32_mode_name(uint32_t psr)
*
* Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
* a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
-* Must be called with the iothread lock held.
+* Must be called with the BQL held.
*/
void arm_cpu_update_virq(ARMCPU *cpu);
@@ -949,7 +949,7 @@ void arm_cpu_update_virq(ARMCPU *cpu);
*
* Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
* a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
-* Must be called with the iothread lock held.
+* Must be called with the BQL held.
*/
void arm_cpu_update_vfiq(ARMCPU *cpu);

@@ -113,7 +113,7 @@ static NSInteger cbchangecount = -1;
static QemuClipboardInfo *cbinfo;
static QemuEvent cbevent;
-// Utility functions to run specified code block with iothread lock held
+// Utility functions to run specified code block with the BQL held
typedef void (^CodeBlock)(void);
typedef bool (^BoolCodeBlock)(void);
@@ -548,7 +548,7 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven
- (void) updateUIInfoLocked
{
-/* Must be called with the iothread lock, i.e. via updateUIInfo */
+/* Must be called with the BQL, i.e. via updateUIInfo */
NSSize frameSize;
QemuUIInfo info;
@@ -2075,7 +2075,7 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts)
* Create the menu entries which depend on QEMU state (for consoles
* and removable devices). These make calls back into QEMU functions,
* which is OK because at this point we know that the second thread
-* holds the iothread lock and is synchronously waiting for us to
+* holds the BQL and is synchronously waiting for us to
* finish.
*/
add_console_menu_entries();

@@ -217,7 +217,7 @@ static void channel_event(int event, SpiceChannelEventInfo *info)
* not do that. It isn't that easy to fix it in spice and even
* when it is fixed we still should cover the already released
* spice versions. So detect that we've been called from another
-* thread and grab the iothread lock if so before calling qemu
+* thread and grab the BQL if so before calling qemu
* functions.
*/
bool need_lock = !qemu_thread_is_self(&me);
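
The function then brackets its calls back into QEMU with that flag, along these lines (a sketch continuing from need_lock above):

    if (need_lock) {
        bql_lock();      /* called from a non-QEMU thread: take the BQL */
    }
    /* ... call qemu functions ... */
    if (need_lock) {
        bql_unlock();
    }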

@@ -409,7 +409,7 @@ static void rcu_init_complete(void)
qemu_event_init(&rcu_call_ready_event, false);
-/* The caller is assumed to have iothread lock, so the call_rcu thread
+/* The caller is assumed to have BQL, so the call_rcu thread
* must have been quiescent even after forking, just recreate it.
*/
qemu_thread_create(&thread, "call_rcu", call_rcu_thread,