job.c: make job_mutex and job_lock/unlock() public

The job mutex will be used to protect the job struct elements and the job
list, replacing the AioContext locks.

For now, use a single shared lock for all jobs, in order to keep things
simple. Once the AioContext lock is gone, we can introduce per-job
locks.
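
For orientation, here is a minimal caller-side sketch (not part of this
patch) of what "one shared lock for all jobs" means in practice once the
conversion is complete. print_job_ids() is a made-up helper; Job,
job_next() and the id field come from include/qemu/job.h:

#include "qemu/osdep.h"
#include "qemu/job.h"

/*
 * Illustrative only: any walk of the global jobs list holds the single
 * shared job_mutex via job_lock(), no matter which job it cares about.
 * Per-job locks could later narrow this critical section.
 */
static void print_job_ids(void)
{
    Job *job;

    job_lock();
    for (job = job_next(NULL); job; job = job_next(job)) {
        printf("%s\n", job->id ? job->id : "(anonymous)");
    }
    job_unlock();
}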

To simplify the switch from the AioContext lock to the job lock, introduce
*nop* lock/unlock functions and macros.
We want to always call job_lock/unlock outside the AioContext locks,
and never the other way around, otherwise we might get a deadlock. This is
not straightforward to do, and that's why we start with nop functions.
Once everything is protected by job_lock/unlock, we can turn the nops into
an actual mutex and remove the AioContext lock.
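
As a rough sketch of the intended ordering (again illustrative, not part of
this patch; the helper name and the field read are invented), a
monitor-style caller takes job_lock() while holding no AioContext lock, so
the two locks can never be acquired in opposite orders by different threads:

#include "qemu/osdep.h"
#include "qemu/job.h"

/*
 * Hypothetical caller: reads a Job field under job_lock()/job_unlock().
 * Today these are nops, so behaviour is unchanged; once they become a
 * real mutex, this code is already correct because no AioContext lock
 * is held around the critical section.
 */
static bool job_paused_example(Job *job)
{
    bool paused;

    job_lock();
    paused = job->paused;
    job_unlock();

    return paused;
}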

Since job_mutex is already being used, add static
real_job_{lock/unlock} for the existing usage.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20220926093214.506243-2-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
commit 55c5a25a03
parent 2ffc10d53b
Author:    Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date:      2022-09-26 05:31:54 -04:00
Committer: Kevin Wolf <kwolf@redhat.com>

2 changed files with 47 additions and 12 deletions

include/qemu/job.h

@@ -303,6 +303,30 @@ typedef enum JobCreateFlags {
     JOB_MANUAL_DISMISS = 0x04,
 } JobCreateFlags;
 
+extern QemuMutex job_mutex;
+
+#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */
+
+#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */
+
+/**
+ * job_lock:
+ *
+ * Take the mutex protecting the list of jobs and their status.
+ * Most functions called by the monitor need to call job_lock
+ * and job_unlock manually. On the other hand, function called
+ * by the block jobs themselves and by the block layer will take the
+ * lock for you.
+ */
+void job_lock(void);
+
+/**
+ * job_unlock:
+ *
+ * Release the mutex protecting the list of jobs and their status.
+ */
+void job_unlock(void);
+
 /**
  * Allocate and return a new job transaction. Jobs can be added to the
  * transaction using job_txn_add_job().

job.c

@@ -32,6 +32,12 @@
 #include "trace/trace-root.h"
 #include "qapi/qapi-events-job.h"
 
+/*
+ * job_mutex protects the jobs list, but also makes the
+ * struct job fields thread-safe.
+ */
+QemuMutex job_mutex;
+
 static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
 
 /* Job State Transition Table */
@@ -74,17 +80,22 @@ struct JobTxn {
     int refcnt;
 };
 
-/* Right now, this mutex is only needed to synchronize accesses to job->busy
- * and job->sleep_timer, such as concurrent calls to job_do_yield and
- * job_enter. */
-static QemuMutex job_mutex;
+void job_lock(void)
+{
+    /* nop */
+}
 
-static void job_lock(void)
+void job_unlock(void)
+{
+    /* nop */
+}
+
+static void real_job_lock(void)
 {
     qemu_mutex_lock(&job_mutex);
 }
 
-static void job_unlock(void)
+static void real_job_unlock(void)
 {
     qemu_mutex_unlock(&job_mutex);
 }
@@ -450,21 +461,21 @@ void job_enter_cond(Job *job, bool(*fn)(Job *job))
         return;
     }
 
-    job_lock();
+    real_job_lock();
     if (job->busy) {
-        job_unlock();
+        real_job_unlock();
         return;
     }
 
     if (fn && !fn(job)) {
-        job_unlock();
+        real_job_unlock();
         return;
     }
 
     assert(!job->deferred_to_main_loop);
     timer_del(&job->sleep_timer);
     job->busy = true;
-    job_unlock();
+    real_job_unlock();
     aio_co_enter(job->aio_context, job->co);
 }
@@ -481,13 +492,13 @@ void job_enter(Job *job)
  * called explicitly. */
 static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
 {
-    job_lock();
+    real_job_lock();
     if (ns != -1) {
         timer_mod(&job->sleep_timer, ns);
     }
     job->busy = false;
     job_event_idle(job);
-    job_unlock();
+    real_job_unlock();
     qemu_coroutine_yield();
 
     /* Set by job_enter_cond() before re-entering the coroutine. */