-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJY0rRYAAoJEL2+eyfA3jBXEvkQAJSNErQOEdqBoX/gSjeYSX85
PGp+fUrIux0HIYeKySShnsJ3Z1AuIHCogtcfEyHzTo8cDljZssgS4BRKy41ZnNaM
91Q91MgVyAEtwzApg5WNwWhTB7QDkbz7J75mTk74KPN6y9uKNbjSBRSnH4ZbIH/p
L3tk6eGpHWf3N0UvoifoKpExlq0A+AYkisuZn7D9C+bBDEnEUWYRcvfEk3sKrZD/
XikclGwNSPKmdBeYenzlLHFA8WyGe85pkys6QRPeRL1l8dDBBPt1so2y4PLzaEBO
fImh+ivrHHbKI5TD0RoRVsY4qi9bbH8dK1gDp0oT8uZpwIsO4EWRHA1GZRq6lVIw
j7a+p/ZFBiVa2WFvWpicZppRwnkuuswqqm4NVsC1djSMoDvPeO2T24YlcRPYeYrp
FVlY04HpP195mw3e7VVWlirRQY+Jo5IwJkSOUKM4xOZpKY/prS2kqT+KQq2bYK5a
t3MTKwT04q/7eBtPFoJFf3gwI4q8hyizPtf4X0AN5/YREwJh7J4azQSLEJSjlo2F
37TbMqGVNQPBtwXWnfK2mi12NIHCaP/clh8hqqrQE6EdjFQQcdD5j5df5syLalTK
qy+IbxpvoyNt0niXstXI62RnKDbfwsz8YtYYjVIUfv9VkpyQU1gHak7VeodRyHjz
zuINtr0Jrmr47n8d9qTj
=Gi6q
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/cody/tags/block-pull-request' into staging

# gpg: Signature made Wed 22 Mar 2017 17:28:56 GMT
# gpg:                using RSA key 0xBDBE7B27C0DE3057
# gpg: Good signature from "Jeffrey Cody <jcody@redhat.com>"
# gpg:                 aka "Jeffrey Cody <jeff@codyprime.org>"
# gpg:                 aka "Jeffrey Cody <codyprime@gmail.com>"
# Primary key fingerprint: 9957 4B4D 3474 90E7 9D98  D624 BDBE 7B27 C0DE 3057

* remotes/cody/tags/block-pull-request:
  blockjob: add devops to blockjob backends
  block-backend: add drained_begin / drained_end ops
  blockjob: add block_job_start_shim
  blockjob: avoid recursive AioContext locking

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit e6ebebc204 (master)
Peter Maydell <peter.maydell@linaro.org>, 2017-03-23 11:39:53 +00:00

3 changed files with 79 additions and 16 deletions

block/block-backend.c

@@ -65,6 +65,8 @@ struct BlockBackend {
     bool allow_write_beyond_eof;
 
     NotifierList remove_bs_notifiers, insert_bs_notifiers;
+
+    int quiesce_counter;
 };
 
 typedef struct BlockBackendAIOCB {
@@ -699,12 +701,17 @@ void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                      void *opaque)
 {
     /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
-     * it that way, so we can assume blk->dev is a DeviceState if blk->dev_ops
-     * is set. */
+     * it that way, so we can assume blk->dev, if present, is a DeviceState if
+     * blk->dev_ops is set. Non-device users may use dev_ops without device. */
     assert(!blk->legacy_dev);
 
     blk->dev_ops = ops;
     blk->dev_opaque = opaque;
+
+    /* Are we currently quiesced? Should we enforce this right now? */
+    if (blk->quiesce_counter && ops->drained_begin) {
+        ops->drained_begin(opaque);
+    }
 }
 
 /*
@@ -1870,6 +1877,12 @@ static void blk_root_drained_begin(BdrvChild *child)
 {
     BlockBackend *blk = child->opaque;
 
+    if (++blk->quiesce_counter == 1) {
+        if (blk->dev_ops && blk->dev_ops->drained_begin) {
+            blk->dev_ops->drained_begin(blk->dev_opaque);
+        }
+    }
+
     /* Note that blk->root may not be accessible here yet if we are just
      * attaching to a BlockDriverState that is drained. Use child instead. */
@@ -1881,7 +1894,14 @@ static void blk_root_drained_begin(BdrvChild *child)
 static void blk_root_drained_end(BdrvChild *child)
 {
     BlockBackend *blk = child->opaque;
+    assert(blk->quiesce_counter);
 
     assert(blk->public.io_limits_disabled);
     --blk->public.io_limits_disabled;
+
+    if (--blk->quiesce_counter == 0) {
+        if (blk->dev_ops && blk->dev_ops->drained_end) {
+            blk->dev_ops->drained_end(blk->dev_opaque);
+        }
+    }
 }
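
The dev-ops callbacks above are gated on blk->quiesce_counter, so a user sees exactly one drained_begin for the outermost drain and one drained_end when the last nested drain section ends. A standalone C sketch of that counter pattern (Demo* names are invented for illustration; this mirrors the logic of blk_root_drained_begin/end but is not QEMU code):

/* Callbacks fire only on the 0 -> 1 and 1 -> 0 transitions, so nested
 * drain sections collapse into one begin/end pair for the user. */
#include <assert.h>
#include <stdio.h>

typedef struct {
    void (*drained_begin)(void *opaque);   /* may be NULL */
    void (*drained_end)(void *opaque);     /* may be NULL */
} DemoDevOps;

typedef struct {
    int quiesce_counter;
    const DemoDevOps *ops;
    void *opaque;
} DemoBackend;

static void demo_drained_begin(DemoBackend *b)
{
    if (++b->quiesce_counter == 1 && b->ops && b->ops->drained_begin) {
        b->ops->drained_begin(b->opaque);
    }
}

static void demo_drained_end(DemoBackend *b)
{
    assert(b->quiesce_counter);
    if (--b->quiesce_counter == 0 && b->ops && b->ops->drained_end) {
        b->ops->drained_end(b->opaque);
    }
}

static void on_begin(void *opaque) { (void)opaque; puts("paused"); }
static void on_end(void *opaque)   { (void)opaque; puts("resumed"); }

int main(void)
{
    static const DemoDevOps ops = { on_begin, on_end };
    DemoBackend b = { 0, &ops, NULL };

    demo_drained_begin(&b);   /* prints "paused" */
    demo_drained_begin(&b);   /* nested drain: no callback */
    demo_drained_end(&b);     /* still quiesced: no callback */
    demo_drained_end(&b);     /* prints "resumed" */
    return 0;
}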

blockjob.c

@@ -68,6 +68,23 @@ static const BdrvChildRole child_job = {
     .stay_at_node = true,
 };
 
+static void block_job_drained_begin(void *opaque)
+{
+    BlockJob *job = opaque;
+    block_job_pause(job);
+}
+
+static void block_job_drained_end(void *opaque)
+{
+    BlockJob *job = opaque;
+    block_job_resume(job);
+}
+
+static const BlockDevOps block_job_dev_ops = {
+    .drained_begin = block_job_drained_begin,
+    .drained_end = block_job_drained_end,
+};
+
 BlockJob *block_job_next(BlockJob *job)
 {
     if (!job) {
@@ -205,11 +222,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     }
 
     job = g_malloc0(driver->instance_size);
-    error_setg(&job->blocker, "block device is in use by block job: %s",
-               BlockJobType_lookup[driver->job_type]);
-    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
-    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
-
     job->driver = driver;
     job->id = g_strdup(job_id);
     job->blk = blk;
@@ -219,8 +231,15 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     job->paused = true;
     job->pause_count = 1;
     job->refcnt = 1;
+
+    error_setg(&job->blocker, "block device is in use by block job: %s",
+               BlockJobType_lookup[driver->job_type]);
+    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
     bs->job = job;
 
+    blk_set_dev_ops(blk, &block_job_dev_ops, job);
+    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
+
     QLIST_INSERT_HEAD(&block_jobs, job, job_list);
 
     blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
@@ -250,16 +269,28 @@ static bool block_job_started(BlockJob *job)
     return job->co;
 }
 
+/**
+ * All jobs must allow a pause point before entering their job proper. This
+ * ensures that jobs can be paused prior to being started, then resumed later.
+ */
+static void coroutine_fn block_job_co_entry(void *opaque)
+{
+    BlockJob *job = opaque;
+
+    assert(job && job->driver && job->driver->start);
+    block_job_pause_point(job);
+    job->driver->start(job);
+}
+
 void block_job_start(BlockJob *job)
 {
     assert(job && !block_job_started(job) && job->paused &&
-           !job->busy && job->driver->start);
-    job->co = qemu_coroutine_create(job->driver->start, job);
-    if (--job->pause_count == 0) {
-        job->paused = false;
-        job->busy = true;
-        qemu_coroutine_enter(job->co);
-    }
+           job->driver && job->driver->start);
+    job->co = qemu_coroutine_create(block_job_co_entry, job);
+    job->pause_count--;
+    job->busy = true;
+    job->paused = false;
+    qemu_coroutine_enter(job->co);
 }
 
 void block_job_ref(BlockJob *job)
@@ -755,12 +786,16 @@ static void block_job_defer_to_main_loop_bh(void *opaque)
     /* Fetch BDS AioContext again, in case it has changed */
     aio_context = blk_get_aio_context(data->job->blk);
-    aio_context_acquire(aio_context);
+    if (aio_context != data->aio_context) {
+        aio_context_acquire(aio_context);
+    }
 
     data->job->deferred_to_main_loop = false;
     data->fn(data->job, data->opaque);
 
-    aio_context_release(aio_context);
+    if (aio_context != data->aio_context) {
+        aio_context_release(aio_context);
+    }
 
     aio_context_release(data->aio_context);
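
This bottom half already holds data->aio_context (it is released at the end of the hunk), so the change only takes the job's current AioContext when it is a different context, avoiding acquiring the same lock a second time. A standalone sketch of that guard pattern, with plain pthread mutexes standing in for AioContext (Ctx and with_context are invented names, not QEMU API):

#include <pthread.h>

typedef struct { pthread_mutex_t lock; } Ctx;

/* ctx_held is already locked by the caller; lock ctx_needed only when it
 * is a different context, and unlock it again in mirror order. */
static void with_context(Ctx *ctx_held, Ctx *ctx_needed,
                         void (*fn)(void *), void *opaque)
{
    if (ctx_needed != ctx_held) {
        pthread_mutex_lock(&ctx_needed->lock);
    }
    fn(opaque);                        /* runs with both contexts held */
    if (ctx_needed != ctx_held) {
        pthread_mutex_unlock(&ctx_needed->lock);
    }
}

static void work(void *opaque) { (void)opaque; }

int main(void)
{
    Ctx a = { PTHREAD_MUTEX_INITIALIZER };
    Ctx b = { PTHREAD_MUTEX_INITIALIZER };

    pthread_mutex_lock(&a.lock);       /* caller already holds "a" */
    with_context(&a, &a, work, NULL);  /* same context: no second lock */
    with_context(&a, &b, work, NULL);  /* different context: lock "b" around fn */
    pthread_mutex_unlock(&a.lock);
    return 0;
}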

include/sysemu/block-backend.h

@@ -58,6 +58,14 @@ typedef struct BlockDevOps {
      * Runs when the size changed (e.g. monitor command block_resize)
      */
     void (*resize_cb)(void *opaque);
+    /*
+     * Runs when the backend receives a drain request.
+     */
+    void (*drained_begin)(void *opaque);
+    /*
+     * Runs when the backend's last drain request ends.
+     */
+    void (*drained_end)(void *opaque);
 } BlockDevOps;
 
 /* This struct is embedded in (the private) BlockBackend struct and contains
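
For context, these callbacks are consumed through blk_set_dev_ops(), exactly as the blockjob.c hunk above does with block_job_dev_ops. A minimal sketch of a hypothetical non-blockjob user follows; MyUser and the my_user_* helpers are invented for illustration, and the snippet assumes the surrounding QEMU tree (BlockDevOps, BlockBackend, blk_set_dev_ops), so it is not compilable on its own:

/* Hypothetical BlockDevOps user; mirrors block_job_dev_ops above. */
typedef struct MyUser MyUser;

static void my_user_drained_begin(void *opaque)
{
    MyUser *u = opaque;
    my_user_stop_submitting_io(u);   /* hypothetical: stop issuing requests */
}

static void my_user_drained_end(void *opaque)
{
    MyUser *u = opaque;
    my_user_resume_io(u);            /* hypothetical: resume issuing requests */
}

static const BlockDevOps my_user_dev_ops = {
    .drained_begin = my_user_drained_begin,
    .drained_end   = my_user_drained_end,
};

static void my_user_attach(MyUser *u, BlockBackend *blk)
{
    /* Register the callbacks; note that drained_begin fires immediately
     * if the backend is already quiesced (see blk_set_dev_ops above). */
    blk_set_dev_ops(blk, &my_user_dev_ops, u);
}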