fix #4101: acquire job's aio context before calling job_unref

Otherwise, we might run into an abort via bdrv_co_yield_to_drain()
(this can happen at least when a disk with an iothread is used):
> #0  0x00007fef4f5dece1 __GI_raise (libc.so.6 + 0x3bce1)
> #1  0x00007fef4f5c8537 __GI_abort (libc.so.6 + 0x25537)
> #2  0x00005641bce3c71f error_exit (qemu-system-x86_64 + 0x80371f)
> #3  0x00005641bce3d02b qemu_mutex_unlock_impl (qemu-system-x86_64 + 0x80402b)
> #4  0x00005641bcd51655 bdrv_co_yield_to_drain (qemu-system-x86_64 + 0x718655)
> #5  0x00005641bcd52de8 bdrv_do_drained_begin (qemu-system-x86_64 + 0x719de8)
> #6  0x00005641bcd47e07 blk_drain (qemu-system-x86_64 + 0x70ee07)
> #7  0x00005641bcd498cd blk_unref (qemu-system-x86_64 + 0x7108cd)
> #8  0x00005641bcd31e6f block_job_free (qemu-system-x86_64 + 0x6f8e6f)
> #9  0x00005641bcd32d65 job_unref (qemu-system-x86_64 + 0x6f9d65)
> #10 0x00005641bcd93b3d pvebackup_co_complete_stream (qemu-system-x86_64 + 0x75ab3d)
> #11 0x00005641bce4e353 coroutine_trampoline (qemu-system-x86_64 + 0x815353)

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Acked-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Fabian Ebner, 2022-06-09 14:31:13 +02:00 (committed by Fabian Grünbichler)
parent ed3b5b8ab8
commit 7bd4d8645a
1 changed file with 29 additions and 13 deletions
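
For reference, the core of the fix in pvebackup_co_complete_stream() is the pattern below (a sketch of the first hunk of the diff that follows, not a verbatim excerpt). Dropping the last job reference frees the block job, which drains and unrefs its BlockBackend; per the backtrace above, that drain must run with the job's AioContext held once the disk lives in an iothread.

```c
/*
 * Sketch of the cleanup added to pvebackup_co_complete_stream() (see the
 * first hunk of the diff below). job_unref() may drop the last reference,
 * in which case block_job_free() -> blk_unref() -> blk_drain() runs and
 * needs the job's AioContext to be held.
 */
if (di->job) {
    AioContext *ctx = di->job->job.aio_context;

    aio_context_acquire(ctx);
    job_unref(&di->job->job);
    di->job = NULL;
    aio_context_release(ctx);
}
```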

@@ -20,41 +20,57 @@ Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
pve-backup.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/pve-backup.c b/pve-backup.c
index 5bed6f4014..cd45e66a61 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -316,6 +316,11 @@ static void coroutine_fn pvebackup_co_complete_stream(void *opaque)
Index: pve-qemu-kvm-6.2.0/pve-backup.c
===================================================================
--- pve-qemu-kvm-6.2.0.orig/pve-backup.c
+++ pve-qemu-kvm-6.2.0/pve-backup.c
@@ -316,6 +316,14 @@ static void coroutine_fn pvebackup_co_co
}
}
+ if (di->job) {
+ AioContext *ctx = di->job->job.aio_context;
+ aio_context_acquire(ctx);
+ job_unref(&di->job->job);
+ di->job = NULL;
+ aio_context_release(ctx);
+ }
+
// remove self from job list
backup_state.di_list = g_list_remove(backup_state.di_list, di);
@@ -494,6 +499,9 @@ static void create_backup_jobs_bh(void *opaque) {
aio_context_release(aio_context);
@@ -491,9 +499,12 @@ static void create_backup_jobs_bh(void *
bitmap_mode, false, NULL, &perf, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
JOB_DEFAULT, pvebackup_complete_cb, di, backup_state.txn, &local_err);
- aio_context_release(aio_context);
-
di->job = job;
+ if (job) {
+ job_ref(&job->job);
+ }
+
+ aio_context_release(aio_context);
if (!job || local_err) {
error_setg(errp, "backup_job_create failed: %s",
@@ -528,6 +536,11 @@ static void create_backup_jobs_bh(void *opaque) {
aio_context_release(ctx);
canceled = true;
@@ -521,12 +532,16 @@ static void create_backup_jobs_bh(void *
di->target = NULL;
}
+
- if (!canceled && di->job) {
+ if (di->job) {
AioContext *ctx = di->job->job.aio_context;
aio_context_acquire(ctx);
- job_cancel_sync(&di->job->job, true);
+ if (!canceled) {
+ job_cancel_sync(&di->job->job, true);
+ canceled = true;
+ }
+ job_unref(&di->job->job);
+ di->job = NULL;
+ }
aio_context_release(ctx);
- canceled = true;
}
}
}
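
Putting the second and third hunks together: each job created in create_backup_jobs_bh() now gets an extra reference right away, while its AioContext is still held, and the error-cleanup loop unconditionally takes the job's AioContext, cancels at most one job of the transaction, and then drops that reference. Below is a rough sketch of the resulting cleanup path; the `if (*errp)` guard, the loop over `backup_state.di_list`, the `canceled` flag and the `bdrv_unref()` of the target are assumed from the unchanged parts of pve-backup.c and are not shown verbatim in the diff above.

```c
/*
 * Sketch of the error-cleanup path in create_backup_jobs_bh() after this
 * change. PVEBackupDevInfo, backup_state.di_list, canceled and errp are
 * assumed from the surrounding (unchanged) pve-backup.c code; this is not
 * a verbatim copy of the file.
 */
if (*errp) {
    GList *l = backup_state.di_list;
    while (l) {
        PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
        l = g_list_next(l);

        if (di->target) {
            bdrv_unref(di->target);
            di->target = NULL;
        }

        if (di->job) {
            AioContext *ctx = di->job->job.aio_context;
            aio_context_acquire(ctx);
            /* Cancel only once for the whole transaction ... */
            if (!canceled) {
                job_cancel_sync(&di->job->job, true);
                canceled = true;
            }
            /*
             * ... but drop our reference for every job, and do so while
             * holding the job's AioContext, since this may be the last
             * reference and freeing the job drains its BlockBackend.
             */
            job_unref(&di->job->job);
            di->job = NULL;
            aio_context_release(ctx);
        }
    }
}
```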