virtio-balloon: Rip out qemu_balloon_inhibit()

The only remaining special case is postcopy. It cannot handle
concurrent discards yet: a discard would result in the destination
requesting pages from the source that have already been sent.
Special-case it in virtio-balloon instead.

Introduce migration_in_incoming_postcopy() to find out whether incoming
postcopy is active.
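
Note: a minimal sketch of what a caller that previously voted via
qemu_balloon_inhibit() would do instead, using the generic RAM discard
inhibitor from earlier in this series. The device and all "my_*" names
are hypothetical, for illustration only; only ram_block_discard_disable()
is real:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "exec/memory.h"    /* ram_block_discard_disable() */

    static int my_device_start(Error **errp)
    {
        /*
         * Replaces qemu_balloon_inhibit(true): fails if a conflicting
         * user has already declared that it relies on RAM discards.
         */
        if (ram_block_discard_disable(true)) {
            error_setg(errp, "RAM discards cannot be disabled");
            return -EBUSY;
        }
        /* ... device setup; undo with ram_block_discard_disable(false) ... */
        return 0;
    }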

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Juan Quintela <quintela@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20200626072248.78761-7-david@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
David Hildenbrand 2020-06-26 09:22:33 +02:00 committed by Michael S. Tsirkin
parent b030958c2b
commit 06df2e692a
6 changed files with 17 additions and 45 deletions

diff --git a/balloon.c b/balloon.c

@@ -36,24 +36,6 @@
 static QEMUBalloonEvent *balloon_event_fn;
 static QEMUBalloonStatus *balloon_stat_fn;
 static void *balloon_opaque;
-static int balloon_inhibit_count;
-
-bool qemu_balloon_is_inhibited(void)
-{
-    return atomic_read(&balloon_inhibit_count) > 0 ||
-           ram_block_discard_is_disabled();
-}
-
-void qemu_balloon_inhibit(bool state)
-{
-    if (state) {
-        atomic_inc(&balloon_inhibit_count);
-    } else {
-        atomic_dec(&balloon_inhibit_count);
-    }
-    assert(atomic_read(&balloon_inhibit_count) >= 0);
-}
-
 static bool have_balloon(Error **errp)
 {

diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c

@@ -63,6 +63,12 @@ static bool virtio_balloon_pbp_matches(PartiallyBalloonedPage *pbp,
     return pbp->base_gpa == base_gpa;
 }
 
+static bool virtio_balloon_inhibited(void)
+{
+    /* Postcopy cannot deal with concurrent discards, so it's special. */
+    return ram_block_discard_is_disabled() || migration_in_incoming_postcopy();
+}
+
 static void balloon_inflate_page(VirtIOBalloon *balloon,
                                  MemoryRegion *mr, hwaddr mr_offset,
                                  PartiallyBalloonedPage *pbp)
@@ -336,7 +342,7 @@ static void virtio_balloon_handle_report(VirtIODevice *vdev, VirtQueue *vq)
          * accessible by another device or process, or if the guest is
          * expecting it to retain a non-zero value.
          */
-        if (qemu_balloon_is_inhibited() || dev->poison_val) {
+        if (virtio_balloon_inhibited() || dev->poison_val) {
             goto skip_element;
         }
 
@@ -421,7 +427,7 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
         trace_virtio_balloon_handle_output(memory_region_name(section.mr),
                                            pa);
-        if (!qemu_balloon_is_inhibited()) {
+        if (!virtio_balloon_inhibited()) {
             if (vq == s->ivq) {
                 balloon_inflate_page(s, section.mr,
                                      section.offset_within_region, &pbp);

diff --git a/include/migration/misc.h b/include/migration/misc.h

@@ -69,6 +69,8 @@ bool migration_has_failed(MigrationState *);
 /* ...and after the device transmission */
 bool migration_in_postcopy_after_devices(MigrationState *);
 void migration_global_dump(Monitor *mon);
+/* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */
+bool migration_in_incoming_postcopy(void);
 
 /* migration/block-dirty-bitmap.c */
 void dirty_bitmap_mig_init(void);

diff --git a/include/sysemu/balloon.h b/include/sysemu/balloon.h

@@ -23,7 +23,5 @@ typedef void (QEMUBalloonStatus)(void *opaque, BalloonInfo *info);
 int qemu_add_balloon_handler(QEMUBalloonEvent *event_func,
                              QEMUBalloonStatus *stat_func, void *opaque);
 void qemu_remove_balloon_handler(void *opaque);
-bool qemu_balloon_is_inhibited(void);
-void qemu_balloon_inhibit(bool state);
 
 #endif

diff --git a/migration/migration.c b/migration/migration.c

@@ -1772,6 +1772,13 @@ bool migration_in_postcopy_after_devices(MigrationState *s)
     return migration_in_postcopy() && s->postcopy_after_devices;
 }
 
+bool migration_in_incoming_postcopy(void)
+{
+    PostcopyState ps = postcopy_state_get();
+
+    return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
+}
+
 bool migration_is_idle(void)
 {
     MigrationState *s = current_migration;

diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c

@@ -27,7 +27,6 @@
 #include "qemu/notify.h"
 #include "qemu/rcu.h"
 #include "sysemu/sysemu.h"
-#include "sysemu/balloon.h"
 #include "qemu/error-report.h"
 #include "trace.h"
 #include "hw/boards.h"
@@ -520,20 +519,6 @@ int postcopy_ram_incoming_init(MigrationIncomingState *mis)
     return 0;
 }
 
-/*
- * Manage a single vote to the QEMU balloon inhibitor for all postcopy usage,
- * last caller wins.
- */
-static void postcopy_balloon_inhibit(bool state)
-{
-    static bool cur_state = false;
-
-    if (state != cur_state) {
-        qemu_balloon_inhibit(state);
-        cur_state = state;
-    }
-}
-
 /*
  * At the end of a migration where postcopy_ram_incoming_init was called.
  */
@@ -565,8 +550,6 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
         mis->have_fault_thread = false;
     }
 
-    postcopy_balloon_inhibit(false);
-
     if (enable_mlock) {
         if (os_mlock() < 0) {
             error_report("mlock: %s", strerror(errno));
@@ -1160,12 +1143,6 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
     }
 
     memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
-    /*
-     * Ballooning can mark pages as absent while we're postcopying
-     * that would cause false userfaults.
-     */
-    postcopy_balloon_inhibit(true);
-
     trace_postcopy_ram_enable_notify();
 
     return 0;