KVM: Provide helper to sync dirty bitmap from slot to ramblock

kvm_physical_sync_dirty_bitmap() calculates the ramblock offset in an
awkward way from the MemoryRegionSection that is passed in by the
caller.  The truth is that, for each KVMSlot, the ramblock offset never
changes for its lifecycle.  Cache the ramblock offset for each KVMSlot
in the structure when the KVMSlot is created.

With that, we can further simplify kvm_physical_sync_dirty_bitmap()
with a helper that syncs the dirty bitmap of a specific KVMSlot into
the corresponding ramblock dirty bitmap.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20210506160549.130416-6-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
master
Peter Xu 2021-05-06 12:05:44 -04:00 committed by Paolo Bonzini
parent e65e5f50db
commit 2c20b27eed
2 changed files with 19 additions and 20 deletions

View File

@ -573,15 +573,12 @@ static void kvm_log_stop(MemoryListener *listener,
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
unsigned long *bitmap)
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
ram_addr_t start = section->offset_within_region +
memory_region_get_ram_addr(section->mr);
ram_addr_t pages = int128_get64(section->size) / qemu_real_host_page_size;
ram_addr_t start = slot->ram_start_offset;
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;
cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
return 0;
cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
@ -656,26 +653,19 @@ static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
KVMState *s = kvm_state;
KVMSlot *mem;
hwaddr start_addr, size;
hwaddr slot_size, slot_offset = 0;
hwaddr slot_size;
size = kvm_align_section(section, &start_addr);
while (size) {
MemoryRegionSection subsection = *section;
slot_size = MIN(kvm_max_slot_size, size);
mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
if (!mem) {
/* We don't have a slot if we want to trap every access. */
return;
}
if (kvm_slot_get_dirty_log(s, mem)) {
subsection.offset_within_region += slot_offset;
subsection.size = int128_make64(slot_size);
kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);
kvm_slot_sync_dirty_pages(mem);
}
slot_offset += slot_size;
start_addr += slot_size;
size -= slot_size;
}
@ -1134,7 +1124,8 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
int err;
MemoryRegion *mr = section->mr;
bool writeable = !mr->readonly && !mr->rom_device;
hwaddr start_addr, size, slot_size;
hwaddr start_addr, size, slot_size, mr_offset;
ram_addr_t ram_start_offset;
void *ram;
if (!memory_region_is_ram(mr)) {
@ -1152,9 +1143,13 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
return;
}
/* use aligned delta to align the ram address */
ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
(start_addr - section->offset_within_address_space);
/* The offset of the kvmslot within the memory region */
mr_offset = section->offset_within_region + start_addr -
section->offset_within_address_space;
/* use aligned delta to align the ram address and offset */
ram = memory_region_get_ram_ptr(mr) + mr_offset;
ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
kvm_slots_lock();
@ -1193,6 +1188,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
mem->as_id = kml->as_id;
mem->memory_size = slot_size;
mem->start_addr = start_addr;
mem->ram_start_offset = ram_start_offset;
mem->ram = ram;
mem->flags = kvm_mem_flags(mr);
kvm_slot_init_dirty_bitmap(mem);
@ -1203,6 +1199,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
abort();
}
start_addr += slot_size;
ram_start_offset += slot_size;
ram += slot_size;
size -= slot_size;
} while (size);

View File

@ -25,6 +25,8 @@ typedef struct KVMSlot
unsigned long *dirty_bmap;
/* Cache of the address space ID */
int as_id;
/* Cache of the offset in ram address space */
ram_addr_t ram_start_offset;
} KVMSlot;
typedef struct KVMMemoryListener {