exec/ram_addr: Return number of dirty pages in cpu_physical_memory_set_dirty_lebitmap()

In preparation for including the number of dirty pages in the
vfio_get_dirty_bitmap() tracepoint, return the number of dirty pages in
cpu_physical_memory_set_dirty_lebitmap() similar to
cpu_physical_memory_sync_dirty_bitmap().

To avoid counting twice when GLOBAL_DIRTY_RATE is enabled, stash the
number of bits set per bitmap quad in a variable (@nbits) and reuse it
there.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20230530180556.24441-2-joao.m.martins@oracle.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
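
For context, a minimal caller sketch (not part of this patch) showing how the new return value can be consumed, e.g. to feed a tracepoint later. The set_dirty_and_count() wrapper name is an illustrative assumption; only cpu_physical_memory_set_dirty_lebitmap() comes from this change.

    #include "qemu/osdep.h"      /* QEMU's mandatory first include */
    #include "exec/ram_addr.h"   /* cpu_physical_memory_set_dirty_lebitmap() */

    /*
     * Hypothetical wrapper, for illustration only: mark the pages described
     * by a little-endian dirty bitmap and report how many of them were dirty.
     * A follow-up patch would pass this count to the vfio_get_dirty_bitmap()
     * tracepoint; here it is simply returned to the caller.
     */
    static uint64_t set_dirty_and_count(unsigned long *bitmap,
                                        ram_addr_t start, ram_addr_t pages)
    {
        uint64_t num_dirty;

        num_dirty = cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
        return num_dirty;
    }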
Joao Martins 2023-05-30 19:05:55 +01:00 committed by Philippe Mathieu-Daudé
parent 9cc44d9bd6
commit f80929f3af
1 changed file with 22 additions and 6 deletions


include/exec/ram_addr.h

@@ -334,14 +334,23 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
 }
 
 #if !defined(_WIN32)
-static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
-                                                          ram_addr_t start,
-                                                          ram_addr_t pages)
+
+/*
+ * Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
+ * the number of dirty pages in @bitmap passed as argument. On the other hand,
+ * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
+ * weren't set in the global migration bitmap.
+ */
+static inline
+uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+                                                ram_addr_t start,
+                                                ram_addr_t pages)
 {
     unsigned long i, j;
-    unsigned long page_number, c;
+    unsigned long page_number, c, nbits;
     hwaddr addr;
     ram_addr_t ram_addr;
+    uint64_t num_dirty = 0;
     unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
     unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
@@ -369,6 +378,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                 if (bitmap[k]) {
                     unsigned long temp = leul_to_cpu(bitmap[k]);
 
+                    nbits = ctpopl(temp);
                     qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
 
                     if (global_dirty_tracking) {
@@ -377,10 +387,12 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                 temp);
                         if (unlikely(
                             global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
-                            total_dirty_pages += ctpopl(temp);
+                            total_dirty_pages += nbits;
                         }
                     }
 
+                    num_dirty += nbits;
+
                     if (tcg_enabled()) {
                         qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
                                    temp);
@@ -409,9 +421,11 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         for (i = 0; i < len; i++) {
             if (bitmap[i] != 0) {
                 c = leul_to_cpu(bitmap[i]);
+                nbits = ctpopl(c);
                 if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
-                    total_dirty_pages += ctpopl(c);
+                    total_dirty_pages += nbits;
                 }
+                num_dirty += nbits;
                 do {
                     j = ctzl(c);
                     c &= ~(1ul << j);
@@ -424,6 +438,8 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
             }
         }
     }
+
+    return num_dirty;
 }
 #endif /* not _WIN32 */
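
To make the "avoid counting twice" point from the commit message concrete, here is a distilled, standalone illustration (plain C, not QEMU code) of the pattern both loops now follow: the population count of each bitmap word is computed once into nbits and reused for both the dirty-rate accumulator and the returned total. The function and parameter names below are assumptions for illustration only.

    #include <stdint.h>

    /* Standalone illustration of the nbits pattern: count the set bits of each
     * word once and feed the same value to both accumulators. */
    static uint64_t count_dirty_words(const unsigned long *bitmap,
                                      unsigned long words,
                                      int dirty_rate_enabled,
                                      uint64_t *total_dirty_pages)
    {
        uint64_t num_dirty = 0;

        for (unsigned long i = 0; i < words; i++) {
            unsigned long nbits = __builtin_popcountl(bitmap[i]); /* stands in for ctpopl() */

            if (dirty_rate_enabled) {
                *total_dirty_pages += nbits;   /* dirty-rate accounting */
            }
            num_dirty += nbits;                /* value returned to the caller */
        }
        return num_dirty;
    }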