exec: Cache TARGET_PAGE_MASK for TARGET_PAGE_BITS_VARY

This eliminates a set of runtime shifts.  It turns out that we
require TARGET_PAGE_MASK more often than TARGET_PAGE_SIZE, so
redefine TARGET_PAGE_SIZE based on TARGET_PAGE_MASK instead of
the other way around.
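
As an aside, a minimal standalone sketch (not part of the patch; int64_t stands
in for target_long) of the identity the new definition relies on: for a mask of
the form -1 << bits, negating it recovers 1 << bits, so TARGET_PAGE_SIZE can be
derived as -(int)TARGET_PAGE_MASK.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int bits = 12;                          /* e.g. 4 KiB pages */
    int64_t mask = (int64_t)-1 << bits;     /* mirrors (target_long)-1 << bits */
    assert(-(int)mask == (1 << bits));      /* -(-4096) == 0x1000 */
    return 0;
}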

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Richard Henderson, 2019-09-13 12:07:40 -04:00
commit bb8e3ea6fa, parent f048b8a7ce
2 changed files with 7 additions and 2 deletions

exec-vary.c

@@ -96,6 +96,7 @@ void finalize_target_page_bits(void)
     if (init_target_page.bits == 0) {
         init_target_page.bits = TARGET_PAGE_BITS_MIN;
     }
+    init_target_page.mask = (target_long)-1 << init_target_page.bits;
     init_target_page.decided = true;
 
     /*
include/exec/cpu-all.h

@@ -213,6 +213,7 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
 typedef struct {
     bool decided;
     int bits;
+    target_long mask;
 } TargetPageBits;
 #if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
 extern const TargetPageBits target_page;
@@ -221,15 +222,18 @@ extern TargetPageBits target_page;
 #endif
 #ifdef CONFIG_DEBUG_TCG
 #define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
+#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; })
 #else
 #define TARGET_PAGE_BITS target_page.bits
+#define TARGET_PAGE_MASK target_page.mask
 #endif
+#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
 #else
 #define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
 #endif
 
-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
 #define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
 
 /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
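
A usage sketch of how these macros typically combine (illustrative only; the
EX_PAGE_* names are hypothetical stand-ins for the macros above): the mask
rounds an address down to the start of its page, and rounding up with the page
size, as TARGET_PAGE_ALIGN does via ROUND_UP, gives the next page boundary.

#include <assert.h>
#include <stdint.h>

#define EX_PAGE_BITS 12
#define EX_PAGE_SIZE (1 << EX_PAGE_BITS)
#define EX_PAGE_MASK ((int64_t)-1 << EX_PAGE_BITS)
#define EX_PAGE_ALIGN(addr) (((addr) + EX_PAGE_SIZE - 1) & EX_PAGE_MASK)

int main(void)
{
    uint64_t addr = 0x12345;
    assert((addr & EX_PAGE_MASK) == 0x12000);  /* round down to page start */
    assert(EX_PAGE_ALIGN(addr) == 0x13000);    /* round up to next boundary */
    return 0;
}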