accel/tcg: Adjust parameters and locking with do_{ld,st}_mmio_*

Replace MMULookupPageData* with CPUTLBEntryFull, addr, size.
Move QEMU_IOTHREAD_LOCK_GUARD to the caller.

This simplifies the usage from do_ld16_beN and do_st16_leN, where
we weren't locking the entire operation, and required hoop jumping
for passing addr and size.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
master
Richard Henderson 2023-08-01 07:55:38 -07:00
parent 6db03ccc7f
commit 1966855e56
1 changed file with 34 additions and 33 deletions

View File

@@ -2066,24 +2066,22 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
/** /**
* do_ld_mmio_beN: * do_ld_mmio_beN:
* @env: cpu context * @env: cpu context
* @p: translation parameters * @full: page parameters
* @ret_be: accumulated data * @ret_be: accumulated data
* @addr: virtual address
* @size: number of bytes
* @mmu_idx: virtual address context * @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0 * @ra: return address into tcg generated code, or 0
* Context: iothread lock held
* *
* Load @p->size bytes from @p->addr, which is memory-mapped i/o. * Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be. * The bytes are concatenated in big-endian order with @ret_be.
*/ */
static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p, static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
uint64_t ret_be, int mmu_idx, uint64_t ret_be, vaddr addr, int size,
MMUAccessType type, uintptr_t ra) int mmu_idx, MMUAccessType type, uintptr_t ra)
{ {
CPUTLBEntryFull *full = p->full; for (int i = 0; i < size; i++) {
vaddr addr = p->addr;
int i, size = p->size;
QEMU_IOTHREAD_LOCK_GUARD();
for (i = 0; i < size; i++) {
uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB); uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB);
ret_be = (ret_be << 8) | x; ret_be = (ret_be << 8) | x;
} }
@@ -2232,7 +2230,9 @@ static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
unsigned tmp, half_size; unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) { if (unlikely(p->flags & TLB_MMIO)) {
return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra); QEMU_IOTHREAD_LOCK_GUARD();
return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
mmu_idx, type, ra);
} }
/* /*
@@ -2281,11 +2281,11 @@ static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
MemOp atom; MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) { if (unlikely(p->flags & TLB_MMIO)) {
p->size = size - 8; QEMU_IOTHREAD_LOCK_GUARD();
a = do_ld_mmio_beN(env, p, a, mmu_idx, MMU_DATA_LOAD, ra); a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8,
p->addr += p->size; mmu_idx, MMU_DATA_LOAD, ra);
p->size = 8; b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8,
b = do_ld_mmio_beN(env, p, 0, mmu_idx, MMU_DATA_LOAD, ra); mmu_idx, MMU_DATA_LOAD, ra);
return int128_make128(b, a); return int128_make128(b, a);
} }
@@ -2664,24 +2664,23 @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
/** /**
* do_st_mmio_leN: * do_st_mmio_leN:
* @env: cpu context * @env: cpu context
* @p: translation parameters * @full: page parameters
* @val_le: data to store * @val_le: data to store
* @addr: virtual address
* @size: number of bytes
* @mmu_idx: virtual address context * @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0 * @ra: return address into tcg generated code, or 0
* Context: iothread lock held
* *
* Store @p->size bytes at @p->addr, which is memory-mapped i/o. * Store @size bytes at @addr, which is memory-mapped i/o.
* The bytes to store are extracted in little-endian order from @val_le; * The bytes to store are extracted in little-endian order from @val_le;
* return the bytes of @val_le beyond @p->size that have not been stored. * return the bytes of @val_le beyond @p->size that have not been stored.
*/ */
static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p, static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
uint64_t val_le, int mmu_idx, uintptr_t ra) uint64_t val_le, vaddr addr, int size,
int mmu_idx, uintptr_t ra)
{ {
CPUTLBEntryFull *full = p->full; for (int i = 0; i < size; i++, val_le >>= 8) {
vaddr addr = p->addr;
int i, size = p->size;
QEMU_IOTHREAD_LOCK_GUARD();
for (i = 0; i < size; i++, val_le >>= 8) {
io_writex(env, full, mmu_idx, val_le, addr + i, ra, MO_UB); io_writex(env, full, mmu_idx, val_le, addr + i, ra, MO_UB);
} }
return val_le; return val_le;
@@ -2698,7 +2697,9 @@ static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
unsigned tmp, half_size; unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) { if (unlikely(p->flags & TLB_MMIO)) {
return do_st_mmio_leN(env, p, val_le, mmu_idx, ra); QEMU_IOTHREAD_LOCK_GUARD();
return do_st_mmio_leN(env, p->full, val_le, p->addr,
p->size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return val_le >> (p->size * 8); return val_le >> (p->size * 8);
} }
@@ -2751,11 +2752,11 @@ static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
MemOp atom; MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) { if (unlikely(p->flags & TLB_MMIO)) {
p->size = 8; QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p, int128_getlo(val_le), mmu_idx, ra); do_st_mmio_leN(env, p->full, int128_getlo(val_le),
p->size = size - 8; p->addr, 8, mmu_idx, ra);
p->addr += 8; return do_st_mmio_leN(env, p->full, int128_gethi(val_le),
return do_st_mmio_leN(env, p, int128_gethi(val_le), mmu_idx, ra); p->addr + 8, size - 8, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return int128_gethi(val_le) >> ((size - 8) * 8); return int128_gethi(val_le) >> ((size - 8) * 8);
} }