Merge remote-tracking branch 'remotes/bonzini/softmmu-smap' into staging

* remotes/bonzini/softmmu-smap: (33 commits)
  target-i386: cleanup x86_cpu_get_phys_page_debug
  target-i386: fix protection bits in the TLB for SMEP
  target-i386: support long addresses for 4MB pages (PSE-36)
  target-i386: raise page fault for reserved bits in large pages
  target-i386: unify reserved bits and NX bit check
  target-i386: simplify pte/vaddr calculation
  target-i386: raise page fault for reserved physical address bits
  target-i386: test reserved PS bit on PML4Es
  target-i386: set correct error code for reserved bit access
  target-i386: introduce support for 1 GB pages
  target-i386: introduce do_check_protect label
  target-i386: tweak handling of PG_NX_MASK
  target-i386: commonize checks for PAE and non-PAE
  target-i386: commonize checks for 4MB and 4KB pages
  target-i386: commonize checks for 2MB and 4KB pages
  target-i386: fix coding standards in x86_cpu_handle_mmu_fault
  target-i386: simplify SMAP handling in MMU_KSMAP_IDX
  target-i386: fix kernel accesses with SMAP and CPL = 3
  target-i386: move check_io helpers to seg_helper.c
  target-i386: rename KSMAP to KNOSMAP
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Branch: master
Committer: Peter Maydell <peter.maydell@linaro.org>, 2014-06-05 21:06:13 +01:00
Commit: 31e25e3e57
85 changed files with 1142 additions and 1336 deletions
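Nearly all of the code churn below is mechanical: the old *_raw accessors (ldub_raw, lduw_raw, stl_raw, stw_raw, ...) are replaced by the plain host-pointer accessors (ldub_p, lduw_p, stl_p, stw_p, ...), and the load/store helper machinery moves out of cpu-all.h and softmmu_exec.h into the new include/exec/cpu_ldst.h. As the removed macros show, stl_raw(p, v) expanded to stl_p(saddr(p), v), with saddr() being a plain cast on softmmu targets, so the rename does not change behavior. The sketch below is not part of the merge: it is a minimal standalone C illustration, with a hypothetical stl_le() helper standing in for QEMU's stl_p()/stl_le_p(), of what such a store does, namely writing a 32-bit value into a byte buffer in a fixed guest byte order, the way the board code further down emits bootloader instructions with stl_p(p++, insn).

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for an stl_p()-style helper: store a 32-bit
     * value into a byte buffer in little-endian order regardless of the
     * host's native byte order.  memcpy keeps the store alignment-safe. */
    static void stl_le(void *ptr, uint32_t v)
    {
        uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, (v >> 24) & 0xff };
        memcpy(ptr, b, sizeof(b));
    }

    int main(void)
    {
        uint32_t rom[2];
        uint32_t *p = rom;

        /* Emit two "instructions" the way the MIPS bootloader code below does. */
        stl_le(p++, 0x0bf00010);    /* j 0x1fc00040 */
        stl_le(p++, 0x00000000);    /* nop */

        for (size_t i = 0; i < sizeof(rom); i++) {
            printf("%02x ", ((uint8_t *)rom)[i]);
        }
        printf("\n");               /* prints: 10 00 f0 0b 00 00 00 00 */
        return 0;
    }

For big-endian targets QEMU selects a big-endian variant instead; either way, the *_raw wrappers removed in this series added no behavior on top of the *_p helpers for these host-side buffers.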


@ -5,6 +5,7 @@
#include <string.h>
#include "cpu.h"
#include "exec/cpu_ldst.h"
#undef DEBUG_REMAP
#ifdef DEBUG_REMAP


@ -22,11 +22,13 @@
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK
@ -330,21 +332,36 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
return qemu_ram_addr_from_host_nofail(p);
}
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS
#define SHIFT 0
#include "exec/softmmu_template.h"
#include "softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#include "softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#include "softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
#undef env
#include "softmmu_template.h"


@ -34,7 +34,7 @@ static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config)
len = strlen(s->tag);
cfg = g_malloc0(sizeof(struct virtio_9p_config) + len);
stw_raw(&cfg->tag_len, len);
stw_p(&cfg->tag_len, len);
/* We don't copy the terminating null to config space */
memcpy(cfg->tag, s->tag, len);
memcpy(config, cfg, s->config_size);


@ -239,8 +239,9 @@ static void n800_key_event(void *opaque, int keycode)
int code = s->keymap[keycode & 0x7f];
if (code == -1) {
if ((keycode & 0x7f) == RETU_KEYCODE)
if ((keycode & 0x7f) == RETU_KEYCODE) {
retu_key_event(s->retu, !(keycode & 0x80));
}
return;
}
@ -280,11 +281,14 @@ static void n800_tsc_kbd_setup(struct n800_s *s)
s->ts.opaque = s->ts.chip->opaque;
s->ts.txrx = tsc210x_txrx;
for (i = 0; i < 0x80; i ++)
for (i = 0; i < 0x80; i++) {
s->keymap[i] = -1;
for (i = 0; i < 0x10; i ++)
if (n800_keys[i] >= 0)
}
for (i = 0; i < 0x10; i++) {
if (n800_keys[i] >= 0) {
s->keymap[n800_keys[i]] = i;
}
}
qemu_add_kbd_event_handler(n800_key_event, s);
@ -308,8 +312,9 @@ static void n810_key_event(void *opaque, int keycode)
int code = s->keymap[keycode & 0x7f];
if (code == -1) {
if ((keycode & 0x7f) == RETU_KEYCODE)
if ((keycode & 0x7f) == RETU_KEYCODE) {
retu_key_event(s->retu, !(keycode & 0x80));
}
return;
}
@ -388,11 +393,14 @@ static void n810_kbd_setup(struct n800_s *s)
qemu_irq kbd_irq = qdev_get_gpio_in(s->mpu->gpio, N810_KEYBOARD_GPIO);
int i;
for (i = 0; i < 0x80; i ++)
for (i = 0; i < 0x80; i++) {
s->keymap[i] = -1;
for (i = 0; i < 0x80; i ++)
if (n810_keys[i] > 0)
}
for (i = 0; i < 0x80; i++) {
if (n810_keys[i] > 0) {
s->keymap[n810_keys[i]] = i;
}
}
qemu_add_kbd_event_handler(n810_key_event, s);
@ -449,17 +457,20 @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len)
struct mipid_s *s = (struct mipid_s *) opaque;
uint8_t ret;
if (len > 9)
if (len > 9) {
hw_error("%s: FIXME: bad SPI word width %i\n", __FUNCTION__, len);
}
if (s->p >= ARRAY_SIZE(s->resp))
if (s->p >= ARRAY_SIZE(s->resp)) {
ret = 0;
else
ret = s->resp[s->p ++];
if (s->pm --> 0)
} else {
ret = s->resp[s->p++];
}
if (s->pm-- > 0) {
s->param[s->pm] = cmd;
else
} else {
s->cmd = cmd;
}
switch (s->cmd) {
case 0x00: /* NOP */
@ -560,15 +571,17 @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len)
goto bad_cmd;
case 0x25: /* WRCNTR */
if (s->pm < 0)
if (s->pm < 0) {
s->pm = 1;
}
goto bad_cmd;
case 0x26: /* GAMSET */
if (!s->pm)
if (!s->pm) {
s->gamma = ffs(s->param[0] & 0xf) - 1;
else if (s->pm < 0)
} else if (s->pm < 0) {
s->pm = 1;
}
break;
case 0x28: /* DISPOFF */
@ -591,10 +604,11 @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len)
s->te = 0;
break;
case 0x35: /* TEON */
if (!s->pm)
if (!s->pm) {
s->te = 1;
else if (s->pm < 0)
} else if (s->pm < 0) {
s->pm = 1;
}
break;
case 0x36: /* MADCTR */
@ -613,8 +627,9 @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len)
case 0xb0: /* CLKINT / DISCTL */
case 0xb1: /* CLKEXT */
if (s->pm < 0)
if (s->pm < 0) {
s->pm = 2;
}
break;
case 0xb4: /* FRMSEL */
@ -635,8 +650,9 @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len)
break;
case 0xc2: /* IFMOD */
if (s->pm < 0)
if (s->pm < 0) {
s->pm = 2;
}
break;
case 0xc6: /* PWRCTL */
@ -834,118 +850,119 @@ static void n800_setup_nolo_tags(void *sram_base)
strcpy((void *) (p + 8), "F5");
stl_raw(p + 10, 0x04f70000);
stl_p(p + 10, 0x04f70000);
strcpy((void *) (p + 9), "RX-34");
/* RAM size in MB? */
stl_raw(p + 12, 0x80);
stl_p(p + 12, 0x80);
/* Pointer to the list of tags */
stl_raw(p + 13, OMAP2_SRAM_BASE + 0x9000);
stl_p(p + 13, OMAP2_SRAM_BASE + 0x9000);
/* The NOLO tags start here */
p = sram_base + 0x9000;
#define ADD_TAG(tag, len) \
stw_raw((uint16_t *) p + 0, tag); \
stw_raw((uint16_t *) p + 1, len); p ++; \
stl_raw(p ++, OMAP2_SRAM_BASE | (((void *) v - sram_base) & 0xffff));
stw_p((uint16_t *) p + 0, tag); \
stw_p((uint16_t *) p + 1, len); p++; \
stl_p(p++, OMAP2_SRAM_BASE | (((void *) v - sram_base) & 0xffff));
/* OMAP STI console? Pin out settings? */
ADD_TAG(0x6e01, 414);
for (i = 0; i < ARRAY_SIZE(n800_pinout); i ++)
stl_raw(v ++, n800_pinout[i]);
for (i = 0; i < ARRAY_SIZE(n800_pinout); i++) {
stl_p(v++, n800_pinout[i]);
}
/* Kernel memsize? */
ADD_TAG(0x6e05, 1);
stl_raw(v ++, 2);
stl_p(v++, 2);
/* NOLO serial console */
ADD_TAG(0x6e02, 4);
stl_raw(v ++, XLDR_LL_UART); /* UART number (1 - 3) */
stl_p(v++, XLDR_LL_UART); /* UART number (1 - 3) */
#if 0
/* CBUS settings (Retu/AVilma) */
ADD_TAG(0x6e03, 6);
stw_raw((uint16_t *) v + 0, 65); /* CBUS GPIO0 */
stw_raw((uint16_t *) v + 1, 66); /* CBUS GPIO1 */
stw_raw((uint16_t *) v + 2, 64); /* CBUS GPIO2 */
stw_p((uint16_t *) v + 0, 65); /* CBUS GPIO0 */
stw_p((uint16_t *) v + 1, 66); /* CBUS GPIO1 */
stw_p((uint16_t *) v + 2, 64); /* CBUS GPIO2 */
v += 2;
#endif
/* Nokia ASIC BB5 (Retu/Tahvo) */
ADD_TAG(0x6e0a, 4);
stw_raw((uint16_t *) v + 0, 111); /* "Retu" interrupt GPIO */
stw_raw((uint16_t *) v + 1, 108); /* "Tahvo" interrupt GPIO */
v ++;
stw_p((uint16_t *) v + 0, 111); /* "Retu" interrupt GPIO */
stw_p((uint16_t *) v + 1, 108); /* "Tahvo" interrupt GPIO */
v++;
/* LCD console? */
ADD_TAG(0x6e04, 4);
stw_raw((uint16_t *) v + 0, 30); /* ??? */
stw_raw((uint16_t *) v + 1, 24); /* ??? */
v ++;
stw_p((uint16_t *) v + 0, 30); /* ??? */
stw_p((uint16_t *) v + 1, 24); /* ??? */
v++;
#if 0
/* LCD settings */
ADD_TAG(0x6e06, 2);
stw_raw((uint16_t *) (v ++), 15); /* ??? */
stw_p((uint16_t *) (v++), 15); /* ??? */
#endif
/* I^2C (Menelaus) */
ADD_TAG(0x6e07, 4);
stl_raw(v ++, 0x00720000); /* ??? */
stl_p(v++, 0x00720000); /* ??? */
/* Unknown */
ADD_TAG(0x6e0b, 6);
stw_raw((uint16_t *) v + 0, 94); /* ??? */
stw_raw((uint16_t *) v + 1, 23); /* ??? */
stw_raw((uint16_t *) v + 2, 0); /* ??? */
stw_p((uint16_t *) v + 0, 94); /* ??? */
stw_p((uint16_t *) v + 1, 23); /* ??? */
stw_p((uint16_t *) v + 2, 0); /* ??? */
v += 2;
/* OMAP gpio switch info */
ADD_TAG(0x6e0c, 80);
strcpy((void *) v, "bat_cover"); v += 3;
stw_raw((uint16_t *) v + 0, 110); /* GPIO num ??? */
stw_raw((uint16_t *) v + 1, 1); /* GPIO num ??? */
stw_p((uint16_t *) v + 0, 110); /* GPIO num ??? */
stw_p((uint16_t *) v + 1, 1); /* GPIO num ??? */
v += 2;
strcpy((void *) v, "cam_act"); v += 3;
stw_raw((uint16_t *) v + 0, 95); /* GPIO num ??? */
stw_raw((uint16_t *) v + 1, 32); /* GPIO num ??? */
stw_p((uint16_t *) v + 0, 95); /* GPIO num ??? */
stw_p((uint16_t *) v + 1, 32); /* GPIO num ??? */
v += 2;
strcpy((void *) v, "cam_turn"); v += 3;
stw_raw((uint16_t *) v + 0, 12); /* GPIO num ??? */
stw_raw((uint16_t *) v + 1, 33); /* GPIO num ??? */
stw_p((uint16_t *) v + 0, 12); /* GPIO num ??? */
stw_p((uint16_t *) v + 1, 33); /* GPIO num ??? */
v += 2;
strcpy((void *) v, "headphone"); v += 3;
stw_raw((uint16_t *) v + 0, 107); /* GPIO num ??? */
stw_raw((uint16_t *) v + 1, 17); /* GPIO num ??? */
stw_p((uint16_t *) v + 0, 107); /* GPIO num ??? */
stw_p((uint16_t *) v + 1, 17); /* GPIO num ??? */
v += 2;
/* Bluetooth */
ADD_TAG(0x6e0e, 12);
stl_raw(v ++, 0x5c623d01); /* ??? */
stl_raw(v ++, 0x00000201); /* ??? */
stl_raw(v ++, 0x00000000); /* ??? */
stl_p(v++, 0x5c623d01); /* ??? */
stl_p(v++, 0x00000201); /* ??? */
stl_p(v++, 0x00000000); /* ??? */
/* CX3110x WLAN settings */
ADD_TAG(0x6e0f, 8);
stl_raw(v ++, 0x00610025); /* ??? */
stl_raw(v ++, 0xffff0057); /* ??? */
stl_p(v++, 0x00610025); /* ??? */
stl_p(v++, 0xffff0057); /* ??? */
/* MMC host settings */
ADD_TAG(0x6e10, 12);
stl_raw(v ++, 0xffff000f); /* ??? */
stl_raw(v ++, 0xffffffff); /* ??? */
stl_raw(v ++, 0x00000060); /* ??? */
stl_p(v++, 0xffff000f); /* ??? */
stl_p(v++, 0xffffffff); /* ??? */
stl_p(v++, 0x00000060); /* ??? */
/* OneNAND chip select */
ADD_TAG(0x6e11, 10);
stl_raw(v ++, 0x00000401); /* ??? */
stl_raw(v ++, 0x0002003a); /* ??? */
stl_raw(v ++, 0x00000002); /* ??? */
stl_p(v++, 0x00000401); /* ??? */
stl_p(v++, 0x0002003a); /* ??? */
stl_p(v++, 0x00000002); /* ??? */
/* TEA5761 sensor settings */
ADD_TAG(0x6e12, 2);
stl_raw(v ++, 93); /* GPIO num ??? */
stl_p(v++, 93); /* GPIO num ??? */
#if 0
/* Unknown tag */
@ -956,8 +973,8 @@ static void n800_setup_nolo_tags(void *sram_base)
#endif
/* End of the list */
stl_raw(p ++, 0x00000000);
stl_raw(p ++, 0x00000000);
stl_p(p++, 0x00000000);
stl_p(p++, 0x00000000);
}
/* This task is normally performed by the bootloader. If we're loading
@ -1032,8 +1049,9 @@ static void n8x0_boot_init(void *opaque)
s->mpu->cpu->env.GE = 0x5;
/* If the machine has a slided keyboard, open it */
if (s->kbd)
if (s->kbd) {
qemu_irq_raise(qdev_get_gpio_in(s->mpu->gpio, N810_SLIDE_GPIO));
}
}
#define OMAP_TAG_NOKIA_BT 0x4e01
@ -1119,112 +1137,112 @@ static int n8x0_atag_setup(void *p, int model)
w = p;
stw_raw(w ++, OMAP_TAG_UART); /* u16 tag */
stw_raw(w ++, 4); /* u16 len */
stw_raw(w ++, (1 << 2) | (1 << 1) | (1 << 0)); /* uint enabled_uarts */
w ++;
stw_p(w++, OMAP_TAG_UART); /* u16 tag */
stw_p(w++, 4); /* u16 len */
stw_p(w++, (1 << 2) | (1 << 1) | (1 << 0)); /* uint enabled_uarts */
w++;
#if 0
stw_raw(w ++, OMAP_TAG_SERIAL_CONSOLE); /* u16 tag */
stw_raw(w ++, 4); /* u16 len */
stw_raw(w ++, XLDR_LL_UART + 1); /* u8 console_uart */
stw_raw(w ++, 115200); /* u32 console_speed */
stw_p(w++, OMAP_TAG_SERIAL_CONSOLE); /* u16 tag */
stw_p(w++, 4); /* u16 len */
stw_p(w++, XLDR_LL_UART + 1); /* u8 console_uart */
stw_p(w++, 115200); /* u32 console_speed */
#endif
stw_raw(w ++, OMAP_TAG_LCD); /* u16 tag */
stw_raw(w ++, 36); /* u16 len */
stw_p(w++, OMAP_TAG_LCD); /* u16 tag */
stw_p(w++, 36); /* u16 len */
strcpy((void *) w, "QEMU LCD panel"); /* char panel_name[16] */
w += 8;
strcpy((void *) w, "blizzard"); /* char ctrl_name[16] */
w += 8;
stw_raw(w ++, N810_BLIZZARD_RESET_GPIO); /* TODO: n800 s16 nreset_gpio */
stw_raw(w ++, 24); /* u8 data_lines */
stw_p(w++, N810_BLIZZARD_RESET_GPIO); /* TODO: n800 s16 nreset_gpio */
stw_p(w++, 24); /* u8 data_lines */
stw_raw(w ++, OMAP_TAG_CBUS); /* u16 tag */
stw_raw(w ++, 8); /* u16 len */
stw_raw(w ++, N8X0_CBUS_CLK_GPIO); /* s16 clk_gpio */
stw_raw(w ++, N8X0_CBUS_DAT_GPIO); /* s16 dat_gpio */
stw_raw(w ++, N8X0_CBUS_SEL_GPIO); /* s16 sel_gpio */
w ++;
stw_p(w++, OMAP_TAG_CBUS); /* u16 tag */
stw_p(w++, 8); /* u16 len */
stw_p(w++, N8X0_CBUS_CLK_GPIO); /* s16 clk_gpio */
stw_p(w++, N8X0_CBUS_DAT_GPIO); /* s16 dat_gpio */
stw_p(w++, N8X0_CBUS_SEL_GPIO); /* s16 sel_gpio */
w++;
stw_raw(w ++, OMAP_TAG_EM_ASIC_BB5); /* u16 tag */
stw_raw(w ++, 4); /* u16 len */
stw_raw(w ++, N8X0_RETU_GPIO); /* s16 retu_irq_gpio */
stw_raw(w ++, N8X0_TAHVO_GPIO); /* s16 tahvo_irq_gpio */
stw_p(w++, OMAP_TAG_EM_ASIC_BB5); /* u16 tag */
stw_p(w++, 4); /* u16 len */
stw_p(w++, N8X0_RETU_GPIO); /* s16 retu_irq_gpio */
stw_p(w++, N8X0_TAHVO_GPIO); /* s16 tahvo_irq_gpio */
gpiosw = (model == 810) ? n810_gpiosw_info : n800_gpiosw_info;
for (; gpiosw->name; gpiosw ++) {
stw_raw(w ++, OMAP_TAG_GPIO_SWITCH); /* u16 tag */
stw_raw(w ++, 20); /* u16 len */
for (; gpiosw->name; gpiosw++) {
stw_p(w++, OMAP_TAG_GPIO_SWITCH); /* u16 tag */
stw_p(w++, 20); /* u16 len */
strcpy((void *) w, gpiosw->name); /* char name[12] */
w += 6;
stw_raw(w ++, gpiosw->line); /* u16 gpio */
stw_raw(w ++, gpiosw->type);
stw_raw(w ++, 0);
stw_raw(w ++, 0);
stw_p(w++, gpiosw->line); /* u16 gpio */
stw_p(w++, gpiosw->type);
stw_p(w++, 0);
stw_p(w++, 0);
}
stw_raw(w ++, OMAP_TAG_NOKIA_BT); /* u16 tag */
stw_raw(w ++, 12); /* u16 len */
stw_p(w++, OMAP_TAG_NOKIA_BT); /* u16 tag */
stw_p(w++, 12); /* u16 len */
b = (void *) w;
stb_raw(b ++, 0x01); /* u8 chip_type (CSR) */
stb_raw(b ++, N8X0_BT_WKUP_GPIO); /* u8 bt_wakeup_gpio */
stb_raw(b ++, N8X0_BT_HOST_WKUP_GPIO); /* u8 host_wakeup_gpio */
stb_raw(b ++, N8X0_BT_RESET_GPIO); /* u8 reset_gpio */
stb_raw(b ++, BT_UART + 1); /* u8 bt_uart */
stb_p(b++, 0x01); /* u8 chip_type (CSR) */
stb_p(b++, N8X0_BT_WKUP_GPIO); /* u8 bt_wakeup_gpio */
stb_p(b++, N8X0_BT_HOST_WKUP_GPIO); /* u8 host_wakeup_gpio */
stb_p(b++, N8X0_BT_RESET_GPIO); /* u8 reset_gpio */
stb_p(b++, BT_UART + 1); /* u8 bt_uart */
memcpy(b, &n8x0_bd_addr, 6); /* u8 bd_addr[6] */
b += 6;
stb_raw(b ++, 0x02); /* u8 bt_sysclk (38.4) */
stb_p(b++, 0x02); /* u8 bt_sysclk (38.4) */
w = (void *) b;
stw_raw(w ++, OMAP_TAG_WLAN_CX3110X); /* u16 tag */
stw_raw(w ++, 8); /* u16 len */
stw_raw(w ++, 0x25); /* u8 chip_type */
stw_raw(w ++, N8X0_WLAN_PWR_GPIO); /* s16 power_gpio */
stw_raw(w ++, N8X0_WLAN_IRQ_GPIO); /* s16 irq_gpio */
stw_raw(w ++, -1); /* s16 spi_cs_gpio */
stw_p(w++, OMAP_TAG_WLAN_CX3110X); /* u16 tag */
stw_p(w++, 8); /* u16 len */
stw_p(w++, 0x25); /* u8 chip_type */
stw_p(w++, N8X0_WLAN_PWR_GPIO); /* s16 power_gpio */
stw_p(w++, N8X0_WLAN_IRQ_GPIO); /* s16 irq_gpio */
stw_p(w++, -1); /* s16 spi_cs_gpio */
stw_raw(w ++, OMAP_TAG_MMC); /* u16 tag */
stw_raw(w ++, 16); /* u16 len */
stw_p(w++, OMAP_TAG_MMC); /* u16 tag */
stw_p(w++, 16); /* u16 len */
if (model == 810) {
stw_raw(w ++, 0x23f); /* unsigned flags */
stw_raw(w ++, -1); /* s16 power_pin */
stw_raw(w ++, -1); /* s16 switch_pin */
stw_raw(w ++, -1); /* s16 wp_pin */
stw_raw(w ++, 0x240); /* unsigned flags */
stw_raw(w ++, 0xc000); /* s16 power_pin */
stw_raw(w ++, 0x0248); /* s16 switch_pin */
stw_raw(w ++, 0xc000); /* s16 wp_pin */
stw_p(w++, 0x23f); /* unsigned flags */
stw_p(w++, -1); /* s16 power_pin */
stw_p(w++, -1); /* s16 switch_pin */
stw_p(w++, -1); /* s16 wp_pin */
stw_p(w++, 0x240); /* unsigned flags */
stw_p(w++, 0xc000); /* s16 power_pin */
stw_p(w++, 0x0248); /* s16 switch_pin */
stw_p(w++, 0xc000); /* s16 wp_pin */
} else {
stw_raw(w ++, 0xf); /* unsigned flags */
stw_raw(w ++, -1); /* s16 power_pin */
stw_raw(w ++, -1); /* s16 switch_pin */
stw_raw(w ++, -1); /* s16 wp_pin */
stw_raw(w ++, 0); /* unsigned flags */
stw_raw(w ++, 0); /* s16 power_pin */
stw_raw(w ++, 0); /* s16 switch_pin */
stw_raw(w ++, 0); /* s16 wp_pin */
stw_p(w++, 0xf); /* unsigned flags */
stw_p(w++, -1); /* s16 power_pin */
stw_p(w++, -1); /* s16 switch_pin */
stw_p(w++, -1); /* s16 wp_pin */
stw_p(w++, 0); /* unsigned flags */
stw_p(w++, 0); /* s16 power_pin */
stw_p(w++, 0); /* s16 switch_pin */
stw_p(w++, 0); /* s16 wp_pin */
}
stw_raw(w ++, OMAP_TAG_TEA5761); /* u16 tag */
stw_raw(w ++, 4); /* u16 len */
stw_raw(w ++, N8X0_TEA5761_CS_GPIO); /* u16 enable_gpio */
w ++;
stw_p(w++, OMAP_TAG_TEA5761); /* u16 tag */
stw_p(w++, 4); /* u16 len */
stw_p(w++, N8X0_TEA5761_CS_GPIO); /* u16 enable_gpio */
w++;
partition = (model == 810) ? n810_part_info : n800_part_info;
for (; partition->name; partition ++) {
stw_raw(w ++, OMAP_TAG_PARTITION); /* u16 tag */
stw_raw(w ++, 28); /* u16 len */
for (; partition->name; partition++) {
stw_p(w++, OMAP_TAG_PARTITION); /* u16 tag */
stw_p(w++, 28); /* u16 len */
strcpy((void *) w, partition->name); /* char name[16] */
l = (void *) (w + 8);
stl_raw(l ++, partition->size); /* unsigned int size */
stl_raw(l ++, partition->offset); /* unsigned int offset */
stl_raw(l ++, partition->mask); /* unsigned int mask_flags */
stl_p(l++, partition->size); /* unsigned int size */
stl_p(l++, partition->offset); /* unsigned int offset */
stl_p(l++, partition->mask); /* unsigned int mask_flags */
w = (void *) l;
}
stw_raw(w ++, OMAP_TAG_BOOT_REASON); /* u16 tag */
stw_raw(w ++, 12); /* u16 len */
stw_p(w++, OMAP_TAG_BOOT_REASON); /* u16 tag */
stw_p(w++, 12); /* u16 len */
#if 0
strcpy((void *) w, "por"); /* char reason_str[12] */
strcpy((void *) w, "charger"); /* char reason_str[12] */
@ -1242,15 +1260,15 @@ static int n8x0_atag_setup(void *p, int model)
w += 6;
tag = (model == 810) ? "RX-44" : "RX-34";
stw_raw(w ++, OMAP_TAG_VERSION_STR); /* u16 tag */
stw_raw(w ++, 24); /* u16 len */
stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */
stw_p(w++, 24); /* u16 len */
strcpy((void *) w, "product"); /* char component[12] */
w += 6;
strcpy((void *) w, tag); /* char version[12] */
w += 6;
stw_raw(w ++, OMAP_TAG_VERSION_STR); /* u16 tag */
stw_raw(w ++, 24); /* u16 len */
stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */
stw_p(w++, 24); /* u16 len */
strcpy((void *) w, "hw-build"); /* char component[12] */
w += 6;
strcpy((void *) w, "QEMU ");
@ -1258,8 +1276,8 @@ static int n8x0_atag_setup(void *p, int model)
w += 6;
tag = (model == 810) ? "1.1.10-qemu" : "1.1.6-qemu";
stw_raw(w ++, OMAP_TAG_VERSION_STR); /* u16 tag */
stw_raw(w ++, 24); /* u16 len */
stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */
stw_p(w++, 24); /* u16 len */
strcpy((void *) w, "nolo"); /* char component[12] */
w += 6;
strcpy((void *) w, tag); /* char version[12] */
@ -1315,9 +1333,9 @@ static void n8x0_init(MachineState *machine,
n8x0_gpio_setup(s);
n8x0_nand_setup(s);
n8x0_i2c_setup(s);
if (model == 800)
if (model == 800) {
n800_tsc_kbd_setup(s);
else if (model == 810) {
} else if (model == 810) {
n810_tsc_setup(s);
n810_kbd_setup(s);
}


@ -487,12 +487,12 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
bdrv_get_geometry(s->bs, &capacity);
memset(&blkcfg, 0, sizeof(blkcfg));
stq_raw(&blkcfg.capacity, capacity);
stl_raw(&blkcfg.seg_max, 128 - 2);
stw_raw(&blkcfg.cylinders, s->conf->cyls);
stl_raw(&blkcfg.blk_size, blk_size);
stw_raw(&blkcfg.min_io_size, s->conf->min_io_size / blk_size);
stw_raw(&blkcfg.opt_io_size, s->conf->opt_io_size / blk_size);
stq_p(&blkcfg.capacity, capacity);
stl_p(&blkcfg.seg_max, 128 - 2);
stw_p(&blkcfg.cylinders, s->conf->cyls);
stl_p(&blkcfg.blk_size, blk_size);
stw_p(&blkcfg.min_io_size, s->conf->min_io_size / blk_size);
stw_p(&blkcfg.opt_io_size, s->conf->opt_io_size / blk_size);
blkcfg.heads = s->conf->heads;
/*
* We must ensure that the block device capacity is a multiple of


@ -50,7 +50,7 @@ static void glue(draw_line2_, DEPTH)(void *opaque,
uint8_t v, r, g, b;
do {
v = ldub_raw((void *) s);
v = ldub_p((void *) s);
r = (pal[v & 3] >> 4) & 0xf0;
g = pal[v & 3] & 0xf0;
b = (pal[v & 3] << 4) & 0xf0;
@ -89,7 +89,7 @@ static void glue(draw_line4_, DEPTH)(void *opaque,
uint8_t v, r, g, b;
do {
v = ldub_raw((void *) s);
v = ldub_p((void *) s);
r = (pal[v & 0xf] >> 4) & 0xf0;
g = pal[v & 0xf] & 0xf0;
b = (pal[v & 0xf] << 4) & 0xf0;
@ -116,7 +116,7 @@ static void glue(draw_line8_, DEPTH)(void *opaque,
uint8_t v, r, g, b;
do {
v = ldub_raw((void *) s);
v = ldub_p((void *) s);
r = (pal[v] >> 4) & 0xf0;
g = pal[v] & 0xf0;
b = (pal[v] << 4) & 0xf0;
@ -136,7 +136,7 @@ static void glue(draw_line12_, DEPTH)(void *opaque,
uint8_t r, g, b;
do {
v = lduw_raw((void *) s);
v = lduw_p((void *) s);
r = (v >> 4) & 0xf0;
g = v & 0xf0;
b = (v << 4) & 0xf0;
@ -159,7 +159,7 @@ static void glue(draw_line16_, DEPTH)(void *opaque,
uint8_t r, g, b;
do {
v = lduw_raw((void *) s);
v = lduw_p((void *) s);
r = (v >> 8) & 0xf8;
g = (v >> 3) & 0xfc;
b = (v << 3) & 0xf8;


@ -47,7 +47,7 @@ static void glue(draw_line8_, PIXEL_NAME)(
{
uint8_t v, r, g, b;
do {
v = ldub_raw(s);
v = ldub_p(s);
r = (pal[v] >> 16) & 0xff;
g = (pal[v] >> 8) & 0xff;
b = (pal[v] >> 0) & 0xff;
@ -64,7 +64,7 @@ static void glue(draw_line16_, PIXEL_NAME)(
uint8_t r, g, b;
do {
rgb565 = lduw_raw(s);
rgb565 = lduw_p(s);
r = ((rgb565 >> 11) & 0x1f) << 3;
g = ((rgb565 >> 5) & 0x3f) << 2;
b = ((rgb565 >> 0) & 0x1f) << 3;
@ -80,7 +80,7 @@ static void glue(draw_line32_, PIXEL_NAME)(
uint8_t r, g, b;
do {
ldub_raw(s);
ldub_p(s);
#if defined(TARGET_WORDS_BIGENDIAN)
r = s[1];
g = s[2];


@ -361,7 +361,7 @@ static void glue(vga_draw_line15_, PIXEL_NAME)(VGACommonState *s1, uint8_t *d,
w = width;
do {
v = lduw_raw((void *)s);
v = lduw_p((void *)s);
r = (v >> 7) & 0xf8;
g = (v >> 2) & 0xf8;
b = (v << 3) & 0xf8;
@ -386,7 +386,7 @@ static void glue(vga_draw_line16_, PIXEL_NAME)(VGACommonState *s1, uint8_t *d,
w = width;
do {
v = lduw_raw((void *)s);
v = lduw_p((void *)s);
r = (v >> 8) & 0xf8;
g = (v >> 3) & 0xfc;
b = (v << 3) & 0xf8;


@ -178,24 +178,24 @@ static void write_bootloader (CPUMIPSState *env, uint8_t *base, int64_t kernel_a
/* Small bootloader */
p = (uint32_t *) base;
stl_raw(p++, 0x0bf00010); /* j 0x1fc00040 */
stl_raw(p++, 0x00000000); /* nop */
stl_p(p++, 0x0bf00010); /* j 0x1fc00040 */
stl_p(p++, 0x00000000); /* nop */
/* Second part of the bootloader */
p = (uint32_t *) (base + 0x040);
stl_raw(p++, 0x3c040000); /* lui a0, 0 */
stl_raw(p++, 0x34840002); /* ori a0, a0, 2 */
stl_raw(p++, 0x3c050000 | ((ENVP_ADDR >> 16) & 0xffff)); /* lui a1, high(ENVP_ADDR) */
stl_raw(p++, 0x34a50000 | (ENVP_ADDR & 0xffff)); /* ori a1, a0, low(ENVP_ADDR) */
stl_raw(p++, 0x3c060000 | (((ENVP_ADDR + 8) >> 16) & 0xffff)); /* lui a2, high(ENVP_ADDR + 8) */
stl_raw(p++, 0x34c60000 | ((ENVP_ADDR + 8) & 0xffff)); /* ori a2, a2, low(ENVP_ADDR + 8) */
stl_raw(p++, 0x3c070000 | (loaderparams.ram_size >> 16)); /* lui a3, high(env->ram_size) */
stl_raw(p++, 0x34e70000 | (loaderparams.ram_size & 0xffff)); /* ori a3, a3, low(env->ram_size) */
stl_raw(p++, 0x3c1f0000 | ((kernel_addr >> 16) & 0xffff)); /* lui ra, high(kernel_addr) */;
stl_raw(p++, 0x37ff0000 | (kernel_addr & 0xffff)); /* ori ra, ra, low(kernel_addr) */
stl_raw(p++, 0x03e00008); /* jr ra */
stl_raw(p++, 0x00000000); /* nop */
stl_p(p++, 0x3c040000); /* lui a0, 0 */
stl_p(p++, 0x34840002); /* ori a0, a0, 2 */
stl_p(p++, 0x3c050000 | ((ENVP_ADDR >> 16) & 0xffff)); /* lui a1, high(ENVP_ADDR) */
stl_p(p++, 0x34a50000 | (ENVP_ADDR & 0xffff)); /* ori a1, a0, low(ENVP_ADDR) */
stl_p(p++, 0x3c060000 | (((ENVP_ADDR + 8) >> 16) & 0xffff)); /* lui a2, high(ENVP_ADDR + 8) */
stl_p(p++, 0x34c60000 | ((ENVP_ADDR + 8) & 0xffff)); /* ori a2, a2, low(ENVP_ADDR + 8) */
stl_p(p++, 0x3c070000 | (loaderparams.ram_size >> 16)); /* lui a3, high(env->ram_size) */
stl_p(p++, 0x34e70000 | (loaderparams.ram_size & 0xffff)); /* ori a3, a3, low(env->ram_size) */
stl_p(p++, 0x3c1f0000 | ((kernel_addr >> 16) & 0xffff)); /* lui ra, high(kernel_addr) */;
stl_p(p++, 0x37ff0000 | (kernel_addr & 0xffff)); /* ori ra, ra, low(kernel_addr) */
stl_p(p++, 0x03e00008); /* jr ra */
stl_p(p++, 0x00000000); /* nop */
}


@ -609,136 +609,136 @@ static void write_bootloader (CPUMIPSState *env, uint8_t *base,
/* Small bootloader */
p = (uint32_t *)base;
stl_raw(p++, 0x0bf00160); /* j 0x1fc00580 */
stl_raw(p++, 0x00000000); /* nop */
stl_p(p++, 0x0bf00160); /* j 0x1fc00580 */
stl_p(p++, 0x00000000); /* nop */
/* YAMON service vector */
stl_raw(base + 0x500, 0xbfc00580); /* start: */
stl_raw(base + 0x504, 0xbfc0083c); /* print_count: */
stl_raw(base + 0x520, 0xbfc00580); /* start: */
stl_raw(base + 0x52c, 0xbfc00800); /* flush_cache: */
stl_raw(base + 0x534, 0xbfc00808); /* print: */
stl_raw(base + 0x538, 0xbfc00800); /* reg_cpu_isr: */
stl_raw(base + 0x53c, 0xbfc00800); /* unred_cpu_isr: */
stl_raw(base + 0x540, 0xbfc00800); /* reg_ic_isr: */
stl_raw(base + 0x544, 0xbfc00800); /* unred_ic_isr: */
stl_raw(base + 0x548, 0xbfc00800); /* reg_esr: */
stl_raw(base + 0x54c, 0xbfc00800); /* unreg_esr: */
stl_raw(base + 0x550, 0xbfc00800); /* getchar: */
stl_raw(base + 0x554, 0xbfc00800); /* syscon_read: */
stl_p(base + 0x500, 0xbfc00580); /* start: */
stl_p(base + 0x504, 0xbfc0083c); /* print_count: */
stl_p(base + 0x520, 0xbfc00580); /* start: */
stl_p(base + 0x52c, 0xbfc00800); /* flush_cache: */
stl_p(base + 0x534, 0xbfc00808); /* print: */
stl_p(base + 0x538, 0xbfc00800); /* reg_cpu_isr: */
stl_p(base + 0x53c, 0xbfc00800); /* unred_cpu_isr: */
stl_p(base + 0x540, 0xbfc00800); /* reg_ic_isr: */
stl_p(base + 0x544, 0xbfc00800); /* unred_ic_isr: */
stl_p(base + 0x548, 0xbfc00800); /* reg_esr: */
stl_p(base + 0x54c, 0xbfc00800); /* unreg_esr: */
stl_p(base + 0x550, 0xbfc00800); /* getchar: */
stl_p(base + 0x554, 0xbfc00800); /* syscon_read: */
/* Second part of the bootloader */
p = (uint32_t *) (base + 0x580);
stl_raw(p++, 0x24040002); /* addiu a0, zero, 2 */
stl_raw(p++, 0x3c1d0000 | (((ENVP_ADDR - 64) >> 16) & 0xffff)); /* lui sp, high(ENVP_ADDR) */
stl_raw(p++, 0x37bd0000 | ((ENVP_ADDR - 64) & 0xffff)); /* ori sp, sp, low(ENVP_ADDR) */
stl_raw(p++, 0x3c050000 | ((ENVP_ADDR >> 16) & 0xffff)); /* lui a1, high(ENVP_ADDR) */
stl_raw(p++, 0x34a50000 | (ENVP_ADDR & 0xffff)); /* ori a1, a1, low(ENVP_ADDR) */
stl_raw(p++, 0x3c060000 | (((ENVP_ADDR + 8) >> 16) & 0xffff)); /* lui a2, high(ENVP_ADDR + 8) */
stl_raw(p++, 0x34c60000 | ((ENVP_ADDR + 8) & 0xffff)); /* ori a2, a2, low(ENVP_ADDR + 8) */
stl_raw(p++, 0x3c070000 | (loaderparams.ram_size >> 16)); /* lui a3, high(ram_size) */
stl_raw(p++, 0x34e70000 | (loaderparams.ram_size & 0xffff)); /* ori a3, a3, low(ram_size) */
stl_p(p++, 0x24040002); /* addiu a0, zero, 2 */
stl_p(p++, 0x3c1d0000 | (((ENVP_ADDR - 64) >> 16) & 0xffff)); /* lui sp, high(ENVP_ADDR) */
stl_p(p++, 0x37bd0000 | ((ENVP_ADDR - 64) & 0xffff)); /* ori sp, sp, low(ENVP_ADDR) */
stl_p(p++, 0x3c050000 | ((ENVP_ADDR >> 16) & 0xffff)); /* lui a1, high(ENVP_ADDR) */
stl_p(p++, 0x34a50000 | (ENVP_ADDR & 0xffff)); /* ori a1, a1, low(ENVP_ADDR) */
stl_p(p++, 0x3c060000 | (((ENVP_ADDR + 8) >> 16) & 0xffff)); /* lui a2, high(ENVP_ADDR + 8) */
stl_p(p++, 0x34c60000 | ((ENVP_ADDR + 8) & 0xffff)); /* ori a2, a2, low(ENVP_ADDR + 8) */
stl_p(p++, 0x3c070000 | (loaderparams.ram_size >> 16)); /* lui a3, high(ram_size) */
stl_p(p++, 0x34e70000 | (loaderparams.ram_size & 0xffff)); /* ori a3, a3, low(ram_size) */
/* Load BAR registers as done by YAMON */
stl_raw(p++, 0x3c09b400); /* lui t1, 0xb400 */
stl_p(p++, 0x3c09b400); /* lui t1, 0xb400 */
#ifdef TARGET_WORDS_BIGENDIAN
stl_raw(p++, 0x3c08df00); /* lui t0, 0xdf00 */
stl_p(p++, 0x3c08df00); /* lui t0, 0xdf00 */
#else
stl_raw(p++, 0x340800df); /* ori t0, r0, 0x00df */
stl_p(p++, 0x340800df); /* ori t0, r0, 0x00df */
#endif
stl_raw(p++, 0xad280068); /* sw t0, 0x0068(t1) */
stl_p(p++, 0xad280068); /* sw t0, 0x0068(t1) */
stl_raw(p++, 0x3c09bbe0); /* lui t1, 0xbbe0 */
stl_p(p++, 0x3c09bbe0); /* lui t1, 0xbbe0 */
#ifdef TARGET_WORDS_BIGENDIAN
stl_raw(p++, 0x3c08c000); /* lui t0, 0xc000 */
stl_p(p++, 0x3c08c000); /* lui t0, 0xc000 */
#else
stl_raw(p++, 0x340800c0); /* ori t0, r0, 0x00c0 */
stl_p(p++, 0x340800c0); /* ori t0, r0, 0x00c0 */
#endif
stl_raw(p++, 0xad280048); /* sw t0, 0x0048(t1) */
stl_p(p++, 0xad280048); /* sw t0, 0x0048(t1) */
#ifdef TARGET_WORDS_BIGENDIAN
stl_raw(p++, 0x3c084000); /* lui t0, 0x4000 */
stl_p(p++, 0x3c084000); /* lui t0, 0x4000 */
#else
stl_raw(p++, 0x34080040); /* ori t0, r0, 0x0040 */
stl_p(p++, 0x34080040); /* ori t0, r0, 0x0040 */
#endif
stl_raw(p++, 0xad280050); /* sw t0, 0x0050(t1) */
stl_p(p++, 0xad280050); /* sw t0, 0x0050(t1) */
#ifdef TARGET_WORDS_BIGENDIAN
stl_raw(p++, 0x3c088000); /* lui t0, 0x8000 */
stl_p(p++, 0x3c088000); /* lui t0, 0x8000 */
#else
stl_raw(p++, 0x34080080); /* ori t0, r0, 0x0080 */
stl_p(p++, 0x34080080); /* ori t0, r0, 0x0080 */
#endif
stl_raw(p++, 0xad280058); /* sw t0, 0x0058(t1) */
stl_p(p++, 0xad280058); /* sw t0, 0x0058(t1) */
#ifdef TARGET_WORDS_BIGENDIAN
stl_raw(p++, 0x3c083f00); /* lui t0, 0x3f00 */
stl_p(p++, 0x3c083f00); /* lui t0, 0x3f00 */
#else
stl_raw(p++, 0x3408003f); /* ori t0, r0, 0x003f */
stl_p(p++, 0x3408003f); /* ori t0, r0, 0x003f */
#endif
stl_raw(p++, 0xad280060); /* sw t0, 0x0060(t1) */
stl_p(p++, 0xad280060); /* sw t0, 0x0060(t1) */
#ifdef TARGET_WORDS_BIGENDIAN
stl_raw(p++, 0x3c08c100); /* lui t0, 0xc100 */
stl_p(p++, 0x3c08c100); /* lui t0, 0xc100 */
#else
stl_raw(p++, 0x340800c1); /* ori t0, r0, 0x00c1 */
stl_p(p++, 0x340800c1); /* ori t0, r0, 0x00c1 */
#endif
stl_raw(p++, 0xad280080); /* sw t0, 0x0080(t1) */
stl_p(p++, 0xad280080); /* sw t0, 0x0080(t1) */
#ifdef TARGET_WORDS_BIGENDIAN
stl_raw(p++, 0x3c085e00); /* lui t0, 0x5e00 */
stl_p(p++, 0x3c085e00); /* lui t0, 0x5e00 */
#else
stl_raw(p++, 0x3408005e); /* ori t0, r0, 0x005e */
stl_p(p++, 0x3408005e); /* ori t0, r0, 0x005e */
#endif
stl_raw(p++, 0xad280088); /* sw t0, 0x0088(t1) */
stl_p(p++, 0xad280088); /* sw t0, 0x0088(t1) */
/* Jump to kernel code */
stl_raw(p++, 0x3c1f0000 | ((kernel_entry >> 16) & 0xffff)); /* lui ra, high(kernel_entry) */
stl_raw(p++, 0x37ff0000 | (kernel_entry & 0xffff)); /* ori ra, ra, low(kernel_entry) */
stl_raw(p++, 0x03e00008); /* jr ra */
stl_raw(p++, 0x00000000); /* nop */
stl_p(p++, 0x3c1f0000 | ((kernel_entry >> 16) & 0xffff)); /* lui ra, high(kernel_entry) */
stl_p(p++, 0x37ff0000 | (kernel_entry & 0xffff)); /* ori ra, ra, low(kernel_entry) */
stl_p(p++, 0x03e00008); /* jr ra */
stl_p(p++, 0x00000000); /* nop */
/* YAMON subroutines */
p = (uint32_t *) (base + 0x800);
stl_raw(p++, 0x03e00008); /* jr ra */
stl_raw(p++, 0x24020000); /* li v0,0 */
stl_p(p++, 0x03e00008); /* jr ra */
stl_p(p++, 0x24020000); /* li v0,0 */
/* 808 YAMON print */
stl_raw(p++, 0x03e06821); /* move t5,ra */
stl_raw(p++, 0x00805821); /* move t3,a0 */
stl_raw(p++, 0x00a05021); /* move t2,a1 */
stl_raw(p++, 0x91440000); /* lbu a0,0(t2) */
stl_raw(p++, 0x254a0001); /* addiu t2,t2,1 */
stl_raw(p++, 0x10800005); /* beqz a0,834 */
stl_raw(p++, 0x00000000); /* nop */
stl_raw(p++, 0x0ff0021c); /* jal 870 */
stl_raw(p++, 0x00000000); /* nop */
stl_raw(p++, 0x08000205); /* j 814 */
stl_raw(p++, 0x00000000); /* nop */
stl_raw(p++, 0x01a00008); /* jr t5 */
stl_raw(p++, 0x01602021); /* move a0,t3 */
stl_p(p++, 0x03e06821); /* move t5,ra */
stl_p(p++, 0x00805821); /* move t3,a0 */
stl_p(p++, 0x00a05021); /* move t2,a1 */
stl_p(p++, 0x91440000); /* lbu a0,0(t2) */
stl_p(p++, 0x254a0001); /* addiu t2,t2,1 */
stl_p(p++, 0x10800005); /* beqz a0,834 */
stl_p(p++, 0x00000000); /* nop */
stl_p(p++, 0x0ff0021c); /* jal 870 */
stl_p(p++, 0x00000000); /* nop */
stl_p(p++, 0x08000205); /* j 814 */
stl_p(p++, 0x00000000); /* nop */
stl_p(p++, 0x01a00008); /* jr t5 */
stl_p(p++, 0x01602021); /* move a0,t3 */
/* 0x83c YAMON print_count */
stl_raw(p++, 0x03e06821); /* move t5,ra */
stl_raw(p++, 0x00805821); /* move t3,a0 */
stl_raw(p++, 0x00a05021); /* move t2,a1 */
stl_raw(p++, 0x00c06021); /* move t4,a2 */
stl_raw(p++, 0x91440000); /* lbu a0,0(t2) */
stl_raw(p++, 0x0ff0021c); /* jal 870 */
stl_raw(p++, 0x00000000); /* nop */
stl_raw(p++, 0x254a0001); /* addiu t2,t2,1 */
stl_raw(p++, 0x258cffff); /* addiu t4,t4,-1 */
stl_raw(p++, 0x1580fffa); /* bnez t4,84c */
stl_raw(p++, 0x00000000); /* nop */
stl_raw(p++, 0x01a00008); /* jr t5 */
stl_raw(p++, 0x01602021); /* move a0,t3 */
stl_p(p++, 0x03e06821); /* move t5,ra */
stl_p(p++, 0x00805821); /* move t3,a0 */
stl_p(p++, 0x00a05021); /* move t2,a1 */
stl_p(p++, 0x00c06021); /* move t4,a2 */
stl_p(p++, 0x91440000); /* lbu a0,0(t2) */
stl_p(p++, 0x0ff0021c); /* jal 870 */
stl_p(p++, 0x00000000); /* nop */
stl_p(p++, 0x254a0001); /* addiu t2,t2,1 */
stl_p(p++, 0x258cffff); /* addiu t4,t4,-1 */
stl_p(p++, 0x1580fffa); /* bnez t4,84c */
stl_p(p++, 0x00000000); /* nop */
stl_p(p++, 0x01a00008); /* jr t5 */
stl_p(p++, 0x01602021); /* move a0,t3 */
/* 0x870 */
stl_raw(p++, 0x3c08b800); /* lui t0,0xb400 */
stl_raw(p++, 0x350803f8); /* ori t0,t0,0x3f8 */
stl_raw(p++, 0x91090005); /* lbu t1,5(t0) */
stl_raw(p++, 0x00000000); /* nop */
stl_raw(p++, 0x31290040); /* andi t1,t1,0x40 */
stl_raw(p++, 0x1120fffc); /* beqz t1,878 <outch+0x8> */
stl_raw(p++, 0x00000000); /* nop */
stl_raw(p++, 0x03e00008); /* jr ra */
stl_raw(p++, 0xa1040000); /* sb a0,0(t0) */
stl_p(p++, 0x3c08b800); /* lui t0,0xb400 */
stl_p(p++, 0x350803f8); /* ori t0,t0,0x3f8 */
stl_p(p++, 0x91090005); /* lbu t1,5(t0) */
stl_p(p++, 0x00000000); /* nop */
stl_p(p++, 0x31290040); /* andi t1,t1,0x40 */
stl_p(p++, 0x1120fffc); /* beqz t1,878 <outch+0x8> */
stl_p(p++, 0x00000000); /* nop */
stl_p(p++, 0x03e00008); /* jr ra */
stl_p(p++, 0xa1040000); /* sb a0,0(t0) */
}


@ -164,8 +164,8 @@ static void vhost_scsi_set_config(VirtIODevice *vdev,
VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
if ((uint32_t) ldl_raw(&scsiconf->sense_size) != vs->sense_size ||
(uint32_t) ldl_raw(&scsiconf->cdb_size) != vs->cdb_size) {
if ((uint32_t) ldl_p(&scsiconf->sense_size) != vs->sense_size ||
(uint32_t) ldl_p(&scsiconf->cdb_size) != vs->cdb_size) {
error_report("vhost-scsi does not support changing the sense data and CDB sizes");
exit(1);
}


@ -425,16 +425,16 @@ static void virtio_scsi_get_config(VirtIODevice *vdev,
VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);
stl_raw(&scsiconf->num_queues, s->conf.num_queues);
stl_raw(&scsiconf->seg_max, 128 - 2);
stl_raw(&scsiconf->max_sectors, s->conf.max_sectors);
stl_raw(&scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
stl_raw(&scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
stl_raw(&scsiconf->sense_size, s->sense_size);
stl_raw(&scsiconf->cdb_size, s->cdb_size);
stw_raw(&scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
stw_raw(&scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
stl_raw(&scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
stl_p(&scsiconf->num_queues, s->conf.num_queues);
stl_p(&scsiconf->seg_max, 128 - 2);
stl_p(&scsiconf->max_sectors, s->conf.max_sectors);
stl_p(&scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
stl_p(&scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
stl_p(&scsiconf->sense_size, s->sense_size);
stl_p(&scsiconf->cdb_size, s->cdb_size);
stw_p(&scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
stw_p(&scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
stl_p(&scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}
static void virtio_scsi_set_config(VirtIODevice *vdev,
@ -443,14 +443,14 @@ static void virtio_scsi_set_config(VirtIODevice *vdev,
VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
if ((uint32_t) ldl_raw(&scsiconf->sense_size) >= 65536 ||
(uint32_t) ldl_raw(&scsiconf->cdb_size) >= 256) {
if ((uint32_t) ldl_p(&scsiconf->sense_size) >= 65536 ||
(uint32_t) ldl_p(&scsiconf->cdb_size) >= 256) {
error_report("bad data written to virtio-scsi configuration space");
exit(1);
}
vs->sense_size = ldl_raw(&scsiconf->sense_size);
vs->cdb_size = ldl_raw(&scsiconf->cdb_size);
vs->sense_size = ldl_p(&scsiconf->sense_size);
vs->cdb_size = ldl_p(&scsiconf->cdb_size);
}
static uint32_t virtio_scsi_get_features(VirtIODevice *vdev,


@ -198,127 +198,8 @@ extern unsigned long reserved_va;
#define RESERVED_VA 0ul
#endif
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
(__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
(!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif
#define h2g_nocheck(x) ({ \
unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
(abi_ulong)__ret; \
})
#define h2g(x) ({ \
/* Check if given address fits target address space */ \
assert(h2g_valid(x)); \
h2g_nocheck(x); \
})
#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
#if defined(CONFIG_USER_ONLY)
/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)
#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)
#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, vt) stfq_raw(p, v)
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#endif /* defined(CONFIG_USER_ONLY) */
/* page related stuff */
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)

include/exec/cpu_ldst.h (new file, 400 lines)

@ -0,0 +1,400 @@
/*
* Software MMU support
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
/*
* Generate inline load/store functions for all MMU modes (typically
* at least _user and _kernel) as well as _data versions, for all data
* sizes.
*
* Used by target op helpers.
*
* MMU mode suffixes are defined in target cpu.h.
*/
#ifndef CPU_LDST_H
#define CPU_LDST_H
#if defined(CONFIG_USER_ONLY)
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
(__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
(!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif
#define h2g_nocheck(x) ({ \
unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
(abi_ulong)__ret; \
})
#define h2g(x) ({ \
/* Check if given address fits target address space */ \
assert(h2g_valid(x)); \
h2g_nocheck(x); \
})
#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
#if defined(CONFIG_USER_ONLY)
/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)
#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)
#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, vt) stfq_raw(p, v)
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#else
/* XXX: find something cleaner.
* Furthermore, this is false for 64 bits targets
*/
#define ldul_user ldl_user
#define ldul_kernel ldl_kernel
#define ldul_hypv ldl_hypv
#define ldul_executive ldl_executive
#define ldul_supervisor ldl_supervisor
/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stb_mmu(CPUArchState *env, target_ulong addr,
uint8_t val, int mmu_idx);
void helper_stw_mmu(CPUArchState *env, target_ulong addr,
uint16_t val, int mmu_idx);
void helper_stl_mmu(CPUArchState *env, target_ulong addr,
uint32_t val, int mmu_idx);
void helper_stq_mmu(CPUArchState *env, target_ulong addr,
uint64_t val, int mmu_idx);
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#define CPU_MMU_INDEX 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#define CPU_MMU_INDEX 1
#define MEMSUFFIX MMU_MODE1_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#if (NB_MMU_MODES >= 3)
#define CPU_MMU_INDEX 2
#define MEMSUFFIX MMU_MODE2_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 3) */
#if (NB_MMU_MODES >= 4)
#define CPU_MMU_INDEX 3
#define MEMSUFFIX MMU_MODE3_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 4) */
#if (NB_MMU_MODES >= 5)
#define CPU_MMU_INDEX 4
#define MEMSUFFIX MMU_MODE4_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 5) */
#if (NB_MMU_MODES >= 6)
#define CPU_MMU_INDEX 5
#define MEMSUFFIX MMU_MODE5_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */
#if (NB_MMU_MODES > 6)
#error "NB_MMU_MODES > 6 is not supported for now"
#endif /* (NB_MMU_MODES > 6) */
/* these access are slower, they must be as rare as possible */
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#define ldub(p) ldub_data(p)
#define ldsb(p) ldsb_data(p)
#define lduw(p) lduw_data(p)
#define ldsw(p) ldsw_data(p)
#define ldl(p) ldl_data(p)
#define ldq(p) ldq_data(p)
#define stb(p, v) stb_data(p, v)
#define stw(p, v) stw_data(p, v)
#define stl(p, v) stl_data(p, v)
#define stq(p, v) stq_data(p, v)
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MEMSUFFIX _code
#define SOFTMMU_CODE_ACCESS
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#undef SOFTMMU_CODE_ACCESS
/**
* tlb_vaddr_to_host:
* @env: CPUArchState
* @addr: guest virtual address to look up
* @access_type: 0 for read, 1 for write, 2 for execute
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual index in the TCG softmmu TLB.
* If the TLB contains a host virtual address suitable for direct RAM
* access, then return it. Otherwise (TLB miss, TLB entry is for an
* I/O access, etc) return NULL.
*
* This is the equivalent of the initial fast-path code used by
* TCG backends for guest load and store accesses.
*/
static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
int access_type, int mmu_idx)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
target_ulong tlb_addr;
uintptr_t haddr;
switch (access_type) {
case 0:
tlb_addr = tlbentry->addr_read;
break;
case 1:
tlb_addr = tlbentry->addr_write;
break;
case 2:
tlb_addr = tlbentry->addr_code;
break;
default:
g_assert_not_reached();
}
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
/* TLB entry is for a different page */
return NULL;
}
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
return NULL;
}
haddr = addr + env->tlb_table[mmu_idx][index].addend;
return (void *)haddr;
}
#endif /* defined(CONFIG_USER_ONLY) */
#endif /* CPU_LDST_H */


@ -8,7 +8,7 @@
* 32 and 64 bit cases, also generate floating point functions with
* the same size.
*
* Not used directly but included from softmmu_exec.h and exec-all.h.
* Not used directly but included from cpu_ldst.h.
*
* Copyright (c) 2003 Fabrice Bellard
*
@ -47,35 +47,18 @@
#error unsupported data size
#endif
#if ACCESS_TYPE < (NB_MMU_MODES)
#define CPU_MMU_INDEX ACCESS_TYPE
#define MMUSUFFIX _mmu
#elif ACCESS_TYPE == (NB_MMU_MODES)
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MMUSUFFIX _mmu
#elif ACCESS_TYPE == (NB_MMU_MODES + 1)
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MMUSUFFIX _cmmu
#else
#error invalid ACCESS_TYPE
#endif
#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE uint32_t
#endif
#if ACCESS_TYPE == (NB_MMU_MODES + 1)
#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
#define MMUSUFFIX _cmmu
#else
#define ADDR_READ addr_read
#define MMUSUFFIX _mmu
#endif
/* generic load/store macros */
@ -124,7 +107,7 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
}
#endif
#if ACCESS_TYPE != (NB_MMU_MODES + 1)
#ifndef SOFTMMU_CODE_ACCESS
/* generic store macro */
@ -148,9 +131,7 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
}
}
#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
#if ACCESS_TYPE != (NB_MMU_MODES + 1)
#if DATA_SIZE == 8
static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
@ -200,7 +181,7 @@ static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
}
#endif /* DATA_SIZE == 4 */
#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
#endif /* !SOFTMMU_CODE_ACCESS */
#undef RES_TYPE
#undef DATA_TYPE
@ -208,6 +189,5 @@ static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef CPU_MMU_INDEX
#undef MMUSUFFIX
#undef ADDR_READ


@ -344,29 +344,6 @@ bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define DATA_SIZE 1
#include "exec/softmmu_header.h"
#define DATA_SIZE 2
#include "exec/softmmu_header.h"
#define DATA_SIZE 4
#include "exec/softmmu_header.h"
#define DATA_SIZE 8
#include "exec/softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
#endif
#if defined(CONFIG_USER_ONLY)


@ -1,216 +0,0 @@
/*
* Software MMU support
*
* Generate inline load/store functions for all MMU modes (typically
* at least _user and _kernel) as well as _data versions, for all data
* sizes.
*
* Used by target op helpers.
*
* MMU mode suffixes are defined in target cpu.h.
*/
/* XXX: find something cleaner.
* Furthermore, this is false for 64 bits targets
*/
#define ldul_user ldl_user
#define ldul_kernel ldl_kernel
#define ldul_hypv ldl_hypv
#define ldul_executive ldl_executive
#define ldul_supervisor ldl_supervisor
/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"
#define ACCESS_TYPE 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
#define DATA_SIZE 1
#include "exec/softmmu_header.h"
#define DATA_SIZE 2
#include "exec/softmmu_header.h"
#define DATA_SIZE 4
#include "exec/softmmu_header.h"
#define DATA_SIZE 8
#include "exec/softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
#define ACCESS_TYPE 1
#define MEMSUFFIX MMU_MODE1_SUFFIX
#define DATA_SIZE 1
#include "exec/softmmu_header.h"
#define DATA_SIZE 2
#include "exec/softmmu_header.h"
#define DATA_SIZE 4
#include "exec/softmmu_header.h"
#define DATA_SIZE 8
#include "exec/softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
#if (NB_MMU_MODES >= 3)
#define ACCESS_TYPE 2
#define MEMSUFFIX MMU_MODE2_SUFFIX
#define DATA_SIZE 1
#include "exec/softmmu_header.h"
#define DATA_SIZE 2
#include "exec/softmmu_header.h"
#define DATA_SIZE 4
#include "exec/softmmu_header.h"
#define DATA_SIZE 8
#include "exec/softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 3) */
#if (NB_MMU_MODES >= 4)
#define ACCESS_TYPE 3
#define MEMSUFFIX MMU_MODE3_SUFFIX
#define DATA_SIZE 1
#include "exec/softmmu_header.h"
#define DATA_SIZE 2
#include "exec/softmmu_header.h"
#define DATA_SIZE 4
#include "exec/softmmu_header.h"
#define DATA_SIZE 8
#include "exec/softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 4) */
#if (NB_MMU_MODES >= 5)
#define ACCESS_TYPE 4
#define MEMSUFFIX MMU_MODE4_SUFFIX
#define DATA_SIZE 1
#include "exec/softmmu_header.h"
#define DATA_SIZE 2
#include "exec/softmmu_header.h"
#define DATA_SIZE 4
#include "exec/softmmu_header.h"
#define DATA_SIZE 8
#include "exec/softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 5) */
#if (NB_MMU_MODES >= 6)
#define ACCESS_TYPE 5
#define MEMSUFFIX MMU_MODE5_SUFFIX
#define DATA_SIZE 1
#include "exec/softmmu_header.h"
#define DATA_SIZE 2
#include "exec/softmmu_header.h"
#define DATA_SIZE 4
#include "exec/softmmu_header.h"
#define DATA_SIZE 8
#include "exec/softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */
#if (NB_MMU_MODES > 6)
#error "NB_MMU_MODES > 6 is not supported for now"
#endif /* (NB_MMU_MODES > 6) */
/* these access are slower, they must be as rare as possible */
#define ACCESS_TYPE (NB_MMU_MODES)
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/softmmu_header.h"
#define DATA_SIZE 2
#include "exec/softmmu_header.h"
#define DATA_SIZE 4
#include "exec/softmmu_header.h"
#define DATA_SIZE 8
#include "exec/softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
#define ldub(p) ldub_data(p)
#define ldsb(p) ldsb_data(p)
#define lduw(p) lduw_data(p)
#define ldsw(p) ldsw_data(p)
#define ldl(p) ldl_data(p)
#define ldq(p) ldq_data(p)
#define stb(p, v) stb_data(p, v)
#define stw(p, v) stw_data(p, v)
#define stl(p, v) stl_data(p, v)
#define stq(p, v) stq_data(p, v)
/**
* tlb_vaddr_to_host:
* @env: CPUArchState
* @addr: guest virtual address to look up
* @access_type: 0 for read, 1 for write, 2 for execute
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual index in the TCG softmmu TLB.
* If the TLB contains a host virtual address suitable for direct RAM
* access, then return it. Otherwise (TLB miss, TLB entry is for an
* I/O access, etc) return NULL.
*
* This is the equivalent of the initial fast-path code used by
* TCG backends for guest load and store accesses.
*/
static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
int access_type, int mmu_idx)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
target_ulong tlb_addr;
uintptr_t haddr;
switch (access_type) {
case 0:
tlb_addr = tlbentry->addr_read;
break;
case 1:
tlb_addr = tlbentry->addr_write;
break;
case 2:
tlb_addr = tlbentry->addr_code;
break;
default:
g_assert_not_reached();
}
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
/* TLB entry is for a different page */
return NULL;
}
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
return NULL;
}
haddr = addr + env->tlb_table[mmu_idx][index].addend;
return (void *)haddr;
}
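
The hit test in tlb_vaddr_to_host is the same page-match comparison the TCG fast path uses: the lookup hits only if the page-aligned guest address equals the stored entry with its flag bits (including the invalid bit) folded into the mask. Below is a minimal standalone model of that comparison, not QEMU code; the constants merely stand in for TARGET_PAGE_MASK and TLB_INVALID_MASK.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_MASK_MODEL   (~(uint64_t)0xfff)   /* stands in for TARGET_PAGE_MASK */
#define INVALID_BIT       ((uint64_t)1 << 3)   /* stands in for TLB_INVALID_MASK */

static bool tlb_hit_model(uint64_t tlb_addr, uint64_t addr)
{
    return (addr & PAGE_MASK_MODEL) ==
           (tlb_addr & (PAGE_MASK_MODEL | INVALID_BIT));
}

int main(void)
{
    uint64_t entry = 0x40000000;                       /* clean entry for page 0x40000 */
    assert(tlb_hit_model(entry, 0x40000123));          /* same page: hit */
    assert(!tlb_hit_model(entry, 0x40001123));         /* different page: miss */
    assert(!tlb_hit_model(entry | INVALID_BIT, 0x40000123)); /* invalidated: miss */
    return 0;
}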

View File

@ -80,6 +80,8 @@ struct TranslationBlock;
* @has_work: Callback for checking if there is work to do.
* @do_interrupt: Callback for interrupt handling.
* @do_unassigned_access: Callback for unassigned access handling.
* @do_unaligned_access: Callback for unaligned access handling, if
* the target defines #ALIGNED_ONLY.
* @memory_rw_debug: Callback for GDB memory access.
* @dump_state: Callback for dumping state.
* @dump_statistics: Callback for dumping statistics.
@ -112,6 +114,8 @@ typedef struct CPUClass {
bool (*has_work)(CPUState *cpu);
void (*do_interrupt)(CPUState *cpu);
CPUUnassignedAccess do_unassigned_access;
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
int is_write, int is_user, uintptr_t retaddr);
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
uint8_t *buf, int len, bool is_write);
void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
@ -544,8 +548,7 @@ void cpu_interrupt(CPUState *cpu, int mask);
#endif /* USER_ONLY */
#ifndef CONFIG_USER_ONLY
#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
bool is_write, bool is_exec,
int opaque, unsigned size)
@ -557,6 +560,14 @@ static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
}
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
int is_write, int is_user,
uintptr_t retaddr)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
return cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}
#endif
/**

View File

@ -5,6 +5,7 @@
#include <string.h>
#include "cpu.h"
#include "exec/cpu_ldst.h"
#undef DEBUG_REMAP
#ifdef DEBUG_REMAP

View File

@ -66,6 +66,7 @@
#include "trace/simple.h"
#endif
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "qmp-commands.h"
#include "hmp.h"
#include "qemu/thread.h"

View File

@ -116,6 +116,7 @@
# define helper_te_st_name helper_le_st_name
#endif
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
target_ulong addr,
@ -135,6 +136,7 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
io_mem_read(mr, physaddr, &val, 1 << SHIFT);
return val;
}
#endif
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
@ -155,7 +157,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
#endif
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
@ -186,7 +189,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
unsigned shift;
do_unaligned_access:
#ifdef ALIGNED_ONLY
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
#endif
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
@ -204,7 +208,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
#endif
@ -237,7 +242,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
#endif
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
@ -268,7 +274,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
unsigned shift;
do_unaligned_access:
#ifdef ALIGNED_ONLY
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
#endif
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
@ -286,7 +293,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
#endif
@ -357,7 +365,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
}
#endif
tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
@ -386,7 +394,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
int i;
do_unaligned_access:
#ifdef ALIGNED_ONLY
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
#endif
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
@ -405,7 +413,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
}
#endif
@ -433,7 +441,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
}
#endif
tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
@ -462,7 +470,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
int i;
do_unaligned_access:
#ifdef ALIGNED_ONLY
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
#endif
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
@ -481,7 +489,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
}
#endif
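
For an access that crosses a page boundary, the slow path above reads the two aligned words surrounding the address and recombines them using a shift derived from the low address bits. The sketch below is a standalone model of the little-endian combine only, assuming a little-endian host for the memcpy loads; it is illustrative and not QEMU code.

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    /* Bytes 0x11..0x88 laid out across two aligned 4-byte words. */
    const uint8_t mem[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
    uint32_t res1, res2;
    memcpy(&res1, mem, 4);        /* aligned word at addr1 */
    memcpy(&res2, mem + 4, 4);    /* aligned word at addr2 = addr1 + 4 */

    unsigned addr = 3;            /* unaligned access spanning both words */
    unsigned shift = (addr & 3) * 8;
    uint32_t res = (res1 >> shift) | (res2 << (32 - shift));

    uint32_t expect;
    memcpy(&expect, mem + addr, 4);   /* what a direct unaligned load returns */
    assert(res == expect);
    return 0;
}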

View File

@ -84,5 +84,7 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int alpha_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
int is_write, int is_user, uintptr_t retaddr);
#endif

View File

@ -292,6 +292,7 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
#else
cc->do_unassigned_access = alpha_cpu_unassigned_access;
cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_alpha_cpu;
#endif

View File

@ -24,6 +24,7 @@
#include "qemu-common.h"
#define TARGET_LONG_BITS 64
#define ALIGNED_ONLY
#define CPUArchState struct CPUAlphaState

View File

@ -19,7 +19,7 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
/* Softmmu support */
#ifndef CONFIG_USER_ONLY
@ -96,11 +96,11 @@ uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
return ret;
}
static void do_unaligned_access(CPUAlphaState *env, target_ulong addr,
int is_write, int is_user, uintptr_t retaddr)
void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
int is_write, int is_user, uintptr_t retaddr)
{
AlphaCPU *cpu = alpha_env_get_cpu(env);
CPUState *cs = CPU(cpu);
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
uint64_t pc;
uint32_t insn;
@ -131,23 +131,6 @@ void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
dynamic_excp(env, 0, EXCP_MCHK, 0);
}
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */

View File

@ -21,6 +21,7 @@
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

target-arm/arm_ldst.h (new file, 48 lines)
View File

@ -0,0 +1,48 @@
/*
* ARM load/store instructions for code (armeb-user support)
*
* Copyright (c) 2012 CodeSourcery, LLC
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef ARM_LDST_H
#define ARM_LDST_H
#include "exec/cpu_ldst.h"
#include "qemu/bswap.h"
/* Load an instruction and return it in the standard little-endian order */
static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
bool do_swap)
{
uint32_t insn = cpu_ldl_code(env, addr);
if (do_swap) {
return bswap32(insn);
}
return insn;
}
/* Ditto, for a halfword (Thumb) instruction */
static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
bool do_swap)
{
uint16_t insn = cpu_lduw_code(env, addr);
if (do_swap) {
return bswap16(insn);
}
return insn;
}
#endif
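
As the header comment says, these helpers return the instruction in the standard little-endian order; for an armeb-user guest the raw word read from the big-endian image has to be byte-swapped to get there. A tiny standalone illustration follows, assuming a little-endian host, a hypothetical instruction word, and GCC/clang's __builtin_bswap32 standing in for bswap32(); it is not QEMU code.

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    const uint32_t insn = 0xe3a00001;                  /* mov r0, #1 */
    const uint8_t mem[4] = { 0xe3, 0xa0, 0x00, 0x01 }; /* big-endian guest image */
    uint32_t raw;

    memcpy(&raw, mem, 4);                    /* raw load on a little-endian host */
    assert(__builtin_bswap32(raw) == insn);  /* the swap restores the encoding */
    return 0;
}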

View File

@ -1199,26 +1199,4 @@ static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)
}
}
/* Load an instruction and return it in the standard little-endian order */
static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
bool do_swap)
{
uint32_t insn = cpu_ldl_code(env, addr);
if (do_swap) {
return bswap32(insn);
}
return insn;
}
/* Ditto, for a halfword (Thumb) instruction */
static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
bool do_swap)
{
uint16_t insn = cpu_lduw_code(env, addr);
if (do_swap) {
return bswap16(insn);
}
return insn;
}
#endif

View File

@ -7,11 +7,11 @@
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#ifndef CONFIG_USER_ONLY
#include "exec/softmmu_exec.h"
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
int access_type, int is_user,
hwaddr *phys_ptr, int *prot,

View File

@ -19,6 +19,7 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
@ -56,22 +57,6 @@ uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)

View File

@ -25,6 +25,7 @@
#include "cpu.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

View File

@ -30,6 +30,7 @@
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -21,6 +21,7 @@
#include "cpu.h"
#include "mmu.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
//#define CRIS_HELPER_DEBUG

View File

@ -22,6 +22,7 @@
#include "mmu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
//#define CRIS_OP_HELPER_DEBUG
@ -35,22 +36,6 @@
#endif
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */

View File

@ -28,6 +28,7 @@
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "mmu.h"
#include "exec/cpu_ldst.h"
#include "crisv32-decode.h"
#include "exec/helper-gen.h"

View File

@ -552,8 +552,7 @@ struct X86CPUDefinition {
CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
/* partly implemented:
CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
CPUID_PSE36 (needed for Solaris) */
CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
@ -569,9 +568,7 @@ struct X86CPUDefinition {
CPUID_EXT_RDRAND */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
/* missing:
CPUID_EXT2_PDPE1GB */
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_SVM_FEATURES 0

View File

@ -249,6 +249,7 @@
#define PG_DIRTY_BIT 6
#define PG_PSE_BIT 7
#define PG_GLOBAL_BIT 8
#define PG_PSE_PAT_BIT 12
#define PG_NX_BIT 63
#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
@ -260,6 +261,9 @@
#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK 0x000ffffffffff000LL
#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
#define PG_HI_USER_MASK 0x7ff0000000000000LL
#define PG_NX_MASK (1LL << PG_NX_BIT)
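
PG_HI_RSVD_MASK is the slice of the page-frame field that lies above what the CPU can physically address, so a page-table entry with any of those bits set is malformed. A quick worked check, using the 40-bit PHYS_ADDR_MASK defined further down in this file; illustrative only.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t pg_address_mask = 0x000ffffffffff000ULL;  /* bits 51..12 */
    const uint64_t phys_addr_mask  = 0xffffffffffULL;        /* bits 39..0  */
    const uint64_t hi_rsvd = pg_address_mask & ~phys_addr_mask;
    assert(hi_rsvd == 0x000fff0000000000ULL);                 /* bits 51..40 */
    return 0;
}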
@ -1135,6 +1139,14 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
/* XXX: This value should match the one returned by CPUID
* and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xffffffffffLL
# else
# define PHYS_ADDR_MASK 0xfffffffffLL
# endif
static inline CPUX86State *cpu_init(const char *cpu_model)
{
X86CPU *cpu = cpu_x86_init(cpu_model);
@ -1151,17 +1163,24 @@ static inline CPUX86State *cpu_init(const char *cpu_model)
#define cpudef_setup x86_cpudef_setup
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE0_SUFFIX _ksmap
#define MMU_MODE1_SUFFIX _user
#define MMU_MODE2_SUFFIX _ksmap /* Kernel with SMAP override */
#define MMU_KERNEL_IDX 0
#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
#define MMU_KSMAP_IDX 0
#define MMU_USER_IDX 1
#define MMU_KSMAP_IDX 2
static inline int cpu_mmu_index (CPUX86State *env)
#define MMU_KNOSMAP_IDX 2
static inline int cpu_mmu_index(CPUX86State *env)
{
return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK))
? MMU_KSMAP_IDX : MMU_KERNEL_IDX;
(!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
}
static inline int cpu_mmu_index_kernel(CPUX86State *env)
{
return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
}
#define CC_DST (env->cc_dst)
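
The net effect of the two helpers above: CPL 3 always takes the user index, explicit accesses take the no-SMAP index unless SMAP is enabled and AC is clear, and implicit kernel accesses (cpu_mmu_index_kernel) honour AC only while CPL < 3. Below is a minimal standalone model of that selection with plain flags; the names and index values are arbitrary and it is not QEMU code.

#include <assert.h>
#include <stdbool.h>

enum { KSMAP = 0, USER = 1, KNOSMAP = 2 };   /* index values are arbitrary here */

static int mmu_index(int cpl, bool smap, bool ac)
{
    if (cpl == 3) {
        return USER;
    }
    return (!smap || ac) ? KNOSMAP : KSMAP;
}

static int mmu_index_kernel(int cpl, bool smap, bool ac)
{
    if (!smap) {
        return KNOSMAP;
    }
    return (cpl < 3 && ac) ? KNOSMAP : KSMAP;
}

int main(void)
{
    assert(mmu_index(3, true, false) == USER);      /* CPL 3 is always user */
    assert(mmu_index(0, false, false) == KNOSMAP);  /* SMAP disabled */
    assert(mmu_index(0, true, true) == KNOSMAP);    /* AC=1 overrides SMAP */
    assert(mmu_index(0, true, false) == KSMAP);     /* SMAP enforced */
    /* Implicit kernel accesses from CPL 3 ignore AC. */
    assert(mmu_index_kernel(3, true, true) == KSMAP);
    return 0;
}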

View File

@ -22,10 +22,7 @@
#include "exec/helper-proto.h"
#include "qemu/aes.h"
#include "qemu/host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#include "exec/cpu_ldst.h"
#define FPU_RC_MASK 0xc00
#define FPU_RC_NEAR 0x000

View File

@ -510,14 +510,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
#else
/* XXX: This value should match the one returned by CPUID
* and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
/* return value:
* -1 = cannot handle fault
* 0 = nothing more to do
@ -530,10 +522,12 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
CPUX86State *env = &cpu->env;
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, is_write, is_user;
int error_code = 0;
int is_dirty, prot, page_size, is_write, is_user;
hwaddr paddr;
uint64_t rsvd_mask = PG_HI_RSVD_MASK;
uint32_t page_offset;
target_ulong vaddr, virt_addr;
target_ulong vaddr;
is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
@ -550,12 +544,15 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
pte = (uint32_t)pte;
}
#endif
virt_addr = addr & TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
page_size = 4096;
goto do_mapping;
}
if (!(env->efer & MSR_EFER_NXE)) {
rsvd_mask |= PG_NX_MASK;
}
if (env->cr[4] & CR4_PAE_MASK) {
uint64_t pde, pdpe;
target_ulong pdpe_addr;
@ -577,34 +574,37 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
env->a20_mask;
pml4e = ldq_phys(cs->as, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
error_code = PG_ERROR_RSVD_MASK;
goto do_fault;
if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
goto do_fault_rsvd;
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
}
ptep = pml4e ^ PG_NX_MASK;
pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
env->a20_mask;
pdpe = ldq_phys(cs->as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
error_code = PG_ERROR_RSVD_MASK;
goto do_fault;
if (pdpe & rsvd_mask) {
goto do_fault_rsvd;
}
ptep &= pdpe ^ PG_NX_MASK;
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
}
if (pdpe & PG_PSE_MASK) {
/* 1 GB page */
page_size = 1024 * 1024 * 1024;
pte_addr = pdpe_addr;
pte = pdpe;
goto do_check_protect;
}
} else
#endif
{
@ -613,134 +613,49 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
env->a20_mask;
pdpe = ldq_phys(cs->as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
rsvd_mask |= PG_HI_USER_MASK | PG_NX_MASK;
if (pdpe & rsvd_mask) {
goto do_fault_rsvd;
}
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
}
pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
env->a20_mask;
pde = ldq_phys(cs->as, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
error_code = PG_ERROR_RSVD_MASK;
goto do_fault;
if (pde & rsvd_mask) {
goto do_fault_rsvd;
}
ptep &= pde ^ PG_NX_MASK;
if (pde & PG_PSE_MASK) {
/* 2 MB page */
page_size = 2048 * 1024;
ptep ^= PG_NX_MASK;
if ((ptep & PG_NX_MASK) && is_write1 == 2) {
goto do_fault_protect;
}
switch (mmu_idx) {
case MMU_USER_IDX:
if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
case MMU_KERNEL_IDX:
if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
/* fall through */
case MMU_KSMAP_IDX:
if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
default: /* cannot happen */
break;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
stl_phys_notdirty(cs->as, pde_addr, pde);
}
/* align to page_size */
pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
virt_addr = addr & ~(page_size - 1);
} else {
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pde_addr, pde);
}
pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
env->a20_mask;
pte = ldq_phys(cs->as, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
error_code = PG_ERROR_RSVD_MASK;
goto do_fault;
}
/* combine pde and pte nx, user and rw protections */
ptep &= pte ^ PG_NX_MASK;
ptep ^= PG_NX_MASK;
if ((ptep & PG_NX_MASK) && is_write1 == 2)
goto do_fault_protect;
switch (mmu_idx) {
case MMU_USER_IDX:
if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
case MMU_KERNEL_IDX:
if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
/* fall through */
case MMU_KSMAP_IDX:
if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
default: /* cannot happen */
break;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
stl_phys_notdirty(cs->as, pte_addr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
pte = pte & (PHYS_ADDR_MASK | 0xfff);
pte_addr = pde_addr;
pte = pde;
goto do_check_protect;
}
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pde_addr, pde);
}
pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
env->a20_mask;
pte = ldq_phys(cs->as, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
if (pte & rsvd_mask) {
goto do_fault_rsvd;
}
/* combine pde and pte nx, user and rw protections */
ptep &= pte ^ PG_NX_MASK;
page_size = 4096;
} else {
uint32_t pde;
@ -749,114 +664,95 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
env->a20_mask;
pde = ldl_phys(cs->as, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
ptep = pde | PG_NX_MASK;
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
page_size = 4096 * 1024;
switch (mmu_idx) {
case MMU_USER_IDX:
if (!(pde & PG_USER_MASK)) {
goto do_fault_protect;
}
if (is_write && !(pde & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
pte_addr = pde_addr;
case MMU_KERNEL_IDX:
if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
(pde & PG_USER_MASK)) {
goto do_fault_protect;
}
/* fall through */
case MMU_KSMAP_IDX:
if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
(pde & PG_USER_MASK)) {
goto do_fault_protect;
}
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(pde & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
default: /* cannot happen */
break;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
stl_phys_notdirty(cs->as, pde_addr, pde);
}
pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
ptep = pte;
virt_addr = addr & ~(page_size - 1);
} else {
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pde_addr, pde);
}
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
env->a20_mask;
pte = ldl_phys(cs->as, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
/* combine pde and pte user and rw protections */
ptep = pte & pde;
switch (mmu_idx) {
case MMU_USER_IDX:
if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
case MMU_KERNEL_IDX:
if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
/* fall through */
case MMU_KSMAP_IDX:
if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
default: /* cannot happen */
break;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
stl_phys_notdirty(cs->as, pte_addr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
/* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
* Leave bits 20-13 in place for setting accessed/dirty bits below.
*/
pte = pde | ((pde & 0x1fe000) << (32 - 13));
rsvd_mask = 0x200000;
goto do_check_protect_pse36;
}
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pde_addr, pde);
}
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
env->a20_mask;
pte = ldl_phys(cs->as, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
/* combine pde and pte user and rw protections */
ptep &= pte | PG_NX_MASK;
page_size = 4096;
rsvd_mask = 0;
}
do_check_protect:
rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
if (pte & rsvd_mask) {
goto do_fault_rsvd;
}
ptep ^= PG_NX_MASK;
if ((ptep & PG_NX_MASK) && is_write1 == 2) {
goto do_fault_protect;
}
switch (mmu_idx) {
case MMU_USER_IDX:
if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
case MMU_KSMAP_IDX:
if (is_write1 != 2 && (ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
/* fall through */
case MMU_KNOSMAP_IDX:
if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
(ptep & PG_USER_MASK)) {
goto do_fault_protect;
}
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
}
break;
default: /* cannot happen */
break;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty) {
pte |= PG_DIRTY_MASK;
}
stl_phys_notdirty(cs->as, pte_addr, pte);
}
/* the page can be put in the TLB */
prot = PAGE_READ;
if (!(ptep & PG_NX_MASK))
if (!(ptep & PG_NX_MASK) &&
!((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK))) {
prot |= PAGE_EXEC;
}
if (pte & PG_DIRTY_MASK) {
/* only set write access if already dirty... otherwise wait
for dirty access */
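
The do_check_protect block above folds the old per-level switches into one place: after ptep has been XORed with PG_NX_MASK a set NX bit blocks instruction fetches, under the _ksmap index any data access to a user page faults, SMEP rejects supervisor fetches from user pages, and CR0.WP makes read-only pages fault on supervisor writes. A standalone model of that decision follows, with simplified flags and arbitrary index values; it is a sketch, not QEMU code.

#include <assert.h>
#include <stdbool.h>

enum { USER_IDX, KSMAP_IDX, KNOSMAP_IDX };

static bool prot_fault(unsigned idx, bool nx, bool user_pg, bool rw_pg,
                       int is_write1, bool wp, bool smep)
{
    bool is_write = (is_write1 == 1);   /* is_write1 == 2 means an ifetch */

    if (nx && is_write1 == 2) {
        return true;                    /* no-execute page */
    }
    switch (idx) {
    case USER_IDX:
        return !user_pg || (is_write && !rw_pg);
    case KSMAP_IDX:
        if (is_write1 != 2 && user_pg) {
            return true;                /* SMAP: no data access to user pages */
        }
        /* fall through */
    case KNOSMAP_IDX:
        if (is_write1 == 2 && smep && user_pg) {
            return true;                /* SMEP: no ifetch from user pages */
        }
        return wp && is_write && !rw_pg;
    }
    return false;
}

int main(void)
{
    /* Supervisor read of a user page faults only when SMAP is enforced. */
    assert(prot_fault(KSMAP_IDX, false, true, true, 0, true, true));
    assert(!prot_fault(KNOSMAP_IDX, false, true, true, 0, true, true));
    /* Supervisor ifetch from a user page faults with SMEP regardless. */
    assert(prot_fault(KNOSMAP_IDX, false, true, true, 2, true, true));
    /* User write to a read-only page always faults. */
    assert(prot_fault(USER_IDX, false, true, false, 1, false, false));
    return 0;
}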
@ -872,16 +768,21 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
do_mapping:
pte = pte & env->a20_mask;
/* align to page_size */
pte &= PG_ADDRESS_MASK & ~(page_size - 1);
/* Even if 4MB pages, we map only one 4KB page in the cache to
avoid filling it too fast */
page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
vaddr = virt_addr + page_offset;
vaddr = addr & TARGET_PAGE_MASK;
page_offset = vaddr & (page_size - 1);
paddr = pte + page_offset;
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
return 0;
do_fault_rsvd:
error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
error_code = PG_ERROR_P_MASK;
error_code |= PG_ERROR_P_MASK;
do_fault:
error_code |= (is_write << PG_ERROR_W_BIT);
if (is_user)
@ -910,7 +811,6 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
CPUX86State *env = &cpu->env;
target_ulong pde_addr, pte_addr;
uint64_t pte;
hwaddr paddr;
uint32_t page_offset;
int page_size;
@ -928,25 +828,24 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
/* test virtual address sign extension */
sext = (int64_t)addr >> 47;
if (sext != 0 && sext != -1)
if (sext != 0 && sext != -1) {
return -1;
}
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
pml4e = ldq_phys(cs->as, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK))
if (!(pml4e & PG_PRESENT_MASK)) {
return -1;
pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
}
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
(((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
pdpe = ldq_phys(cs->as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
if (!(pdpe & PG_PRESENT_MASK)) {
return -1;
}
if (pdpe & PG_PSE_MASK) {
page_size = 1024 * 1024 * 1024;
pte = pdpe & ~( (page_size - 1) & ~0xfff);
pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
pte = pdpe;
goto out;
}
@ -960,7 +859,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return -1;
}
pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
pde_addr = ((pdpe & PG_ADDRESS_MASK) +
(((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
pde = ldq_phys(cs->as, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
@ -969,17 +868,17 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (pde & PG_PSE_MASK) {
/* 2 MB page */
page_size = 2048 * 1024;
pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
pte = pde;
} else {
/* 4 KB page */
pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
pte_addr = ((pde & PG_ADDRESS_MASK) +
(((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
page_size = 4096;
pte = ldq_phys(cs->as, pte_addr);
}
pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
if (!(pte & PG_PRESENT_MASK))
if (!(pte & PG_PRESENT_MASK)) {
return -1;
}
} else {
uint32_t pde;
@ -989,14 +888,15 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (!(pde & PG_PRESENT_MASK))
return -1;
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
pte = pde & ~0x003ff000; /* align to 4MB */
pte = pde | ((pde & 0x1fe000) << (32 - 13));
page_size = 4096 * 1024;
} else {
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
pte = ldl_phys(cs->as, pte_addr);
if (!(pte & PG_PRESENT_MASK))
if (!(pte & PG_PRESENT_MASK)) {
return -1;
}
page_size = 4096;
}
pte = pte & env->a20_mask;
@ -1005,9 +905,9 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
#ifdef TARGET_X86_64
out:
#endif
pte &= PG_ADDRESS_MASK & ~(page_size - 1);
page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
return paddr;
return pte | page_offset;
}
void hw_breakpoint_insert(CPUX86State *env, int index)
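
The PSE-36 path above rebuilds a 40-bit physical address from a 32-bit 4MB PDE: bits 31-22 stay in place, bits 20-13 are shifted up to become bits 39-32, then the low 22 bits are masked off and the in-page offset is added back. A worked standalone example with an illustrative PDE value, not QEMU code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* 4MB page backed by physical frame 0x3'8040'0000:
     * PDE bits 31-22 carry phys bits 31-22, PDE bits 20-13 carry phys
     * bits 39-32, and 0x83 sets present, rw and PSE. */
    uint64_t pde = 0x80406083;
    uint64_t pte = pde | ((pde & 0x1fe000) << (32 - 13));
    uint64_t page_size = 4096 * 1024;
    uint64_t address_mask = 0x000ffffffffff000ULL;   /* PG_ADDRESS_MASK */

    pte &= address_mask & ~(page_size - 1);
    assert(pte == 0x380400000ULL);                   /* 40-bit frame address */

    uint64_t addr = 0xC0123456;                      /* guest virtual address */
    uint64_t page_offset = (addr & ~0xfffULL) & (page_size - 1);
    assert((pte | page_offset) == 0x380523000ULL);   /* 4KB granule mapped */
    return 0;
}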

View File

@ -19,10 +19,7 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#include "exec/cpu_ldst.h"
/* broken thread support */
@ -109,24 +106,6 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
}
}
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
#endif
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not

View File

@ -20,52 +20,7 @@
#include "cpu.h"
#include "exec/ioport.h"
#include "exec/helper-proto.h"
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size)
{
int io_offset, val, mask;
/* TSS must be a valid 32 bit one */
if (!(env->tr.flags & DESC_P_MASK) ||
((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
env->tr.limit < 103) {
goto fail;
}
io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
io_offset += (addr >> 3);
/* Note: the check needs two bytes */
if ((io_offset + 1) > env->tr.limit) {
goto fail;
}
val = cpu_lduw_kernel(env, env->tr.base + io_offset);
val >>= (addr & 7);
mask = (1 << size) - 1;
/* all bits must be zero to allow the I/O */
if ((val & mask) != 0) {
fail:
raise_exception_err(env, EXCP0D_GPF, 0);
}
}
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
check_io(env, t0, 1);
}
void helper_check_iow(CPUX86State *env, uint32_t t0)
{
check_io(env, t0, 2);
}
void helper_check_iol(CPUX86State *env, uint32_t t0)
{
check_io(env, t0, 4);
}
#include "exec/cpu_ldst.h"
void helper_outb(uint32_t port, uint32_t data)
{

View File

@ -21,13 +21,10 @@
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
//#define DEBUG_PCALL
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
@ -37,6 +34,24 @@
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
#ifndef CONFIG_USER_ONLY
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
/* return non zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
uint32_t *e2_ptr, int selector)
@ -2471,3 +2486,45 @@ void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
}
}
#endif
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size)
{
int io_offset, val, mask;
/* TSS must be a valid 32 bit one */
if (!(env->tr.flags & DESC_P_MASK) ||
((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
env->tr.limit < 103) {
goto fail;
}
io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
io_offset += (addr >> 3);
/* Note: the check needs two bytes */
if ((io_offset + 1) > env->tr.limit) {
goto fail;
}
val = cpu_lduw_kernel(env, env->tr.base + io_offset);
val >>= (addr & 7);
mask = (1 << size) - 1;
/* all bits must be zero to allow the I/O */
if ((val & mask) != 0) {
fail:
raise_exception_err(env, EXCP0D_GPF, 0);
}
}
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
check_io(env, t0, 1);
}
void helper_check_iow(CPUX86State *env, uint32_t t0)
{
check_io(env, t0, 2);
}
void helper_check_iol(CPUX86State *env, uint32_t t0)
{
check_io(env, t0, 4);
}
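
check_io above implements the TSS I/O permission bitmap test: the bitmap offset is read from TSS+0x66, two bytes are always fetched so a port range that straddles a byte boundary is still covered, and the access is allowed only if every bit for the touched ports is clear. A standalone model with a hypothetical bitmap follows; it is a sketch, not QEMU code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool io_allowed(const uint8_t *bitmap, int port, int size)
{
    /* Two bytes are read so a check spanning a byte boundary still works. */
    uint16_t val = bitmap[port >> 3] | (bitmap[(port >> 3) + 1] << 8);
    uint16_t mask = (1 << size) - 1;
    return ((val >> (port & 7)) & mask) == 0;
}

int main(void)
{
    uint8_t bitmap[16] = { 0 };
    bitmap[0] = 0x10;                      /* deny port 4 */
    assert(io_allowed(bitmap, 0x20, 1));   /* byte access to port 0x20 */
    assert(!io_allowed(bitmap, 4, 1));     /* port 4 is denied */
    assert(!io_allowed(bitmap, 3, 2));     /* word access at 3 touches port 4 */
    return 0;
}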

View File

@ -20,10 +20,7 @@
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#include "exec/cpu_ldst.h"
/* Secure Virtual Machine helpers */

View File

@ -27,6 +27,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -6,23 +6,13 @@
#include "hw/lm32/lm32_pic.h"
#include "hw/char/lm32_juart.h"
#include "exec/softmmu_exec.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
void raise_exception(CPULM32State *env, int index)
{
CPUState *cs = CPU(lm32_env_get_cpu(env));

View File

@ -22,6 +22,7 @@
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "hw/lm32/lm32_pic.h"
#include "exec/helper-gen.h"

View File

@ -18,6 +18,7 @@
*/
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#if defined(CONFIG_USER_ONLY)
@ -34,22 +35,6 @@ void do_interrupt_m68k_hardirq(CPUM68KState *env)
extern int semihosting_enabled;
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */

View File

@ -22,6 +22,7 @@
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -22,21 +22,11 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#define D(x)
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not

View File

@ -23,6 +23,7 @@
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#define SIM_COMPAT 0

View File

@ -80,5 +80,7 @@ void mips_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
int is_write, int is_user, uintptr_t retaddr);
#endif

View File

@ -137,6 +137,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
#else
cc->do_unassigned_access = mips_cpu_unassigned_access;
cc->do_unaligned_access = mips_cpu_do_unaligned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
#endif

View File

@ -3,6 +3,7 @@
//#define DEBUG_OP
#define ALIGNED_ONLY
#define TARGET_HAS_ICE 1
#define ELF_MACHINE EM_MIPS

View File

@ -19,12 +19,8 @@
#include <stdlib.h>
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
@ -2128,28 +2124,12 @@ void helper_wait(CPUMIPSState *env)
#if !defined(CONFIG_USER_ONLY)
static void QEMU_NORETURN do_unaligned_access(CPUMIPSState *env,
target_ulong addr, int is_write,
int is_user, uintptr_t retaddr);
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
int is_write, int is_user, uintptr_t retaddr)
void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
int is_write, int is_user, uintptr_t retaddr)
{
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
env->CP0_BadVAddr = addr;
do_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL, retaddr);
}

View File

@ -24,6 +24,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -25,24 +25,10 @@
#include "cpu.h"
#include "mmu.h"
#include "exec/exec-all.h"
#include "exec/softmmu_exec.h"
#include "exec/cpu_ldst.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */

View File

@ -32,6 +32,7 @@
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -19,22 +19,9 @@
*/
#include "cpu.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
int mmu_idx, uintptr_t retaddr)

View File

@ -26,6 +26,7 @@
#include "qemu/log.h"
#include "config.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -18,6 +18,7 @@
*/
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "helper_regs.h"

View File

@ -21,10 +21,7 @@
#include "exec/helper-proto.h"
#include "helper_regs.h"
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#include "exec/cpu_ldst.h"
//#define DEBUG_OP

View File

@ -22,6 +22,7 @@
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/cpu_ldst.h"
//#define DEBUG_MMU
//#define DEBUG_BATS
@ -2903,22 +2904,6 @@ void helper_booke206_tlbflush(CPUPPCState *env, uint32_t type)
/*****************************************************************************/
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */

View File

@ -22,6 +22,7 @@
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -19,12 +19,9 @@
*/
#include "cpu.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)

View File

@ -21,6 +21,7 @@
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

View File

@ -20,25 +20,11 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not

View File

@ -28,9 +28,9 @@
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include "exec/cpu_ldst.h"
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"

View File

@ -33,6 +33,7 @@
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
/* global register indexes */
static TCGv_ptr cpu_env;

View File

@ -20,23 +20,9 @@
#include <stdlib.h>
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)

View File

@ -23,6 +23,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -81,5 +81,8 @@ void sparc_cpu_dump_state(CPUState *cpu, FILE *f,
hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int sparc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu,
vaddr addr, int is_write,
int is_user, uintptr_t retaddr);
#endif

View File

@ -825,6 +825,7 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault;
#else
cc->do_unassigned_access = sparc_cpu_unassigned_access;
cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
#endif

View File

@ -5,6 +5,8 @@
#include "qemu-common.h"
#include "qemu/bswap.h"
#define ALIGNED_ONLY
#if !defined(TARGET_SPARC64)
#define TARGET_LONG_BITS 32
#define TARGET_DPREGS 16

View File

@ -19,6 +19,7 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
//#define DEBUG_MMU
//#define DEBUG_MXCC
@ -64,27 +65,6 @@
#define QT0 (env->qt0)
#define QT1 (env->qt1)
#if !defined(CONFIG_USER_ONLY)
static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
target_ulong addr, int is_write,
int is_user, uintptr_t retaddr);
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
#endif
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
/* Calculates TSB pointer value for fault page size 8k or 64k */
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
@ -2425,11 +2405,13 @@ void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
#endif
#if !defined(CONFIG_USER_ONLY)
static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
target_ulong addr, int is_write,
int is_user, uintptr_t retaddr)
void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs,
vaddr addr, int is_write,
int is_user, uintptr_t retaddr)
{
SPARCCPU *cpu = sparc_env_get_cpu(env);
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
#ifdef DEBUG_UNALIGNED
printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
"\n", addr, env->pc);

View File

@ -28,6 +28,7 @@
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

View File

@ -10,6 +10,7 @@
*/
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
@ -241,22 +242,6 @@ uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
}
#ifndef CONFIG_USER_ONLY
#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
int mmu_idx, uintptr_t retaddr)
{

View File

@ -18,6 +18,7 @@
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -89,5 +89,7 @@ void xtensa_cpu_dump_state(CPUState *cpu, FILE *f,
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int xtensa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
int is_write, int is_user, uintptr_t retaddr);
#endif

View File

@ -148,6 +148,7 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
#endif
dc->vmsd = &vmstate_xtensa_cpu;

View File

@ -28,6 +28,7 @@
#ifndef CPU_XTENSA_H
#define CPU_XTENSA_H
#define ALIGNED_ONLY
#define TARGET_LONG_BITS 32
#define ELF_MACHINE EM_XTENSA

View File

@ -28,31 +28,15 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/softmmu_exec.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"
static void do_unaligned_access(CPUXtensaState *env,
target_ulong addr, int is_write, int is_user, uintptr_t retaddr);
#define ALIGNED_ONLY
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "exec/softmmu_template.h"
#define SHIFT 1
#include "exec/softmmu_template.h"
#define SHIFT 2
#include "exec/softmmu_template.h"
#define SHIFT 3
#include "exec/softmmu_template.h"
static void do_unaligned_access(CPUXtensaState *env,
target_ulong addr, int is_write, int is_user, uintptr_t retaddr)
void xtensa_cpu_do_unaligned_access(CPUState *cs,
vaddr addr, int is_write, int is_user, uintptr_t retaddr)
{
XtensaCPU *cpu = xtensa_env_get_cpu(env);
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;
if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
!xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {

View File

@ -36,6 +36,7 @@
#include "tcg-op.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

View File

@ -909,19 +909,6 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
# define helper_ret_stq_mmu helper_le_stq_mmu
#endif
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stb_mmu(CPUArchState *env, target_ulong addr,
uint8_t val, int mmu_idx);
void helper_stw_mmu(CPUArchState *env, target_ulong addr,
uint16_t val, int mmu_idx);
void helper_stl_mmu(CPUArchState *env, target_ulong addr,
uint32_t val, int mmu_idx);
void helper_stq_mmu(CPUArchState *env, target_ulong addr,
uint64_t val, int mmu_idx);
#endif /* CONFIG_SOFTMMU */
#endif /* TCG_H */

tci.c (1 change)
View File

@ -26,6 +26,7 @@
#include "qemu-common.h"
#include "exec/exec-all.h" /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg-op.h"
/* Marker for missing code. */

View File

@ -21,6 +21,7 @@
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#undef EAX
#undef ECX