Merge remote-tracking branch 'sstabellini/xen-2013-09-25' into staging

# By Anthony PERARD (2) and Liu, Jinsong (2)
# Via Stefano Stabellini
* sstabellini/xen-2013-09-25:
  xen: Enable cpu-hotplug on xenfv machine.
  xen: Fix vcpu initialization.
  qemu: Add qemu xen logic for Xen HVM S3 resume
  qemu: Adjust qemu wakeup

Message-id: alpine.DEB.2.02.1309251749180.5498@kaball.uk.xensource.com
Anthony Liguori 2013-09-30 17:14:10 -05:00
commit 3469a60d9f
5 changed files with 26 additions and 14 deletions

@@ -324,12 +324,13 @@ static void acpi_notify_wakeup(Notifier *notifier, void *data)
             (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_TIMER_STATUS);
         break;
     case QEMU_WAKEUP_REASON_OTHER:
-    default:
         /* ACPI_BITMASK_WAKE_STATUS should be set on resume.
            Pretend that resume was caused by power button */
         ar->pm1.evt.sts |=
             (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_POWER_BUTTON_STATUS);
         break;
+    default:
+        break;
     }
 }

@@ -741,6 +741,7 @@ static QEMUMachine xenfv_machine = {
     .init = pc_xen_hvm_init,
     .max_cpus = HVM_MAX_VCPUS,
     .default_machine_opts = "accel=xen",
+    .hot_add_cpu = pc_hot_add_cpu,
 };
 #endif

@@ -41,9 +41,11 @@ int vm_stop(RunState state);
 int vm_stop_force_state(RunState state);
 
 typedef enum WakeupReason {
-    QEMU_WAKEUP_REASON_OTHER = 0,
+    /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */
+    QEMU_WAKEUP_REASON_NONE = 0,
     QEMU_WAKEUP_REASON_RTC,
     QEMU_WAKEUP_REASON_PMTIMER,
+    QEMU_WAKEUP_REASON_OTHER,
 } WakeupReason;
 
 void qemu_system_reset_request(void);
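
Note on the reordering: with QEMU_WAKEUP_REASON_NONE pinned to 0, the new default mask in vl.c below, ~(1 << QEMU_WAKEUP_REASON_NONE), clears only the placeholder bit and leaves every real wakeup source enabled. A minimal standalone sketch of that interaction (illustrative only, not part of the series; it assumes the mask is consumed with a wakeup_reason_mask & (1 << reason) test, as qemu_system_wakeup_request does in vl.c):

    #include <stdint.h>
    #include <stdio.h>

    typedef enum WakeupReason {
        /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */
        QEMU_WAKEUP_REASON_NONE = 0,
        QEMU_WAKEUP_REASON_RTC,
        QEMU_WAKEUP_REASON_PMTIMER,
        QEMU_WAKEUP_REASON_OTHER,
    } WakeupReason;

    int main(void)
    {
        /* same default as the new vl.c initializer: only bit 0 (NONE) is cleared */
        uint32_t wakeup_reason_mask = ~(1U << QEMU_WAKEUP_REASON_NONE);

        for (int r = QEMU_WAKEUP_REASON_NONE; r <= QEMU_WAKEUP_REASON_OTHER; r++) {
            printf("reason %d: %s\n", r,
                   (wakeup_reason_mask & (1U << r)) ? "wakeup allowed" : "masked");
        }
        return 0;
    }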

vl.c

@@ -1718,14 +1718,14 @@ static pid_t shutdown_pid;
 static int powerdown_requested;
 static int debug_requested;
 static int suspend_requested;
-static int wakeup_requested;
+static WakeupReason wakeup_reason;
 static NotifierList powerdown_notifiers =
     NOTIFIER_LIST_INITIALIZER(powerdown_notifiers);
 static NotifierList suspend_notifiers =
     NOTIFIER_LIST_INITIALIZER(suspend_notifiers);
 static NotifierList wakeup_notifiers =
     NOTIFIER_LIST_INITIALIZER(wakeup_notifiers);
-static uint32_t wakeup_reason_mask = ~0;
+static uint32_t wakeup_reason_mask = ~(1 << QEMU_WAKEUP_REASON_NONE);
 static RunState vmstop_requested = RUN_STATE_MAX;
 
 int qemu_shutdown_requested_get(void)
@@ -1775,11 +1775,9 @@ static int qemu_suspend_requested(void)
     return r;
 }
 
-static int qemu_wakeup_requested(void)
+static WakeupReason qemu_wakeup_requested(void)
 {
-    int r = wakeup_requested;
-    wakeup_requested = 0;
-    return r;
+    return wakeup_reason;
 }
 
 static int qemu_powerdown_requested(void)
@@ -1896,8 +1894,7 @@ void qemu_system_wakeup_request(WakeupReason reason)
         return;
     }
     runstate_set(RUN_STATE_RUNNING);
-    notifier_list_notify(&wakeup_notifiers, &reason);
-    wakeup_requested = 1;
+    wakeup_reason = reason;
     qemu_notify_event();
 }
@@ -1989,6 +1986,8 @@ static bool main_loop_should_exit(void)
         pause_all_vcpus();
         cpu_synchronize_all_states();
         qemu_system_reset(VMRESET_SILENT);
+        notifier_list_notify(&wakeup_notifiers, &wakeup_reason);
+        wakeup_reason = QEMU_WAKEUP_REASON_NONE;
         resume_all_vcpus();
         monitor_protocol_event(QEVENT_WAKEUP, NULL);
     }

@@ -98,6 +98,7 @@ typedef struct XenIOState {
     Notifier exit;
     Notifier suspend;
+    Notifier wakeup;
 } XenIOState;
 
 /* Xen specific function for piix pci */
@@ -613,13 +614,13 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
     }
 
     if (port != -1) {
-        for (i = 0; i < smp_cpus; i++) {
+        for (i = 0; i < max_cpus; i++) {
             if (state->ioreq_local_port[i] == port) {
                 break;
             }
         }
 
-        if (i == smp_cpus) {
+        if (i == max_cpus) {
             hw_error("Fatal error while trying to get io event!\n");
         }
@@ -1060,6 +1061,11 @@ static void xen_read_physmap(XenIOState *state)
     free(entries);
 }
 
+static void xen_wakeup_notifier(Notifier *notifier, void *data)
+{
+    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
+}
+
 int xen_hvm_init(MemoryRegion **ram_memory)
 {
     int i, rc;
@@ -1089,6 +1095,9 @@ int xen_hvm_init(MemoryRegion **ram_memory)
     state->suspend.notify = xen_suspend_notifier;
     qemu_register_suspend_notifier(&state->suspend);
 
+    state->wakeup.notify = xen_wakeup_notifier;
+    qemu_register_wakeup_notifier(&state->wakeup);
+
     xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
     DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
     state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
@@ -1106,10 +1115,10 @@ int xen_hvm_init(MemoryRegion **ram_memory)
         hw_error("map buffered IO page returned error %d", errno);
     }
 
-    state->ioreq_local_port = g_malloc0(smp_cpus * sizeof (evtchn_port_t));
+    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));
 
     /* FIXME: how about if we overflow the page here? */
-    for (i = 0; i < smp_cpus; i++) {
+    for (i = 0; i < max_cpus; i++) {
         rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                         xen_vcpu_eport(state->shared_page, i));
         if (rc == -1) {
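
Taken together, the series wires S3 resume up in two halves: vl.c resets the machine and then runs the wakeup notifiers with the WakeupReason, and the Xen notifier above clears HVM_PARAM_ACPI_S_STATE at that point. A rough sketch of how another backend could hook the same spot (hypothetical names, assumes the QEMU-internal APIs shown in the diffs above, and only builds inside the QEMU tree):

    #include "qemu/notify.h"
    #include "sysemu/sysemu.h"

    static Notifier my_wakeup_notifier;

    /* Runs from the main loop after qemu_system_reset() on resume;
     * data points at the WakeupReason that triggered the wakeup. */
    static void my_wakeup_cb(Notifier *notifier, void *data)
    {
        WakeupReason reason = *(WakeupReason *)data;

        /* backend-specific resume work goes here, analogous to
         * xen_wakeup_notifier() clearing HVM_PARAM_ACPI_S_STATE */
        (void)reason;
    }

    static void my_backend_init(void)
    {
        my_wakeup_notifier.notify = my_wakeup_cb;
        qemu_register_wakeup_notifier(&my_wakeup_notifier);
    }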