diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 90fd50acfc..784e67d77e 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -441,12 +441,20 @@ static MemoryListener hvf_memory_listener = {
 };
 
 void hvf_reset_vcpu(CPUState *cpu) {
+    uint64_t pdpte[4] = {0, 0, 0, 0};
+    int i;
 
     /* TODO: this shouldn't be needed; there is already a call to
      * cpu_synchronize_all_post_reset in vl.c
      */
     wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
     wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
+
+    /* Initialize PDPTE */
+    for (i = 0; i < 4; i++) {
+        wvmcs(cpu->hvf_fd, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
+    }
+
     macvm_set_cr0(cpu->hvf_fd, 0x60000010);
 
     wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
index 5dc52ecad6..eb8894cd58 100644
--- a/target/i386/hvf/vmx.h
+++ b/target/i386/hvf/vmx.h
@@ -121,6 +121,7 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
     uint64_t pdpte[4] = {0, 0, 0, 0};
     uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
     uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
+    uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
 
     if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
         !(efer & MSR_EFER_LME)) {
@@ -128,18 +129,15 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
                          rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                          MEMTXATTRS_UNSPECIFIED,
                          (uint8_t *)pdpte, 32, 0);
+        /* Only set PDPTE when appropriate. */
+        for (i = 0; i < 4; i++) {
+            wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
+        }
     }
 
-    for (i = 0; i < 4; i++) {
-        wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
-    }
-
-    wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);
+    wvmcs(vcpu, VMCS_CR0_MASK, mask);
     wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
 
-    cr0 &= ~CR0_CD;
-    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
-
     if (efer & MSR_EFER_LME) {
         if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
             enter_long_mode(vcpu, cr0, efer);
@@ -149,6 +147,10 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
         }
     }
 
+    /* Filter new CR0 after we are finished examining it above. */
+    cr0 = (cr0 & ~(mask & ~CR0_PG));
+    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
+
     hv_vcpu_invalidate_tlb(vcpu);
     hv_vcpu_flush(vcpu);
 }
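
Background on the VMCS_CR0_MASK / VMCS_CR0_SHADOW split the change relies on: in VMX, bits set in the CR0 guest/host mask are host-owned, so guest reads of those bits return the corresponding bits of the read shadow and guest attempts to change them trap to the hypervisor, while VMCS_GUEST_CR0 holds the value the CPU actually runs with. The standalone sketch below is not part of the patch; the CR0_* constants are the architectural bit positions redefined locally so it builds outside QEMU, and the example CR0 value is made up. It reproduces the patch's filtering arithmetic to show which bits of a guest-requested CR0 end up in VMCS_GUEST_CR0 versus only in the shadow.

#include <stdint.h>
#include <stdio.h>

/* Architectural CR0 bit positions (Intel SDM); redefined here only for the sketch. */
#define CR0_PE (1ULL << 0)
#define CR0_ET (1ULL << 4)
#define CR0_NE (1ULL << 5)
#define CR0_NW (1ULL << 29)
#define CR0_CD (1ULL << 30)
#define CR0_PG (1ULL << 31)

int main(void)
{
    /* Same mask the patch installs in VMCS_CR0_MASK. */
    uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;

    /* Example guest-requested CR0: protected mode with paging, caching disabled. */
    uint64_t cr0 = CR0_PG | CR0_PE | CR0_CD | CR0_NW;

    /* The shadow keeps the guest's requested value, so guest reads of the
     * host-owned bits (PG, CD, NW, NE, ET) still see what the guest wrote. */
    uint64_t shadow = cr0;

    /* The patch's filter: drop the host-owned bits except PG, then force NE/ET,
     * so the CPU never actually runs the guest with CD/NW set. */
    uint64_t guest_cr0 = (cr0 & ~(mask & ~CR0_PG)) | CR0_NE | CR0_ET;

    printf("VMCS_CR0_SHADOW = 0x%llx\n", (unsigned long long)shadow);
    printf("VMCS_GUEST_CR0  = 0x%llx\n", (unsigned long long)guest_cr0);
    return 0;
}

With this input the shadow keeps 0xe0000001 (PG, CD, NW, PE as the guest asked), while the value loaded into VMCS_GUEST_CR0 is 0x80000031 (PG, PE, with NE and ET forced and CD/NW stripped), which is the split the new mask/filter pair establishes.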