qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

vmx.h (7036B)


/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 */

#ifndef VMX_H
#define VMX_H

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"

#include "exec/address-spaces.h"

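/* read GPR */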
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t v;

    if (hv_vcpu_read_register(vcpu, reg, &v)) {
        abort();
    }

    return v;
}

/* write GPR */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
    if (hv_vcpu_write_register(vcpu, reg, v)) {
        abort();
    }
}

/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
    uint64_t v;

    hv_vmx_vcpu_read_vmcs(vcpu, field, &v);

    return v;
}

/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
    hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}

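/*
 * VMX capability values, as reported by hv_vmx_read_capability(), pack two
 * masks into one 64-bit word: the low 32 bits are the allowed 0-settings
 * (a 1 bit there means the control must be 1), the high 32 bits the allowed
 * 1-settings (a 0 bit there means the control must be 0). cap2ctrl()
 * therefore forces the must-be-one bits on and clears everything the
 * capability does not allow.
 *
 * Illustrative use (a sketch; "desired_entry_ctls" is a placeholder):
 *
 *     uint64_t cap;
 *     if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &cap)) {
 *         abort();
 *     }
 *     wvmcs(vcpu, VMCS_ENTRY_CTLS, cap2ctrl(cap, desired_entry_ctls));
 */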
/* desired control word constrained by hardware/hypervisor capabilities */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}

#define VM_ENTRY_GUEST_LMA (1LL << 9)

#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
#define AR_TYPE_LDT 2

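/*
 * Flip the guest into IA-32e (long) mode: set EFER.LMA, set the "IA-32e mode
 * guest" VM-entry control, and make sure TR describes a busy 64-bit TSS,
 * which VM entry requires in long mode.
 */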
static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}

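/*
 * Leave IA-32e (long) mode: clear the "IA-32e mode guest" VM-entry control
 * and EFER.LMA in the guest EFER.
 */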
static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);

    efer &= ~MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}

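/*
 * Emulate a guest write to CR0: reload the PDPTEs when 32-bit PAE paging is
 * in use, switch long mode on or off when EFER.LME is set and CR0.PG
 * changes, and keep the real CR0 bits behind the CR0 mask/shadow so the
 * guest reads back the value it wrote.
 */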
static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
    uint64_t changed_cr0 = old_cr0 ^ cr0;
    uint64_t mask = CR0_PG_MASK | CR0_CD_MASK | CR0_NW_MASK |
                    CR0_NE_MASK | CR0_ET_MASK;
    uint64_t entry_ctls;

    if ((cr0 & CR0_PG_MASK) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE_MASK) &&
        !(efer & MSR_EFER_LME)) {
        address_space_read(&address_space_memory,
                           rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                           MEMTXATTRS_UNSPECIFIED, pdpte, 32);
        /* Only set PDPTE when appropriate. */
        for (i = 0; i < 4; i++) {
            wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
        }
    }

    wvmcs(vcpu, VMCS_CR0_MASK, mask);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    if (efer & MSR_EFER_LME) {
        if (changed_cr0 & CR0_PG_MASK) {
            if (cr0 & CR0_PG_MASK) {
                enter_long_mode(vcpu, cr0, efer);
            } else {
                exit_long_mode(vcpu, cr0, efer);
            }
        }
    } else {
        entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
        wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
    }

    /* Filter new CR0 after we are finished examining it above. */
    cr0 = (cr0 & ~(mask & ~CR0_PG_MASK));
    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE_MASK | CR0_ET_MASK);

    hv_vcpu_invalidate_tlb(vcpu);
}

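/*
 * Emulate a guest write to CR4. CR4.VMXE must stay set while the CPU is in
 * VMX operation, so it is forced on in the real CR4 and hidden from the
 * guest via the CR4 mask/shadow.
 */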
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    uint64_t guest_cr4 = cr4 | CR4_VMXE_MASK;

    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
    wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE_MASK);

    hv_vcpu_invalidate_tlb(vcpu);
}

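/*
 * Move the guest to a new RIP, mirroring it into env->eip, and drop any
 * STI/MOV-SS interrupt shadow, since the blocking instruction has completed.
 */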
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint64_t val;

    /* BUG: should take overlap into consideration */
    wreg(cpu->hvf->fd, HV_X86_RIP, rip);
    env->eip = rip;

    /* After moving RIP forward, we need to clear the interruptibility state. */
    val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}

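/* Clear virtual NMI blocking in both QEMU's hflags2 and the VMCS. */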
static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 &= ~HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

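/* Set virtual NMI blocking in both QEMU's hflags2 and the VMCS. */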
static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 |= HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

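/*
 * Enable NMI-window exiting, so the vCPU takes a VM exit as soon as an NMI
 * can be injected.
 */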
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

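/* Disable NMI-window exiting. */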
static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

#endif