qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

helper.c (21117B)


/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/runstate.h"
#include "kvm/kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#endif
#include "qemu/log.h"

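/*
 * Recompute HF_AVX_EN_MASK in hflags: AVX insns are enabled only when
 * CR4.OSXSAVE is set and XCR0 enables both the SSE and YMM state
 * components.
 */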
void cpu_sync_avx_hflag(CPUX86State *env)
{
    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & (XSTATE_SSE_MASK | XSTATE_YMM_MASK))
            == (XSTATE_SSE_MASK | XSTATE_YMM_MASK)) {
        env->hflags |= HF_AVX_EN_MASK;
    } else {
        env->hflags &= ~HF_AVX_EN_MASK;
    }
}

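/*
 * Recompute the MPX-related hflags bits: MPX is enabled when
 * CR4.OSXSAVE is set, XCR0 enables the BNDCSR state component and the
 * active configuration register (BNDCFGU at CPL 3, MSR_BNDCFGS
 * otherwise) has its enable bit set.  HF2_MPX_PR_MASK mirrors the
 * BNDPRESERVE bit of that register.
 */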
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}

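/* Extract the CPU family and model from the CPUID version dword. */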
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH (family 6, model 14) and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

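/*
 * Update the state of the A20 address line; a change invalidates all
 * cached translations, so pending TBs are unlinked and the TLB is
 * flushed.
 */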
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

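/*
 * Install a new CR0 value: flush the TLB when paging-related bits
 * change, handle long-mode entry/exit via EFER.LMA, and recompute the
 * dependent PE/ADDSEG and FPU hflags.
 */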
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK)) {
            return;
        }
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                        "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(env_cpu(env));
    }
}

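/*
 * Install a new CR4 value: flush the TLB when paging-affecting bits
 * change, clear bits the CPU model does not support, and recompute the
 * dependent hflags (OSFXSR/SMAP/UMIP, MPX, AVX).
 */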
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(env_cpu(env));
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK | HF_UMIP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
        new_cr4 &= ~CR4_UMIP_MASK;
    }
    if (new_cr4 & CR4_UMIP_MASK) {
        hflags |= HF_UMIP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
        new_cr4 &= ~CR4_PKS_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
    cpu_sync_avx_hflag(env);
}

#if !defined(CONFIG_USER_ONLY)
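/*
 * Debug-only software page walk: translate a guest virtual address to
 * a physical address by walking the active paging structures (paging
 * disabled, 32-bit, PAE, or 4/5-level long mode) without setting
 * accessed/dirty bits or raising faults.  Returns -1 if unmapped.
 */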
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

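/* Emit the QAPI MEMORY_FAILURE event for a failure seen by the guest. */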
static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
                                      bool recursive)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
                                   &mff);
}

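/*
 * Run on the target CPU: record the machine-check data in the selected
 * MCE bank and either deliver CPU_INTERRUPT_MCE, ignore the request,
 * or request a guest reset (triple fault), as the situation demands.
 */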
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
    g_autofree char *msg = NULL;
    bool need_reset = false;
    bool recursive;
    bool ar = !!(params->status & MCI_STATUS_AR);

    cpu_synchronize_state(cs);
    recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
        emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if (!(cenv->cr[4] & CR4_MCE_MASK)) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
                                  "raising triple fault", cs->cpu_index);
        } else if (recursive) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (need_reset) {
            emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
                                      recursive);
            monitor_puts(params->mon, msg);
            qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }

        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }

    emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
}

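/*
 * Monitor-driven MCE injection: validate the request against the CPU's
 * MCA capabilities, inject on the target CPU, and optionally broadcast
 * an uncorrected MCE to all other CPUs when MCE_INJECT_BROADCAST is
 * set.
 */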
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce,
                       RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

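/* Recover the guest EIP of a memory-mapped I/O access under TCG. */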
static inline target_ulong get_memio_eip(CPUX86State *env)
{
#ifdef CONFIG_TCG
    uint64_t data[TARGET_INSN_START_WORDS];
    CPUState *cs = env_cpu(env);

    if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {
        return env->eip;
    }

    /* Per x86_restore_state_to_opc. */
    if (TARGET_TB_PCREL) {
        return (env->eip & TARGET_PAGE_MASK) | data[0];
    } else {
        return data[0] - env->segs[R_CS].base;
    }
#else
    qemu_build_not_reached();
#endif
}

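/*
 * Report a TPR access: hardware accelerators request CPU_INTERRUPT_TPR,
 * while under TCG the access is forwarded to the APIC together with
 * the EIP of the faulting instruction.
 */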
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        target_ulong eip = get_memio_eip(env);

        apic_handle_tpr_access_report(cpu->apic_state, eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

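/*
 * Debug helper: read the segment descriptor for the given selector from
 * the GDT or LDT and decode its base, limit and flags.  Returns 1 on
 * success, 0 on failure.
 */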
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    CPUState *cs = env_cpu(env);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr + 4, (uint8_t *)&e2, sizeof(e2), 0) != 0) {
        return 0;
    }

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        *limit = (*limit << 12) | 0xfff;
    }
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
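/*
 * Handle an INIT IPI: reset the CPU while preserving the architectural
 * state between start_init_save and end_init_save, keeping any pending
 * SIPI request.
 */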
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

#ifndef CONFIG_USER_ONLY

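/* Load EFER and keep the derived LMA/SVME hflags in sync with it. */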
void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

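/*
 * Physical memory load/store helpers that honour the CPU's current
 * memory transaction attributes (e.g. the SMM address space).
 */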
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif