qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

machine.c (50484B)


#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"
#include "kvm/hyperv.h"
#include "hw/i386/x86.h"
#include "kvm/kvm_i386.h"

#include "sysemu/kvm.h"
#include "sysemu/tcg.h"

#include "qemu/error-report.h"

static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

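/*
 * The "+ type_check(...)" term below is QEMU's compile-time type
 * guard: it evaluates to 0 when typeof_field(_state, _field) is
 * SegmentCache and fails to compile otherwise, so it adds nothing to
 * the offset at runtime.
 */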
#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

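/*
 * ZMM registers are migrated in 64-bit slices: xmm_reg covers quads 0-1
 * (bits 0-127), ymmh_reg quads 2-3 (bits 128-255), zmmh_reg quads 4-7
 * (bits 256-511), and hi16_zmm_reg all eight quads of registers 16-31,
 * which exist only on x86-64.
 */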
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

static const VMStateDescription vmstate_lbr_records_var = {
    .name = "lbr_records_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(from, LBREntry),
        VMSTATE_UINT64(to, LBREntry),
        VMSTATE_UINT64(info, LBREntry),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_LBR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_lbr_records_var, \
                         LBREntry)

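/*
 * Each x87 register travels as a 64-bit mantissa plus a 16-bit
 * sign/exponent word; cpu_get_fp80()/cpu_set_fp80() convert between
 * that pair and the softfloat floatx80 value via the CPU_LDoubleU
 * union.
 */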
typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save  = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

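/*
 * VMSTATE_WITH_TMP allocates a temporary x86_FPReg_tmp whose first
 * member, parent, is pointed at the FPReg being migrated, then runs
 * vmstate_fpreg_tmp's pre_save/post_load hooks around the transfer, so
 * the conversion happens on the temporary rather than on CPU state.
 */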
static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;
    env->v_tpr = env->int_ctl & V_TPR_MASK;
    /* FPU: fold the top-of-stack index (fpstt) into bits 11-13 of the
       saved status word and pack the complemented fptags[] into one byte. */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to a host with unrestricted
     * guest support (otherwise the migration will fail with an invalid
     * guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

#ifdef CONFIG_KVM
    /*
     * In case the vCPU may have enabled VMX, we need to make sure the
     * kernel has the required capabilities to perform migration
     * correctly:
     *
     * 1) We must be able to extract vCPU nested-state from KVM.
     *
     * 2) In case the vCPU is running in guest-mode and has a pending
     * exception, we must be able to determine if it's in a pending or
     * injected state.  Note that if KVM doesn't have the required
     * capability to do so, a pending/injected exception will always
     * appear as an injected exception.
     */
    if (kvm_enabled() && cpu_vmx_maybe_enabled(env) &&
        (!env->nested_state ||
         (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) &&
          env->exception_injected))) {
        error_report("Guest may have enabled nested virtualization but "
                "the kernel does not support the capabilities required "
                "to save vCPU nested state");
        return -EINVAL;
    }
#endif

    /*
     * When the vCPU is running L2 and an exception is still pending,
     * it can potentially be intercepted by the L1 hypervisor, in
     * contrast to an injected exception, which cannot be intercepted
     * anymore.
     *
     * Furthermore, when an L2 exception is intercepted by the L1
     * hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
     * should not be set yet in the respective vCPU register.
     * Thus, in case an exception is pending, it is
     * important to save the exception payload separately.
     *
     * Therefore, if an exception is not in a pending state
     * or the vCPU is not in guest-mode, it is not important to
     * distinguish between a pending and an injected exception
     * and we don't need to store the exception payload separately.
     *
     * In order to preserve better backwards-compatible migration,
     * convert a pending exception to an injected exception in
     * case it is not important to distinguish between them
     * as described above.
     */
    if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) {
        env->exception_pending = 0;
        env->exception_injected = 1;

        if (env->exception_has_payload) {
            if (env->exception_nr == EXCP01_DB) {
                env->dr[6] = env->exception_payload;
            } else if (env->exception_nr == EXCP0E_PAGE) {
                env->cr[2] = env->exception_payload;
            }
        }
    }

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration from a host that doesn't have
     * unrestricted guest support to a host that does (otherwise the
     * migration will fail with an invalid guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

#ifdef CONFIG_KVM
    if ((env->hflags & HF_GUEST_MASK) &&
        (!env->nested_state ||
        !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) {
        error_report("vCPU set in guest-mode inconsistent with "
                     "migrated kernel nested state");
        return -EINVAL;
    }
#endif

    /*
     * There are cases where we can get a valid exception_nr with both
     * exception_pending and exception_injected being cleared.
     * This can happen in one of the following scenarios:
     * 1) Source is older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support.
     * 2) Source is running on a kernel without KVM_CAP_EXCEPTION_PAYLOAD
     *    support.
     * 3) "cpu/exception_info" subsection not sent because there is no
     *    exception pending or the guest wasn't running L2 (see comment
     *    in cpu_pre_save()).
     *
     * In those cases, we can just deduce that a valid exception_nr means
     * we can treat the exception as already injected.
     */
    if ((env->exception_nr != -1) &&
        !env->exception_pending && !env->exception_injected) {
        env->exception_injected = 1;
    }

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

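/*
 * Everything below describes optional subsections.  A subsection is
 * written to the stream only when its .needed callback returns true at
 * save time, so migration to an older QEMU keeps working as long as
 * the corresponding feature is unused.
 */
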
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool async_pf_int_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_int_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static bool exception_info_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /*
     * It is important to save exception-info only when we need to
     * distinguish between a pending and an injected exception, which
     * is only the case when there is a pending exception and the vCPU
     * is running L2.
     * For more info, refer to the comment in cpu_pre_save().
     */
    return env->exception_pending && (env->hflags & HF_GUEST_MASK);
}

static const VMStateDescription vmstate_exception_info = {
    .name = "cpu/exception_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = exception_info_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.exception_pending, X86CPU),
        VMSTATE_UINT8(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.exception_has_payload, X86CPU),
        VMSTATE_UINT64(env.exception_payload, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Poll control MSR enabled by default */
static bool poll_control_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.poll_control_msr != 1;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_int_msr = {
    .name = "cpu/async_pf_int_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_int_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_int_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_poll_control_msr = {
    .name = "cpu/poll_control_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = poll_control_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.poll_control_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return cpu->migrate_smi_count && env->msr_smi_count != 0;
}

static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hyperv_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static int hyperv_synic_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    hyperv_x86_synic_update(cpu);
    return 0;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .post_load = hyperv_synic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_reenlightenment_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_reenlightenment_control != 0 ||
        env->msr_hv_tsc_emulation_control != 0 ||
        env->msr_hv_tsc_emulation_status != 0;
}

static int hyperv_reenlightenment_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /*
     * KVM doesn't fully support re-enlightenment notifications so we need to
     * make sure TSC frequency doesn't change upon migration.
     */
    if ((env->msr_hv_reenlightenment_control & HV_REENLIGHTENMENT_ENABLE_BIT) &&
        !env->user_tsc_khz) {
        error_report("Guest enabled re-enlightenment notifications, "
                     "'tsc-frequency=' has to be specified");
        return -EINVAL;
    }

    return 0;
}

static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .post_load = hyperv_reenlightenment_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

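/*
 * "cpu/avx512" carries what the base format lacks: the opmask
 * registers, the ZMM high quads, and (on x86-64) ZMM16-31 in full.
 */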
static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool umwait_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->umwait != 0;
}

static const VMStateDescription vmstate_umwait = {
    .name = "cpu/umwait",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = umwait_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.umwait, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pkrs_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkrs != 0;
}

static const VMStateDescription vmstate_pkrs = {
    .name = "cpu/pkrs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkrs_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkrs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

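/*
 * x86mc->save_tsc_khz appears to be a machine-class compatibility
 * switch: machine types that predate the "cpu/tsc_khz" subsection
 * leave it clear so their migration stream layout stays unchanged.
 */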
static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    X86MachineClass *x86mc = X86_MACHINE_CLASS(mc);
    return env->tsc_khz && x86mc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef CONFIG_KVM

static bool vmx_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].vmcs12));
}

static const VMStateDescription vmstate_vmx_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_shadow_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12));
}

static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_shadow_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_nested_state_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;

    return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX &&
            nested_state->hdr.vmx.vmxon_pa != -1ull);
}

static const VMStateDescription vmstate_vmx_nested_state = {
    .name = "cpu/kvm_nested_state/vmx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_nested_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
        VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
        VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_vmcs12,
        &vmstate_vmx_shadow_vmcs12,
        NULL,
    }
};

static bool svm_nested_state_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;

    /*
     * HF_GUEST_MASK and HF2_GIF_MASK are already serialized
     * via hflags and hflags2, all that's left is the opaque
     * nested state blob.
     */
    return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM &&
            nested_state->size > offsetof(struct kvm_nested_state, data));
}

static const VMStateDescription vmstate_svm_nested_state = {
    .name = "cpu/kvm_nested_state/svm",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_nested_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
        VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_SVM_VMCB_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool nested_state_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->nested_state &&
            (vmx_nested_state_needed(env->nested_state) ||
             svm_nested_state_needed(env->nested_state)));
}

static int nested_state_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    struct kvm_nested_state *nested_state = env->nested_state;
    int min_nested_state_len = offsetof(struct kvm_nested_state, data);
    int max_nested_state_len = kvm_max_nested_state_length();

    /*
     * If our kernel doesn't support setting nested state
     * and we have received nested state from the migration stream,
     * we need to fail the migration.
     */
    if (max_nested_state_len <= 0) {
        error_report("Received nested state when kernel cannot restore it");
        return -EINVAL;
    }

    /*
     * Verify that the size of the received nested_state struct
     * at least covers the required header and is not larger
     * than the max size our kernel supports.
     */
    if (nested_state->size < min_nested_state_len) {
        error_report("Received nested state size less than min: "
                     "len=%d, min=%d",
                     nested_state->size, min_nested_state_len);
        return -EINVAL;
    }
    if (nested_state->size > max_nested_state_len) {
        error_report("Received unsupported nested state size: "
                     "nested_state->size=%d, max=%d",
                     nested_state->size, max_nested_state_len);
        return -EINVAL;
    }

    /* Verify that the format is valid */
    if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
        (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
        error_report("Received invalid nested state format: %d",
                     nested_state->format);
        return -EINVAL;
    }

    return 0;
}

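/*
 * Only the fixed-size header is described here; the variable part is
 * carried by the vmx/svm subsections, whose .needed callbacks key off
 * nested_state->format, so at most one of them is sent.
 */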
static const VMStateDescription vmstate_kvm_nested_state = {
    .name = "cpu/kvm_nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_U16(flags, struct kvm_nested_state),
        VMSTATE_U16(format, struct kvm_nested_state),
        VMSTATE_U32(size, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_nested_state,
        &vmstate_svm_nested_state,
        NULL
    }
};

static const VMStateDescription vmstate_nested_state = {
    .name = "cpu/nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nested_state_needed,
    .post_load = nested_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
                vmstate_kvm_nested_state,
                struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    }
};

#endif

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool amd_tsc_scale_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE);
}

static const VMStateDescription amd_tsc_scale_msr_ctrl = {
    .name = "cpu/amd_tsc_scale_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = amd_tsc_scale_msr_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_pt_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
        env->msr_rtit_cr3_match) {
        return true;
    }

    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
        if (env->msr_rtit_addrs[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};

static bool virt_ssbd_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->virt_ssbd != 0;
}

static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool svm_npt_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->hflags2 & HF2_NPT_MASK);
}

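/*
 * Note: "svn_npt" looks like a typo for "svm_npt", but the subsection
 * name is part of the migration stream format, so it is left as-is
 * for compatibility.
 */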
static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool svm_guest_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return tcg_enabled() && env->int_ctl;
}

static const VMStateDescription vmstate_svm_guest = {
    .name = "cpu/svm_guest",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_guest_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.int_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifndef TARGET_X86_64
static bool intel_efer32_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->efer != 0;
}

static const VMStateDescription vmstate_efer32 = {
    .name = "cpu/efer32",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_efer32_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool msr_tsx_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->features[FEAT_ARCH_CAPABILITIES] & ARCH_CAP_TSX_CTRL_MSR;
}

static const VMStateDescription vmstate_msr_tsx_ctrl = {
    .name = "cpu/msr_tsx_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_tsx_ctrl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.tsx_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_sgx_msrs_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC);
}

static const VMStateDescription vmstate_msr_intel_sgx = {
    .name = "cpu/intel_sgx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_sgx_msrs_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
   1473 
   1474 static bool pdptrs_needed(void *opaque)
   1475 {
   1476     X86CPU *cpu = opaque;
   1477     CPUX86State *env = &cpu->env;
   1478     return env->pdptrs_valid;
   1479 }
   1480 
   1481 static int pdptrs_post_load(void *opaque, int version_id)
   1482 {
   1483     X86CPU *cpu = opaque;
   1484     CPUX86State *env = &cpu->env;
   1485     env->pdptrs_valid = true;
   1486     return 0;
   1487 }
   1488 
   1489 
   1490 static const VMStateDescription vmstate_pdptrs = {
   1491     .name = "cpu/pdptrs",
   1492     .version_id = 1,
   1493     .minimum_version_id = 1,
   1494     .needed = pdptrs_needed,
   1495     .post_load = pdptrs_post_load,
   1496     .fields = (VMStateField[]) {
   1497         VMSTATE_UINT64_ARRAY(env.pdptrs, X86CPU, 4),
   1498         VMSTATE_END_OF_LIST()
   1499     }
   1500 };
   1501 
   1502 static bool xfd_msrs_needed(void *opaque)
   1503 {
   1504     X86CPU *cpu = opaque;
   1505     CPUX86State *env = &cpu->env;
   1506 
   1507     return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD);
   1508 }
   1509 
   1510 static const VMStateDescription vmstate_msr_xfd = {
   1511     .name = "cpu/msr_xfd",
   1512     .version_id = 1,
   1513     .minimum_version_id = 1,
   1514     .needed = xfd_msrs_needed,
   1515     .fields = (VMStateField[]) {
   1516         VMSTATE_UINT64(env.msr_xfd, X86CPU),
   1517         VMSTATE_UINT64(env.msr_xfd_err, X86CPU),
   1518         VMSTATE_END_OF_LIST()
   1519     }
   1520 };
   1521 
   1522 #ifdef TARGET_X86_64
   1523 static bool amx_xtile_needed(void *opaque)
   1524 {
   1525     X86CPU *cpu = opaque;
   1526     CPUX86State *env = &cpu->env;
   1527 
   1528     return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE);
   1529 }
   1530 
   1531 static const VMStateDescription vmstate_amx_xtile = {
   1532     .name = "cpu/intel_amx_xtile",
   1533     .version_id = 1,
   1534     .minimum_version_id = 1,
   1535     .needed = amx_xtile_needed,
   1536     .fields = (VMStateField[]) {
   1537         VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64),
   1538         VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192),
   1539         VMSTATE_END_OF_LIST()
   1540     }
   1541 };
   1542 #endif
   1543 
static bool arch_lbr_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR);
}

static const VMStateDescription vmstate_arch_lbr = {
    .name = "cpu/arch_lbr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = arch_lbr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU),
        VMSTATE_UINT64(env.msr_lbr_depth, X86CPU),
        VMSTATE_LBR_VARS(env.lbr_records, X86CPU, ARCH_LBR_NR_ENTRIES, 1),
        VMSTATE_END_OF_LIST()
    }
};

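/*
 * A triple fault that the hypervisor has latched but not yet delivered
 * must travel with the guest; dropping it on migration would lose a
 * pending guest shutdown/reset.
 */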
static bool triple_fault_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->triple_fault_pending;
}

static const VMStateDescription vmstate_triple_fault = {
    .name = "cpu/triple_fault",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = triple_fault_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.triple_fault_pending, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

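/*
 * Top-level migration description for the x86 CPU.  Everything in
 * .fields is always transmitted; optional, feature-dependent state is
 * carried in the subsections at the end.
 */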
const VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_nr, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
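    /*
     * Each subsection is transmitted only when its .needed callback
     * returns true, so optional state does not break migration to
     * destinations that predate the corresponding feature.
     */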
    .subsections = (const VMStateDescription*[]) {
        &vmstate_exception_info,
        &vmstate_async_pf_msr,
        &vmstate_async_pf_int_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_poll_control_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hyperv_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_msr_hyperv_reenlightenment,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_umwait,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
        &vmstate_pkru,
        &vmstate_pkrs,
        &vmstate_spec_ctrl,
        &amd_tsc_scale_msr_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        &vmstate_msr_virt_ssbd,
        &vmstate_svm_npt,
        &vmstate_svm_guest,
#ifndef TARGET_X86_64
        &vmstate_efer32,
#endif
#ifdef CONFIG_KVM
        &vmstate_nested_state,
#endif
        &vmstate_msr_tsx_ctrl,
        &vmstate_msr_intel_sgx,
        &vmstate_pdptrs,
        &vmstate_msr_xfd,
#ifdef TARGET_X86_64
        &vmstate_amx_xtile,
#endif
        &vmstate_arch_lbr,
        &vmstate_triple_fault,
        NULL
    }
};