qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

seg_helper.c (82862B)


      1 /*
      2  *  x86 segmentation related helpers:
      3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
      4  *
      5  *  Copyright (c) 2003 Fabrice Bellard
      6  *
      7  * This library is free software; you can redistribute it and/or
      8  * modify it under the terms of the GNU Lesser General Public
      9  * License as published by the Free Software Foundation; either
     10  * version 2.1 of the License, or (at your option) any later version.
     11  *
     12  * This library is distributed in the hope that it will be useful,
     13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
     14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     15  * Lesser General Public License for more details.
     16  *
     17  * You should have received a copy of the GNU Lesser General Public
     18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
     19  */
     20 
     21 #include "qemu/osdep.h"
     22 #include "cpu.h"
     23 #include "qemu/log.h"
     24 #include "exec/helper-proto.h"
     25 #include "exec/exec-all.h"
     26 #include "exec/cpu_ldst.h"
     27 #include "exec/log.h"
     28 #include "helper-tcg.h"
     29 #include "seg_helper.h"
     30 
     31 int get_pg_mode(CPUX86State *env)
     32 {
     33     int pg_mode = 0;
     34     if (!(env->cr[0] & CR0_PG_MASK)) {
     35         return 0;
     36     }
     37     if (env->cr[0] & CR0_WP_MASK) {
     38         pg_mode |= PG_MODE_WP;
     39     }
     40     if (env->cr[4] & CR4_PAE_MASK) {
     41         pg_mode |= PG_MODE_PAE;
     42         if (env->efer & MSR_EFER_NXE) {
     43             pg_mode |= PG_MODE_NXE;
     44         }
     45     }
     46     if (env->cr[4] & CR4_PSE_MASK) {
     47         pg_mode |= PG_MODE_PSE;
     48     }
     49     if (env->cr[4] & CR4_SMEP_MASK) {
     50         pg_mode |= PG_MODE_SMEP;
     51     }
     52     if (env->hflags & HF_LMA_MASK) {
     53         pg_mode |= PG_MODE_LMA;
     54         if (env->cr[4] & CR4_PKE_MASK) {
     55             pg_mode |= PG_MODE_PKE;
     56         }
     57         if (env->cr[4] & CR4_PKS_MASK) {
     58             pg_mode |= PG_MODE_PKS;
     59         }
     60         if (env->cr[4] & CR4_LA57_MASK) {
     61             pg_mode |= PG_MODE_LA57;
     62         }
     63     }
     64     return pg_mode;
     65 }
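
        /*
         * Worked example (illustrative, not upstream commentary): a
         * typical 64-bit guest runs with CR0.PG+CR0.WP, CR4.PAE+CR4.PSE
         * and EFER.NXE set while in long mode, so get_pg_mode() returns
         * PG_MODE_WP | PG_MODE_PAE | PG_MODE_NXE | PG_MODE_PSE |
         * PG_MODE_LMA.  With CR0.PG clear it returns 0 regardless of
         * the other bits.
         */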
     66 
     67 /* return non-zero on error */
     68 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
     69                                uint32_t *e2_ptr, int selector,
     70                                uintptr_t retaddr)
     71 {
     72     SegmentCache *dt;
     73     int index;
     74     target_ulong ptr;
     75 
     76     if (selector & 0x4) {
     77         dt = &env->ldt;
     78     } else {
     79         dt = &env->gdt;
     80     }
     81     index = selector & ~7;
     82     if ((index + 7) > dt->limit) {
     83         return -1;
     84     }
     85     ptr = dt->base + index;
     86     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
     87     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
     88     return 0;
     89 }
     90 
     91 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
     92                                uint32_t *e2_ptr, int selector)
     93 {
     94     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
     95 }
     96 
     97 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
     98 {
     99     unsigned int limit;
    100 
    101     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    102     if (e2 & DESC_G_MASK) {
    103         limit = (limit << 12) | 0xfff;
    104     }
    105     return limit;
    106 }
    107 
    108 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
    109 {
    110     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    111 }
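
        /*
         * NOTE (illustrative): these two helpers decode the legacy
         * 8-byte descriptor layout, in which
         *   e1 (bytes 0-3) = limit[15:0] | base[15:0] << 16
         *   e2 (bytes 4-7) = base[23:16] | type/S/DPL/P | limit[19:16]
         *                    | AVL/L/D-B/G | base[31:24]
         * When DESC_G_MASK is set, the 20-bit limit is in 4 KiB units,
         * hence the (limit << 12) | 0xfff expansion above.
         */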
    112 
    113 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
    114                                          uint32_t e2)
    115 {
    116     sc->base = get_seg_base(e1, e2);
    117     sc->limit = get_seg_limit(e1, e2);
    118     sc->flags = e2;
    119 }
    120 
    121 /* init the segment cache in vm86 mode. */
    122 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
    123 {
    124     selector &= 0xffff;
    125 
    126     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
    127                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
    128                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
    129 }
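
        /*
         * Worked example (illustrative): in vm86 mode a selector is just
         * a paragraph number, so loading selector 0xb800 yields base
         * 0xb8000, limit 0xffff and a writable, accessed, DPL-3 data
         * segment, exactly what the cache load above constructs.
         */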
    130 
    131 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
    132                                        uint32_t *esp_ptr, int dpl,
    133                                        uintptr_t retaddr)
    134 {
    135     X86CPU *cpu = env_archcpu(env);
    136     int type, index, shift;
    137 
    138 #if 0
    139     {
    140         int i;
    141         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
    142         for (i = 0; i < env->tr.limit; i++) {
    143             printf("%02x ", env->tr.base[i]);
    144             if ((i & 7) == 7) {
    145                 printf("\n");
    146             }
    147         }
    148         printf("\n");
    149     }
    150 #endif
    151 
    152     if (!(env->tr.flags & DESC_P_MASK)) {
    153         cpu_abort(CPU(cpu), "invalid tss");
    154     }
    155     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    156     if ((type & 7) != 1) {
    157         cpu_abort(CPU(cpu), "invalid tss type");
    158     }
    159     shift = type >> 3;
    160     index = (dpl * 4 + 2) << shift;
    161     if (index + (4 << shift) - 1 > env->tr.limit) {
    162         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    163     }
    164     if (shift == 0) {
    165         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
    166         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    167     } else {
    168         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
    169         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    170     }
    171 }
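
        /*
         * Worked example (illustrative): for a 32-bit TSS (type 9/11,
         * so shift == 1) and dpl == 0, index = (0 * 4 + 2) << 1 = 4,
         * i.e. ESP0 is read from TSS+4 and SS0 from TSS+8, matching the
         * architectural layout.  A 16-bit TSS (shift == 0) reads SP0
         * and SS0 from offsets 2 and 4 instead.
         */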
    172 
    173 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
    174                          int cpl, uintptr_t retaddr)
    175 {
    176     uint32_t e1, e2;
    177     int rpl, dpl;
    178 
    179     if ((selector & 0xfffc) != 0) {
    180         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
    181             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    182         }
    183         if (!(e2 & DESC_S_MASK)) {
    184             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    185         }
    186         rpl = selector & 3;
    187         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    188         if (seg_reg == R_CS) {
    189             if (!(e2 & DESC_CS_MASK)) {
    190                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    191             }
    192             if (dpl != rpl) {
    193                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    194             }
    195         } else if (seg_reg == R_SS) {
    196             /* SS must be writable data */
    197             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
    198                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    199             }
    200             if (dpl != cpl || dpl != rpl) {
    201                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    202             }
    203         } else {
    204             /* reject execute-only (non-readable) code segments */
    205             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
    206                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    207             }
    208             /* if data or non-conforming code, check the rights */
    209             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
    210                 if (dpl < cpl || dpl < rpl) {
    211                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    212                 }
    213             }
    214         }
    215         if (!(e2 & DESC_P_MASK)) {
    216             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
    217         }
    218         cpu_x86_load_seg_cache(env, seg_reg, selector,
    219                                get_seg_base(e1, e2),
    220                                get_seg_limit(e1, e2),
    221                                e2);
    222     } else {
    223         if (seg_reg == R_SS || seg_reg == R_CS) {
    224             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
    225         }
    226     }
    227 }
    228 
    229 #define SWITCH_TSS_JMP  0
    230 #define SWITCH_TSS_IRET 1
    231 #define SWITCH_TSS_CALL 2
    232 
    233 /* XXX: restore CPU state in registers (PowerPC case) */
    234 static void switch_tss_ra(CPUX86State *env, int tss_selector,
    235                           uint32_t e1, uint32_t e2, int source,
    236                           uint32_t next_eip, uintptr_t retaddr)
    237 {
    238     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    239     target_ulong tss_base;
    240     uint32_t new_regs[8], new_segs[6];
    241     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    242     uint32_t old_eflags, eflags_mask;
    243     SegmentCache *dt;
    244     int index;
    245     target_ulong ptr;
    246 
    247     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    248     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
    249               source);
    250 
    251     /* if this is a task gate, read and load the referenced TSS segment */
    252     if (type == 5) {
    253         if (!(e2 & DESC_P_MASK)) {
    254             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    255         }
    256         tss_selector = e1 >> 16;
    257         if (tss_selector & 4) {
    258             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    259         }
    260         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
    261             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
    262         }
    263         if (e2 & DESC_S_MASK) {
    264             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
    265         }
    266         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    267         if ((type & 7) != 1) {
    268             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
    269         }
    270     }
    271 
    272     if (!(e2 & DESC_P_MASK)) {
    273         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    274     }
    275 
    276     if (type & 8) {
    277         tss_limit_max = 103;
    278     } else {
    279         tss_limit_max = 43;
    280     }
    281     tss_limit = get_seg_limit(e1, e2);
    282     tss_base = get_seg_base(e1, e2);
    283     if ((tss_selector & 4) != 0 ||
    284         tss_limit < tss_limit_max) {
    285         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    286     }
    287     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    288     if (old_type & 8) {
    289         old_tss_limit_max = 103;
    290     } else {
    291         old_tss_limit_max = 43;
    292     }
    293 
    294     /* read all the registers from the new TSS */
    295     if (type & 8) {
    296         /* 32 bit */
    297         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
    298         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
    299         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
    300         for (i = 0; i < 8; i++) {
    301             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
    302                                             retaddr);
    303         }
    304         for (i = 0; i < 6; i++) {
    305             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
    306                                              retaddr);
    307         }
    308         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
    309         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    310     } else {
    311         /* 16 bit */
    312         new_cr3 = 0;
    313         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
    314         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
    315         for (i = 0; i < 8; i++) {
    316             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
    317         }
    318         for (i = 0; i < 4; i++) {
    319             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
    320                                              retaddr);
    321         }
    322         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
    323         new_segs[R_FS] = 0;
    324         new_segs[R_GS] = 0;
    325         new_trap = 0;
    326     }
    327     /* XXX: avoid a compiler warning; see
    328        http://support.amd.com/us/Processor_TechDocs/24593.pdf
    329        chapters 12.2.5 and 13.2.4 on how to implement the TSS trap bit */
    330     (void)new_trap;
    331 
    332     /* NOTE: we must avoid memory exceptions during the task switch,
    333        so we perform dummy accesses beforehand */
    334     /* XXX: this can still fail in some cases, so a bigger hack is
    335        necessary to validate the TLB after the accesses have been done */
    336 
    337     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    338     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    339     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    340     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
    341 
    342     /* clear the busy bit (the task switch is restartable) */
    343     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
    344         target_ulong ptr;
    345         uint32_t e2;
    346 
    347         ptr = env->gdt.base + (env->tr.selector & ~7);
    348         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    349         e2 &= ~DESC_TSS_BUSY_MASK;
    350         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    351     }
    352     old_eflags = cpu_compute_eflags(env);
    353     if (source == SWITCH_TSS_IRET) {
    354         old_eflags &= ~NT_MASK;
    355     }
    356 
    357     /* save the current state in the old TSS */
    358     if (old_type & 8) {
    359         /* 32 bit */
    360         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
    361         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
    362         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
    363         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
    364         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
    365         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
    366         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
    367         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
    368         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
    369         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
    370         for (i = 0; i < 6; i++) {
    371             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
    372                               env->segs[i].selector, retaddr);
    373         }
    374     } else {
    375         /* 16 bit */
    376         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
    377         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
    378         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
    379         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
    380         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
    381         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
    382         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
    383         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
    384         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
    385         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
    386         for (i = 0; i < 4; i++) {
    387             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
    388                               env->segs[i].selector, retaddr);
    389         }
    390     }
    391 
    392     /* now if an exception occurs, it will occur in the next task's
    393        context */
    394 
    395     if (source == SWITCH_TSS_CALL) {
    396         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
    397         new_eflags |= NT_MASK;
    398     }
    399 
    400     /* set busy bit */
    401     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
    402         target_ulong ptr;
    403         uint32_t e2;
    404 
    405         ptr = env->gdt.base + (tss_selector & ~7);
    406         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    407         e2 |= DESC_TSS_BUSY_MASK;
    408         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    409     }
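
            /*
             * NOTE (illustrative): DESC_TSS_BUSY_MASK toggles bit 1 of
             * the 4-bit descriptor type, turning an available TSS (type
             * 9 or 1) into a busy one (type 11 or 3) and back.  JMP and
             * IRET cleared the outgoing task's busy bit earlier; CALL
             * keeps it set, since the back link stored at tss_base above
             * still refers to it.
             */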
    410 
    411     /* set the new CPU state */
    412     /* from this point, any exception which occurs can give problems */
    413     env->cr[0] |= CR0_TS_MASK;
    414     env->hflags |= HF_TS_MASK;
    415     env->tr.selector = tss_selector;
    416     env->tr.base = tss_base;
    417     env->tr.limit = tss_limit;
    418     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
    419 
    420     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
    421         cpu_x86_update_cr3(env, new_cr3);
    422     }
    423 
    424     /* load all registers without taking an exception, then reload
    425        them, possibly raising exceptions */
    426     env->eip = new_eip;
    427     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
    428         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    429     if (type & 8) {
    430         cpu_load_eflags(env, new_eflags, eflags_mask);
    431         for (i = 0; i < 8; i++) {
    432             env->regs[i] = new_regs[i];
    433         }
    434     } else {
    435         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
    436         for (i = 0; i < 8; i++) {
    437             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
    438         }
    439     }
    440     if (new_eflags & VM_MASK) {
    441         for (i = 0; i < 6; i++) {
    442             load_seg_vm(env, i, new_segs[i]);
    443         }
    444     } else {
    445         /* first load just the selectors, as the rest may trigger exceptions */
    446         for (i = 0; i < 6; i++) {
    447             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    448         }
    449     }
    450 
    451     env->ldt.selector = new_ldt & ~4;
    452     env->ldt.base = 0;
    453     env->ldt.limit = 0;
    454     env->ldt.flags = 0;
    455 
    456     /* load the LDT */
    457     if (new_ldt & 4) {
    458         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    459     }
    460 
    461     if ((new_ldt & 0xfffc) != 0) {
    462         dt = &env->gdt;
    463         index = new_ldt & ~7;
    464         if ((index + 7) > dt->limit) {
    465             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    466         }
    467         ptr = dt->base + index;
    468         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
    469         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    470         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
    471             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    472         }
    473         if (!(e2 & DESC_P_MASK)) {
    474             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    475         }
    476         load_seg_cache_raw_dt(&env->ldt, e1, e2);
    477     }
    478 
    479     /* load the segments */
    480     if (!(new_eflags & VM_MASK)) {
    481         int cpl = new_segs[R_CS] & 3;
    482         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
    483         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
    484         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
    485         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
    486         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
    487         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    488     }
    489 
    490     /* check that env->eip is within the CS segment limit */
    491     if (new_eip > env->segs[R_CS].limit) {
    492         /* XXX: different exception if CALL? */
    493         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    494     }
    495 
    496 #ifndef CONFIG_USER_ONLY
    497     /* reset local breakpoints */
    498     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
    499         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    500     }
    501 #endif
    502 }
    503 
    504 static void switch_tss(CPUX86State *env, int tss_selector,
    505                        uint32_t e1, uint32_t e2, int source,
    506                         uint32_t next_eip)
    507 {
    508     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
    509 }
    510 
    511 static inline unsigned int get_sp_mask(unsigned int e2)
    512 {
    513 #ifdef TARGET_X86_64
    514     if (e2 & DESC_L_MASK) {
    515         return 0;
    516     } else
    517 #endif
    518     if (e2 & DESC_B_MASK) {
    519         return 0xffffffff;
    520     } else {
    521         return 0xffff;
    522     }
    523 }
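
        /*
         * NOTE (illustrative): the returned mask encodes the stack
         * width: 0xffff for a 16-bit stack segment, 0xffffffff when the
         * "big" bit DESC_B_MASK is set, and 0 for 64-bit code, which
         * SET_ESP below interprets as "use the full RSP".
         */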
    524 
    525 int exception_has_error_code(int intno)
    526 {
    527     switch (intno) {
    528     case 8:
    529     case 10:
    530     case 11:
    531     case 12:
    532     case 13:
    533     case 14:
    534     case 17:
    535         return 1;
    536     }
    537     return 0;
    538 }
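
        /*
         * NOTE (illustrative): the vectors above are #DF(8), #TS(10),
         * #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17), the
         * architecturally defined exceptions that push an error code.
         */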
    539 
    540 #ifdef TARGET_X86_64
    541 #define SET_ESP(val, sp_mask)                                   \
    542     do {                                                        \
    543         if ((sp_mask) == 0xffff) {                              \
    544             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
    545                 ((val) & 0xffff);                               \
    546         } else if ((sp_mask) == 0xffffffffLL) {                 \
    547             env->regs[R_ESP] = (uint32_t)(val);                 \
    548         } else {                                                \
    549             env->regs[R_ESP] = (val);                           \
    550         }                                                       \
    551     } while (0)
    552 #else
    553 #define SET_ESP(val, sp_mask)                                   \
    554     do {                                                        \
    555         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
    556             ((val) & (sp_mask));                                \
    557     } while (0)
    558 #endif
    559 
    560 /* on 64-bit machines this addition can overflow, so this segment
    561  * addition macro can be used to trim the value to 32 bits whenever needed */
    562 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
    563 
    564 /* XXX: add an is_user flag to have proper security support */
    565 #define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
    566     {                                                            \
    567         sp -= 2;                                                 \
    568         cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    569     }
    570 
    571 #define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    572     {                                                                   \
    573         sp -= 4;                                                        \
    574         cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    575     }
    576 
    577 #define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
    578     {                                                            \
    579         val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
    580         sp += 2;                                                 \
    581     }
    582 
    583 #define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    584     {                                                                   \
    585         val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
    586         sp += 4;                                                        \
    587     }
    588 
    589 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
    590 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
    591 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
    592 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
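
        /*
         * For illustration, PUSHL(ssp, sp, sp_mask, val) expands to
         * roughly:
         *
         *     sp -= 4;
         *     cpu_stl_kernel(env, (uint32_t)(ssp + (sp & sp_mask)), val);
         *
         * i.e. pre-decrement, mask, then store through the segment
         * base; the POP* macros load first and post-increment.  SEG_ADDL
         * preserves the 32-bit wrap-around of legacy-mode stacks on
         * 64-bit targets.
         */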
    593 
    594 /* protected mode interrupt */
    595 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
    596                                    int error_code, unsigned int next_eip,
    597                                    int is_hw)
    598 {
    599     SegmentCache *dt;
    600     target_ulong ptr, ssp;
    601     int type, dpl, selector, ss_dpl, cpl;
    602     int has_error_code, new_stack, shift;
    603     uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    604     uint32_t old_eip, sp_mask;
    605     int vm86 = env->eflags & VM_MASK;
    606 
    607     has_error_code = 0;
    608     if (!is_int && !is_hw) {
    609         has_error_code = exception_has_error_code(intno);
    610     }
    611     if (is_int) {
    612         old_eip = next_eip;
    613     } else {
    614         old_eip = env->eip;
    615     }
    616 
    617     dt = &env->idt;
    618     if (intno * 8 + 7 > dt->limit) {
    619         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    620     }
    621     ptr = dt->base + intno * 8;
    622     e1 = cpu_ldl_kernel(env, ptr);
    623     e2 = cpu_ldl_kernel(env, ptr + 4);
    624     /* check gate type */
    625     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    626     switch (type) {
    627     case 5: /* task gate */
    628     case 6: /* 286 interrupt gate */
    629     case 7: /* 286 trap gate */
    630     case 14: /* 386 interrupt gate */
    631     case 15: /* 386 trap gate */
    632         break;
    633     default:
    634         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    635         break;
    636     }
    637     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    638     cpl = env->hflags & HF_CPL_MASK;
    639     /* check privilege if software int */
    640     if (is_int && dpl < cpl) {
    641         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    642     }
    643 
    644     if (type == 5) {
    645         /* task gate */
    646         /* must do that check here to return the correct error code */
    647         if (!(e2 & DESC_P_MASK)) {
    648             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    649         }
    650         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
    651         if (has_error_code) {
    652             int type;
    653             uint32_t mask;
    654 
    655             /* push the error code */
    656             type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    657             shift = type >> 3;
    658             if (env->segs[R_SS].flags & DESC_B_MASK) {
    659                 mask = 0xffffffff;
    660             } else {
    661                 mask = 0xffff;
    662             }
    663             esp = (env->regs[R_ESP] - (2 << shift)) & mask;
    664             ssp = env->segs[R_SS].base + esp;
    665             if (shift) {
    666                 cpu_stl_kernel(env, ssp, error_code);
    667             } else {
    668                 cpu_stw_kernel(env, ssp, error_code);
    669             }
    670             SET_ESP(esp, mask);
    671         }
    672         return;
    673     }
    674 
    675     /* Otherwise, trap or interrupt gate */
    676 
    677     /* check valid bit */
    678     if (!(e2 & DESC_P_MASK)) {
    679         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    680     }
    681     selector = e1 >> 16;
    682     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    683     if ((selector & 0xfffc) == 0) {
    684         raise_exception_err(env, EXCP0D_GPF, 0);
    685     }
    686     if (load_segment(env, &e1, &e2, selector) != 0) {
    687         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    688     }
    689     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
    690         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    691     }
    692     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    693     if (dpl > cpl) {
    694         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    695     }
    696     if (!(e2 & DESC_P_MASK)) {
    697         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    698     }
    699     if (e2 & DESC_C_MASK) {
    700         dpl = cpl;
    701     }
    702     if (dpl < cpl) {
    703         /* to inner privilege */
    704         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
    705         if ((ss & 0xfffc) == 0) {
    706             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
    707         }
    708         if ((ss & 3) != dpl) {
    709             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
    710         }
    711         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
    712             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
    713         }
    714         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
    715         if (ss_dpl != dpl) {
    716             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
    717         }
    718         if (!(ss_e2 & DESC_S_MASK) ||
    719             (ss_e2 & DESC_CS_MASK) ||
    720             !(ss_e2 & DESC_W_MASK)) {
    721             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
    722         }
    723         if (!(ss_e2 & DESC_P_MASK)) {
    724             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
    725         }
    726         new_stack = 1;
    727         sp_mask = get_sp_mask(ss_e2);
    728         ssp = get_seg_base(ss_e1, ss_e2);
    729     } else  {
    730         /* to same privilege */
    731         if (vm86) {
    732             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    733         }
    734         new_stack = 0;
    735         sp_mask = get_sp_mask(env->segs[R_SS].flags);
    736         ssp = env->segs[R_SS].base;
    737         esp = env->regs[R_ESP];
    738     }
    739 
    740     shift = type >> 3;
    741 
    742 #if 0
    743     /* XXX: check that enough room is available */
    744     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    745     if (vm86) {
    746         push_size += 8;
    747     }
    748     push_size <<= shift;
    749 #endif
    750     if (shift == 1) {
    751         if (new_stack) {
    752             if (vm86) {
    753                 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
    754                 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
    755                 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
    756                 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
    757             }
    758             PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
    759             PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
    760         }
    761         PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
    762         PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
    763         PUSHL(ssp, esp, sp_mask, old_eip);
    764         if (has_error_code) {
    765             PUSHL(ssp, esp, sp_mask, error_code);
    766         }
    767     } else {
    768         if (new_stack) {
    769             if (vm86) {
    770                 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
    771                 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
    772                 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
    773                 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
    774             }
    775             PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
    776             PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
    777         }
    778         PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
    779         PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
    780         PUSHW(ssp, esp, sp_mask, old_eip);
    781         if (has_error_code) {
    782             PUSHW(ssp, esp, sp_mask, error_code);
    783         }
    784     }
    785 
    786     /* interrupt gates clear the IF flag */
    787     if ((type & 1) == 0) {
    788         env->eflags &= ~IF_MASK;
    789     }
    790     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
    791 
    792     if (new_stack) {
    793         if (vm86) {
    794             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
    795             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
    796             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
    797             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
    798         }
    799         ss = (ss & ~3) | dpl;
    800         cpu_x86_load_seg_cache(env, R_SS, ss,
    801                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    802     }
    803     SET_ESP(esp, sp_mask);
    804 
    805     selector = (selector & ~3) | dpl;
    806     cpu_x86_load_seg_cache(env, R_CS, selector,
    807                    get_seg_base(e1, e2),
    808                    get_seg_limit(e1, e2),
    809                    e2);
    810     env->eip = offset;
    811 }
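
        /*
         * Summary (illustrative): for a 32-bit inner-privilege entry the
         * code above pushes, from higher to lower addresses, old SS, old
         * ESP, EFLAGS, old CS, old EIP and, for the vectors listed in
         * exception_has_error_code(), the error code.  Same-privilege
         * entries omit SS:ESP, and vm86 entries additionally push
         * GS/FS/DS/ES before clearing them to null selectors.
         */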
    812 
    813 #ifdef TARGET_X86_64
    814 
    815 #define PUSHQ_RA(sp, val, ra)                   \
    816     {                                           \
    817         sp -= 8;                                \
    818         cpu_stq_kernel_ra(env, sp, (val), ra);  \
    819     }
    820 
    821 #define POPQ_RA(sp, val, ra)                    \
    822     {                                           \
    823         val = cpu_ldq_kernel_ra(env, sp, ra);   \
    824         sp += 8;                                \
    825     }
    826 
    827 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
    828 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
    829 
    830 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
    831 {
    832     X86CPU *cpu = env_archcpu(env);
    833     int index, pg_mode;
    834     target_ulong rsp;
    835     int32_t sext;
    836 
    837 #if 0
    838     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
    839            env->tr.base, env->tr.limit);
    840 #endif
    841 
    842     if (!(env->tr.flags & DESC_P_MASK)) {
    843         cpu_abort(CPU(cpu), "invalid tss");
    844     }
    845     index = 8 * level + 4;
    846     if ((index + 7) > env->tr.limit) {
    847         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    848     }
    849 
    850     rsp = cpu_ldq_kernel(env, env->tr.base + index);
    851 
    852     /* check that the virtual address is canonical (properly sign-extended) */
    853     pg_mode = get_pg_mode(env);
    854     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    855     if (sext != 0 && sext != -1) {
    856         raise_exception_err(env, EXCP0C_STACK, 0);
    857     }
    858 
    859     return rsp;
    860 }
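
        /*
         * Worked example (illustrative): the 64-bit TSS stores RSP0-RSP2
         * at offsets 4, 12 and 20 (index = 8 * level + 4) and IST1-IST7
         * from offset 36.  do_interrupt64() passes ist + 3 as the level,
         * so IST1 maps to 8 * 4 + 4 = 36, matching the architectural
         * layout.
         */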
    861 
    862 /* 64 bit interrupt */
    863 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
    864                            int error_code, target_ulong next_eip, int is_hw)
    865 {
    866     SegmentCache *dt;
    867     target_ulong ptr;
    868     int type, dpl, selector, cpl, ist;
    869     int has_error_code, new_stack;
    870     uint32_t e1, e2, e3, ss;
    871     target_ulong old_eip, esp, offset;
    872 
    873     has_error_code = 0;
    874     if (!is_int && !is_hw) {
    875         has_error_code = exception_has_error_code(intno);
    876     }
    877     if (is_int) {
    878         old_eip = next_eip;
    879     } else {
    880         old_eip = env->eip;
    881     }
    882 
    883     dt = &env->idt;
    884     if (intno * 16 + 15 > dt->limit) {
    885         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    886     }
    887     ptr = dt->base + intno * 16;
    888     e1 = cpu_ldl_kernel(env, ptr);
    889     e2 = cpu_ldl_kernel(env, ptr + 4);
    890     e3 = cpu_ldl_kernel(env, ptr + 8);
    891     /* check gate type */
    892     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    893     switch (type) {
    894     case 14: /* 386 interrupt gate */
    895     case 15: /* 386 trap gate */
    896         break;
    897     default:
    898         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    899         break;
    900     }
    901     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    902     cpl = env->hflags & HF_CPL_MASK;
    903     /* check privilege if software int */
    904     if (is_int && dpl < cpl) {
    905         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    906     }
    907     /* check valid bit */
    908     if (!(e2 & DESC_P_MASK)) {
    909         raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    910     }
    911     selector = e1 >> 16;
    912     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    913     ist = e2 & 7;
    914     if ((selector & 0xfffc) == 0) {
    915         raise_exception_err(env, EXCP0D_GPF, 0);
    916     }
    917 
    918     if (load_segment(env, &e1, &e2, selector) != 0) {
    919         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    920     }
    921     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
    922         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    923     }
    924     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    925     if (dpl > cpl) {
    926         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    927     }
    928     if (!(e2 & DESC_P_MASK)) {
    929         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    930     }
    931     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
    932         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    933     }
    934     if (e2 & DESC_C_MASK) {
    935         dpl = cpl;
    936     }
    937     if (dpl < cpl || ist != 0) {
    938         /* to inner privilege */
    939         new_stack = 1;
    940         esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
    941         ss = 0;
    942     } else {
    943         /* to same privilege */
    944         if (env->eflags & VM_MASK) {
    945             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    946         }
    947         new_stack = 0;
    948         esp = env->regs[R_ESP];
    949     }
    950     esp &= ~0xfLL; /* align stack */
    951 
    952     PUSHQ(esp, env->segs[R_SS].selector);
    953     PUSHQ(esp, env->regs[R_ESP]);
    954     PUSHQ(esp, cpu_compute_eflags(env));
    955     PUSHQ(esp, env->segs[R_CS].selector);
    956     PUSHQ(esp, old_eip);
    957     if (has_error_code) {
    958         PUSHQ(esp, error_code);
    959     }
    960 
    961     /* interrupt gates clear the IF flag */
    962     if ((type & 1) == 0) {
    963         env->eflags &= ~IF_MASK;
    964     }
    965     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
    966 
    967     if (new_stack) {
    968         ss = 0 | dpl;
    969         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    970     }
    971     env->regs[R_ESP] = esp;
    972 
    973     selector = (selector & ~3) | dpl;
    974     cpu_x86_load_seg_cache(env, R_CS, selector,
    975                    get_seg_base(e1, e2),
    976                    get_seg_limit(e1, e2),
    977                    e2);
    978     env->eip = offset;
    979 }
    980 
    981 void helper_sysret(CPUX86State *env, int dflag)
    982 {
    983     int cpl, selector;
    984 
    985     if (!(env->efer & MSR_EFER_SCE)) {
    986         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    987     }
    988     cpl = env->hflags & HF_CPL_MASK;
    989     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
    990         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    991     }
    992     selector = (env->star >> 48) & 0xffff;
    993     if (env->hflags & HF_LMA_MASK) {
    994         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
    995                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
    996                         NT_MASK);
    997         if (dflag == 2) {
    998             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
    999                                    0, 0xffffffff,
   1000                                    DESC_G_MASK | DESC_P_MASK |
   1001                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   1002                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
   1003                                    DESC_L_MASK);
   1004             env->eip = env->regs[R_ECX];
   1005         } else {
   1006             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
   1007                                    0, 0xffffffff,
   1008                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   1009                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   1010                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
   1011             env->eip = (uint32_t)env->regs[R_ECX];
   1012         }
   1013         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
   1014                                0, 0xffffffff,
   1015                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   1016                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   1017                                DESC_W_MASK | DESC_A_MASK);
   1018     } else {
   1019         env->eflags |= IF_MASK;
   1020         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
   1021                                0, 0xffffffff,
   1022                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   1023                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   1024                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
   1025         env->eip = (uint32_t)env->regs[R_ECX];
   1026         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
   1027                                0, 0xffffffff,
   1028                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   1029                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   1030                                DESC_W_MASK | DESC_A_MASK);
   1031     }
   1032 }
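
        /*
         * NOTE (illustrative): SYSRET derives its target selectors from
         * STAR[63:48]: CS = that value (plus 16 when returning to 64-bit
         * code) and SS = that value plus 8, both forced to RPL 3.  This
         * is why the code above loads fixed flat DPL-3 attributes into
         * the descriptor caches instead of reading the GDT.
         */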
   1033 #endif /* TARGET_X86_64 */
   1034 
   1035 /* real mode interrupt */
   1036 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
   1037                               int error_code, unsigned int next_eip)
   1038 {
   1039     SegmentCache *dt;
   1040     target_ulong ptr, ssp;
   1041     int selector;
   1042     uint32_t offset, esp;
   1043     uint32_t old_cs, old_eip;
   1044 
   1045     /* real mode (simpler!) */
   1046     dt = &env->idt;
   1047     if (intno * 4 + 3 > dt->limit) {
   1048         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
   1049     }
   1050     ptr = dt->base + intno * 4;
   1051     offset = cpu_lduw_kernel(env, ptr);
   1052     selector = cpu_lduw_kernel(env, ptr + 2);
   1053     esp = env->regs[R_ESP];
   1054     ssp = env->segs[R_SS].base;
   1055     if (is_int) {
   1056         old_eip = next_eip;
   1057     } else {
   1058         old_eip = env->eip;
   1059     }
   1060     old_cs = env->segs[R_CS].selector;
   1061     /* XXX: use SS segment size? */
   1062     PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
   1063     PUSHW(ssp, esp, 0xffff, old_cs);
   1064     PUSHW(ssp, esp, 0xffff, old_eip);
   1065 
   1066     /* update processor state */
   1067     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
   1068     env->eip = offset;
   1069     env->segs[R_CS].selector = selector;
   1070     env->segs[R_CS].base = (selector << 4);
   1071     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
   1072 }
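
        /*
         * Worked example (illustrative): real-mode vectoring reads a
         * 4-byte IVT entry at intno * 4 (offset word, then segment word)
         * and pushes only FLAGS, CS and IP, so e.g. INT 0x10 dispatches
         * through the far pointer stored at linear address 0x40.
         */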
   1073 
   1074 /*
   1075  * Begin execution of an interrupt. is_int is TRUE if coming from
   1076  * the int instruction. next_eip is the env->eip value AFTER the interrupt
   1077  * instruction. It is only relevant if is_int is TRUE.
   1078  */
   1079 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
   1080                       int error_code, target_ulong next_eip, int is_hw)
   1081 {
   1082     CPUX86State *env = &cpu->env;
   1083 
   1084     if (qemu_loglevel_mask(CPU_LOG_INT)) {
   1085         if ((env->cr[0] & CR0_PE_MASK)) {
   1086             static int count;
   1087 
   1088             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
   1089                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
   1090                      count, intno, error_code, is_int,
   1091                      env->hflags & HF_CPL_MASK,
   1092                      env->segs[R_CS].selector, env->eip,
   1093                      (int)env->segs[R_CS].base + env->eip,
   1094                      env->segs[R_SS].selector, env->regs[R_ESP]);
   1095             if (intno == 0x0e) {
   1096                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
   1097             } else {
   1098                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
   1099             }
   1100             qemu_log("\n");
   1101             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
   1102 #if 0
   1103             {
   1104                 int i;
   1105                 target_ulong ptr;
   1106 
   1107                 qemu_log("       code=");
   1108                 ptr = env->segs[R_CS].base + env->eip;
   1109                 for (i = 0; i < 16; i++) {
   1110                     qemu_log(" %02x", ldub(ptr + i));
   1111                 }
   1112                 qemu_log("\n");
   1113             }
   1114 #endif
   1115             count++;
   1116         }
   1117     }
   1118     if (env->cr[0] & CR0_PE_MASK) {
   1119 #if !defined(CONFIG_USER_ONLY)
   1120         if (env->hflags & HF_GUEST_MASK) {
   1121             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
   1122         }
   1123 #endif
   1124 #ifdef TARGET_X86_64
   1125         if (env->hflags & HF_LMA_MASK) {
   1126             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
   1127         } else
   1128 #endif
   1129         {
   1130             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
   1131                                    is_hw);
   1132         }
   1133     } else {
   1134 #if !defined(CONFIG_USER_ONLY)
   1135         if (env->hflags & HF_GUEST_MASK) {
   1136             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
   1137         }
   1138 #endif
   1139         do_interrupt_real(env, intno, is_int, error_code, next_eip);
   1140     }
   1141 
   1142 #if !defined(CONFIG_USER_ONLY)
   1143     if (env->hflags & HF_GUEST_MASK) {
   1144         CPUState *cs = CPU(cpu);
   1145         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
   1146                                       offsetof(struct vmcb,
   1147                                                control.event_inj));
   1148 
   1149         x86_stl_phys(cs,
   1150                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
   1151                  event_inj & ~SVM_EVTINJ_VALID);
   1152     }
   1153 #endif
   1154 }
   1155 
   1156 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
   1157 {
   1158     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
   1159 }
   1160 
   1161 void helper_lldt(CPUX86State *env, int selector)
   1162 {
   1163     SegmentCache *dt;
   1164     uint32_t e1, e2;
   1165     int index, entry_limit;
   1166     target_ulong ptr;
   1167 
   1168     selector &= 0xffff;
   1169     if ((selector & 0xfffc) == 0) {
   1170         /* XXX: NULL selector case: invalid LDT */
   1171         env->ldt.base = 0;
   1172         env->ldt.limit = 0;
   1173     } else {
   1174         if (selector & 0x4) {
   1175             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1176         }
   1177         dt = &env->gdt;
   1178         index = selector & ~7;
   1179 #ifdef TARGET_X86_64
   1180         if (env->hflags & HF_LMA_MASK) {
   1181             entry_limit = 15;
   1182         } else
   1183 #endif
   1184         {
   1185             entry_limit = 7;
   1186         }
   1187         if ((index + entry_limit) > dt->limit) {
   1188             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1189         }
   1190         ptr = dt->base + index;
   1191         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
   1192         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
   1193         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
   1194             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1195         }
   1196         if (!(e2 & DESC_P_MASK)) {
   1197             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
   1198         }
   1199 #ifdef TARGET_X86_64
   1200         if (env->hflags & HF_LMA_MASK) {
   1201             uint32_t e3;
   1202 
   1203             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
   1204             load_seg_cache_raw_dt(&env->ldt, e1, e2);
   1205             env->ldt.base |= (target_ulong)e3 << 32;
   1206         } else
   1207 #endif
   1208         {
   1209             load_seg_cache_raw_dt(&env->ldt, e1, e2);
   1210         }
   1211     }
   1212     env->ldt.selector = selector;
   1213 }
   1214 
   1215 void helper_ltr(CPUX86State *env, int selector)
   1216 {
   1217     SegmentCache *dt;
   1218     uint32_t e1, e2;
   1219     int index, type, entry_limit;
   1220     target_ulong ptr;
   1221 
   1222     selector &= 0xffff;
   1223     if ((selector & 0xfffc) == 0) {
   1224         /* NULL selector case: invalid TR */
   1225         env->tr.base = 0;
   1226         env->tr.limit = 0;
   1227         env->tr.flags = 0;
   1228     } else {
   1229         if (selector & 0x4) {
   1230             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1231         }
   1232         dt = &env->gdt;
   1233         index = selector & ~7;
   1234 #ifdef TARGET_X86_64
   1235         if (env->hflags & HF_LMA_MASK) {
   1236             entry_limit = 15;
   1237         } else
   1238 #endif
   1239         {
   1240             entry_limit = 7;
   1241         }
   1242         if ((index + entry_limit) > dt->limit) {
   1243             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1244         }
   1245         ptr = dt->base + index;
   1246         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
   1247         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
   1248         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
   1249         if ((e2 & DESC_S_MASK) ||
   1250             (type != 1 && type != 9)) {
   1251             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1252         }
   1253         if (!(e2 & DESC_P_MASK)) {
   1254             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
   1255         }
   1256 #ifdef TARGET_X86_64
   1257         if (env->hflags & HF_LMA_MASK) {
   1258             uint32_t e3, e4;
   1259 
   1260             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
   1261             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
   1262             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
   1263                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1264             }
   1265             load_seg_cache_raw_dt(&env->tr, e1, e2);
   1266             env->tr.base |= (target_ulong)e3 << 32;
   1267         } else
   1268 #endif
   1269         {
   1270             load_seg_cache_raw_dt(&env->tr, e1, e2);
   1271         }
   1272         e2 |= DESC_TSS_BUSY_MASK;
   1273         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
   1274     }
   1275     env->tr.selector = selector;
   1276 }
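
        /*
         * NOTE (illustrative): in long mode, system descriptors such as
         * the LDT and TSS grow to 16 bytes (hence entry_limit == 15
         * above), with base bits 63:32 in the third dword (e3) and a
         * fourth dword whose type field must be zero, which the e4
         * check in helper_ltr() enforces.
         */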
   1277 
   1278 /* only works in protected mode, not VM86 mode. seg_reg must be != R_CS */
   1279 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
   1280 {
   1281     uint32_t e1, e2;
   1282     int cpl, dpl, rpl;
   1283     SegmentCache *dt;
   1284     int index;
   1285     target_ulong ptr;
   1286 
   1287     selector &= 0xffff;
   1288     cpl = env->hflags & HF_CPL_MASK;
   1289     if ((selector & 0xfffc) == 0) {
   1290         /* null selector case */
   1291         if (seg_reg == R_SS
   1292 #ifdef TARGET_X86_64
   1293             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
   1294 #endif
   1295             ) {
   1296             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   1297         }
   1298         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
   1299     } else {
   1300 
   1301         if (selector & 0x4) {
   1302             dt = &env->ldt;
   1303         } else {
   1304             dt = &env->gdt;
   1305         }
   1306         index = selector & ~7;
   1307         if ((index + 7) > dt->limit) {
   1308             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1309         }
   1310         ptr = dt->base + index;
   1311         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
   1312         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
   1313 
   1314         if (!(e2 & DESC_S_MASK)) {
   1315             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1316         }
   1317         rpl = selector & 3;
   1318         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1319         if (seg_reg == R_SS) {
   1320             /* must be writable segment */
   1321             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
   1322                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1323             }
   1324             if (rpl != cpl || dpl != cpl) {
   1325                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1326             }
   1327         } else {
   1328             /* must be readable segment */
   1329             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
   1330                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1331             }
   1332 
   1333             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
   1334                 /* if not a conforming code segment, check the rights */
   1335                 if (dpl < cpl || dpl < rpl) {
   1336                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1337                 }
   1338             }
   1339         }
   1340 
   1341         if (!(e2 & DESC_P_MASK)) {
   1342             if (seg_reg == R_SS) {
   1343                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
   1344             } else {
   1345                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
   1346             }
   1347         }
   1348 
   1349         /* set the access bit if not already set */
   1350         if (!(e2 & DESC_A_MASK)) {
   1351             e2 |= DESC_A_MASK;
   1352             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
   1353         }
   1354 
   1355         cpu_x86_load_seg_cache(env, seg_reg, selector,
   1356                        get_seg_base(e1, e2),
   1357                        get_seg_limit(e1, e2),
   1358                        e2);
   1359 #if 0
    1360         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08x flags=%08x\n",
    1361                  selector, (unsigned long)get_seg_base(e1, e2), get_seg_limit(e1, e2), e2);
   1362 #endif
   1363     }
   1364 }
   1365 
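         /*
          * System descriptor types dispatched on below (e2 bits 11:8):
          *   1 = available 286 TSS    5 = task gate     9 = available 386 TSS
          *   4 = 286 call gate       12 = 386 call gate
          * In long mode only type 12 is legal, reinterpreted as the 64-bit
          * call gate; its second 8-byte half carries bits 63:32 of the
          * target offset.
          */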
   1366 /* protected mode jump */
   1367 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
   1368                            target_ulong next_eip)
   1369 {
   1370     int gate_cs, type;
   1371     uint32_t e1, e2, cpl, dpl, rpl, limit;
   1372 
   1373     if ((new_cs & 0xfffc) == 0) {
   1374         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   1375     }
   1376     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
   1377         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1378     }
   1379     cpl = env->hflags & HF_CPL_MASK;
   1380     if (e2 & DESC_S_MASK) {
   1381         if (!(e2 & DESC_CS_MASK)) {
   1382             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1383         }
   1384         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1385         if (e2 & DESC_C_MASK) {
   1386             /* conforming code segment */
   1387             if (dpl > cpl) {
   1388                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1389             }
   1390         } else {
   1391             /* non conforming code segment */
   1392             rpl = new_cs & 3;
   1393             if (rpl > cpl) {
   1394                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1395             }
   1396             if (dpl != cpl) {
   1397                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1398             }
   1399         }
   1400         if (!(e2 & DESC_P_MASK)) {
   1401             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
   1402         }
   1403         limit = get_seg_limit(e1, e2);
   1404         if (new_eip > limit &&
   1405             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
   1406             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   1407         }
   1408         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
   1409                        get_seg_base(e1, e2), limit, e2);
   1410         env->eip = new_eip;
   1411     } else {
   1412         /* jump to call or task gate */
   1413         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1414         rpl = new_cs & 3;
   1415         cpl = env->hflags & HF_CPL_MASK;
   1416         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
   1417 
   1418 #ifdef TARGET_X86_64
   1419         if (env->efer & MSR_EFER_LMA) {
   1420             if (type != 12) {
   1421                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1422             }
   1423         }
   1424 #endif
   1425         switch (type) {
   1426         case 1: /* 286 TSS */
   1427         case 9: /* 386 TSS */
   1428         case 5: /* task gate */
   1429             if (dpl < cpl || dpl < rpl) {
   1430                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1431             }
   1432             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
   1433             break;
   1434         case 4: /* 286 call gate */
   1435         case 12: /* 386 call gate */
   1436             if ((dpl < cpl) || (dpl < rpl)) {
   1437                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1438             }
   1439             if (!(e2 & DESC_P_MASK)) {
   1440                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
   1441             }
   1442             gate_cs = e1 >> 16;
   1443             new_eip = (e1 & 0xffff);
   1444             if (type == 12) {
   1445                 new_eip |= (e2 & 0xffff0000);
   1446             }
   1447 
   1448 #ifdef TARGET_X86_64
   1449             if (env->efer & MSR_EFER_LMA) {
   1450                 /* load the upper 8 bytes of the 64-bit call gate */
   1451                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
   1452                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
   1453                                            GETPC());
   1454                 }
   1455                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
   1456                 if (type != 0) {
   1457                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
   1458                                            GETPC());
   1459                 }
   1460                 new_eip |= ((target_ulong)e1) << 32;
   1461             }
   1462 #endif
   1463 
   1464             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
   1465                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
   1466             }
   1467             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1468             /* must be code segment */
   1469             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
   1470                  (DESC_S_MASK | DESC_CS_MASK))) {
   1471                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
   1472             }
   1473             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
   1474                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
   1475                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
   1476             }
   1477 #ifdef TARGET_X86_64
   1478             if (env->efer & MSR_EFER_LMA) {
   1479                 if (!(e2 & DESC_L_MASK)) {
   1480                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
   1481                 }
   1482                 if (e2 & DESC_B_MASK) {
   1483                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
   1484                 }
   1485             }
   1486 #endif
   1487             if (!(e2 & DESC_P_MASK)) {
   1488                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
   1489             }
   1490             limit = get_seg_limit(e1, e2);
   1491             if (new_eip > limit &&
   1492                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
   1493                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   1494             }
   1495             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
   1496                                    get_seg_base(e1, e2), limit, e2);
   1497             env->eip = new_eip;
   1498             break;
   1499         default:
   1500             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1501             break;
   1502         }
   1503     }
   1504 }
   1505 
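         /*
          * No descriptor checks here: the return CS:IP is pushed (32-bit
          * if shift != 0) and CS is reloaded real-mode style, with
          * base = selector << 4 and the cached limit/flags left unchanged.
          */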
   1506 /* real mode call */
   1507 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
   1508                        int shift, uint32_t next_eip)
   1509 {
   1510     uint32_t esp, esp_mask;
   1511     target_ulong ssp;
   1512 
   1513     esp = env->regs[R_ESP];
   1514     esp_mask = get_sp_mask(env->segs[R_SS].flags);
   1515     ssp = env->segs[R_SS].base;
   1516     if (shift) {
   1517         PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
   1518         PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
   1519     } else {
   1520         PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
   1521         PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
   1522     }
   1523 
   1524     SET_ESP(esp, esp_mask);
   1525     env->eip = new_eip;
   1526     env->segs[R_CS].selector = new_cs;
   1527     env->segs[R_CS].base = (new_cs << 4);
   1528 }
   1529 
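         /*
          * For a call gate to an inner privilege level, the new stack taken
          * from the TSS ends up holding, from higher to lower addresses:
          * old SS, old (E)SP, the param_count copied parameters (legacy
          * gates only), old CS, return (E)IP.  A same-privilege transfer
          * pushes only old CS and the return (E)IP on the current stack.
          */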
   1530 /* protected mode call */
   1531 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
   1532                             int shift, target_ulong next_eip)
   1533 {
   1534     int new_stack, i;
   1535     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
   1536     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
   1537     uint32_t val, limit, old_sp_mask;
   1538     target_ulong ssp, old_ssp, offset, sp;
   1539 
   1540     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
   1541     LOG_PCALL_STATE(env_cpu(env));
   1542     if ((new_cs & 0xfffc) == 0) {
   1543         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   1544     }
   1545     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
   1546         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1547     }
   1548     cpl = env->hflags & HF_CPL_MASK;
   1549     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
   1550     if (e2 & DESC_S_MASK) {
   1551         if (!(e2 & DESC_CS_MASK)) {
   1552             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1553         }
   1554         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1555         if (e2 & DESC_C_MASK) {
   1556             /* conforming code segment */
   1557             if (dpl > cpl) {
   1558                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1559             }
   1560         } else {
   1561             /* non conforming code segment */
   1562             rpl = new_cs & 3;
   1563             if (rpl > cpl) {
   1564                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1565             }
   1566             if (dpl != cpl) {
   1567                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1568             }
   1569         }
   1570         if (!(e2 & DESC_P_MASK)) {
   1571             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
   1572         }
   1573 
   1574 #ifdef TARGET_X86_64
   1575         /* XXX: check 16/32 bit cases in long mode */
   1576         if (shift == 2) {
   1577             target_ulong rsp;
   1578 
   1579             /* 64 bit case */
   1580             rsp = env->regs[R_ESP];
   1581             PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
   1582             PUSHQ_RA(rsp, next_eip, GETPC());
   1583             /* from this point, not restartable */
   1584             env->regs[R_ESP] = rsp;
   1585             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
   1586                                    get_seg_base(e1, e2),
   1587                                    get_seg_limit(e1, e2), e2);
   1588             env->eip = new_eip;
   1589         } else
   1590 #endif
   1591         {
   1592             sp = env->regs[R_ESP];
   1593             sp_mask = get_sp_mask(env->segs[R_SS].flags);
   1594             ssp = env->segs[R_SS].base;
   1595             if (shift) {
   1596                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
   1597                 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
   1598             } else {
   1599                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
   1600                 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
   1601             }
   1602 
   1603             limit = get_seg_limit(e1, e2);
   1604             if (new_eip > limit) {
   1605                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1606             }
   1607             /* from this point, not restartable */
   1608             SET_ESP(sp, sp_mask);
   1609             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
   1610                                    get_seg_base(e1, e2), limit, e2);
   1611             env->eip = new_eip;
   1612         }
   1613     } else {
   1614         /* check gate type */
   1615         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
   1616         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1617         rpl = new_cs & 3;
   1618 
   1619 #ifdef TARGET_X86_64
   1620         if (env->efer & MSR_EFER_LMA) {
   1621             if (type != 12) {
   1622                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1623             }
   1624         }
   1625 #endif
   1626 
   1627         switch (type) {
   1628         case 1: /* available 286 TSS */
   1629         case 9: /* available 386 TSS */
   1630         case 5: /* task gate */
   1631             if (dpl < cpl || dpl < rpl) {
   1632                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1633             }
   1634             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
   1635             return;
   1636         case 4: /* 286 call gate */
   1637         case 12: /* 386 call gate */
   1638             break;
   1639         default:
   1640             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1641             break;
   1642         }
   1643         shift = type >> 3;
   1644 
   1645         if (dpl < cpl || dpl < rpl) {
   1646             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
   1647         }
   1648         /* check valid bit */
   1649         if (!(e2 & DESC_P_MASK)) {
    1650             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
   1651         }
   1652         selector = e1 >> 16;
   1653         param_count = e2 & 0x1f;
   1654         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
   1655 #ifdef TARGET_X86_64
   1656         if (env->efer & MSR_EFER_LMA) {
   1657             /* load the upper 8 bytes of the 64-bit call gate */
   1658             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
   1659                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
   1660                                        GETPC());
   1661             }
   1662             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
   1663             if (type != 0) {
   1664                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
   1665                                        GETPC());
   1666             }
   1667             offset |= ((target_ulong)e1) << 32;
   1668         }
   1669 #endif
   1670         if ((selector & 0xfffc) == 0) {
   1671             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   1672         }
   1673 
   1674         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
   1675             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1676         }
   1677         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
   1678             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1679         }
   1680         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1681         if (dpl > cpl) {
   1682             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1683         }
   1684 #ifdef TARGET_X86_64
   1685         if (env->efer & MSR_EFER_LMA) {
   1686             if (!(e2 & DESC_L_MASK)) {
   1687                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1688             }
   1689             if (e2 & DESC_B_MASK) {
   1690                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
   1691             }
   1692             shift++;
   1693         }
   1694 #endif
   1695         if (!(e2 & DESC_P_MASK)) {
   1696             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
   1697         }
   1698 
   1699         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
   1700             /* to inner privilege */
   1701 #ifdef TARGET_X86_64
   1702             if (shift == 2) {
   1703                 sp = get_rsp_from_tss(env, dpl);
   1704                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
   1705                 new_stack = 1;
   1706                 sp_mask = 0;
   1707                 ssp = 0;  /* SS base is always zero in IA-32e mode */
    1708                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
    1709                           TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
   1710             } else
   1711 #endif
   1712             {
   1713                 uint32_t sp32;
   1714                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
   1715                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
   1716                           TARGET_FMT_lx "\n", ss, sp32, param_count,
   1717                           env->regs[R_ESP]);
   1718                 sp = sp32;
   1719                 if ((ss & 0xfffc) == 0) {
   1720                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
   1721                 }
   1722                 if ((ss & 3) != dpl) {
   1723                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
   1724                 }
   1725                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
   1726                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
   1727                 }
   1728                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
   1729                 if (ss_dpl != dpl) {
   1730                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
   1731                 }
   1732                 if (!(ss_e2 & DESC_S_MASK) ||
   1733                     (ss_e2 & DESC_CS_MASK) ||
   1734                     !(ss_e2 & DESC_W_MASK)) {
   1735                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
   1736                 }
   1737                 if (!(ss_e2 & DESC_P_MASK)) {
   1738                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
   1739                 }
   1740 
   1741                 sp_mask = get_sp_mask(ss_e2);
   1742                 ssp = get_seg_base(ss_e1, ss_e2);
   1743             }
   1744 
   1745             /* push_size = ((param_count * 2) + 8) << shift; */
   1746 
   1747             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
   1748             old_ssp = env->segs[R_SS].base;
   1749 #ifdef TARGET_X86_64
   1750             if (shift == 2) {
   1751                 /* XXX: verify if new stack address is canonical */
   1752                 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
   1753                 PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
   1754                 /* parameters aren't supported for 64-bit call gates */
   1755             } else
   1756 #endif
   1757             if (shift == 1) {
   1758                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
   1759                 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
   1760                 for (i = param_count - 1; i >= 0; i--) {
   1761                     val = cpu_ldl_kernel_ra(env, old_ssp +
   1762                                             ((env->regs[R_ESP] + i * 4) &
   1763                                              old_sp_mask), GETPC());
   1764                     PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
   1765                 }
   1766             } else {
   1767                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
   1768                 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
   1769                 for (i = param_count - 1; i >= 0; i--) {
   1770                     val = cpu_lduw_kernel_ra(env, old_ssp +
   1771                                              ((env->regs[R_ESP] + i * 2) &
   1772                                               old_sp_mask), GETPC());
   1773                     PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
   1774                 }
   1775             }
   1776             new_stack = 1;
   1777         } else {
   1778             /* to same privilege */
   1779             sp = env->regs[R_ESP];
   1780             sp_mask = get_sp_mask(env->segs[R_SS].flags);
   1781             ssp = env->segs[R_SS].base;
   1782             /* push_size = (4 << shift); */
   1783             new_stack = 0;
   1784         }
   1785 
   1786 #ifdef TARGET_X86_64
   1787         if (shift == 2) {
   1788             PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
   1789             PUSHQ_RA(sp, next_eip, GETPC());
   1790         } else
   1791 #endif
   1792         if (shift == 1) {
   1793             PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
   1794             PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
   1795         } else {
   1796             PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
   1797             PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
   1798         }
   1799 
   1800         /* from this point, not restartable */
   1801 
   1802         if (new_stack) {
   1803 #ifdef TARGET_X86_64
   1804             if (shift == 2) {
   1805                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
   1806             } else
   1807 #endif
   1808             {
   1809                 ss = (ss & ~3) | dpl;
   1810                 cpu_x86_load_seg_cache(env, R_SS, ss,
   1811                                        ssp,
   1812                                        get_seg_limit(ss_e1, ss_e2),
   1813                                        ss_e2);
   1814             }
   1815         }
   1816 
   1817         selector = (selector & ~3) | dpl;
   1818         cpu_x86_load_seg_cache(env, R_CS, selector,
   1819                        get_seg_base(e1, e2),
   1820                        get_seg_limit(e1, e2),
   1821                        e2);
   1822         SET_ESP(sp, sp_mask);
   1823         env->eip = offset;
   1824     }
   1825 }
   1826 
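         /*
          * Pops (E)IP, CS and (E)FLAGS from the stack.  In VM86 mode IOPL
          * is not writable from the popped flags; with a 16-bit operand
          * size only the low 16 flag bits are updated.
          */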
   1827 /* real and vm86 mode iret */
   1828 void helper_iret_real(CPUX86State *env, int shift)
   1829 {
   1830     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
   1831     target_ulong ssp;
   1832     int eflags_mask;
   1833 
    1834     sp_mask = 0xffff; /* XXX: use SS segment size? */
   1835     sp = env->regs[R_ESP];
   1836     ssp = env->segs[R_SS].base;
   1837     if (shift == 1) {
   1838         /* 32 bits */
   1839         POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
   1840         POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
   1841         new_cs &= 0xffff;
   1842         POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
   1843     } else {
   1844         /* 16 bits */
   1845         POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
   1846         POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
   1847         POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
   1848     }
   1849     env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
   1850     env->segs[R_CS].selector = new_cs;
   1851     env->segs[R_CS].base = (new_cs << 4);
   1852     env->eip = new_eip;
   1853     if (env->eflags & VM_MASK) {
   1854         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
   1855             NT_MASK;
   1856     } else {
   1857         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
   1858             RF_MASK | NT_MASK;
   1859     }
   1860     if (shift == 0) {
   1861         eflags_mask &= 0xffff;
   1862     }
   1863     cpu_load_eflags(env, new_eflags, eflags_mask);
   1864     env->hflags2 &= ~HF2_NMI_MASK;
   1865 }
   1866 
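         /*
          * On return to an outer privilege level, data segment registers
          * whose cached DPL is below the new CPL must become unusable.
          * This is modelled by loading a null selector and clearing the
          * cached P bit, so that any later access through the register
          * faults.
          */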
   1867 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
   1868 {
   1869     int dpl;
   1870     uint32_t e2;
   1871 
   1872     /* XXX: on x86_64, we do not want to nullify FS and GS because
   1873        they may still contain a valid base. I would be interested to
   1874        know how a real x86_64 CPU behaves */
   1875     if ((seg_reg == R_FS || seg_reg == R_GS) &&
   1876         (env->segs[seg_reg].selector & 0xfffc) == 0) {
   1877         return;
   1878     }
   1879 
   1880     e2 = env->segs[seg_reg].flags;
   1881     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1882     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
   1883         /* data or non conforming code segment */
   1884         if (dpl < cpl) {
   1885             cpu_x86_load_seg_cache(env, seg_reg, 0,
   1886                                    env->segs[seg_reg].base,
   1887                                    env->segs[seg_reg].limit,
   1888                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
   1889         }
   1890     }
   1891 }
   1892 
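         /*
          * Common worker for RETF and IRET.  Pops, in order: (E/R)IP, CS,
          * then EFLAGS for IRET, and finally (E/R)SP and SS when the
          * privilege level changes (always for 64-bit IRET).  'addend'
          * accounts for the immediate operand of RET imm16.
          */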
   1893 /* protected mode iret */
   1894 static inline void helper_ret_protected(CPUX86State *env, int shift,
   1895                                         int is_iret, int addend,
   1896                                         uintptr_t retaddr)
   1897 {
   1898     uint32_t new_cs, new_eflags, new_ss;
   1899     uint32_t new_es, new_ds, new_fs, new_gs;
   1900     uint32_t e1, e2, ss_e1, ss_e2;
   1901     int cpl, dpl, rpl, eflags_mask, iopl;
   1902     target_ulong ssp, sp, new_eip, new_esp, sp_mask;
   1903 
   1904 #ifdef TARGET_X86_64
   1905     if (shift == 2) {
   1906         sp_mask = -1;
   1907     } else
   1908 #endif
   1909     {
   1910         sp_mask = get_sp_mask(env->segs[R_SS].flags);
   1911     }
   1912     sp = env->regs[R_ESP];
   1913     ssp = env->segs[R_SS].base;
   1914     new_eflags = 0; /* avoid warning */
   1915 #ifdef TARGET_X86_64
   1916     if (shift == 2) {
   1917         POPQ_RA(sp, new_eip, retaddr);
   1918         POPQ_RA(sp, new_cs, retaddr);
   1919         new_cs &= 0xffff;
   1920         if (is_iret) {
   1921             POPQ_RA(sp, new_eflags, retaddr);
   1922         }
   1923     } else
   1924 #endif
   1925     {
   1926         if (shift == 1) {
   1927             /* 32 bits */
   1928             POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
   1929             POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
   1930             new_cs &= 0xffff;
   1931             if (is_iret) {
   1932                 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
   1933                 if (new_eflags & VM_MASK) {
   1934                     goto return_to_vm86;
   1935                 }
   1936             }
   1937         } else {
   1938             /* 16 bits */
   1939             POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
   1940             POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
   1941             if (is_iret) {
   1942                 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
   1943             }
   1944         }
   1945     }
   1946     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
   1947               new_cs, new_eip, shift, addend);
   1948     LOG_PCALL_STATE(env_cpu(env));
   1949     if ((new_cs & 0xfffc) == 0) {
   1950         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
   1951     }
   1952     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
   1953         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
   1954     }
   1955     if (!(e2 & DESC_S_MASK) ||
   1956         !(e2 & DESC_CS_MASK)) {
   1957         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
   1958     }
   1959     cpl = env->hflags & HF_CPL_MASK;
   1960     rpl = new_cs & 3;
   1961     if (rpl < cpl) {
   1962         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
   1963     }
   1964     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   1965     if (e2 & DESC_C_MASK) {
   1966         if (dpl > rpl) {
   1967             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
   1968         }
   1969     } else {
   1970         if (dpl != rpl) {
   1971             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
   1972         }
   1973     }
   1974     if (!(e2 & DESC_P_MASK)) {
   1975         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
   1976     }
   1977 
   1978     sp += addend;
    1979     if (rpl == cpl &&
    1980         (!(env->hflags & HF_CS64_MASK) || !is_iret)) {
   1981         /* return to same privilege level */
   1982         cpu_x86_load_seg_cache(env, R_CS, new_cs,
   1983                        get_seg_base(e1, e2),
   1984                        get_seg_limit(e1, e2),
   1985                        e2);
   1986     } else {
   1987         /* return to different privilege level */
   1988 #ifdef TARGET_X86_64
   1989         if (shift == 2) {
   1990             POPQ_RA(sp, new_esp, retaddr);
   1991             POPQ_RA(sp, new_ss, retaddr);
   1992             new_ss &= 0xffff;
   1993         } else
   1994 #endif
   1995         {
   1996             if (shift == 1) {
   1997                 /* 32 bits */
   1998                 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
   1999                 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
   2000                 new_ss &= 0xffff;
   2001             } else {
   2002                 /* 16 bits */
   2003                 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
   2004                 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
   2005             }
   2006         }
   2007         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
   2008                   new_ss, new_esp);
   2009         if ((new_ss & 0xfffc) == 0) {
   2010 #ifdef TARGET_X86_64
    2011             /* NULL ss is allowed in long mode if the new CPL (= RPL) != 3 */
   2012             /* XXX: test CS64? */
   2013             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
   2014                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
   2015                                        0, 0xffffffff,
   2016                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   2017                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
   2018                                        DESC_W_MASK | DESC_A_MASK);
   2019                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
   2020             } else
   2021 #endif
   2022             {
   2023                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
   2024             }
   2025         } else {
   2026             if ((new_ss & 3) != rpl) {
   2027                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
   2028             }
   2029             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
   2030                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
   2031             }
   2032             if (!(ss_e2 & DESC_S_MASK) ||
   2033                 (ss_e2 & DESC_CS_MASK) ||
   2034                 !(ss_e2 & DESC_W_MASK)) {
   2035                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
   2036             }
   2037             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
   2038             if (dpl != rpl) {
   2039                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
   2040             }
   2041             if (!(ss_e2 & DESC_P_MASK)) {
   2042                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
   2043             }
   2044             cpu_x86_load_seg_cache(env, R_SS, new_ss,
   2045                                    get_seg_base(ss_e1, ss_e2),
   2046                                    get_seg_limit(ss_e1, ss_e2),
   2047                                    ss_e2);
   2048         }
   2049 
   2050         cpu_x86_load_seg_cache(env, R_CS, new_cs,
   2051                        get_seg_base(e1, e2),
   2052                        get_seg_limit(e1, e2),
   2053                        e2);
   2054         sp = new_esp;
   2055 #ifdef TARGET_X86_64
   2056         if (env->hflags & HF_CS64_MASK) {
   2057             sp_mask = -1;
   2058         } else
   2059 #endif
   2060         {
   2061             sp_mask = get_sp_mask(ss_e2);
   2062         }
   2063 
   2064         /* validate data segments */
   2065         validate_seg(env, R_ES, rpl);
   2066         validate_seg(env, R_DS, rpl);
   2067         validate_seg(env, R_FS, rpl);
   2068         validate_seg(env, R_GS, rpl);
   2069 
   2070         sp += addend;
   2071     }
   2072     SET_ESP(sp, sp_mask);
   2073     env->eip = new_eip;
   2074     if (is_iret) {
   2075         /* NOTE: 'cpl' is the _old_ CPL */
   2076         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
   2077         if (cpl == 0) {
   2078             eflags_mask |= IOPL_MASK;
   2079         }
   2080         iopl = (env->eflags >> IOPL_SHIFT) & 3;
   2081         if (cpl <= iopl) {
   2082             eflags_mask |= IF_MASK;
   2083         }
   2084         if (shift == 0) {
   2085             eflags_mask &= 0xffff;
   2086         }
   2087         cpu_load_eflags(env, new_eflags, eflags_mask);
   2088     }
   2089     return;
   2090 
   2091  return_to_vm86:
   2092     POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
   2093     POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
   2094     POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
   2095     POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
   2096     POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
   2097     POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
   2098 
   2099     /* modify processor state */
   2100     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
   2101                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
   2102                     VIP_MASK);
   2103     load_seg_vm(env, R_CS, new_cs & 0xffff);
   2104     load_seg_vm(env, R_SS, new_ss & 0xffff);
   2105     load_seg_vm(env, R_ES, new_es & 0xffff);
   2106     load_seg_vm(env, R_DS, new_ds & 0xffff);
   2107     load_seg_vm(env, R_FS, new_fs & 0xffff);
   2108     load_seg_vm(env, R_GS, new_gs & 0xffff);
   2109 
   2110     env->eip = new_eip & 0xffff;
   2111     env->regs[R_ESP] = new_esp;
   2112 }
   2113 
   2114 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
   2115 {
   2116     int tss_selector, type;
   2117     uint32_t e1, e2;
   2118 
    2119     /* special case: NT set, so IRET returns through the back-linked TSS */
   2120     if (env->eflags & NT_MASK) {
   2121 #ifdef TARGET_X86_64
   2122         if (env->hflags & HF_LMA_MASK) {
   2123             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   2124         }
   2125 #endif
   2126         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
   2127         if (tss_selector & 4) {
   2128             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
   2129         }
   2130         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
   2131             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
   2132         }
   2133         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
    2134         /* NOTE: 0x17 keeps the S bit and folds busy 386 TSS (11) onto busy 286 TSS (3) */
   2135         if (type != 3) {
   2136             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
   2137         }
   2138         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
   2139     } else {
   2140         helper_ret_protected(env, shift, 1, 0, GETPC());
   2141     }
   2142     env->hflags2 &= ~HF2_NMI_MASK;
   2143 }
   2144 
   2145 void helper_lret_protected(CPUX86State *env, int shift, int addend)
   2146 {
   2147     helper_ret_protected(env, shift, 0, addend, GETPC());
   2148 }
   2149 
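         /*
          * SYSENTER synthesizes flat CPL-0 segments from IA32_SYSENTER_CS
          * instead of reading the GDT: CS = sysenter_cs and
          * SS = sysenter_cs + 8; in long mode CS is a 64-bit code segment.
          */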
   2150 void helper_sysenter(CPUX86State *env)
   2151 {
   2152     if (env->sysenter_cs == 0) {
   2153         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   2154     }
   2155     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
   2156 
   2157 #ifdef TARGET_X86_64
   2158     if (env->hflags & HF_LMA_MASK) {
   2159         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
   2160                                0, 0xffffffff,
   2161                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   2162                                DESC_S_MASK |
   2163                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
   2164                                DESC_L_MASK);
   2165     } else
   2166 #endif
   2167     {
   2168         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
   2169                                0, 0xffffffff,
   2170                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   2171                                DESC_S_MASK |
   2172                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
   2173     }
   2174     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
   2175                            0, 0xffffffff,
   2176                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   2177                            DESC_S_MASK |
   2178                            DESC_W_MASK | DESC_A_MASK);
   2179     env->regs[R_ESP] = env->sysenter_esp;
   2180     env->eip = env->sysenter_eip;
   2181 }
   2182 
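         /*
          * SYSEXIT returns to CPL 3 with segments again derived from
          * IA32_SYSENTER_CS: CS = sysenter_cs + 16 and SS = sysenter_cs + 24
          * for a 32-bit return, CS = sysenter_cs + 32 and SS = sysenter_cs
          * + 40 for a 64-bit one.  (E)SP is taken from (E)CX and (E)IP
          * from (E)DX.
          */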
   2183 void helper_sysexit(CPUX86State *env, int dflag)
   2184 {
   2185     int cpl;
   2186 
   2187     cpl = env->hflags & HF_CPL_MASK;
   2188     if (env->sysenter_cs == 0 || cpl != 0) {
   2189         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
   2190     }
   2191 #ifdef TARGET_X86_64
   2192     if (dflag == 2) {
   2193         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
   2194                                3, 0, 0xffffffff,
   2195                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   2196                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   2197                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
   2198                                DESC_L_MASK);
   2199         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
   2200                                3, 0, 0xffffffff,
   2201                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   2202                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   2203                                DESC_W_MASK | DESC_A_MASK);
   2204     } else
   2205 #endif
   2206     {
   2207         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
   2208                                3, 0, 0xffffffff,
   2209                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   2210                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   2211                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
   2212         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
   2213                                3, 0, 0xffffffff,
   2214                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
   2215                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
   2216                                DESC_W_MASK | DESC_A_MASK);
   2217     }
   2218     env->regs[R_ESP] = env->regs[R_ECX];
   2219     env->eip = env->regs[R_EDX];
   2220 }
   2221 
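         /*
          * LSL: on success ZF is set (via CC_SRC) and the expanded 32-bit
          * limit is returned; on failure ZF is cleared and 0 is returned,
          * never an exception.  Of the system types, only TSS and LDT
          * descriptors (1, 2, 3, 9, 11) have a meaningful limit.
          */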
   2222 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
   2223 {
   2224     unsigned int limit;
   2225     uint32_t e1, e2, eflags, selector;
   2226     int rpl, dpl, cpl, type;
   2227 
   2228     selector = selector1 & 0xffff;
   2229     eflags = cpu_cc_compute_all(env, CC_OP);
   2230     if ((selector & 0xfffc) == 0) {
   2231         goto fail;
   2232     }
   2233     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
   2234         goto fail;
   2235     }
   2236     rpl = selector & 3;
   2237     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   2238     cpl = env->hflags & HF_CPL_MASK;
   2239     if (e2 & DESC_S_MASK) {
   2240         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
   2241             /* conforming */
   2242         } else {
   2243             if (dpl < cpl || dpl < rpl) {
   2244                 goto fail;
   2245             }
   2246         }
   2247     } else {
   2248         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
   2249         switch (type) {
   2250         case 1:
   2251         case 2:
   2252         case 3:
   2253         case 9:
   2254         case 11:
   2255             break;
   2256         default:
   2257             goto fail;
   2258         }
   2259         if (dpl < cpl || dpl < rpl) {
   2260         fail:
   2261             CC_SRC = eflags & ~CC_Z;
   2262             return 0;
   2263         }
   2264     }
   2265     limit = get_seg_limit(e1, e2);
   2266     CC_SRC = eflags | CC_Z;
   2267     return limit;
   2268 }
   2269 
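         /*
          * LAR follows the same pattern but returns the access-rights bytes
          * of e2 (masked with 0x00f0ff00) and additionally accepts the gate
          * types 4, 5 and 12, which have rights but no limit.
          */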
   2270 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
   2271 {
   2272     uint32_t e1, e2, eflags, selector;
   2273     int rpl, dpl, cpl, type;
   2274 
   2275     selector = selector1 & 0xffff;
   2276     eflags = cpu_cc_compute_all(env, CC_OP);
   2277     if ((selector & 0xfffc) == 0) {
   2278         goto fail;
   2279     }
   2280     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
   2281         goto fail;
   2282     }
   2283     rpl = selector & 3;
   2284     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   2285     cpl = env->hflags & HF_CPL_MASK;
   2286     if (e2 & DESC_S_MASK) {
   2287         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
   2288             /* conforming */
   2289         } else {
   2290             if (dpl < cpl || dpl < rpl) {
   2291                 goto fail;
   2292             }
   2293         }
   2294     } else {
   2295         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
   2296         switch (type) {
   2297         case 1:
   2298         case 2:
   2299         case 3:
   2300         case 4:
   2301         case 5:
   2302         case 9:
   2303         case 11:
   2304         case 12:
   2305             break;
   2306         default:
   2307             goto fail;
   2308         }
   2309         if (dpl < cpl || dpl < rpl) {
   2310         fail:
   2311             CC_SRC = eflags & ~CC_Z;
   2312             return 0;
   2313         }
   2314     }
   2315     CC_SRC = eflags | CC_Z;
   2316     return e2 & 0x00f0ff00;
   2317 }
   2318 
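         /*
          * VERR/VERW set ZF when the selector is readable (resp. writable)
          * at the current CPL and its RPL, and clear ZF otherwise; like
          * LAR/LSL they never fault on a bad selector.
          */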
   2319 void helper_verr(CPUX86State *env, target_ulong selector1)
   2320 {
   2321     uint32_t e1, e2, eflags, selector;
   2322     int rpl, dpl, cpl;
   2323 
   2324     selector = selector1 & 0xffff;
   2325     eflags = cpu_cc_compute_all(env, CC_OP);
   2326     if ((selector & 0xfffc) == 0) {
   2327         goto fail;
   2328     }
   2329     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
   2330         goto fail;
   2331     }
   2332     if (!(e2 & DESC_S_MASK)) {
   2333         goto fail;
   2334     }
   2335     rpl = selector & 3;
   2336     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   2337     cpl = env->hflags & HF_CPL_MASK;
   2338     if (e2 & DESC_CS_MASK) {
   2339         if (!(e2 & DESC_R_MASK)) {
   2340             goto fail;
   2341         }
   2342         if (!(e2 & DESC_C_MASK)) {
   2343             if (dpl < cpl || dpl < rpl) {
   2344                 goto fail;
   2345             }
   2346         }
   2347     } else {
   2348         if (dpl < cpl || dpl < rpl) {
   2349         fail:
   2350             CC_SRC = eflags & ~CC_Z;
   2351             return;
   2352         }
   2353     }
   2354     CC_SRC = eflags | CC_Z;
   2355 }
   2356 
   2357 void helper_verw(CPUX86State *env, target_ulong selector1)
   2358 {
   2359     uint32_t e1, e2, eflags, selector;
   2360     int rpl, dpl, cpl;
   2361 
   2362     selector = selector1 & 0xffff;
   2363     eflags = cpu_cc_compute_all(env, CC_OP);
   2364     if ((selector & 0xfffc) == 0) {
   2365         goto fail;
   2366     }
   2367     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
   2368         goto fail;
   2369     }
   2370     if (!(e2 & DESC_S_MASK)) {
   2371         goto fail;
   2372     }
   2373     rpl = selector & 3;
   2374     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
   2375     cpl = env->hflags & HF_CPL_MASK;
   2376     if (e2 & DESC_CS_MASK) {
   2377         goto fail;
   2378     } else {
   2379         if (dpl < cpl || dpl < rpl) {
   2380             goto fail;
   2381         }
   2382         if (!(e2 & DESC_W_MASK)) {
   2383         fail:
   2384             CC_SRC = eflags & ~CC_Z;
   2385             return;
   2386         }
   2387     }
   2388     CC_SRC = eflags | CC_Z;
   2389 }