qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

vm86.c (16233B)


      1 /*
      2  *  vm86 linux syscall support
      3  *
      4  *  Copyright (c) 2003 Fabrice Bellard
      5  *
      6  *  This program is free software; you can redistribute it and/or modify
      7  *  it under the terms of the GNU General Public License as published by
      8  *  the Free Software Foundation; either version 2 of the License, or
      9  *  (at your option) any later version.
     10  *
     11  *  This program is distributed in the hope that it will be useful,
     12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
     13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     14  *  GNU General Public License for more details.
     15  *
     16  *  You should have received a copy of the GNU General Public License
     17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
     18  */
     19 #include "qemu/osdep.h"
     20 
     21 #include "qemu.h"
     22 #include "user-internals.h"
     23 
     24 //#define DEBUG_VM86
     25 
     26 #ifdef DEBUG_VM86
     27 #  define LOG_VM86(...) do { qemu_log(__VA_ARGS__); } while (0)
     28 #else
     29 #  define LOG_VM86(...) do { } while (0)
     30 #endif
     31 
     32 
     33 #define set_flags(X,new,mask) \
     34 ((X) = ((X) & ~(mask)) | ((new) & (mask)))
     35 
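        /* SAFE_MASK: arithmetic/control eflags bits (CF, PF, AF, ZF, SF, TF,
           DF, OF) that vm86 code may write straight into the real eflags.
           RETURN_MASK: the bits copied back from the real eflags when building
           the flags image for vm86 code.  Neither mask includes IF, IOPL or
           NT, which stay under control of the emulation. */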
     36 #define SAFE_MASK	(0xDD5)
     37 #define RETURN_MASK	(0xDFF)
     38 
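        /* test bit 'nr' of a 256-bit interrupt revectoring bitmap (one bit
           per interrupt vector) */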
     39 static inline int is_revectored(int nr, struct target_revectored_struct *bitmap)
     40 {
     41     return (((uint8_t *)bitmap)[nr >> 3] >> (nr & 7)) & 1;
     42 }
     43 
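        /* The helpers below access guest memory with real-mode segment:offset
           addressing: 'segptr' is the segment base (selector << 4) and the
           16-bit offset wraps within the 64 KiB segment. */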
     44 static inline void vm_putw(CPUX86State *env, uint32_t segptr,
     45                            unsigned int reg16, unsigned int val)
     46 {
     47     cpu_stw_data(env, segptr + (reg16 & 0xffff), val);
     48 }
     49 
     50 static inline void vm_putl(CPUX86State *env, uint32_t segptr,
     51                            unsigned int reg16, unsigned int val)
     52 {
     53     cpu_stl_data(env, segptr + (reg16 & 0xffff), val);
     54 }
     55 
     56 static inline unsigned int vm_getb(CPUX86State *env,
     57                                    uint32_t segptr, unsigned int reg16)
     58 {
     59     return cpu_ldub_data(env, segptr + (reg16 & 0xffff));
     60 }
     61 
     62 static inline unsigned int vm_getw(CPUX86State *env,
     63                                    uint32_t segptr, unsigned int reg16)
     64 {
     65     return cpu_lduw_data(env, segptr + (reg16 & 0xffff));
     66 }
     67 
     68 static inline unsigned int vm_getl(CPUX86State *env,
     69                                    uint32_t segptr, unsigned int reg16)
     70 {
     71     return cpu_ldl_data(env, segptr + (reg16 & 0xffff));
     72 }
     73 
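        /* Copy the vm86 register state back into the user-supplied
           target_vm86plus_struct and restore the 32-bit register state that
           was saved when do_vm86() entered vm86 mode. */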
     74 void save_v86_state(CPUX86State *env)
     75 {
     76     CPUState *cs = env_cpu(env);
     77     TaskState *ts = cs->opaque;
     78     struct target_vm86plus_struct * target_v86;
     79 
     80     if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
     81         /* FIXME - should return an error */
     82         return;
     83     /* put the VM86 registers in the userspace register structure */
     84     target_v86->regs.eax = tswap32(env->regs[R_EAX]);
     85     target_v86->regs.ebx = tswap32(env->regs[R_EBX]);
     86     target_v86->regs.ecx = tswap32(env->regs[R_ECX]);
     87     target_v86->regs.edx = tswap32(env->regs[R_EDX]);
     88     target_v86->regs.esi = tswap32(env->regs[R_ESI]);
     89     target_v86->regs.edi = tswap32(env->regs[R_EDI]);
     90     target_v86->regs.ebp = tswap32(env->regs[R_EBP]);
     91     target_v86->regs.esp = tswap32(env->regs[R_ESP]);
     92     target_v86->regs.eip = tswap32(env->eip);
     93     target_v86->regs.cs = tswap16(env->segs[R_CS].selector);
     94     target_v86->regs.ss = tswap16(env->segs[R_SS].selector);
     95     target_v86->regs.ds = tswap16(env->segs[R_DS].selector);
     96     target_v86->regs.es = tswap16(env->segs[R_ES].selector);
     97     target_v86->regs.fs = tswap16(env->segs[R_FS].selector);
     98     target_v86->regs.gs = tswap16(env->segs[R_GS].selector);
     99     set_flags(env->eflags, ts->v86flags, VIF_MASK | ts->v86mask);
    100     target_v86->regs.eflags = tswap32(env->eflags);
    101     unlock_user_struct(target_v86, ts->target_v86, 1);
    102     LOG_VM86("save_v86_state: eflags=%08x cs:ip=%04x:%04x\n",
    103              env->eflags, env->segs[R_CS].selector, env->eip);
    104 
    105     /* restore 32 bit registers */
    106     env->regs[R_EAX] = ts->vm86_saved_regs.eax;
    107     env->regs[R_EBX] = ts->vm86_saved_regs.ebx;
    108     env->regs[R_ECX] = ts->vm86_saved_regs.ecx;
    109     env->regs[R_EDX] = ts->vm86_saved_regs.edx;
    110     env->regs[R_ESI] = ts->vm86_saved_regs.esi;
    111     env->regs[R_EDI] = ts->vm86_saved_regs.edi;
    112     env->regs[R_EBP] = ts->vm86_saved_regs.ebp;
    113     env->regs[R_ESP] = ts->vm86_saved_regs.esp;
    114     env->eflags = ts->vm86_saved_regs.eflags;
    115     env->eip = ts->vm86_saved_regs.eip;
    116 
    117     cpu_x86_load_seg(env, R_CS, ts->vm86_saved_regs.cs);
    118     cpu_x86_load_seg(env, R_SS, ts->vm86_saved_regs.ss);
    119     cpu_x86_load_seg(env, R_DS, ts->vm86_saved_regs.ds);
    120     cpu_x86_load_seg(env, R_ES, ts->vm86_saved_regs.es);
    121     cpu_x86_load_seg(env, R_FS, ts->vm86_saved_regs.fs);
    122     cpu_x86_load_seg(env, R_GS, ts->vm86_saved_regs.gs);
    123 }
    124 
    125 /* return from vm86 mode to 32-bit mode. The vm86() syscall will
    126    return 'retval' */
    127 static inline void return_to_32bit(CPUX86State *env, int retval)
    128 {
    129     LOG_VM86("return_to_32bit: ret=0x%x\n", retval);
    130     save_v86_state(env);
    131     env->regs[R_EAX] = retval;
    132 }
    133 
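        /* IF is virtualized: set_IF()/clear_IF() only toggle VIF in the
           task's v86flags.  If an interrupt is pending (VIP set) when the
           guest enables interrupts, exit to 32-bit code with TARGET_VM86_STI. */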
    134 static inline int set_IF(CPUX86State *env)
    135 {
    136     CPUState *cs = env_cpu(env);
    137     TaskState *ts = cs->opaque;
    138 
    139     ts->v86flags |= VIF_MASK;
    140     if (ts->v86flags & VIP_MASK) {
    141         return_to_32bit(env, TARGET_VM86_STI);
    142         return 1;
    143     }
    144     return 0;
    145 }
    146 
    147 static inline void clear_IF(CPUX86State *env)
    148 {
    149     CPUState *cs = env_cpu(env);
    150     TaskState *ts = cs->opaque;
    151 
    152     ts->v86flags &= ~VIF_MASK;
    153 }
    154 
    155 static inline void clear_TF(CPUX86State *env)
    156 {
    157     env->eflags &= ~TF_MASK;
    158 }
    159 
    160 static inline void clear_AC(CPUX86State *env)
    161 {
    162     env->eflags &= ~AC_MASK;
    163 }
    164 
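        /* Install a flags image written by vm86 code: update the virtual
           flags, copy the SAFE_MASK bits into the real eflags and emulate IF
           via set_IF()/clear_IF().  A non-zero return means set_IF() already
           bounced back to 32-bit code. */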
    165 static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
    166 {
    167     CPUState *cs = env_cpu(env);
    168     TaskState *ts = cs->opaque;
    169 
    170     set_flags(ts->v86flags, eflags, ts->v86mask);
    171     set_flags(env->eflags, eflags, SAFE_MASK);
    172     if (eflags & IF_MASK)
    173         return set_IF(env);
    174     else
    175         clear_IF(env);
    176     return 0;
    177 }
    178 
    179 static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
    180 {
    181     CPUState *cs = env_cpu(env);
    182     TaskState *ts = cs->opaque;
    183 
    184     set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
    185     set_flags(env->eflags, flags, SAFE_MASK);
    186     if (flags & IF_MASK)
    187         return set_IF(env);
    188     else
    189         clear_IF(env);
    190     return 0;
    191 }
    192 
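        /* Build the flags image seen by vm86 code: the real arithmetic flags,
           IF taken from the virtual VIF, IOPL forced to 3, plus whatever
           v86mask bits the task keeps in v86flags. */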
    193 static inline unsigned int get_vflags(CPUX86State *env)
    194 {
    195     CPUState *cs = env_cpu(env);
    196     TaskState *ts = cs->opaque;
    197     unsigned int flags;
    198 
    199     flags = env->eflags & RETURN_MASK;
    200     if (ts->v86flags & VIF_MASK)
    201         flags |= IF_MASK;
    202     flags |= IOPL_MASK;
    203     return flags | (ts->v86flags & ts->v86mask);
    204 }
    205 
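        /* add 'val' to the low 16 bits of 'reg', leaving the upper half
           untouched (real-mode style SP/IP arithmetic) */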
    206 #define ADD16(reg, val) reg = (reg & ~0xffff) | ((reg + (val)) & 0xffff)
    207 
    208 /* handle VM86 interrupt (NOTE: the CPU core currently does not
    209    support TSS interrupt revectoring, so this code is always executed) */
    210 static void do_int(CPUX86State *env, int intno)
    211 {
    212     CPUState *cs = env_cpu(env);
    213     TaskState *ts = cs->opaque;
    214     uint32_t int_addr, segoffs, ssp;
    215     unsigned int sp;
    216 
    217     if (env->segs[R_CS].selector == TARGET_BIOSSEG)
    218         goto cannot_handle;
    219     if (is_revectored(intno, &ts->vm86plus.int_revectored))
    220         goto cannot_handle;
    221     if (intno == 0x21 && is_revectored((env->regs[R_EAX] >> 8) & 0xff,
    222                                        &ts->vm86plus.int21_revectored))
    223         goto cannot_handle;
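            /* fetch the real-mode IVT entry for this vector: 4 bytes at
               linear address intno * 4, low word = offset, high word = segment */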
    224     int_addr = (intno << 2);
    225     segoffs = cpu_ldl_data(env, int_addr);
    226     if ((segoffs >> 16) == TARGET_BIOSSEG)
    227         goto cannot_handle;
    228     LOG_VM86("VM86: emulating int 0x%x. CS:IP=%04x:%04x\n",
    229              intno, segoffs >> 16, segoffs & 0xffff);
    230     /* save old state */
    231     ssp = env->segs[R_SS].selector << 4;
    232     sp = env->regs[R_ESP] & 0xffff;
    233     vm_putw(env, ssp, sp - 2, get_vflags(env));
    234     vm_putw(env, ssp, sp - 4, env->segs[R_CS].selector);
    235     vm_putw(env, ssp, sp - 6, env->eip);
    236     ADD16(env->regs[R_ESP], -6);
    237     /* goto interrupt handler */
    238     env->eip = segoffs & 0xffff;
    239     cpu_x86_load_seg(env, R_CS, segoffs >> 16);
    240     clear_TF(env);
    241     clear_IF(env);
    242     clear_AC(env);
    243     return;
    244  cannot_handle:
    245     LOG_VM86("VM86: return to 32 bits int 0x%x\n", intno);
    246     return_to_32bit(env, TARGET_VM86_INTx | (intno << 8));
    247 }
    248 
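        /* Debug traps (#DB, #BP) are reported back to the vm86 caller;
           any other trap is dispatched like a software interrupt via do_int(). */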
    249 void handle_vm86_trap(CPUX86State *env, int trapno)
    250 {
    251     if (trapno == 1 || trapno == 3) {
    252         return_to_32bit(env, TARGET_VM86_TRAP + (trapno << 8));
    253     } else {
    254         do_int(env, trapno);
    255     }
    256 }
    257 
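        /* CHECK_IF_IN_TRAP: if the vm86plus debugger interface is active with
           a pending TF, force the trap flag into the flags image being
           installed.  VM86_FAULT_RETURN: if the caller requested PIC returns
           and virtual interrupts are enabled, leave vm86 mode with
           TARGET_VM86_PICRETURN instead of resuming the guest. */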
    258 #define CHECK_IF_IN_TRAP() \
    259       if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \
    260           (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \
    261                 newflags |= TF_MASK
    262 
    263 #define VM86_FAULT_RETURN \
    264         if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \
    265             (ts->v86flags & (IF_MASK | VIF_MASK))) \
    266             return_to_32bit(env, TARGET_VM86_PICRETURN); \
    267         return
    268 
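        /* Called for a general protection fault taken while running in vm86
           mode: decode the faulting instruction at CS:IP and emulate the
           IOPL-sensitive ones (pushf, popf, int, iret, cli, sti). */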
    269 void handle_vm86_fault(CPUX86State *env)
    270 {
    271     CPUState *cs = env_cpu(env);
    272     TaskState *ts = cs->opaque;
    273     uint32_t csp, ssp;
    274     unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
    275     int data32, pref_done;
    276 
    277     csp = env->segs[R_CS].selector << 4;
    278     ip = env->eip & 0xffff;
    279 
    280     ssp = env->segs[R_SS].selector << 4;
    281     sp = env->regs[R_ESP] & 0xffff;
    282 
    283     LOG_VM86("VM86 exception %04x:%08x\n",
    284              env->segs[R_CS].selector, env->eip);
    285 
    286     data32 = 0;
    287     pref_done = 0;
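            /* skip instruction prefixes, remembering a 0x66 operand-size
               override so the 32-bit pushf/popf/iret forms are handled */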
    288     do {
    289         opcode = vm_getb(env, csp, ip);
    290         ADD16(ip, 1);
    291         switch (opcode) {
    292         case 0x66:      /* 32-bit data */     data32=1; break;
    293         case 0x67:      /* 32-bit address */  break;
    294         case 0x2e:      /* CS */              break;
    295         case 0x3e:      /* DS */              break;
    296         case 0x26:      /* ES */              break;
    297         case 0x36:      /* SS */              break;
    298         case 0x65:      /* GS */              break;
    299         case 0x64:      /* FS */              break;
    300         case 0xf2:      /* repnz */           break;
    301         case 0xf3:      /* rep */             break;
    302         default: pref_done = 1;
    303         }
    304     } while (!pref_done);
    305 
    306     /* emulate the IOPL-sensitive instructions that trap in vm86 mode */
    307     switch(opcode) {
    308     case 0x9c: /* pushf */
    309         if (data32) {
    310             vm_putl(env, ssp, sp - 4, get_vflags(env));
    311             ADD16(env->regs[R_ESP], -4);
    312         } else {
    313             vm_putw(env, ssp, sp - 2, get_vflags(env));
    314             ADD16(env->regs[R_ESP], -2);
    315         }
    316         env->eip = ip;
    317         VM86_FAULT_RETURN;
    318 
    319     case 0x9d: /* popf */
    320         if (data32) {
    321             newflags = vm_getl(env, ssp, sp);
    322             ADD16(env->regs[R_ESP], 4);
    323         } else {
    324             newflags = vm_getw(env, ssp, sp);
    325             ADD16(env->regs[R_ESP], 2);
    326         }
    327         env->eip = ip;
    328         CHECK_IF_IN_TRAP();
    329         if (data32) {
    330             if (set_vflags_long(newflags, env))
    331                 return;
    332         } else {
    333             if (set_vflags_short(newflags, env))
    334                 return;
    335         }
    336         VM86_FAULT_RETURN;
    337 
    338     case 0xcd: /* int */
    339         intno = vm_getb(env, csp, ip);
    340         ADD16(ip, 1);
    341         env->eip = ip;
    342         if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) {
    343             if ( (ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >>
    344                   (intno &7)) & 1) {
    345                 return_to_32bit(env, TARGET_VM86_INTx + (intno << 8));
    346                 return;
    347             }
    348         }
    349         do_int(env, intno);
    350         break;
    351 
    352     case 0xcf: /* iret */
    353         if (data32) {
    354             newip = vm_getl(env, ssp, sp) & 0xffff;
    355             newcs = vm_getl(env, ssp, sp + 4) & 0xffff;
    356             newflags = vm_getl(env, ssp, sp + 8);
    357             ADD16(env->regs[R_ESP], 12);
    358         } else {
    359             newip = vm_getw(env, ssp, sp);
    360             newcs = vm_getw(env, ssp, sp + 2);
    361             newflags = vm_getw(env, ssp, sp + 4);
    362             ADD16(env->regs[R_ESP], 6);
    363         }
    364         env->eip = newip;
    365         cpu_x86_load_seg(env, R_CS, newcs);
    366         CHECK_IF_IN_TRAP();
    367         if (data32) {
    368             if (set_vflags_long(newflags, env))
    369                 return;
    370         } else {
    371             if (set_vflags_short(newflags, env))
    372                 return;
    373         }
    374         VM86_FAULT_RETURN;
    375 
    376     case 0xfa: /* cli */
    377         env->eip = ip;
    378         clear_IF(env);
    379         VM86_FAULT_RETURN;
    380 
    381     case 0xfb: /* sti */
    382         env->eip = ip;
    383         if (set_IF(env))
    384             return;
    385         VM86_FAULT_RETURN;
    386 
    387     default:
    388         /* real VM86 GPF exception */
    389         return_to_32bit(env, TARGET_VM86_UNKNOWN);
    390         break;
    391     }
    392 }
    393 
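        /* Entry point for the vm86 syscall: handle the vm86plus subfunctions,
           then save the 32-bit register state and load the vm86 register image
           from the guest's target_vm86plus_struct before returning to the
           guest, which then runs in vm86 mode. */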
    394 int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
    395 {
    396     CPUState *cs = env_cpu(env);
    397     TaskState *ts = cs->opaque;
    398     struct target_vm86plus_struct * target_v86;
    399     int ret;
    400 
    401     switch (subfunction) {
    402     case TARGET_VM86_REQUEST_IRQ:
    403     case TARGET_VM86_FREE_IRQ:
    404     case TARGET_VM86_GET_IRQ_BITS:
    405     case TARGET_VM86_GET_AND_RESET_IRQ:
    406         qemu_log_mask(LOG_UNIMP, "qemu: unsupported vm86 subfunction (%ld)\n",
    407                       subfunction);
    408         ret = -TARGET_EINVAL;
    409         goto out;
    410     case TARGET_VM86_PLUS_INSTALL_CHECK:
    411         /* NOTE: with the old vm86 interface this returns the error
    412            from verify_area(), because the subfunction value is
    413            interpreted as an (invalid) address of a vm86_struct.
    414            So the installation check works.
    415          */
    416         ret = 0;
    417         goto out;
    418     }
    419 
    420     /* save current CPU regs */
    421     ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
    422     ts->vm86_saved_regs.ebx = env->regs[R_EBX];
    423     ts->vm86_saved_regs.ecx = env->regs[R_ECX];
    424     ts->vm86_saved_regs.edx = env->regs[R_EDX];
    425     ts->vm86_saved_regs.esi = env->regs[R_ESI];
    426     ts->vm86_saved_regs.edi = env->regs[R_EDI];
    427     ts->vm86_saved_regs.ebp = env->regs[R_EBP];
    428     ts->vm86_saved_regs.esp = env->regs[R_ESP];
    429     ts->vm86_saved_regs.eflags = env->eflags;
    430     ts->vm86_saved_regs.eip  = env->eip;
    431     ts->vm86_saved_regs.cs = env->segs[R_CS].selector;
    432     ts->vm86_saved_regs.ss = env->segs[R_SS].selector;
    433     ts->vm86_saved_regs.ds = env->segs[R_DS].selector;
    434     ts->vm86_saved_regs.es = env->segs[R_ES].selector;
    435     ts->vm86_saved_regs.fs = env->segs[R_FS].selector;
    436     ts->vm86_saved_regs.gs = env->segs[R_GS].selector;
    437 
    438     ts->target_v86 = vm86_addr;
    439     if (!lock_user_struct(VERIFY_READ, target_v86, vm86_addr, 1))
    440         return -TARGET_EFAULT;
    441     /* build vm86 CPU state */
    442     ts->v86flags = tswap32(target_v86->regs.eflags);
    443     env->eflags = (env->eflags & ~SAFE_MASK) |
    444         (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;
    445 
    446     ts->vm86plus.cpu_type = tswapal(target_v86->cpu_type);
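            /* the requested CPU type decides which extra eflags bits vm86 code
               may see and modify: IOPL/NT from the 386 on, AC from the 486 on,
               ID on later CPUs */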
    447     switch (ts->vm86plus.cpu_type) {
    448     case TARGET_CPU_286:
    449         ts->v86mask = 0;
    450         break;
    451     case TARGET_CPU_386:
    452         ts->v86mask = NT_MASK | IOPL_MASK;
    453         break;
    454     case TARGET_CPU_486:
    455         ts->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
    456         break;
    457     default:
    458         ts->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
    459         break;
    460     }
    461 
    462     env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
    463     env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
    464     env->regs[R_EDX] = tswap32(target_v86->regs.edx);
    465     env->regs[R_ESI] = tswap32(target_v86->regs.esi);
    466     env->regs[R_EDI] = tswap32(target_v86->regs.edi);
    467     env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
    468     env->regs[R_ESP] = tswap32(target_v86->regs.esp);
    469     env->eip = tswap32(target_v86->regs.eip);
    470     cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
    471     cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
    472     cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
    473     cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
    474     cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
    475     cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
    476     ret = tswap32(target_v86->regs.eax); /* eax will be restored at
    477                                             the end of the syscall */
    478     memcpy(&ts->vm86plus.int_revectored,
    479            &target_v86->int_revectored, 32);
    480     memcpy(&ts->vm86plus.int21_revectored,
    481            &target_v86->int21_revectored, 32);
    482     ts->vm86plus.vm86plus.flags = tswapal(target_v86->vm86plus.flags);
    483     memcpy(&ts->vm86plus.vm86plus.vm86dbg_intxxtab,
    484            target_v86->vm86plus.vm86dbg_intxxtab, 32);
    485     unlock_user_struct(target_v86, vm86_addr, 0);
    486 
    487     LOG_VM86("do_vm86: cs:ip=%04x:%04x\n",
    488              env->segs[R_CS].selector, env->eip);
    489     /* now the virtual CPU is ready for vm86 execution! */
    490  out:
    491     return ret;
    492 }