File: [Qemu by Fabrice Bellard] / qemu / linux-user / vm86.c
Revision 1.1.1.5 (vendor branch), Tue Apr 24 17:22:53 2018 UTC, by root
Branches: qemu, MAIN
CVS tags: qemu1000, qemu0151, qemu0150, qemu0141, qemu0140, qemu0130, qemu0125, qemu0124, qemu0123, qemu0122, qemu0121, qemu0120, qemu0111, qemu0110, HEAD
qemu 0.11.0

/*
 *  vm86 linux syscall support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include "qemu.h"

//#define DEBUG_VM86

#ifdef DEBUG_VM86
#  define LOG_VM86(...) do { qemu_log(__VA_ARGS__); } while (0)
#else
#  define LOG_VM86(...) do { } while (0)
#endif


#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
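/* EFLAGS bit layout of the masks above:
 *   SAFE_MASK   (0xDD5) = CF|PF|AF|ZF|SF|TF|DF|OF, the flags a vm86 task
 *                         may modify directly;
 *   RETURN_MASK (0xDFF) = SAFE_MASK plus the reserved low bits 1, 3 and 5.
 * IF (bit 9) is deliberately in neither mask: the guest's interrupt flag
 * is virtualized through VIF in ts->v86flags (see set_IF/clear_IF below). */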

static inline int is_revectored(int nr, struct target_revectored_struct *bitmap)
{
    return (((uint8_t *)bitmap)[nr >> 3] >> (nr & 7)) & 1;
}

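/* The vm_putX/vm_getX helpers below access guest memory with real-mode
 * seg:offset addressing: 'segptr' is a segment selector already shifted
 * left by 4, and the offset wraps inside the 64 KiB segment, which is why
 * 'reg16' is masked with 0xffff. */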
static inline void vm_putw(uint32_t segptr, unsigned int reg16, unsigned int val)
{
    stw(segptr + (reg16 & 0xffff), val);
}

static inline void vm_putl(uint32_t segptr, unsigned int reg16, unsigned int val)
{
    stl(segptr + (reg16 & 0xffff), val);
}

static inline unsigned int vm_getb(uint32_t segptr, unsigned int reg16)
{
    return ldub(segptr + (reg16 & 0xffff));
}

static inline unsigned int vm_getw(uint32_t segptr, unsigned int reg16)
{
    return lduw(segptr + (reg16 & 0xffff));
}

static inline unsigned int vm_getl(uint32_t segptr, unsigned int reg16)
{
    return ldl(segptr + (reg16 & 0xffff));
}

void save_v86_state(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    struct target_vm86plus_struct * target_v86;

    if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
        /* FIXME - should return an error */
        return;
    /* put the VM86 registers in the userspace register structure */
    target_v86->regs.eax = tswap32(env->regs[R_EAX]);
    target_v86->regs.ebx = tswap32(env->regs[R_EBX]);
    target_v86->regs.ecx = tswap32(env->regs[R_ECX]);
    target_v86->regs.edx = tswap32(env->regs[R_EDX]);
    target_v86->regs.esi = tswap32(env->regs[R_ESI]);
    target_v86->regs.edi = tswap32(env->regs[R_EDI]);
    target_v86->regs.ebp = tswap32(env->regs[R_EBP]);
    target_v86->regs.esp = tswap32(env->regs[R_ESP]);
    target_v86->regs.eip = tswap32(env->eip);
    target_v86->regs.cs = tswap16(env->segs[R_CS].selector);
    target_v86->regs.ss = tswap16(env->segs[R_SS].selector);
    target_v86->regs.ds = tswap16(env->segs[R_DS].selector);
    target_v86->regs.es = tswap16(env->segs[R_ES].selector);
    target_v86->regs.fs = tswap16(env->segs[R_FS].selector);
    target_v86->regs.gs = tswap16(env->segs[R_GS].selector);
    set_flags(env->eflags, ts->v86flags, VIF_MASK | ts->v86mask);
    target_v86->regs.eflags = tswap32(env->eflags);
    unlock_user_struct(target_v86, ts->target_v86, 1);
    LOG_VM86("save_v86_state: eflags=%08x cs:ip=%04x:%04x\n",
             env->eflags, env->segs[R_CS].selector, env->eip);

    /* restore 32 bit registers */
    env->regs[R_EAX] = ts->vm86_saved_regs.eax;
    env->regs[R_EBX] = ts->vm86_saved_regs.ebx;
    env->regs[R_ECX] = ts->vm86_saved_regs.ecx;
    env->regs[R_EDX] = ts->vm86_saved_regs.edx;
    env->regs[R_ESI] = ts->vm86_saved_regs.esi;
    env->regs[R_EDI] = ts->vm86_saved_regs.edi;
    env->regs[R_EBP] = ts->vm86_saved_regs.ebp;
    env->regs[R_ESP] = ts->vm86_saved_regs.esp;
    env->eflags = ts->vm86_saved_regs.eflags;
    env->eip = ts->vm86_saved_regs.eip;

    cpu_x86_load_seg(env, R_CS, ts->vm86_saved_regs.cs);
    cpu_x86_load_seg(env, R_SS, ts->vm86_saved_regs.ss);
    cpu_x86_load_seg(env, R_DS, ts->vm86_saved_regs.ds);
    cpu_x86_load_seg(env, R_ES, ts->vm86_saved_regs.es);
    cpu_x86_load_seg(env, R_FS, ts->vm86_saved_regs.fs);
    cpu_x86_load_seg(env, R_GS, ts->vm86_saved_regs.gs);
}

/* return from vm86 mode to 32 bit. The vm86() syscall will return
   'retval' */
static inline void return_to_32bit(CPUX86State *env, int retval)
{
    LOG_VM86("return_to_32bit: ret=0x%x\n", retval);
    save_v86_state(env);
    env->regs[R_EAX] = retval;
}

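/* IF handling: a vm86 guest never toggles the real IF.  VIF in
 * ts->v86flags tracks the guest's virtual interrupt flag; if VIP is set
 * (the monitor has an interrupt pending) when the guest re-enables
 * interrupts, we leave vm86 mode with TARGET_VM86_STI so the pending
 * event can be delivered. */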
static inline int set_IF(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    ts->v86flags |= VIF_MASK;
    if (ts->v86flags & VIP_MASK) {
        return_to_32bit(env, TARGET_VM86_STI);
        return 1;
    }
    return 0;
}

static inline void clear_IF(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    ts->v86flags &= ~VIF_MASK;
}

static inline void clear_TF(CPUX86State *env)
{
    env->eflags &= ~TF_MASK;
}

static inline void clear_AC(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
    TaskState *ts = env->opaque;

    set_flags(ts->v86flags, eflags, ts->v86mask);
    set_flags(env->eflags, eflags, SAFE_MASK);
    if (eflags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}

static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
    TaskState *ts = env->opaque;

    set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
    set_flags(env->eflags, flags, SAFE_MASK);
    if (flags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}

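/* Compose the flags image the guest observes: the real arithmetic flags,
 * the virtual IF in place of the real one, IOPL forced to 3, plus any
 * extra bits ts->v86mask lets through. */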
static inline unsigned int get_vflags(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    unsigned int flags;

    flags = env->eflags & RETURN_MASK;
    if (ts->v86flags & VIF_MASK)
        flags |= IF_MASK;
    flags |= IOPL_MASK;
    return flags | (ts->v86flags & ts->v86mask);
}

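/* add 'val' to the low 16 bits of 'reg' with 16-bit wraparound, leaving
   the high half untouched - real-mode SP/IP arithmetic */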
#define ADD16(reg, val) reg = (reg & ~0xffff) | ((reg + (val)) & 0xffff)

/* handle VM86 interrupt (NOTE: the CPU core currently does not
   support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno)
{
    TaskState *ts = env->opaque;
    uint32_t int_addr, segoffs, ssp;
    unsigned int sp;

    if (env->segs[R_CS].selector == TARGET_BIOSSEG)
        goto cannot_handle;
    if (is_revectored(intno, &ts->vm86plus.int_revectored))
        goto cannot_handle;
    if (intno == 0x21 && is_revectored((env->regs[R_EAX] >> 8) & 0xff,
                                       &ts->vm86plus.int21_revectored))
        goto cannot_handle;
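    /* fetch the handler's CS:IP from the real-mode interrupt vector
       table at linear address intno * 4 */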
    int_addr = (intno << 2);
    segoffs = ldl(int_addr);
    if ((segoffs >> 16) == TARGET_BIOSSEG)
        goto cannot_handle;
    LOG_VM86("VM86: emulating int 0x%x. CS:IP=%04x:%04x\n",
             intno, segoffs >> 16, segoffs & 0xffff);
    /* save old state */
    ssp = env->segs[R_SS].selector << 4;
    sp = env->regs[R_ESP] & 0xffff;
    vm_putw(ssp, sp - 2, get_vflags(env));
    vm_putw(ssp, sp - 4, env->segs[R_CS].selector);
    vm_putw(ssp, sp - 6, env->eip);
    ADD16(env->regs[R_ESP], -6);
    /* goto interrupt handler */
    env->eip = segoffs & 0xffff;
    cpu_x86_load_seg(env, R_CS, segoffs >> 16);
    clear_TF(env);
    clear_IF(env);
    clear_AC(env);
    return;
 cannot_handle:
    LOG_VM86("VM86: return to 32 bits int 0x%x\n", intno);
    return_to_32bit(env, TARGET_VM86_INTx | (intno << 8));
}

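/* Debug (#DB, vector 1) and breakpoint (#BP, vector 3) traps are
 * reflected to the 32-bit caller; every other trap is dispatched
 * through the guest's interrupt vector table. */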
void handle_vm86_trap(CPUX86State *env, int trapno)
{
    if (trapno == 1 || trapno == 3) {
        return_to_32bit(env, TARGET_VM86_TRAP + (trapno << 8));
    } else {
        do_int(env, trapno);
    }
}

/* if a debugger is attached and a single-step is pending, force TF into
   the flags image the guest is about to load ('TFpendig' matches the
   Linux header's own spelling of the flag) */
#define CHECK_IF_IN_TRAP() \
      if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \
          (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \
          newflags |= TF_MASK

/* after emulating a faulting instruction: if the caller asked to be
   woken for PIC interrupts and the guest has interrupts enabled, leave
   vm86 mode with TARGET_VM86_PICRETURN, otherwise resume the guest */
#define VM86_FAULT_RETURN \
        if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \
            (ts->v86flags & (IF_MASK | VIF_MASK))) \
            return_to_32bit(env, TARGET_VM86_PICRETURN); \
        return

void handle_vm86_fault(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    uint32_t csp, ssp;
    unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
    int data32, pref_done;

    csp = env->segs[R_CS].selector << 4;
    ip = env->eip & 0xffff;

    ssp = env->segs[R_SS].selector << 4;
    sp = env->regs[R_ESP] & 0xffff;

    LOG_VM86("VM86 exception %04x:%08x\n",
             env->segs[R_CS].selector, env->eip);

    data32 = 0;
    pref_done = 0;
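    /* consume any instruction prefixes so we can see the real opcode;
       only the operand-size prefix (0x66) changes how we emulate */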
    do {
        opcode = vm_getb(csp, ip);
        ADD16(ip, 1);
        switch (opcode) {
        case 0x66:      /* 32-bit data */     data32=1; break;
        case 0x67:      /* 32-bit address */  break;
        case 0x2e:      /* CS */              break;
        case 0x3e:      /* DS */              break;
        case 0x26:      /* ES */              break;
        case 0x36:      /* SS */              break;
        case 0x65:      /* GS */              break;
        case 0x64:      /* FS */              break;
        case 0xf2:      /* repnz */           break;
        case 0xf3:      /* rep */             break;
        default: pref_done = 1;
        }
    } while (!pref_done);

    /* emulate the trapped IOPL-sensitive instruction (pushf/popf/int n/
       iret/cli/sti); anything else is reported back as an unhandled GPF */
    switch (opcode) {
    case 0x9c: /* pushf */
        if (data32) {
            vm_putl(ssp, sp - 4, get_vflags(env));
            ADD16(env->regs[R_ESP], -4);
        } else {
            vm_putw(ssp, sp - 2, get_vflags(env));
            ADD16(env->regs[R_ESP], -2);
        }
        env->eip = ip;
        VM86_FAULT_RETURN;

    case 0x9d: /* popf */
        if (data32) {
            newflags = vm_getl(ssp, sp);
            ADD16(env->regs[R_ESP], 4);
        } else {
            newflags = vm_getw(ssp, sp);
            ADD16(env->regs[R_ESP], 2);
        }
        env->eip = ip;
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xcd: /* int */
        intno = vm_getb(csp, ip);
        ADD16(ip, 1);
        env->eip = ip;
        if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) {
            if ((ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >>
                 (intno & 7)) & 1) {
                return_to_32bit(env, TARGET_VM86_INTx + (intno << 8));
                return;
            }
        }
        do_int(env, intno);
        break;

    case 0xcf: /* iret */
        if (data32) {
            newip = vm_getl(ssp, sp) & 0xffff;
            newcs = vm_getl(ssp, sp + 4) & 0xffff;
            newflags = vm_getl(ssp, sp + 8);
            ADD16(env->regs[R_ESP], 12);
        } else {
            newip = vm_getw(ssp, sp);
            newcs = vm_getw(ssp, sp + 2);
            newflags = vm_getw(ssp, sp + 4);
            ADD16(env->regs[R_ESP], 6);
        }
        env->eip = newip;
        cpu_x86_load_seg(env, R_CS, newcs);
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xfa: /* cli */
        env->eip = ip;
        clear_IF(env);
        VM86_FAULT_RETURN;

    case 0xfb: /* sti */
        env->eip = ip;
        if (set_IF(env))
            return;
        VM86_FAULT_RETURN;

    default:
        /* real VM86 GPF exception */
        return_to_32bit(env, TARGET_VM86_UNKNOWN);
        break;
    }
}

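/* Entry point for the emulated vm86() syscall: save the caller's 32-bit
 * register state in the TaskState, load the real-mode register image the
 * caller supplied, and return with the virtual CPU set up (VM_MASK set)
 * to execute vm86 code. */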
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
    TaskState *ts = env->opaque;
    struct target_vm86plus_struct * target_v86;
    int ret;

    switch (subfunction) {
    case TARGET_VM86_REQUEST_IRQ:
    case TARGET_VM86_FREE_IRQ:
    case TARGET_VM86_GET_IRQ_BITS:
    case TARGET_VM86_GET_AND_RESET_IRQ:
        gemu_log("qemu: unsupported vm86 subfunction (%ld)\n", subfunction);
        ret = -TARGET_EINVAL;
        goto out;
    case TARGET_VM86_PLUS_INSTALL_CHECK:
        /* NOTE: on old vm86 stuff this will return the error
           from verify_area(), because the subfunction is
           interpreted as (invalid) address to vm86_struct.
           So the installation check works. */
        ret = 0;
        goto out;
    }

    /* save current CPU regs */
    ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
    ts->vm86_saved_regs.ebx = env->regs[R_EBX];
    ts->vm86_saved_regs.ecx = env->regs[R_ECX];
    ts->vm86_saved_regs.edx = env->regs[R_EDX];
    ts->vm86_saved_regs.esi = env->regs[R_ESI];
    ts->vm86_saved_regs.edi = env->regs[R_EDI];
    ts->vm86_saved_regs.ebp = env->regs[R_EBP];
    ts->vm86_saved_regs.esp = env->regs[R_ESP];
    ts->vm86_saved_regs.eflags = env->eflags;
    ts->vm86_saved_regs.eip  = env->eip;
    ts->vm86_saved_regs.cs = env->segs[R_CS].selector;
    ts->vm86_saved_regs.ss = env->segs[R_SS].selector;
    ts->vm86_saved_regs.ds = env->segs[R_DS].selector;
    ts->vm86_saved_regs.es = env->segs[R_ES].selector;
    ts->vm86_saved_regs.fs = env->segs[R_FS].selector;
    ts->vm86_saved_regs.gs = env->segs[R_GS].selector;

    ts->target_v86 = vm86_addr;
    if (!lock_user_struct(VERIFY_READ, target_v86, vm86_addr, 1))
        return -TARGET_EFAULT;
    /* build vm86 CPU state */
    ts->v86flags = tswap32(target_v86->regs.eflags);
    env->eflags = (env->eflags & ~SAFE_MASK) |
        (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;

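    /* the emulated CPU generation selects which extra eflags bits the
       guest may see and toggle beyond SAFE_MASK: NT and IOPL from the
       386 on, AC on the 486, ID (the CPUID-detection bit) on later CPUs */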
    ts->vm86plus.cpu_type = tswapl(target_v86->cpu_type);
    switch (ts->vm86plus.cpu_type) {
    case TARGET_CPU_286:
        ts->v86mask = 0;
        break;
    case TARGET_CPU_386:
        ts->v86mask = NT_MASK | IOPL_MASK;
        break;
    case TARGET_CPU_486:
        ts->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
        break;
    default:
        ts->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
        break;
    }

    env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
    env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
    env->regs[R_EDX] = tswap32(target_v86->regs.edx);
    env->regs[R_ESI] = tswap32(target_v86->regs.esi);
    env->regs[R_EDI] = tswap32(target_v86->regs.edi);
    env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
    env->regs[R_ESP] = tswap32(target_v86->regs.esp);
    env->eip = tswap32(target_v86->regs.eip);
    cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
    cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
    cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
    cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
    cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
    cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
    ret = tswap32(target_v86->regs.eax); /* eax will be restored at
                                            the end of the syscall */
    memcpy(&ts->vm86plus.int_revectored,
           &target_v86->int_revectored, 32);
    memcpy(&ts->vm86plus.int21_revectored,
           &target_v86->int21_revectored, 32);
    ts->vm86plus.vm86plus.flags = tswapl(target_v86->vm86plus.flags);
    memcpy(&ts->vm86plus.vm86plus.vm86dbg_intxxtab,
           target_v86->vm86plus.vm86dbg_intxxtab, 32);
    unlock_user_struct(target_v86, vm86_addr, 0);

    LOG_VM86("do_vm86: cs:ip=%04x:%04x\n",
             env->segs[R_CS].selector, env->eip);
    /* now the virtual CPU is ready for vm86 execution ! */
 out:
    return ret;
}
