File:  [Qemu by Fabrice Bellard] / qemu / cpu-exec.c
Revision 1.1.1.4 (vendor branch): Tue Apr 24 16:42:36 2018 UTC, by root
Branches: qemu, MAIN
CVS tags: qemu0082, HEAD
qemu 0.8.2

    1: /*
    2:  *  i386 emulator main execution loop
    3:  * 
    4:  *  Copyright (c) 2003-2005 Fabrice Bellard
    5:  *
    6:  * This library is free software; you can redistribute it and/or
    7:  * modify it under the terms of the GNU Lesser General Public
    8:  * License as published by the Free Software Foundation; either
    9:  * version 2 of the License, or (at your option) any later version.
   10:  *
   11:  * This library is distributed in the hope that it will be useful,
   12:  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   13:  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   14:  * Lesser General Public License for more details.
   15:  *
   16:  * You should have received a copy of the GNU Lesser General Public
   17:  * License along with this library; if not, write to the Free Software
   18:  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   19:  */
   20: #include "config.h"
   21: #include "exec.h"
   22: #include "disas.h"
   23: 
   24: #if !defined(CONFIG_SOFTMMU)
   25: #undef EAX
   26: #undef ECX
   27: #undef EDX
   28: #undef EBX
   29: #undef ESP
   30: #undef EBP
   31: #undef ESI
   32: #undef EDI
   33: #undef EIP
   34: #include <signal.h>
   35: #include <sys/ucontext.h>
   36: #endif
   37: 
   38: int tb_invalidated_flag;
   39: 
   40: //#define DEBUG_EXEC
   41: //#define DEBUG_SIGNAL
   42: 
   43: #if defined(TARGET_ARM) || defined(TARGET_SPARC)
   44: /* XXX: unify with i386 target */
   45: void cpu_loop_exit(void)
   46: {
   47:     longjmp(env->jmp_env, 1);
   48: }
   49: #endif
   50: #if !(defined(TARGET_SPARC) || defined(TARGET_SH4))
   51: #define reg_T2
   52: #endif
   53: 
   54: /* exit the current TB from a signal handler. The host registers are
   55:    restored in a state compatible with the CPU emulator
   56:  */
   57: void cpu_resume_from_signal(CPUState *env1, void *puc) 
   58: {
   59: #if !defined(CONFIG_SOFTMMU)
   60:     struct ucontext *uc = puc;
   61: #endif
   62: 
   63:     env = env1;
   64: 
   65:     /* XXX: restore cpu registers saved in host registers */
   66: 
   67: #if !defined(CONFIG_SOFTMMU)
   68:     if (puc) {
   69:         /* XXX: use siglongjmp ? */
   70:         sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
   71:     }
   72: #endif
   73:     longjmp(env->jmp_env, 1);
   74: }
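/* Editorial sketch (not in the original source): a host SIGSEGV handler
   built on the helper above services the fault and then re-enters the
   emulator without ever returning.  Hypothetical wrapper; only
   cpu_resume_from_signal() and cpu_single_env come from this file:

       static void example_segv_action(int sig, siginfo_t *info, void *puc)
       {
           // ... service the fault (see handle_cpu_signal() below) ...
           cpu_resume_from_signal(cpu_single_env, puc);  // longjmps into cpu_exec()
       }
*/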
   75: 
   76: 
   77: static TranslationBlock *tb_find_slow(target_ulong pc,
   78:                                       target_ulong cs_base,
   79:                                       unsigned int flags)
   80: {
   81:     TranslationBlock *tb, **ptb1;
   82:     int code_gen_size;
   83:     unsigned int h;
   84:     target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
   85:     uint8_t *tc_ptr;
   86:     
   87:     spin_lock(&tb_lock);
   88: 
   89:     tb_invalidated_flag = 0;
   90:     
   91:     regs_to_env(); /* XXX: do it just before cpu_gen_code() */
   92:     
   93:     /* find translated block using physical mappings */
   94:     phys_pc = get_phys_addr_code(env, pc);
   95:     phys_page1 = phys_pc & TARGET_PAGE_MASK;
   96:     phys_page2 = -1;
   97:     h = tb_phys_hash_func(phys_pc);
   98:     ptb1 = &tb_phys_hash[h];
   99:     for(;;) {
  100:         tb = *ptb1;
  101:         if (!tb)
  102:             goto not_found;
  103:         if (tb->pc == pc && 
  104:             tb->page_addr[0] == phys_page1 &&
  105:             tb->cs_base == cs_base && 
  106:             tb->flags == flags) {
  107:             /* check next page if needed */
  108:             if (tb->page_addr[1] != -1) {
  109:                 virt_page2 = (pc & TARGET_PAGE_MASK) + 
  110:                     TARGET_PAGE_SIZE;
  111:                 phys_page2 = get_phys_addr_code(env, virt_page2);
  112:                 if (tb->page_addr[1] == phys_page2)
  113:                     goto found;
  114:             } else {
  115:                 goto found;
  116:             }
  117:         }
  118:         ptb1 = &tb->phys_hash_next;
  119:     }
  120:  not_found:
  121:     /* if no translated code available, then translate it now */
  122:     tb = tb_alloc(pc);
  123:     if (!tb) {
  124:         /* flush must be done */
  125:         tb_flush(env);
  126:         /* cannot fail at this point */
  127:         tb = tb_alloc(pc);
  128:         /* don't forget to invalidate previous TB info */
  129:         tb_invalidated_flag = 1;
  130:     }
  131:     tc_ptr = code_gen_ptr;
  132:     tb->tc_ptr = tc_ptr;
  133:     tb->cs_base = cs_base;
  134:     tb->flags = flags;
  135:     cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
  136:     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
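    /* Editorial note: the masking above is the usual round-up-to-alignment
       idiom -- (x + A - 1) & ~(A - 1) rounds x up to the next multiple of
       the power-of-two alignment A (here CODE_GEN_ALIGN). */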
  137:     
  138:     /* check next page if needed */
  139:     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
  140:     phys_page2 = -1;
  141:     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
  142:         phys_page2 = get_phys_addr_code(env, virt_page2);
  143:     }
  144:     tb_link_phys(tb, phys_pc, phys_page2);
  145:     
  146:  found:
  147:     /* we add the TB in the virtual pc hash table */
  148:     env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
  149:     spin_unlock(&tb_lock);
  150:     return tb;
  151: }
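/* Editorial sketch (not in the original source): the lookup loop in
   tb_find_slow() is an intrusive singly-linked hash-chain walk that keeps
   a pointer to the link field so a miss can splice in a new node.  The
   same pattern in isolation, with hypothetical names:

       typedef struct Node { unsigned long key; struct Node *next; } Node;

       static Node *chain_lookup(Node **bucket, unsigned long key)
       {
           Node **pp = bucket, *n;
           while ((n = *pp) != NULL) {
               if (n->key == key)
                   return n;
               pp = &n->next;
           }
           return NULL;   // caller may insert a new node at *pp
       }
*/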
  152: 
  153: static inline TranslationBlock *tb_find_fast(void)
  154: {
  155:     TranslationBlock *tb;
  156:     target_ulong cs_base, pc;
  157:     unsigned int flags;
  158: 
  159:     /* we record a subset of the CPU state. It will
  160:        always be the same before a given translated block
  161:        is executed. */
  162: #if defined(TARGET_I386)
  163:     flags = env->hflags;
  164:     flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
  165:     cs_base = env->segs[R_CS].base;
  166:     pc = cs_base + env->eip;
  167: #elif defined(TARGET_ARM)
  168:     flags = env->thumb | (env->vfp.vec_len << 1)
  169:             | (env->vfp.vec_stride << 4);
  170:     if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
  171:         flags |= (1 << 6);
  172:     if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
  173:         flags |= (1 << 7);
  174:     cs_base = 0;
  175:     pc = env->regs[15];
  176: #elif defined(TARGET_SPARC)
  177: #ifdef TARGET_SPARC64
  178:     // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
  179:     flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
  180:         | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
  181: #else
  182:     // FPU enable . MMU enabled . MMU no-fault . Supervisor
  183:     flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
  184:         | env->psrs;
  185: #endif
  186:     cs_base = env->npc;
  187:     pc = env->pc;
  188: #elif defined(TARGET_PPC)
  189:     flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
  190:         (msr_se << MSR_SE) | (msr_le << MSR_LE);
  191:     cs_base = 0;
  192:     pc = env->nip;
  193: #elif defined(TARGET_MIPS)
  194:     flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
  195:     cs_base = 0;
  196:     pc = env->PC;
  197: #elif defined(TARGET_SH4)
  198:     flags = env->sr & (SR_MD | SR_RB);
  199:     cs_base = 0;         /* XXXXX */
  200:     pc = env->pc;
  201: #else
  202: #error unsupported CPU
  203: #endif
  204:     tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
  205:     if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
  206:                          tb->flags != flags, 0)) {
  207:         tb = tb_find_slow(pc, cs_base, flags);
  208:         /* Note: we do it here to avoid a gcc bug on Mac OS X when
  209:            doing it in tb_find_slow */
  210:         if (tb_invalidated_flag) {
  211:             /* as some TB could have been invalidated because
  212:                of memory exceptions while generating the code, we
  213:                must recompute the hash index here */
  214:             T0 = 0;
  215:         }
  216:     }
  217:     return tb;
  218: }
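/* Editorial sketch (not in the original source): tb_jmp_cache acts as a
   direct-mapped, virtual-PC-indexed cache in front of the physical hash
   walked by tb_find_slow(); the pc/cs_base/flags re-check above is what
   catches collisions.  A minimal model of such a cache (size and hash are
   assumptions for illustration, not qemu's actual values):

       #define EXAMPLE_CACHE_BITS 12
       static TranslationBlock *example_cache[1 << EXAMPLE_CACHE_BITS];

       static inline unsigned int example_hash(target_ulong pc)
       {
           return (pc >> 2) & ((1 << EXAMPLE_CACHE_BITS) - 1);
       }
*/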
  219: 
  220: 
  221: /* main execution loop */
  222: 
  223: int cpu_exec(CPUState *env1)
  224: {
  225:     int saved_T0, saved_T1;
  226: #if defined(reg_T2)
  227:     int saved_T2;
  228: #endif
  229:     CPUState *saved_env;
  230: #if defined(TARGET_I386)
  231: #ifdef reg_EAX
  232:     int saved_EAX;
  233: #endif
  234: #ifdef reg_ECX
  235:     int saved_ECX;
  236: #endif
  237: #ifdef reg_EDX
  238:     int saved_EDX;
  239: #endif
  240: #ifdef reg_EBX
  241:     int saved_EBX;
  242: #endif
  243: #ifdef reg_ESP
  244:     int saved_ESP;
  245: #endif
  246: #ifdef reg_EBP
  247:     int saved_EBP;
  248: #endif
  249: #ifdef reg_ESI
  250:     int saved_ESI;
  251: #endif
  252: #ifdef reg_EDI
  253:     int saved_EDI;
  254: #endif
  255: #elif defined(TARGET_SPARC)
  256: #if defined(reg_REGWPTR)
  257:     uint32_t *saved_regwptr;
  258: #endif
  259: #endif
  260: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  261:     int saved_i7, tmp_T0;
  262: #endif
  263:     int ret, interrupt_request;
  264:     void (*gen_func)(void);
  265:     TranslationBlock *tb;
  266:     uint8_t *tc_ptr;
  267: 
  268: #if defined(TARGET_I386)
  269:     /* handle exit of HALTED state */
  270:     if (env1->hflags & HF_HALTED_MASK) {
  271:         /* disable halt condition */
  272:         if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
  273:             (env1->eflags & IF_MASK)) {
  274:             env1->hflags &= ~HF_HALTED_MASK;
  275:         } else {
  276:             return EXCP_HALTED;
  277:         }
  278:     }
  279: #elif defined(TARGET_PPC)
  280:     if (env1->halted) {
  281:         if (env1->msr[MSR_EE] && 
  282:             (env1->interrupt_request & 
  283:              (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
  284:             env1->halted = 0;
  285:         } else {
  286:             return EXCP_HALTED;
  287:         }
  288:     }
  289: #elif defined(TARGET_SPARC)
  290:     if (env1->halted) {
  291:         if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
  292:             (env1->psret != 0)) {
  293:             env1->halted = 0;
  294:         } else {
  295:             return EXCP_HALTED;
  296:         }
  297:     }
  298: #elif defined(TARGET_ARM)
  299:     if (env1->halted) {
  300:         /* An interrupt wakes the CPU even if the I and F CPSR bits are
  301:            set.  */
  302:         if (env1->interrupt_request
  303:             & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
  304:             env1->halted = 0;
  305:         } else {
  306:             return EXCP_HALTED;
  307:         }
  308:     }
  309: #elif defined(TARGET_MIPS)
  310:     if (env1->halted) {
  311:         if (env1->interrupt_request &
  312:             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
  313:             env1->halted = 0;
  314:         } else {
  315:             return EXCP_HALTED;
  316:         }
  317:     }
  318: #endif
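    /* Editorial sketch (not in the original source): EXCP_HALTED tells the
       caller that the virtual CPU is asleep.  A driving loop would
       typically wait for an I/O or timer event before retrying
       (hypothetical caller; wait_for_event() is assumed):

           for (;;) {
               int r = cpu_exec(env);
               if (r == EXCP_HALTED)
                   wait_for_event();
               // ... process I/O and timers, then loop ...
           }
    */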
  319: 
  320:     cpu_single_env = env1; 
  321: 
  322:     /* first we save global registers */
  323:     saved_env = env;
  324:     env = env1;
  325:     saved_T0 = T0;
  326:     saved_T1 = T1;
  327: #if defined(reg_T2)
  328:     saved_T2 = T2;
  329: #endif
  330: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  331:     /* we also save i7 because longjmp may not restore it */
  332:     asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
  333: #endif
  334: 
  335: #if defined(TARGET_I386)
  336: #ifdef reg_EAX
  337:     saved_EAX = EAX;
  338: #endif
  339: #ifdef reg_ECX
  340:     saved_ECX = ECX;
  341: #endif
  342: #ifdef reg_EDX
  343:     saved_EDX = EDX;
  344: #endif
  345: #ifdef reg_EBX
  346:     saved_EBX = EBX;
  347: #endif
  348: #ifdef reg_ESP
  349:     saved_ESP = ESP;
  350: #endif
  351: #ifdef reg_EBP
  352:     saved_EBP = EBP;
  353: #endif
  354: #ifdef reg_ESI
  355:     saved_ESI = ESI;
  356: #endif
  357: #ifdef reg_EDI
  358:     saved_EDI = EDI;
  359: #endif
  360: 
  361:     env_to_regs();
  362:     /* put eflags in CPU temporary format */
  363:     CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
  364:     DF = 1 - (2 * ((env->eflags >> 10) & 1));
  365:     CC_OP = CC_OP_EFLAGS;
  366:     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
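    /* Editorial note: in the temporary format the arithmetic flags live in
       CC_SRC with CC_OP = CC_OP_EFLAGS, so cc_table[CC_OP].compute_all()
       can rebuild eflags lazily, and DF is stored as the string increment
       (+1 or -1) rather than as a flag bit; the inverse conversion is in
       the restore path at the end of cpu_exec(). */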
  367: #elif defined(TARGET_ARM)
  368: #elif defined(TARGET_SPARC)
  369: #if defined(reg_REGWPTR)
  370:     saved_regwptr = REGWPTR;
  371: #endif
  372: #elif defined(TARGET_PPC)
  373: #elif defined(TARGET_MIPS)
  374: #elif defined(TARGET_SH4)
  375:     /* XXXXX */
  376: #else
  377: #error unsupported target CPU
  378: #endif
  379:     env->exception_index = -1;
  380: 
  381:     /* prepare setjmp context for exception handling */
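    /* Editorial sketch (not in the original source): this is the classic
       setjmp/longjmp exception pattern -- setjmp() returns 0 when the
       context is first saved and the body runs; cpu_loop_exit() and other
       longjmp(env->jmp_env, 1) calls unwind straight back here with a
       nonzero value, and the loop then re-dispatches on
       env->exception_index.  Standalone model with hypothetical names:

           #include <setjmp.h>
           static jmp_buf jb;
           static void raise_exc(void) { longjmp(jb, 1); }
           static void run(void)
           {
               for (;;) {
                   if (setjmp(jb) == 0) {
                       // ... execute; raise_exc() may fire at any depth ...
                   } else {
                       // ... unwound here; handle the pending exception ...
                   }
               }
           }
    */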
  382:     for(;;) {
  383:         if (setjmp(env->jmp_env) == 0) {
  384:             env->current_tb = NULL;
  385:             /* if an exception is pending, we execute it here */
  386:             if (env->exception_index >= 0) {
  387:                 if (env->exception_index >= EXCP_INTERRUPT) {
  388:                     /* exit request from the cpu execution loop */
  389:                     ret = env->exception_index;
  390:                     break;
  391:                 } else if (env->user_mode_only) {
  392:                     /* if user mode only, we simulate a fake exception
   393:                        which will be handled outside the cpu execution
  394:                        loop */
  395: #if defined(TARGET_I386)
  396:                     do_interrupt_user(env->exception_index, 
  397:                                       env->exception_is_int, 
  398:                                       env->error_code, 
  399:                                       env->exception_next_eip);
  400: #endif
  401:                     ret = env->exception_index;
  402:                     break;
  403:                 } else {
  404: #if defined(TARGET_I386)
  405:                     /* simulate a real cpu exception. On i386, it can
  406:                        trigger new exceptions, but we do not handle
  407:                        double or triple faults yet. */
  408:                     do_interrupt(env->exception_index, 
  409:                                  env->exception_is_int, 
  410:                                  env->error_code, 
  411:                                  env->exception_next_eip, 0);
  412: #elif defined(TARGET_PPC)
  413:                     do_interrupt(env);
  414: #elif defined(TARGET_MIPS)
  415:                     do_interrupt(env);
  416: #elif defined(TARGET_SPARC)
  417:                     do_interrupt(env->exception_index);
  418: #elif defined(TARGET_ARM)
  419:                     do_interrupt(env);
  420: #elif defined(TARGET_SH4)
   421:                     do_interrupt(env);
  422: #endif
  423:                 }
  424:                 env->exception_index = -1;
  425:             } 
  426: #ifdef USE_KQEMU
  427:             if (kqemu_is_ok(env) && env->interrupt_request == 0) {
  428:                 int ret;
  429:                 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
  430:                 ret = kqemu_cpu_exec(env);
  431:                 /* put eflags in CPU temporary format */
  432:                 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
  433:                 DF = 1 - (2 * ((env->eflags >> 10) & 1));
  434:                 CC_OP = CC_OP_EFLAGS;
  435:                 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
  436:                 if (ret == 1) {
  437:                     /* exception */
  438:                     longjmp(env->jmp_env, 1);
  439:                 } else if (ret == 2) {
  440:                     /* softmmu execution needed */
  441:                 } else {
  442:                     if (env->interrupt_request != 0) {
  443:                         /* hardware interrupt will be executed just after */
  444:                     } else {
  445:                         /* otherwise, we restart */
  446:                         longjmp(env->jmp_env, 1);
  447:                     }
  448:                 }
  449:             }
  450: #endif
  451: 
  452:             T0 = 0; /* force lookup of first TB */
  453:             for(;;) {
  454: #if defined(__sparc__) && !defined(HOST_SOLARIS)
   455:                 /* g1 may be modified by some libc functions */
  456:                 tmp_T0 = T0;
  457: #endif	    
  458:                 interrupt_request = env->interrupt_request;
  459:                 if (__builtin_expect(interrupt_request, 0)) {
  460: #if defined(TARGET_I386)
  461:                     /* if hardware interrupt pending, we execute it */
  462:                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
  463:                         (env->eflags & IF_MASK) && 
  464:                         !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
  465:                         int intno;
  466:                         env->interrupt_request &= ~CPU_INTERRUPT_HARD;
  467:                         intno = cpu_get_pic_interrupt(env);
  468:                         if (loglevel & CPU_LOG_TB_IN_ASM) {
  469:                             fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
  470:                         }
  471:                         do_interrupt(intno, 0, 0, 0, 1);
  472:                         /* ensure that no TB jump will be modified as
  473:                            the program flow was changed */
  474: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  475:                         tmp_T0 = 0;
  476: #else
  477:                         T0 = 0;
  478: #endif
  479:                     }
  480: #elif defined(TARGET_PPC)
  481: #if 0
  482:                     if ((interrupt_request & CPU_INTERRUPT_RESET)) {
  483:                         cpu_ppc_reset(env);
  484:                     }
  485: #endif
  486:                     if (msr_ee != 0) {
  487:                         if ((interrupt_request & CPU_INTERRUPT_HARD)) {
  488: 			    /* Raise it */
  489: 			    env->exception_index = EXCP_EXTERNAL;
  490: 			    env->error_code = 0;
  491:                             do_interrupt(env);
  492:                             env->interrupt_request &= ~CPU_INTERRUPT_HARD;
  493: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  494:                             tmp_T0 = 0;
  495: #else
  496:                             T0 = 0;
  497: #endif
  498:                         } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
  499:                             /* Raise it */
  500:                             env->exception_index = EXCP_DECR;
  501:                             env->error_code = 0;
  502:                             do_interrupt(env);
  503:                             env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
  504: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  505:                             tmp_T0 = 0;
  506: #else
  507:                             T0 = 0;
  508: #endif
  509:                         }
  510:                     }
  511: #elif defined(TARGET_MIPS)
  512:                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
  513:                         (env->CP0_Status & (1 << CP0St_IE)) &&
  514:                         (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
  515:                         !(env->hflags & MIPS_HFLAG_EXL) &&
  516:                         !(env->hflags & MIPS_HFLAG_ERL) &&
  517:                         !(env->hflags & MIPS_HFLAG_DM)) {
  518:                         /* Raise it */
  519:                         env->exception_index = EXCP_EXT_INTERRUPT;
  520:                         env->error_code = 0;
  521:                         do_interrupt(env);
  522:                         env->interrupt_request &= ~CPU_INTERRUPT_HARD;
  523: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  524:                         tmp_T0 = 0;
  525: #else
  526:                         T0 = 0;
  527: #endif
  528:                     }
  529: #elif defined(TARGET_SPARC)
  530:                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
  531: 			(env->psret != 0)) {
  532: 			int pil = env->interrupt_index & 15;
  533: 			int type = env->interrupt_index & 0xf0;
  534: 
  535: 			if (((type == TT_EXTINT) &&
  536: 			     (pil == 15 || pil > env->psrpil)) ||
  537: 			    type != TT_EXTINT) {
  538: 			    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
  539: 			    do_interrupt(env->interrupt_index);
  540: 			    env->interrupt_index = 0;
  541: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  542:                             tmp_T0 = 0;
  543: #else
  544:                             T0 = 0;
  545: #endif
  546: 			}
  547: 		    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
  548: 			//do_interrupt(0, 0, 0, 0, 0);
  549: 			env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
  550: 		    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
  551:                         env1->halted = 1;
  552:                         return EXCP_HALTED;
  553:                     }
  554: #elif defined(TARGET_ARM)
  555:                     if (interrupt_request & CPU_INTERRUPT_FIQ
  556:                         && !(env->uncached_cpsr & CPSR_F)) {
  557:                         env->exception_index = EXCP_FIQ;
  558:                         do_interrupt(env);
  559:                     }
  560:                     if (interrupt_request & CPU_INTERRUPT_HARD
  561:                         && !(env->uncached_cpsr & CPSR_I)) {
  562:                         env->exception_index = EXCP_IRQ;
  563:                         do_interrupt(env);
  564:                     }
  565: #elif defined(TARGET_SH4)
  566: 		    /* XXXXX */
  567: #endif
   568:                     /* Don't use the cached interrupt_request value;
   569:                        do_interrupt may have updated the EXITTB flag. */
  570:                     if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
  571:                         env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
  572:                         /* ensure that no TB jump will be modified as
  573:                            the program flow was changed */
  574: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  575:                         tmp_T0 = 0;
  576: #else
  577:                         T0 = 0;
  578: #endif
  579:                     }
  580:                     if (interrupt_request & CPU_INTERRUPT_EXIT) {
  581:                         env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
  582:                         env->exception_index = EXCP_INTERRUPT;
  583:                         cpu_loop_exit();
  584:                     }
  585:                 }
  586: #ifdef DEBUG_EXEC
  587:                 if ((loglevel & CPU_LOG_TB_CPU)) {
  588: #if defined(TARGET_I386)
  589:                     /* restore flags in standard format */
  590: #ifdef reg_EAX
  591:                     env->regs[R_EAX] = EAX;
  592: #endif
  593: #ifdef reg_EBX
  594:                     env->regs[R_EBX] = EBX;
  595: #endif
  596: #ifdef reg_ECX
  597:                     env->regs[R_ECX] = ECX;
  598: #endif
  599: #ifdef reg_EDX
  600:                     env->regs[R_EDX] = EDX;
  601: #endif
  602: #ifdef reg_ESI
  603:                     env->regs[R_ESI] = ESI;
  604: #endif
  605: #ifdef reg_EDI
  606:                     env->regs[R_EDI] = EDI;
  607: #endif
  608: #ifdef reg_EBP
  609:                     env->regs[R_EBP] = EBP;
  610: #endif
  611: #ifdef reg_ESP
  612:                     env->regs[R_ESP] = ESP;
  613: #endif
  614:                     env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
  615:                     cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
  616:                     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
  617: #elif defined(TARGET_ARM)
  618:                     cpu_dump_state(env, logfile, fprintf, 0);
  619: #elif defined(TARGET_SPARC)
  620: 		    REGWPTR = env->regbase + (env->cwp * 16);
  621: 		    env->regwptr = REGWPTR;
  622:                     cpu_dump_state(env, logfile, fprintf, 0);
  623: #elif defined(TARGET_PPC)
  624:                     cpu_dump_state(env, logfile, fprintf, 0);
  625: #elif defined(TARGET_MIPS)
  626:                     cpu_dump_state(env, logfile, fprintf, 0);
  627: #elif defined(TARGET_SH4)
  628: 		    cpu_dump_state(env, logfile, fprintf, 0);
  629: #else
  630: #error unsupported target CPU 
  631: #endif
  632:                 }
  633: #endif
  634:                 tb = tb_find_fast();
  635: #ifdef DEBUG_EXEC
  636:                 if ((loglevel & CPU_LOG_EXEC)) {
  637:                     fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
  638:                             (long)tb->tc_ptr, tb->pc,
  639:                             lookup_symbol(tb->pc));
  640:                 }
  641: #endif
  642: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  643:                 T0 = tmp_T0;
  644: #endif	    
  645:                 /* see if we can patch the calling TB. When the TB
  646:                    spans two pages, we cannot safely do a direct
  647:                    jump. */
  648:                 {
  649:                     if (T0 != 0 &&
   650: #ifdef USE_KQEMU
  651:                         (env->kqemu_enabled != 2) &&
  652: #endif
  653:                         tb->page_addr[1] == -1
  654: #if defined(TARGET_I386) && defined(USE_CODE_COPY)
  655:                     && (tb->cflags & CF_CODE_COPY) == 
  656:                     (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
  657: #endif
  658:                     ) {
  659:                     spin_lock(&tb_lock);
  660:                     tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
  661: #if defined(USE_CODE_COPY)
  662:                     /* propagates the FP use info */
  663:                     ((TranslationBlock *)(T0 & ~3))->cflags |= 
  664:                         (tb->cflags & CF_FP_USED);
  665: #endif
  666:                     spin_unlock(&tb_lock);
  667:                 }
  668:                 }
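                /* Editorial note: T0 encodes both the previously executed
                   TB (T0 & ~3) and which of its two outgoing jumps was
                   taken (T0 & 3); the low bits are free because TBs are
                   word-aligned.  tb_add_jump() patches that jump to branch
                   directly into tb's generated code, so later executions
                   bypass this lookup; the T0 = 0 stores elsewhere in this
                   file deliberately suppress such chaining. */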
  669:                 tc_ptr = tb->tc_ptr;
  670:                 env->current_tb = tb;
  671:                 /* execute the generated code */
  672:                 gen_func = (void *)tc_ptr;
  673: #if defined(__sparc__)
  674:                 __asm__ __volatile__("call	%0\n\t"
  675:                                      "mov	%%o7,%%i0"
  676:                                      : /* no outputs */
  677:                                      : "r" (gen_func) 
  678:                                      : "i0", "i1", "i2", "i3", "i4", "i5",
  679:                                        "l0", "l1", "l2", "l3", "l4", "l5",
  680:                                        "l6", "l7");
  681: #elif defined(__arm__)
  682:                 asm volatile ("mov pc, %0\n\t"
  683:                               ".global exec_loop\n\t"
  684:                               "exec_loop:\n\t"
  685:                               : /* no outputs */
  686:                               : "r" (gen_func)
  687:                               : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
  688: #elif defined(TARGET_I386) && defined(USE_CODE_COPY)
  689: {
  690:     if (!(tb->cflags & CF_CODE_COPY)) {
  691:         if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
  692:             save_native_fp_state(env);
  693:         }
  694:         gen_func();
  695:     } else {
  696:         if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
  697:             restore_native_fp_state(env);
  698:         }
  699:         /* we work with native eflags */
  700:         CC_SRC = cc_table[CC_OP].compute_all();
  701:         CC_OP = CC_OP_EFLAGS;
  702:         asm(".globl exec_loop\n"
  703:             "\n"
  704:             "debug1:\n"
  705:             "    pushl %%ebp\n"
  706:             "    fs movl %10, %9\n"
  707:             "    fs movl %11, %%eax\n"
  708:             "    andl $0x400, %%eax\n"
  709:             "    fs orl %8, %%eax\n"
  710:             "    pushl %%eax\n"
  711:             "    popf\n"
  712:             "    fs movl %%esp, %12\n"
  713:             "    fs movl %0, %%eax\n"
  714:             "    fs movl %1, %%ecx\n"
  715:             "    fs movl %2, %%edx\n"
  716:             "    fs movl %3, %%ebx\n"
  717:             "    fs movl %4, %%esp\n"
  718:             "    fs movl %5, %%ebp\n"
  719:             "    fs movl %6, %%esi\n"
  720:             "    fs movl %7, %%edi\n"
  721:             "    fs jmp *%9\n"
  722:             "exec_loop:\n"
  723:             "    fs movl %%esp, %4\n"
  724:             "    fs movl %12, %%esp\n"
  725:             "    fs movl %%eax, %0\n"
  726:             "    fs movl %%ecx, %1\n"
  727:             "    fs movl %%edx, %2\n"
  728:             "    fs movl %%ebx, %3\n"
  729:             "    fs movl %%ebp, %5\n"
  730:             "    fs movl %%esi, %6\n"
  731:             "    fs movl %%edi, %7\n"
  732:             "    pushf\n"
  733:             "    popl %%eax\n"
  734:             "    movl %%eax, %%ecx\n"
  735:             "    andl $0x400, %%ecx\n"
  736:             "    shrl $9, %%ecx\n"
  737:             "    andl $0x8d5, %%eax\n"
  738:             "    fs movl %%eax, %8\n"
  739:             "    movl $1, %%eax\n"
  740:             "    subl %%ecx, %%eax\n"
  741:             "    fs movl %%eax, %11\n"
  742:             "    fs movl %9, %%ebx\n" /* get T0 value */
  743:             "    popl %%ebp\n"
  744:             :
  745:             : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
  746:             "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
  747:             "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
  748:             "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
  749:             "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
  750:             "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
  751:             "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
  752:             "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
  753:             "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
  754:             "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
  755:             "a" (gen_func),
  756:             "m" (*(uint8_t *)offsetof(CPUState, df)),
  757:             "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
  758:             : "%ecx", "%edx"
  759:             );
  760:     }
  761: }
  762: #elif defined(__ia64)
  763: 		struct fptr {
  764: 			void *ip;
  765: 			void *gp;
  766: 		} fp;
  767: 
  768: 		fp.ip = tc_ptr;
  769: 		fp.gp = code_gen_buffer + 2 * (1 << 20);
  770: 		(*(void (*)(void)) &fp)();
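		/* Editorial note: on ia64 a function pointer is really a
		   descriptor (entry address + gp); the generated code has no
		   descriptor of its own, so one is faked on the stack with
		   gp pointing at a fixed offset inside code_gen_buffer. */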
  771: #else
  772:                 gen_func();
  773: #endif
  774:                 env->current_tb = NULL;
  775:                 /* reset soft MMU for next block (it can currently
  776:                    only be set by a memory fault) */
  777: #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
  778:                 if (env->hflags & HF_SOFTMMU_MASK) {
  779:                     env->hflags &= ~HF_SOFTMMU_MASK;
  780:                     /* do not allow linking to another block */
  781:                     T0 = 0;
  782:                 }
  783: #endif
  784: #if defined(USE_KQEMU)
  785: #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
  786:                 if (kqemu_is_ok(env) &&
  787:                     (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
  788:                     cpu_loop_exit();
  789:                 }
  790: #endif
  791:             }
  792:         } else {
  793:             env_to_regs();
  794:         }
  795:     } /* for(;;) */
  796: 
  797: 
  798: #if defined(TARGET_I386)
  799: #if defined(USE_CODE_COPY)
  800:     if (env->native_fp_regs) {
  801:         save_native_fp_state(env);
  802:     }
  803: #endif
  804:     /* restore flags in standard format */
  805:     env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
  806: 
  807:     /* restore global registers */
  808: #ifdef reg_EAX
  809:     EAX = saved_EAX;
  810: #endif
  811: #ifdef reg_ECX
  812:     ECX = saved_ECX;
  813: #endif
  814: #ifdef reg_EDX
  815:     EDX = saved_EDX;
  816: #endif
  817: #ifdef reg_EBX
  818:     EBX = saved_EBX;
  819: #endif
  820: #ifdef reg_ESP
  821:     ESP = saved_ESP;
  822: #endif
  823: #ifdef reg_EBP
  824:     EBP = saved_EBP;
  825: #endif
  826: #ifdef reg_ESI
  827:     ESI = saved_ESI;
  828: #endif
  829: #ifdef reg_EDI
  830:     EDI = saved_EDI;
  831: #endif
  832: #elif defined(TARGET_ARM)
   833:     /* XXX: Save/restore host fpu exception state?  */
  834: #elif defined(TARGET_SPARC)
  835: #if defined(reg_REGWPTR)
  836:     REGWPTR = saved_regwptr;
  837: #endif
  838: #elif defined(TARGET_PPC)
  839: #elif defined(TARGET_MIPS)
  840: #elif defined(TARGET_SH4)
  841:     /* XXXXX */
  842: #else
  843: #error unsupported target CPU
  844: #endif
  845: #if defined(__sparc__) && !defined(HOST_SOLARIS)
  846:     asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
  847: #endif
  848:     T0 = saved_T0;
  849:     T1 = saved_T1;
  850: #if defined(reg_T2)
  851:     T2 = saved_T2;
  852: #endif
  853:     env = saved_env;
  854:     /* fail safe : never use cpu_single_env outside cpu_exec() */
  855:     cpu_single_env = NULL; 
  856:     return ret;
  857: }
  858: 
   859: /* must only be called from generated code, as it may raise an
   860:    exception */
  861: void tb_invalidate_page_range(target_ulong start, target_ulong end)
  862: {
  863:     /* XXX: cannot enable it yet because it yields to MMU exception
  864:        where NIP != read address on PowerPC */
  865: #if 0
  866:     target_ulong phys_addr;
  867:     phys_addr = get_phys_addr_code(env, start);
  868:     tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
  869: #endif
  870: }
  871: 
  872: #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
  873: 
  874: void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
  875: {
  876:     CPUX86State *saved_env;
  877: 
  878:     saved_env = env;
  879:     env = s;
  880:     if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
  881:         selector &= 0xffff;
  882:         cpu_x86_load_seg_cache(env, seg_reg, selector, 
  883:                                (selector << 4), 0xffff, 0);
  884:     } else {
  885:         load_seg(seg_reg, selector);
  886:     }
  887:     env = saved_env;
  888: }
  889: 
  890: void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
  891: {
  892:     CPUX86State *saved_env;
  893: 
  894:     saved_env = env;
  895:     env = s;
  896:     
  897:     helper_fsave((target_ulong)ptr, data32);
  898: 
  899:     env = saved_env;
  900: }
  901: 
  902: void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
  903: {
  904:     CPUX86State *saved_env;
  905: 
  906:     saved_env = env;
  907:     env = s;
  908:     
  909:     helper_frstor((target_ulong)ptr, data32);
  910: 
  911:     env = saved_env;
  912: }
  913: 
  914: #endif /* TARGET_I386 */
  915: 
  916: #if !defined(CONFIG_SOFTMMU)
  917: 
  918: #if defined(TARGET_I386)
  919: 
  920: /* 'pc' is the host PC at which the exception was raised. 'address' is
  921:    the effective address of the memory exception. 'is_write' is 1 if a
   922:    write caused the exception, and 0 otherwise. 'old_set' is the
  923:    signal set which should be restored */
  924: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  925:                                     int is_write, sigset_t *old_set, 
  926:                                     void *puc)
  927: {
  928:     TranslationBlock *tb;
  929:     int ret;
  930: 
  931:     if (cpu_single_env)
  932:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
  933: #if defined(DEBUG_SIGNAL)
  934:     qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 
  935:                 pc, address, is_write, *(unsigned long *)old_set);
  936: #endif
  937:     /* XXX: locking issue */
  938:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
  939:         return 1;
  940:     }
  941: 
  942:     /* see if it is an MMU fault */
  943:     ret = cpu_x86_handle_mmu_fault(env, address, is_write, 
  944:                                    ((env->hflags & HF_CPL_MASK) == 3), 0);
  945:     if (ret < 0)
  946:         return 0; /* not an MMU fault */
  947:     if (ret == 0)
  948:         return 1; /* the MMU fault was handled without causing real CPU fault */
  949:     /* now we have a real cpu fault */
  950:     tb = tb_find_pc(pc);
  951:     if (tb) {
  952:         /* the PC is inside the translated code. It means that we have
  953:            a virtual CPU fault */
  954:         cpu_restore_state(tb, env, pc, puc);
  955:     }
  956:     if (ret == 1) {
  957: #if 0
  958:         printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n", 
  959:                env->eip, env->cr[2], env->error_code);
  960: #endif
  961:         /* we restore the process signal mask as the sigreturn should
  962:            do it (XXX: use sigsetjmp) */
  963:         sigprocmask(SIG_SETMASK, old_set, NULL);
  964:         raise_exception_err(env->exception_index, env->error_code);
  965:     } else {
  966:         /* activate soft MMU for this block */
  967:         env->hflags |= HF_SOFTMMU_MASK;
  968:         cpu_resume_from_signal(env, puc);
  969:     }
  970:     /* never comes here */
  971:     return 1;
  972: }
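/* Editorial note: every handle_cpu_signal() variant below follows the same
   protocol -- return 0 when the fault does not belong to the emulator (the
   caller should fall through to default host handling), return 1 when it
   was serviced transparently, and never return at all when a guest
   exception has to be raised (raise_exception_err() and
   cpu_resume_from_signal() longjmp back into cpu_exec()). */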
  973: 
  974: #elif defined(TARGET_ARM)
  975: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  976:                                     int is_write, sigset_t *old_set,
  977:                                     void *puc)
  978: {
  979:     TranslationBlock *tb;
  980:     int ret;
  981: 
  982:     if (cpu_single_env)
  983:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
  984: #if defined(DEBUG_SIGNAL)
  985:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 
  986:            pc, address, is_write, *(unsigned long *)old_set);
  987: #endif
  988:     /* XXX: locking issue */
  989:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
  990:         return 1;
  991:     }
  992:     /* see if it is an MMU fault */
  993:     ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
  994:     if (ret < 0)
  995:         return 0; /* not an MMU fault */
  996:     if (ret == 0)
  997:         return 1; /* the MMU fault was handled without causing real CPU fault */
  998:     /* now we have a real cpu fault */
  999:     tb = tb_find_pc(pc);
 1000:     if (tb) {
 1001:         /* the PC is inside the translated code. It means that we have
 1002:            a virtual CPU fault */
 1003:         cpu_restore_state(tb, env, pc, puc);
 1004:     }
 1005:     /* we restore the process signal mask as the sigreturn should
 1006:        do it (XXX: use sigsetjmp) */
 1007:     sigprocmask(SIG_SETMASK, old_set, NULL);
 1008:     cpu_loop_exit();
 1009: }
 1010: #elif defined(TARGET_SPARC)
 1011: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
 1012:                                     int is_write, sigset_t *old_set,
 1013:                                     void *puc)
 1014: {
 1015:     TranslationBlock *tb;
 1016:     int ret;
 1017: 
 1018:     if (cpu_single_env)
 1019:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
 1020: #if defined(DEBUG_SIGNAL)
 1021:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 
 1022:            pc, address, is_write, *(unsigned long *)old_set);
 1023: #endif
 1024:     /* XXX: locking issue */
 1025:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
 1026:         return 1;
 1027:     }
 1028:     /* see if it is an MMU fault */
 1029:     ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
 1030:     if (ret < 0)
 1031:         return 0; /* not an MMU fault */
 1032:     if (ret == 0)
 1033:         return 1; /* the MMU fault was handled without causing real CPU fault */
 1034:     /* now we have a real cpu fault */
 1035:     tb = tb_find_pc(pc);
 1036:     if (tb) {
 1037:         /* the PC is inside the translated code. It means that we have
 1038:            a virtual CPU fault */
 1039:         cpu_restore_state(tb, env, pc, puc);
 1040:     }
 1041:     /* we restore the process signal mask as the sigreturn should
 1042:        do it (XXX: use sigsetjmp) */
 1043:     sigprocmask(SIG_SETMASK, old_set, NULL);
 1044:     cpu_loop_exit();
 1045: }
 1046: #elif defined (TARGET_PPC)
 1047: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
 1048:                                     int is_write, sigset_t *old_set,
 1049:                                     void *puc)
 1050: {
 1051:     TranslationBlock *tb;
 1052:     int ret;
 1053:     
 1054:     if (cpu_single_env)
 1055:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
 1056: #if defined(DEBUG_SIGNAL)
 1057:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 
 1058:            pc, address, is_write, *(unsigned long *)old_set);
 1059: #endif
 1060:     /* XXX: locking issue */
 1061:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
 1062:         return 1;
 1063:     }
 1064: 
 1065:     /* see if it is an MMU fault */
 1066:     ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
 1067:     if (ret < 0)
 1068:         return 0; /* not an MMU fault */
 1069:     if (ret == 0)
 1070:         return 1; /* the MMU fault was handled without causing real CPU fault */
 1071: 
 1072:     /* now we have a real cpu fault */
 1073:     tb = tb_find_pc(pc);
 1074:     if (tb) {
 1075:         /* the PC is inside the translated code. It means that we have
 1076:            a virtual CPU fault */
 1077:         cpu_restore_state(tb, env, pc, puc);
 1078:     }
 1079:     if (ret == 1) {
 1080: #if 0
 1081:         printf("PF exception: NIP=0x%08x error=0x%x %p\n", 
 1082:                env->nip, env->error_code, tb);
 1083: #endif
  1084:         /* we restore the process signal mask as the sigreturn should
  1085:            do it (XXX: use sigsetjmp) */
 1086:         sigprocmask(SIG_SETMASK, old_set, NULL);
 1087:         do_raise_exception_err(env->exception_index, env->error_code);
 1088:     } else {
 1089:         /* activate soft MMU for this block */
 1090:         cpu_resume_from_signal(env, puc);
 1091:     }
 1092:     /* never comes here */
 1093:     return 1;
 1094: }
 1095: 
 1096: #elif defined (TARGET_MIPS)
 1097: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
 1098:                                     int is_write, sigset_t *old_set,
 1099:                                     void *puc)
 1100: {
 1101:     TranslationBlock *tb;
 1102:     int ret;
 1103:     
 1104:     if (cpu_single_env)
 1105:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
 1106: #if defined(DEBUG_SIGNAL)
 1107:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 
 1108:            pc, address, is_write, *(unsigned long *)old_set);
 1109: #endif
 1110:     /* XXX: locking issue */
 1111:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
 1112:         return 1;
 1113:     }
 1114: 
 1115:     /* see if it is an MMU fault */
 1116:     ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
 1117:     if (ret < 0)
 1118:         return 0; /* not an MMU fault */
 1119:     if (ret == 0)
 1120:         return 1; /* the MMU fault was handled without causing real CPU fault */
 1121: 
 1122:     /* now we have a real cpu fault */
 1123:     tb = tb_find_pc(pc);
 1124:     if (tb) {
 1125:         /* the PC is inside the translated code. It means that we have
 1126:            a virtual CPU fault */
 1127:         cpu_restore_state(tb, env, pc, puc);
 1128:     }
 1129:     if (ret == 1) {
 1130: #if 0
 1131:         printf("PF exception: NIP=0x%08x error=0x%x %p\n", 
 1132:                env->nip, env->error_code, tb);
 1133: #endif
  1134:         /* we restore the process signal mask as the sigreturn should
  1135:            do it (XXX: use sigsetjmp) */
 1136:         sigprocmask(SIG_SETMASK, old_set, NULL);
 1137:         do_raise_exception_err(env->exception_index, env->error_code);
 1138:     } else {
 1139:         /* activate soft MMU for this block */
 1140:         cpu_resume_from_signal(env, puc);
 1141:     }
 1142:     /* never comes here */
 1143:     return 1;
 1144: }
 1145: 
 1146: #elif defined (TARGET_SH4)
 1147: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
 1148:                                     int is_write, sigset_t *old_set,
 1149:                                     void *puc)
 1150: {
 1151:     TranslationBlock *tb;
 1152:     int ret;
 1153:     
 1154:     if (cpu_single_env)
 1155:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
 1156: #if defined(DEBUG_SIGNAL)
 1157:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 
 1158:            pc, address, is_write, *(unsigned long *)old_set);
 1159: #endif
 1160:     /* XXX: locking issue */
 1161:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
 1162:         return 1;
 1163:     }
 1164: 
 1165:     /* see if it is an MMU fault */
 1166:     ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
 1167:     if (ret < 0)
 1168:         return 0; /* not an MMU fault */
 1169:     if (ret == 0)
 1170:         return 1; /* the MMU fault was handled without causing real CPU fault */
 1171: 
 1172:     /* now we have a real cpu fault */
 1173:     tb = tb_find_pc(pc);
 1174:     if (tb) {
 1175:         /* the PC is inside the translated code. It means that we have
 1176:            a virtual CPU fault */
 1177:         cpu_restore_state(tb, env, pc, puc);
 1178:     }
  1179: #if 0
  1180:     printf("PF exception: PC=0x%08x error=0x%x %p\n", 
  1181:            env->pc, env->error_code, tb);
  1182: #endif
 1183:     /* we restore the process signal mask as the sigreturn should
 1184:        do it (XXX: use sigsetjmp) */
 1185:     sigprocmask(SIG_SETMASK, old_set, NULL);
 1186:     cpu_loop_exit();
 1187:     /* never comes here */
 1188:     return 1;
 1189: }
 1190: #else
 1191: #error unsupported target CPU
 1192: #endif
 1193: 
 1194: #if defined(__i386__)
 1195: 
 1196: #if defined(USE_CODE_COPY)
 1197: static void cpu_send_trap(unsigned long pc, int trap, 
 1198:                           struct ucontext *uc)
 1199: {
 1200:     TranslationBlock *tb;
 1201: 
 1202:     if (cpu_single_env)
 1203:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
 1204:     /* now we have a real cpu fault */
 1205:     tb = tb_find_pc(pc);
 1206:     if (tb) {
 1207:         /* the PC is inside the translated code. It means that we have
 1208:            a virtual CPU fault */
 1209:         cpu_restore_state(tb, env, pc, uc);
 1210:     }
 1211:     sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
 1212:     raise_exception_err(trap, env->error_code);
 1213: }
 1214: #endif
 1215: 
 1216: int cpu_signal_handler(int host_signum, struct siginfo *info, 
 1217:                        void *puc)
 1218: {
 1219:     struct ucontext *uc = puc;
 1220:     unsigned long pc;
 1221:     int trapno;
 1222: 
 1223: #ifndef REG_EIP
 1224: /* for glibc 2.1 */
 1225: #define REG_EIP    EIP
 1226: #define REG_ERR    ERR
 1227: #define REG_TRAPNO TRAPNO
 1228: #endif
 1229:     pc = uc->uc_mcontext.gregs[REG_EIP];
 1230:     trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
 1231: #if defined(TARGET_I386) && defined(USE_CODE_COPY)
 1232:     if (trapno == 0x00 || trapno == 0x05) {
 1233:         /* send division by zero or bound exception */
 1234:         cpu_send_trap(pc, trapno, uc);
 1235:         return 1;
 1236:     } else
 1237: #endif
 1238:         return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
 1239:                                  trapno == 0xe ? 
 1240:                                  (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
 1241:                                  &uc->uc_sigmask, puc);
 1242: }
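/* Editorial sketch (not in the original source): handlers with this
   (signum, siginfo, ucontext) shape must be installed with SA_SIGINFO so
   the kernel delivers the siginfo/ucontext pair; qemu does this elsewhere,
   but a minimal standalone installation would look like the following
   (the wrapper name is hypothetical):

       static void host_segv_action(int sig, siginfo_t *info, void *puc)
       {
           if (!cpu_signal_handler(sig, (struct siginfo *)info, puc))
               abort();   // not the emulator's fault: give up loudly
       }

       static void install_handler(void)
       {
           struct sigaction act;
           sigemptyset(&act.sa_mask);
           act.sa_flags = SA_SIGINFO;
           act.sa_sigaction = host_segv_action;
           sigaction(SIGSEGV, &act, NULL);
       }
*/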
 1243: 
 1244: #elif defined(__x86_64__)
 1245: 
 1246: int cpu_signal_handler(int host_signum, struct siginfo *info,
 1247:                        void *puc)
 1248: {
 1249:     struct ucontext *uc = puc;
 1250:     unsigned long pc;
 1251: 
 1252:     pc = uc->uc_mcontext.gregs[REG_RIP];
 1253:     return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
 1254:                              uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ? 
 1255:                              (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
 1256:                              &uc->uc_sigmask, puc);
 1257: }
 1258: 
 1259: #elif defined(__powerpc__)
 1260: 
 1261: /***********************************************************************
 1262:  * signal context platform-specific definitions
 1263:  * From Wine
 1264:  */
 1265: #ifdef linux
 1266: /* All Registers access - only for local access */
 1267: # define REG_sig(reg_name, context)		((context)->uc_mcontext.regs->reg_name)
 1268: /* Gpr Registers access  */
 1269: # define GPR_sig(reg_num, context)		REG_sig(gpr[reg_num], context)
 1270: # define IAR_sig(context)			REG_sig(nip, context)	/* Program counter */
 1271: # define MSR_sig(context)			REG_sig(msr, context)   /* Machine State Register (Supervisor) */
 1272: # define CTR_sig(context)			REG_sig(ctr, context)   /* Count register */
 1273: # define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
 1274: # define LR_sig(context)			REG_sig(link, context) /* Link register */
 1275: # define CR_sig(context)			REG_sig(ccr, context) /* Condition register */
 1276: /* Float Registers access  */
 1277: # define FLOAT_sig(reg_num, context)		(((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
 1278: # define FPSCR_sig(context)			(*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
 1279: /* Exception Registers access */
 1280: # define DAR_sig(context)			REG_sig(dar, context)
 1281: # define DSISR_sig(context)			REG_sig(dsisr, context)
 1282: # define TRAP_sig(context)			REG_sig(trap, context)
 1283: #endif /* linux */
 1284: 
 1285: #ifdef __APPLE__
 1286: # include <sys/ucontext.h>
 1287: typedef struct ucontext SIGCONTEXT;
 1288: /* All Registers access - only for local access */
 1289: # define REG_sig(reg_name, context)		((context)->uc_mcontext->ss.reg_name)
 1290: # define FLOATREG_sig(reg_name, context)	((context)->uc_mcontext->fs.reg_name)
 1291: # define EXCEPREG_sig(reg_name, context)	((context)->uc_mcontext->es.reg_name)
 1292: # define VECREG_sig(reg_name, context)		((context)->uc_mcontext->vs.reg_name)
 1293: /* Gpr Registers access */
 1294: # define GPR_sig(reg_num, context)		REG_sig(r##reg_num, context)
 1295: # define IAR_sig(context)			REG_sig(srr0, context)	/* Program counter */
 1296: # define MSR_sig(context)			REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
 1297: # define CTR_sig(context)			REG_sig(ctr, context)
  1298: # define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
  1299: # define LR_sig(context)			REG_sig(lr, context)  /* Link register */
 1300: # define CR_sig(context)			REG_sig(cr, context)  /* Condition register */
 1301: /* Float Registers access */
 1302: # define FLOAT_sig(reg_num, context)		FLOATREG_sig(fpregs[reg_num], context)
 1303: # define FPSCR_sig(context)			((double)FLOATREG_sig(fpscr, context))
 1304: /* Exception Registers access */
 1305: # define DAR_sig(context)			EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
 1306: # define DSISR_sig(context)			EXCEPREG_sig(dsisr, context)
 1307: # define TRAP_sig(context)			EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
 1308: #endif /* __APPLE__ */
 1309: 
 1310: int cpu_signal_handler(int host_signum, struct siginfo *info, 
 1311:                        void *puc)
 1312: {
 1313:     struct ucontext *uc = puc;
 1314:     unsigned long pc;
 1315:     int is_write;
 1316: 
 1317:     pc = IAR_sig(uc);
 1318:     is_write = 0;
 1319: #if 0
 1320:     /* ppc 4xx case */
 1321:     if (DSISR_sig(uc) & 0x00800000)
 1322:         is_write = 1;
 1323: #else
 1324:     if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
 1325:         is_write = 1;
 1326: #endif
 1327:     return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
 1328:                              is_write, &uc->uc_sigmask, puc);
 1329: }
 1330: 
 1331: #elif defined(__alpha__)
 1332: 
 1333: int cpu_signal_handler(int host_signum, struct siginfo *info, 
 1334:                            void *puc)
 1335: {
 1336:     struct ucontext *uc = puc;
  1337:     uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
 1338:     uint32_t insn = *pc;
 1339:     int is_write = 0;
 1340: 
 1341:     /* XXX: need kernel patch to get write flag faster */
 1342:     switch (insn >> 26) {
 1343:     case 0x0d: // stw
 1344:     case 0x0e: // stb
 1345:     case 0x0f: // stq_u
 1346:     case 0x24: // stf
 1347:     case 0x25: // stg
 1348:     case 0x26: // sts
 1349:     case 0x27: // stt
 1350:     case 0x2c: // stl
 1351:     case 0x2d: // stq
 1352:     case 0x2e: // stl_c
 1353:     case 0x2f: // stq_c
 1354: 	is_write = 1;
 1355:     }
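    /* Editorial note: the Alpha major opcode lives in bits 31:26, so
       insn >> 26 isolates it; every case above is a store form, which is
       why the cases deliberately share the single is_write = 1 body. */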
 1356: 
  1357:     return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr, 
 1358:                              is_write, &uc->uc_sigmask, puc);
 1359: }
 1360: #elif defined(__sparc__)
 1361: 
 1362: int cpu_signal_handler(int host_signum, struct siginfo *info, 
 1363:                        void *puc)
 1364: {
 1365:     uint32_t *regs = (uint32_t *)(info + 1);
 1366:     void *sigmask = (regs + 20);
 1367:     unsigned long pc;
 1368:     int is_write;
 1369:     uint32_t insn;
 1370:     
 1371:     /* XXX: is there a standard glibc define ? */
 1372:     pc = regs[1];
 1373:     /* XXX: need kernel patch to get write flag faster */
 1374:     is_write = 0;
 1375:     insn = *(uint32_t *)pc;
 1376:     if ((insn >> 30) == 3) {
 1377:       switch((insn >> 19) & 0x3f) {
 1378:       case 0x05: // stb
 1379:       case 0x06: // sth
 1380:       case 0x04: // st
 1381:       case 0x07: // std
 1382:       case 0x24: // stf
 1383:       case 0x27: // stdf
 1384:       case 0x25: // stfsr
 1385: 	is_write = 1;
 1386: 	break;
 1387:       }
 1388:     }
 1389:     return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
 1390:                              is_write, sigmask, NULL);
 1391: }
 1392: 
 1393: #elif defined(__arm__)
 1394: 
 1395: int cpu_signal_handler(int host_signum, struct siginfo *info, 
 1396:                        void *puc)
 1397: {
 1398:     struct ucontext *uc = puc;
 1399:     unsigned long pc;
 1400:     int is_write;
 1401:     
 1402:     pc = uc->uc_mcontext.gregs[R15];
 1403:     /* XXX: compute is_write */
 1404:     is_write = 0;
 1405:     return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
 1406:                              is_write,
  1407:                              &uc->uc_sigmask, puc);
 1408: }
 1409: 
 1410: #elif defined(__mc68000)
 1411: 
 1412: int cpu_signal_handler(int host_signum, struct siginfo *info, 
 1413:                        void *puc)
 1414: {
 1415:     struct ucontext *uc = puc;
 1416:     unsigned long pc;
 1417:     int is_write;
 1418:     
 1419:     pc = uc->uc_mcontext.gregs[16];
 1420:     /* XXX: compute is_write */
 1421:     is_write = 0;
 1422:     return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
 1423:                              is_write,
 1424:                              &uc->uc_sigmask, puc);
 1425: }
 1426: 
 1427: #elif defined(__ia64)
 1428: 
 1429: #ifndef __ISR_VALID
 1430:   /* This ought to be in <bits/siginfo.h>... */
 1431: # define __ISR_VALID	1
 1432: #endif
 1433: 
 1434: int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
 1435: {
 1436:     struct ucontext *uc = puc;
 1437:     unsigned long ip;
 1438:     int is_write = 0;
 1439: 
 1440:     ip = uc->uc_mcontext.sc_ip;
 1441:     switch (host_signum) {
 1442:       case SIGILL:
 1443:       case SIGFPE:
 1444:       case SIGSEGV:
 1445:       case SIGBUS:
 1446:       case SIGTRAP:
 1447: 	  if (info->si_code && (info->si_segvflags & __ISR_VALID))
 1448: 	      /* ISR.W (write-access) is bit 33:  */
 1449: 	      is_write = (info->si_isr >> 33) & 1;
 1450: 	  break;
 1451: 
 1452:       default:
 1453: 	  break;
 1454:     }
 1455:     return handle_cpu_signal(ip, (unsigned long)info->si_addr,
 1456:                              is_write,
 1457:                              &uc->uc_sigmask, puc);
 1458: }
 1459: 
 1460: #elif defined(__s390__)
 1461: 
 1462: int cpu_signal_handler(int host_signum, struct siginfo *info, 
 1463:                        void *puc)
 1464: {
 1465:     struct ucontext *uc = puc;
 1466:     unsigned long pc;
 1467:     int is_write;
 1468:     
 1469:     pc = uc->uc_mcontext.psw.addr;
 1470:     /* XXX: compute is_write */
 1471:     is_write = 0;
 1472:     return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
 1473:                              is_write,
 1474:                              &uc->uc_sigmask, puc);
 1475: }
 1476: 
 1477: #else
 1478: 
 1479: #error host CPU specific signal handler needed
 1480: 
 1481: #endif
 1482: 
 1483: #endif /* !defined(CONFIG_SOFTMMU) */
