Annotation of qemu/cpu-exec.c, revision 1.1.1.8

1.1       root        1: /*
                      2:  *  i386 emulator main execution loop
1.1.1.6   root        3:  *
1.1       root        4:  *  Copyright (c) 2003-2005 Fabrice Bellard
                      5:  *
                      6:  * This library is free software; you can redistribute it and/or
                      7:  * modify it under the terms of the GNU Lesser General Public
                      8:  * License as published by the Free Software Foundation; either
                      9:  * version 2 of the License, or (at your option) any later version.
                     10:  *
                     11:  * This library is distributed in the hope that it will be useful,
                     12:  * but WITHOUT ANY WARRANTY; without even the implied warranty of
                     13:  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
                     14:  * Lesser General Public License for more details.
                     15:  *
                     16:  * You should have received a copy of the GNU Lesser General Public
                     17:  * License along with this library; if not, write to the Free Software
1.1.1.7   root       18:  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
1.1       root       19:  */
                     20: #include "config.h"
1.1.1.7   root       21: #define CPU_NO_GLOBAL_REGS
1.1       root       22: #include "exec.h"
                     23: #include "disas.h"
1.1.1.7   root       24: #include "tcg.h"
                     25: #include "kvm.h"
1.1       root       26: 
                     27: #if !defined(CONFIG_SOFTMMU)
                     28: #undef EAX
                     29: #undef ECX
                     30: #undef EDX
                     31: #undef EBX
                     32: #undef ESP
                     33: #undef EBP
                     34: #undef ESI
                     35: #undef EDI
                     36: #undef EIP
                     37: #include <signal.h>
1.1.1.7   root       38: #ifdef __linux__
1.1       root       39: #include <sys/ucontext.h>
                     40: #endif
1.1.1.7   root       41: #endif
1.1.1.6   root       42: 
                     43: #if defined(__sparc__) && !defined(HOST_SOLARIS)
                     44: // Work around ugly bugs in glibc that mangle global register contents
1.1.1.7   root       45: #undef env
                     46: #define env cpu_single_env
                     47: #endif
1.1.1.6   root       48: 
1.1.1.7   root       49: int tb_invalidated_flag;
1.1.1.6   root       50: 
1.1.1.7   root       51: //#define DEBUG_EXEC
                     52: //#define DEBUG_SIGNAL
1.1.1.6   root       53: 
1.1       root       54: void cpu_loop_exit(void)
                     55: {
1.1.1.6   root        56:     /* NOTE: the registers at this point must be saved by hand because
                      57:        longjmp restores them */
                     58:     regs_to_env();
1.1       root       59:     longjmp(env->jmp_env, 1);
                     60: }
1.1.1.6   root       61: 
1.1       root       62: /* exit the current TB from a signal handler. The host registers are
                     63:    restored in a state compatible with the CPU emulator
                     64:  */
1.1.1.6   root       65: void cpu_resume_from_signal(CPUState *env1, void *puc)
1.1       root       66: {
                     67: #if !defined(CONFIG_SOFTMMU)
1.1.1.7   root       68: #ifdef __linux__
1.1       root       69:     struct ucontext *uc = puc;
1.1.1.7   root       70: #elif defined(__OpenBSD__)
                     71:     struct sigcontext *uc = puc;
                     72: #endif
1.1       root       73: #endif
                     74: 
                     75:     env = env1;
                     76: 
                     77:     /* XXX: restore cpu registers saved in host registers */
                     78: 
                     79: #if !defined(CONFIG_SOFTMMU)
                     80:     if (puc) {
                     81:         /* XXX: use siglongjmp ? */
1.1.1.7   root       82: #ifdef __linux__
1.1       root       83:         sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
1.1.1.7   root       84: #elif defined(__OpenBSD__)
                     85:         sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
                     86: #endif
1.1       root       87:     }
                     88: #endif
1.1.1.7   root       89:     env->exception_index = -1;
1.1       root       90:     longjmp(env->jmp_env, 1);
                     91: }
                     92: 
1.1.1.7   root       93: /* Execute the code without caching the generated code. An interpreter
                     94:    could be used if available. */
                     95: static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
                     96: {
                     97:     unsigned long next_tb;
                     98:     TranslationBlock *tb;
                     99: 
                    100:     /* Should never happen.
                    101:        We only end up here when an existing TB is too long.  */
                    102:     if (max_cycles > CF_COUNT_MASK)
                    103:         max_cycles = CF_COUNT_MASK;
                    104: 
                    105:     tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                    106:                      max_cycles);
                    107:     env->current_tb = tb;
                    108:     /* execute the generated code */
                    109:     next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
                    110: 
                    111:     if ((next_tb & 3) == 2) {
                     112:         /* Restore PC.  This may happen if an async event occurs before
                    113:            the TB starts executing.  */
                    114:         cpu_pc_from_tb(env, tb);
                    115:     }
                    116:     tb_phys_invalidate(tb, -1);
                    117:     tb_free(tb);
                    118: }
1.1.1.2   root      119: 
                    120: static TranslationBlock *tb_find_slow(target_ulong pc,
                    121:                                       target_ulong cs_base,
1.1.1.6   root      122:                                       uint64_t flags)
1.1.1.2   root      123: {
                    124:     TranslationBlock *tb, **ptb1;
                    125:     unsigned int h;
                    126:     target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
                    127: 
                    128:     tb_invalidated_flag = 0;
1.1.1.6   root      129: 
1.1.1.2   root      130:     regs_to_env(); /* XXX: do it just before cpu_gen_code() */
1.1.1.6   root      131: 
1.1.1.2   root      132:     /* find translated block using physical mappings */
                    133:     phys_pc = get_phys_addr_code(env, pc);
                    134:     phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    135:     phys_page2 = -1;
                    136:     h = tb_phys_hash_func(phys_pc);
                    137:     ptb1 = &tb_phys_hash[h];
                    138:     for(;;) {
                    139:         tb = *ptb1;
                    140:         if (!tb)
                    141:             goto not_found;
1.1.1.6   root      142:         if (tb->pc == pc &&
1.1.1.2   root      143:             tb->page_addr[0] == phys_page1 &&
1.1.1.6   root      144:             tb->cs_base == cs_base &&
1.1.1.2   root      145:             tb->flags == flags) {
                    146:             /* check next page if needed */
                    147:             if (tb->page_addr[1] != -1) {
1.1.1.6   root      148:                 virt_page2 = (pc & TARGET_PAGE_MASK) +
1.1.1.2   root      149:                     TARGET_PAGE_SIZE;
                    150:                 phys_page2 = get_phys_addr_code(env, virt_page2);
                    151:                 if (tb->page_addr[1] == phys_page2)
                    152:                     goto found;
                    153:             } else {
                    154:                 goto found;
                    155:             }
                    156:         }
                    157:         ptb1 = &tb->phys_hash_next;
                    158:     }
                    159:  not_found:
1.1.1.7   root      160:     /* if no translated code is available, then translate it now */
                    161:     tb = tb_gen_code(env, pc, cs_base, flags, 0);
1.1.1.6   root      162: 
1.1.1.2   root      163:  found:
                     164:     /* we add the TB to the virtual pc hash table */
                    165:     env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
                    166:     return tb;
                    167: }
                    168: 
                    169: static inline TranslationBlock *tb_find_fast(void)
                    170: {
                    171:     TranslationBlock *tb;
                    172:     target_ulong cs_base, pc;
1.1.1.7   root      173:     int flags;
1.1.1.2   root      174: 
                    175:     /* we record a subset of the CPU state. It will
                    176:        always be the same before a given translated block
                    177:        is executed. */
1.1.1.7   root      178:     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1.1.1.2   root      179:     tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
1.1.1.7   root      180:     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                    181:                  tb->flags != flags)) {
1.1.1.2   root      182:         tb = tb_find_slow(pc, cs_base, flags);
                    183:     }
                    184:     return tb;
                    185: }
                    186: 
1.1.1.7   root      187: static CPUDebugExcpHandler *debug_excp_handler;
                    188: 
                    189: CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
                    190: {
                    191:     CPUDebugExcpHandler *old_handler = debug_excp_handler;
                    192: 
                    193:     debug_excp_handler = handler;
                    194:     return old_handler;
                    195: }
                    196: 
                    197: static void cpu_handle_debug_exception(CPUState *env)
                    198: {
                    199:     CPUWatchpoint *wp;
                    200: 
                    201:     if (!env->watchpoint_hit)
                    202:         TAILQ_FOREACH(wp, &env->watchpoints, entry)
                    203:             wp->flags &= ~BP_WATCHPOINT_HIT;
                    204: 
                    205:     if (debug_excp_handler)
                    206:         debug_excp_handler(env);
                    207: }
1.1.1.2   root      208: 
1.1       root      209: /* main execution loop */
                    210: 
                    211: int cpu_exec(CPUState *env1)
                    212: {
1.1.1.5   root      213: #define DECLARE_HOST_REGS 1
                    214: #include "hostregs_helper.h"
1.1.1.2   root      215:     int ret, interrupt_request;
                    216:     TranslationBlock *tb;
1.1       root      217:     uint8_t *tc_ptr;
1.1.1.7   root      218:     unsigned long next_tb;
1.1.1.2   root      219: 
1.1.1.6   root      220:     if (cpu_halted(env1) == EXCP_HALTED)
                    221:         return EXCP_HALTED;
1.1.1.2   root      222: 
1.1.1.6   root      223:     cpu_single_env = env1;
1.1       root      224: 
                    225:     /* first we save global registers */
1.1.1.5   root      226: #define SAVE_HOST_REGS 1
                    227: #include "hostregs_helper.h"
1.1       root      228:     env = env1;
                    229: 
                    230:     env_to_regs();
1.1.1.6   root      231: #if defined(TARGET_I386)
1.1       root      232:     /* put eflags in CPU temporary format */
                    233:     CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                    234:     DF = 1 - (2 * ((env->eflags >> 10) & 1));
                    235:     CC_OP = CC_OP_EFLAGS;
                    236:     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                    237: #elif defined(TARGET_SPARC)
1.1.1.5   root      238: #elif defined(TARGET_M68K)
                    239:     env->cc_op = CC_OP_FLAGS;
                    240:     env->cc_dest = env->sr & 0xf;
                    241:     env->cc_x = (env->sr >> 4) & 1;
1.1.1.6   root      242: #elif defined(TARGET_ALPHA)
                    243: #elif defined(TARGET_ARM)
                    244: #elif defined(TARGET_PPC)
1.1       root      245: #elif defined(TARGET_MIPS)
1.1.1.3   root      246: #elif defined(TARGET_SH4)
1.1.1.6   root      247: #elif defined(TARGET_CRIS)
1.1.1.3   root      248:     /* XXXXX */
1.1       root      249: #else
                    250: #error unsupported target CPU
                    251: #endif
                    252:     env->exception_index = -1;
                    253: 
                    254:     /* prepare setjmp context for exception handling */
                    255:     for(;;) {
                    256:         if (setjmp(env->jmp_env) == 0) {
                    257:             env->current_tb = NULL;
                    258:             /* if an exception is pending, we execute it here */
                    259:             if (env->exception_index >= 0) {
                    260:                 if (env->exception_index >= EXCP_INTERRUPT) {
                    261:                     /* exit request from the cpu execution loop */
                    262:                     ret = env->exception_index;
1.1.1.7   root      263:                     if (ret == EXCP_DEBUG)
                    264:                         cpu_handle_debug_exception(env);
1.1       root      265:                     break;
1.1.1.7   root      266:                 } else {
                    267: #if defined(CONFIG_USER_ONLY)
1.1       root      268:                     /* if user mode only, we simulate a fake exception
1.1.1.5   root      269:                        which will be handled outside the cpu execution
1.1       root      270:                        loop */
                    271: #if defined(TARGET_I386)
1.1.1.6   root      272:                     do_interrupt_user(env->exception_index,
                    273:                                       env->exception_is_int,
                    274:                                       env->error_code,
1.1       root      275:                                       env->exception_next_eip);
1.1.1.7   root      276:                     /* successfully delivered */
                    277:                     env->old_exception = -1;
1.1       root      278: #endif
                    279:                     ret = env->exception_index;
                    280:                     break;
1.1.1.7   root      281: #else
1.1       root      282: #if defined(TARGET_I386)
                    283:                     /* simulate a real cpu exception. On i386, it can
                    284:                        trigger new exceptions, but we do not handle
                    285:                        double or triple faults yet. */
1.1.1.6   root      286:                     do_interrupt(env->exception_index,
                    287:                                  env->exception_is_int,
                    288:                                  env->error_code,
1.1       root      289:                                  env->exception_next_eip, 0);
1.1.1.6   root      290:                     /* successfully delivered */
                    291:                     env->old_exception = -1;
1.1       root      292: #elif defined(TARGET_PPC)
                    293:                     do_interrupt(env);
                    294: #elif defined(TARGET_MIPS)
                    295:                     do_interrupt(env);
                    296: #elif defined(TARGET_SPARC)
1.1.1.7   root      297:                     do_interrupt(env);
1.1.1.2   root      298: #elif defined(TARGET_ARM)
                    299:                     do_interrupt(env);
1.1.1.3   root      300: #elif defined(TARGET_SH4)
                    301:                    do_interrupt(env);
1.1.1.6   root      302: #elif defined(TARGET_ALPHA)
                    303:                     do_interrupt(env);
                    304: #elif defined(TARGET_CRIS)
                    305:                     do_interrupt(env);
                    306: #elif defined(TARGET_M68K)
                    307:                     do_interrupt(0);
1.1       root      308: #endif
1.1.1.7   root      309: #endif
1.1       root      310:                 }
                    311:                 env->exception_index = -1;
1.1.1.6   root      312:             }
1.1       root      313: #ifdef USE_KQEMU
1.1.1.8 ! root      314:             if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
1.1       root      315:                 int ret;
1.1.1.7   root      316:                 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
1.1       root      317:                 ret = kqemu_cpu_exec(env);
                    318:                 /* put eflags in CPU temporary format */
                    319:                 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                    320:                 DF = 1 - (2 * ((env->eflags >> 10) & 1));
                    321:                 CC_OP = CC_OP_EFLAGS;
                    322:                 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                    323:                 if (ret == 1) {
                    324:                     /* exception */
                    325:                     longjmp(env->jmp_env, 1);
                    326:                 } else if (ret == 2) {
                    327:                     /* softmmu execution needed */
                    328:                 } else {
1.1.1.8 ! root      329:                     if (env->interrupt_request != 0 || env->exit_request != 0) {
1.1       root      330:                         /* hardware interrupt will be executed just after */
                    331:                     } else {
                    332:                         /* otherwise, we restart */
                    333:                         longjmp(env->jmp_env, 1);
                    334:                     }
                    335:                 }
                    336:             }
                    337: #endif
                    338: 
1.1.1.7   root      339:             if (kvm_enabled()) {
                    340:                 kvm_cpu_exec(env);
                    341:                 longjmp(env->jmp_env, 1);
                    342:             }
                    343: 
                    344:             next_tb = 0; /* force lookup of first TB */
1.1       root      345:             for(;;) {
                    346:                 interrupt_request = env->interrupt_request;
1.1.1.7   root      347:                 if (unlikely(interrupt_request)) {
                    348:                     if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                    349:                         /* Mask out external interrupts for this step. */
                    350:                         interrupt_request &= ~(CPU_INTERRUPT_HARD |
                    351:                                                CPU_INTERRUPT_FIQ |
                    352:                                                CPU_INTERRUPT_SMI |
                    353:                                                CPU_INTERRUPT_NMI);
                    354:                     }
1.1.1.6   root      355:                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                    356:                         env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                    357:                         env->exception_index = EXCP_DEBUG;
                    358:                         cpu_loop_exit();
                    359:                     }
                    360: #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
                    361:     defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    362:                     if (interrupt_request & CPU_INTERRUPT_HALT) {
                    363:                         env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                    364:                         env->halted = 1;
                    365:                         env->exception_index = EXCP_HLT;
                    366:                         cpu_loop_exit();
                    367:                     }
                    368: #endif
1.1       root      369: #if defined(TARGET_I386)
1.1.1.7   root      370:                     if (env->hflags2 & HF2_GIF_MASK) {
                    371:                         if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                    372:                             !(env->hflags & HF_SMM_MASK)) {
                    373:                             svm_check_intercept(SVM_EXIT_SMI);
                    374:                             env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                    375:                             do_smm_enter();
                    376:                             next_tb = 0;
                    377:                         } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                    378:                                    !(env->hflags2 & HF2_NMI_MASK)) {
                    379:                             env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                    380:                             env->hflags2 |= HF2_NMI_MASK;
                    381:                             do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                    382:                             next_tb = 0;
                    383:                         } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    384:                                    (((env->hflags2 & HF2_VINTR_MASK) && 
                    385:                                      (env->hflags2 & HF2_HIF_MASK)) ||
                    386:                                     (!(env->hflags2 & HF2_VINTR_MASK) && 
                    387:                                      (env->eflags & IF_MASK && 
                    388:                                       !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                    389:                             int intno;
                    390:                             svm_check_intercept(SVM_EXIT_INTR);
                    391:                             env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                    392:                             intno = cpu_get_pic_interrupt(env);
                    393:                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                    394:                             do_interrupt(intno, 0, 0, 0, 1);
                    395:                             /* ensure that no TB jump will be modified as
                    396:                                the program flow was changed */
                    397:                             next_tb = 0;
1.1.1.6   root      398: #if !defined(CONFIG_USER_ONLY)
1.1.1.7   root      399:                         } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                    400:                                    (env->eflags & IF_MASK) && 
                    401:                                    !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                    402:                             int intno;
                    403:                             /* FIXME: this should respect TPR */
                    404:                             svm_check_intercept(SVM_EXIT_VINTR);
                    405:                             intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                    406:                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                    407:                             do_interrupt(intno, 0, 0, 0, 1);
                    408:                             env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                    409:                             next_tb = 0;
1.1       root      410: #endif
1.1.1.7   root      411:                         }
1.1       root      412:                     }
                    413: #elif defined(TARGET_PPC)
                    414: #if 0
                    415:                     if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                    416:                         cpu_ppc_reset(env);
                    417:                     }
                    418: #endif
1.1.1.6   root      419:                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                    420:                         ppc_hw_interrupt(env);
                    421:                         if (env->pending_interrupts == 0)
1.1.1.2   root      422:                             env->interrupt_request &= ~CPU_INTERRUPT_HARD;
1.1.1.7   root      423:                         next_tb = 0;
1.1       root      424:                     }
                    425: #elif defined(TARGET_MIPS)
                    426:                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1.1.1.6   root      427:                         (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
1.1       root      428:                         (env->CP0_Status & (1 << CP0St_IE)) &&
1.1.1.6   root      429:                         !(env->CP0_Status & (1 << CP0St_EXL)) &&
                    430:                         !(env->CP0_Status & (1 << CP0St_ERL)) &&
1.1       root      431:                         !(env->hflags & MIPS_HFLAG_DM)) {
                    432:                         /* Raise it */
                    433:                         env->exception_index = EXCP_EXT_INTERRUPT;
                    434:                         env->error_code = 0;
                    435:                         do_interrupt(env);
1.1.1.7   root      436:                         next_tb = 0;
1.1       root      437:                     }
                    438: #elif defined(TARGET_SPARC)
                    439:                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    440:                        (env->psret != 0)) {
                    441:                        int pil = env->interrupt_index & 15;
                    442:                        int type = env->interrupt_index & 0xf0;
                    443: 
                    444:                        if (((type == TT_EXTINT) &&
                    445:                             (pil == 15 || pil > env->psrpil)) ||
                    446:                            type != TT_EXTINT) {
                    447:                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
1.1.1.7   root      448:                             env->exception_index = env->interrupt_index;
                    449:                             do_interrupt(env);
1.1       root      450:                            env->interrupt_index = 0;
1.1.1.6   root      451: #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                    452:                             cpu_check_irqs(env);
1.1.1.2   root      453: #endif
1.1.1.7   root      454:                         next_tb = 0;
1.1       root      455:                        }
                    456:                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                    457:                        //do_interrupt(0, 0, 0, 0, 0);
                    458:                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
1.1.1.6   root      459:                    }
1.1.1.2   root      460: #elif defined(TARGET_ARM)
                    461:                     if (interrupt_request & CPU_INTERRUPT_FIQ
                    462:                         && !(env->uncached_cpsr & CPSR_F)) {
                    463:                         env->exception_index = EXCP_FIQ;
                    464:                         do_interrupt(env);
1.1.1.7   root      465:                         next_tb = 0;
1.1.1.2   root      466:                     }
1.1.1.6   root      467:                     /* ARMv7-M interrupt return works by loading a magic value
                    468:                        into the PC.  On real hardware the load causes the
                    469:                        return to occur.  The qemu implementation performs the
                    470:                        jump normally, then does the exception return when the
                    471:                        CPU tries to execute code at the magic address.
                    472:                        This will cause the magic PC value to be pushed to
                     473:                        the stack if an interrupt occurred at the wrong time.
                    474:                        We avoid this by disabling interrupts when
                    475:                        pc contains a magic address.  */
1.1.1.2   root      476:                     if (interrupt_request & CPU_INTERRUPT_HARD
1.1.1.6   root      477:                         && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                    478:                             || !(env->uncached_cpsr & CPSR_I))) {
1.1.1.2   root      479:                         env->exception_index = EXCP_IRQ;
                    480:                         do_interrupt(env);
1.1.1.7   root      481:                         next_tb = 0;
1.1.1.2   root      482:                     }
1.1.1.3   root      483: #elif defined(TARGET_SH4)
1.1.1.6   root      484:                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                    485:                         do_interrupt(env);
1.1.1.7   root      486:                         next_tb = 0;
1.1.1.6   root      487:                     }
                    488: #elif defined(TARGET_ALPHA)
                    489:                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                    490:                         do_interrupt(env);
1.1.1.7   root      491:                         next_tb = 0;
1.1.1.6   root      492:                     }
                    493: #elif defined(TARGET_CRIS)
1.1.1.7   root      494:                     if (interrupt_request & CPU_INTERRUPT_HARD
                    495:                         && (env->pregs[PR_CCS] & I_FLAG)) {
                    496:                         env->exception_index = EXCP_IRQ;
1.1.1.6   root      497:                         do_interrupt(env);
1.1.1.7   root      498:                         next_tb = 0;
                    499:                     }
                    500:                     if (interrupt_request & CPU_INTERRUPT_NMI
                    501:                         && (env->pregs[PR_CCS] & M_FLAG)) {
                    502:                         env->exception_index = EXCP_NMI;
                    503:                         do_interrupt(env);
                    504:                         next_tb = 0;
1.1.1.6   root      505:                     }
                    506: #elif defined(TARGET_M68K)
                    507:                     if (interrupt_request & CPU_INTERRUPT_HARD
                    508:                         && ((env->sr & SR_I) >> SR_I_SHIFT)
                    509:                             < env->pending_level) {
                    510:                         /* Real hardware gets the interrupt vector via an
                    511:                            IACK cycle at this point.  Current emulated
                    512:                            hardware doesn't rely on this, so we
                    513:                            provide/save the vector when the interrupt is
                    514:                            first signalled.  */
                    515:                         env->exception_index = env->pending_vector;
                    516:                         do_interrupt(1);
1.1.1.7   root      517:                         next_tb = 0;
1.1.1.6   root      518:                     }
1.1       root      519: #endif
1.1.1.4   root      520:                    /* Don't use the cached interrupt_request value,
                    521:                       do_interrupt may have updated the EXITTB flag. */
1.1.1.2   root      522:                     if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
1.1       root      523:                         env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                    524:                         /* ensure that no TB jump will be modified as
                    525:                            the program flow was changed */
1.1.1.7   root      526:                         next_tb = 0;
1.1       root      527:                     }
1.1.1.8 ! root      528:                 }
        !           529:                 if (unlikely(env->exit_request)) {
        !           530:                     env->exit_request = 0;
        !           531:                     env->exception_index = EXCP_INTERRUPT;
        !           532:                     cpu_loop_exit();
1.1       root      533:                 }
                    534: #ifdef DEBUG_EXEC
1.1.1.7   root      535:                 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
1.1       root      536:                     /* restore flags in standard format */
1.1.1.6   root      537:                     regs_to_env();
                    538: #if defined(TARGET_I386)
1.1.1.7   root      539:                     env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    540:                     log_cpu_state(env, X86_DUMP_CCOP);
1.1       root      541:                     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                    542: #elif defined(TARGET_ARM)
1.1.1.7   root      543:                     log_cpu_state(env, 0);
1.1       root      544: #elif defined(TARGET_SPARC)
1.1.1.7   root      545:                     log_cpu_state(env, 0);
1.1       root      546: #elif defined(TARGET_PPC)
1.1.1.7   root      547:                     log_cpu_state(env, 0);
1.1.1.5   root      548: #elif defined(TARGET_M68K)
                    549:                     cpu_m68k_flush_flags(env, env->cc_op);
                    550:                     env->cc_op = CC_OP_FLAGS;
                    551:                     env->sr = (env->sr & 0xffe0)
                    552:                               | env->cc_dest | (env->cc_x << 4);
1.1.1.7   root      553:                     log_cpu_state(env, 0);
1.1       root      554: #elif defined(TARGET_MIPS)
1.1.1.7   root      555:                     log_cpu_state(env, 0);
1.1.1.3   root      556: #elif defined(TARGET_SH4)
1.1.1.7   root      557:                    log_cpu_state(env, 0);
1.1.1.6   root      558: #elif defined(TARGET_ALPHA)
1.1.1.7   root      559:                     log_cpu_state(env, 0);
1.1.1.6   root      560: #elif defined(TARGET_CRIS)
1.1.1.7   root      561:                     log_cpu_state(env, 0);
1.1       root      562: #else
1.1.1.6   root      563: #error unsupported target CPU
1.1       root      564: #endif
                    565:                 }
                    566: #endif
1.1.1.7   root      567:                 spin_lock(&tb_lock);
1.1.1.2   root      568:                 tb = tb_find_fast();
1.1.1.7   root      569:                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
                    570:                    doing it in tb_find_slow */
                    571:                 if (tb_invalidated_flag) {
                    572:                     /* as some TB could have been invalidated because
                    573:                        of memory exceptions while generating the code, we
                    574:                        must recompute the hash index here */
                    575:                     next_tb = 0;
                    576:                     tb_invalidated_flag = 0;
1.1       root      577:                 }
1.1.1.7   root      578: #ifdef DEBUG_EXEC
                    579:                 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                    580:                              (long)tb->tc_ptr, tb->pc,
                    581:                              lookup_symbol(tb->pc));
1.1       root      582: #endif
1.1.1.2   root      583:                 /* see if we can patch the calling TB. When the TB
                    584:                    spans two pages, we cannot safely do a direct
                    585:                    jump. */
1.1       root      586:                 {
1.1.1.7   root      587:                     if (next_tb != 0 &&
                    588: #ifdef USE_KQEMU
1.1.1.3   root      589:                         (env->kqemu_enabled != 2) &&
                    590: #endif
1.1.1.6   root      591:                         tb->page_addr[1] == -1) {
1.1.1.7   root      592:                         tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
1.1       root      593:                     }
                    594:                 }
1.1.1.7   root      595:                 spin_unlock(&tb_lock);
1.1       root      596:                 env->current_tb = tb;
1.1.1.7   root      597: 
                    598:                 /* cpu_interrupt might be called while translating the
                    599:                    TB, but before it is linked into a potentially
                    600:                    infinite loop and becomes env->current_tb. Avoid
                    601:                    starting execution if there is a pending interrupt. */
1.1.1.8 ! root      602:                 if (unlikely (env->exit_request))
1.1.1.7   root      603:                     env->current_tb = NULL;
                    604: 
                    605:                 while (env->current_tb) {
                    606:                     tc_ptr = tb->tc_ptr;
1.1       root      607:                 /* execute the generated code */
1.1.1.7   root      608: #if defined(__sparc__) && !defined(HOST_SOLARIS)
                    609: #undef env
                    610:                     env = cpu_single_env;
                    611: #define env cpu_single_env
                    612: #endif
                    613:                     next_tb = tcg_qemu_tb_exec(tc_ptr);
                    614:                     env->current_tb = NULL;
                    615:                     if ((next_tb & 3) == 2) {
                    616:                         /* Instruction counter expired.  */
                    617:                         int insns_left;
                    618:                         tb = (TranslationBlock *)(long)(next_tb & ~3);
                    619:                         /* Restore PC.  */
                    620:                         cpu_pc_from_tb(env, tb);
                    621:                         insns_left = env->icount_decr.u32;
                    622:                         if (env->icount_extra && insns_left >= 0) {
                    623:                             /* Refill decrementer and continue execution.  */
                    624:                             env->icount_extra += insns_left;
                    625:                             if (env->icount_extra > 0xffff) {
                    626:                                 insns_left = 0xffff;
                    627:                             } else {
                    628:                                 insns_left = env->icount_extra;
                    629:                             }
                    630:                             env->icount_extra -= insns_left;
                    631:                             env->icount_decr.u16.low = insns_left;
                    632:                         } else {
                    633:                             if (insns_left > 0) {
                    634:                                 /* Execute remaining instructions.  */
                    635:                                 cpu_exec_nocache(insns_left, tb);
                    636:                             }
                    637:                             env->exception_index = EXCP_INTERRUPT;
                    638:                             next_tb = 0;
                    639:                             cpu_loop_exit();
                    640:                         }
                    641:                     }
                    642:                 }
1.1       root      643:                 /* reset soft MMU for next block (it can currently
                    644:                    only be set by a memory fault) */
1.1.1.3   root      645: #if defined(USE_KQEMU)
                    646: #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                    647:                 if (kqemu_is_ok(env) &&
                    648:                     (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    649:                     cpu_loop_exit();
                    650:                 }
                    651: #endif
1.1.1.6   root      652:             } /* for(;;) */
1.1       root      653:         } else {
                    654:             env_to_regs();
                    655:         }
                    656:     } /* for(;;) */
                    657: 
                    658: 
                    659: #if defined(TARGET_I386)
                    660:     /* restore flags in standard format */
1.1.1.7   root      661:     env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
1.1       root      662: #elif defined(TARGET_ARM)
                     663:     /* XXX: Save/restore host fpu exception state?  */
                    664: #elif defined(TARGET_SPARC)
                    665: #elif defined(TARGET_PPC)
1.1.1.5   root      666: #elif defined(TARGET_M68K)
                    667:     cpu_m68k_flush_flags(env, env->cc_op);
                    668:     env->cc_op = CC_OP_FLAGS;
                    669:     env->sr = (env->sr & 0xffe0)
                    670:               | env->cc_dest | (env->cc_x << 4);
1.1       root      671: #elif defined(TARGET_MIPS)
1.1.1.3   root      672: #elif defined(TARGET_SH4)
1.1.1.6   root      673: #elif defined(TARGET_ALPHA)
                    674: #elif defined(TARGET_CRIS)
1.1.1.3   root      675:     /* XXXXX */
1.1       root      676: #else
                    677: #error unsupported target CPU
                    678: #endif
1.1.1.5   root      679: 
                    680:     /* restore global registers */
                    681: #include "hostregs_helper.h"
                    682: 
1.1.1.2   root      683:     /* fail safe : never use cpu_single_env outside cpu_exec() */
1.1.1.6   root      684:     cpu_single_env = NULL;
1.1       root      685:     return ret;
                    686: }
                    687: 
                    688: /* must only be called from the generated code as an exception can be
                    689:    generated */
                    690: void tb_invalidate_page_range(target_ulong start, target_ulong end)
                    691: {
                     692:     /* XXX: cannot enable it yet because it leads to an MMU exception
                    693:        where NIP != read address on PowerPC */
                    694: #if 0
                    695:     target_ulong phys_addr;
                    696:     phys_addr = get_phys_addr_code(env, start);
                    697:     tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
                    698: #endif
                    699: }
                    700: 
                    701: #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
                    702: 
                    703: void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
                    704: {
                    705:     CPUX86State *saved_env;
                    706: 
                    707:     saved_env = env;
                    708:     env = s;
                    709:     if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
                    710:         selector &= 0xffff;
1.1.1.6   root      711:         cpu_x86_load_seg_cache(env, seg_reg, selector,
1.1       root      712:                                (selector << 4), 0xffff, 0);
                    713:     } else {
1.1.1.7   root      714:         helper_load_seg(seg_reg, selector);
1.1       root      715:     }
                    716:     env = saved_env;
                    717: }
                    718: 
1.1.1.6   root      719: void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
1.1       root      720: {
                    721:     CPUX86State *saved_env;
                    722: 
                    723:     saved_env = env;
                    724:     env = s;
1.1.1.6   root      725: 
                    726:     helper_fsave(ptr, data32);
1.1       root      727: 
                    728:     env = saved_env;
                    729: }
                    730: 
1.1.1.6   root      731: void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
1.1       root      732: {
                    733:     CPUX86State *saved_env;
                    734: 
                    735:     saved_env = env;
                    736:     env = s;
1.1.1.6   root      737: 
                    738:     helper_frstor(ptr, data32);
1.1       root      739: 
                    740:     env = saved_env;
                    741: }
                    742: 
                    743: #endif /* TARGET_I386 */
                    744: 
                    745: #if !defined(CONFIG_SOFTMMU)
                    746: 
                    747: #if defined(TARGET_I386)
                    748: 
                    749: /* 'pc' is the host PC at which the exception was raised. 'address' is
                    750:    the effective address of the memory exception. 'is_write' is 1 if a
                     751:    write caused the exception and otherwise 0. 'old_set' is the
                    752:    signal set which should be restored */
                    753: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1.1.1.6   root      754:                                     int is_write, sigset_t *old_set,
1.1       root      755:                                     void *puc)
                    756: {
                    757:     TranslationBlock *tb;
                    758:     int ret;
                    759: 
                    760:     if (cpu_single_env)
                    761:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
                    762: #if defined(DEBUG_SIGNAL)
1.1.1.6   root      763:     qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1.1       root      764:                 pc, address, is_write, *(unsigned long *)old_set);
                    765: #endif
                    766:     /* XXX: locking issue */
1.1.1.3   root      767:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
1.1       root      768:         return 1;
                    769:     }
                    770: 
                    771:     /* see if it is an MMU fault */
1.1.1.6   root      772:     ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1.1       root      773:     if (ret < 0)
                    774:         return 0; /* not an MMU fault */
                    775:     if (ret == 0)
                    776:         return 1; /* the MMU fault was handled without causing real CPU fault */
                    777:     /* now we have a real cpu fault */
                    778:     tb = tb_find_pc(pc);
                    779:     if (tb) {
                    780:         /* the PC is inside the translated code. It means that we have
                    781:            a virtual CPU fault */
                    782:         cpu_restore_state(tb, env, pc, puc);
                    783:     }
                    784:     if (ret == 1) {
                    785: #if 0
1.1.1.6   root      786:         printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
1.1       root      787:                env->eip, env->cr[2], env->error_code);
                    788: #endif
                    789:         /* we restore the process signal mask as the sigreturn should
                    790:            do it (XXX: use sigsetjmp) */
                    791:         sigprocmask(SIG_SETMASK, old_set, NULL);
1.1.1.2   root      792:         raise_exception_err(env->exception_index, env->error_code);
1.1       root      793:     } else {
                    794:         /* activate soft MMU for this block */
                    795:         env->hflags |= HF_SOFTMMU_MASK;
                    796:         cpu_resume_from_signal(env, puc);
                    797:     }
                    798:     /* never comes here */
                    799:     return 1;
                    800: }
                    801: 
                    802: #elif defined(TARGET_ARM)
                    803: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                    804:                                     int is_write, sigset_t *old_set,
                    805:                                     void *puc)
                    806: {
                    807:     TranslationBlock *tb;
                    808:     int ret;
                    809: 
                    810:     if (cpu_single_env)
                    811:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
                    812: #if defined(DEBUG_SIGNAL)
1.1.1.6   root      813:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1.1       root      814:            pc, address, is_write, *(unsigned long *)old_set);
                    815: #endif
                    816:     /* XXX: locking issue */
1.1.1.3   root      817:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
1.1       root      818:         return 1;
                    819:     }
                    820:     /* see if it is an MMU fault */
1.1.1.6   root      821:     ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1.1       root      822:     if (ret < 0)
                    823:         return 0; /* not an MMU fault */
                    824:     if (ret == 0)
                    825:         return 1; /* the MMU fault was handled without causing real CPU fault */
                    826:     /* now we have a real cpu fault */
                    827:     tb = tb_find_pc(pc);
                    828:     if (tb) {
                    829:         /* the PC is inside the translated code. It means that we have
                    830:            a virtual CPU fault */
                    831:         cpu_restore_state(tb, env, pc, puc);
                    832:     }
                    833:     /* we restore the process signal mask as the sigreturn should
                    834:        do it (XXX: use sigsetjmp) */
                    835:     sigprocmask(SIG_SETMASK, old_set, NULL);
                    836:     cpu_loop_exit();
1.1.1.7   root      837:     /* never comes here */
                    838:     return 1;
1.1       root      839: }
                    840: #elif defined(TARGET_SPARC)
                    841: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                    842:                                     int is_write, sigset_t *old_set,
                    843:                                     void *puc)
                    844: {
                    845:     TranslationBlock *tb;
                    846:     int ret;
                    847: 
                    848:     if (cpu_single_env)
                    849:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
                    850: #if defined(DEBUG_SIGNAL)
1.1.1.6   root      851:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1.1       root      852:            pc, address, is_write, *(unsigned long *)old_set);
                    853: #endif
                    854:     /* XXX: locking issue */
1.1.1.3   root      855:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
1.1       root      856:         return 1;
                    857:     }
                    858:     /* see if it is an MMU fault */
1.1.1.6   root      859:     ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1.1       root      860:     if (ret < 0)
                    861:         return 0; /* not an MMU fault */
                    862:     if (ret == 0)
                    863:         return 1; /* the MMU fault was handled without causing real CPU fault */
                    864:     /* now we have a real cpu fault */
                    865:     tb = tb_find_pc(pc);
                    866:     if (tb) {
                    867:         /* the PC is inside the translated code. It means that we have
                    868:            a virtual CPU fault */
                    869:         cpu_restore_state(tb, env, pc, puc);
                    870:     }
                    871:     /* we restore the process signal mask as the sigreturn should
                    872:        do it (XXX: use sigsetjmp) */
                    873:     sigprocmask(SIG_SETMASK, old_set, NULL);
                    874:     cpu_loop_exit();
1.1.1.7   root      875:     /* never comes here */
                    876:     return 1;
1.1       root      877: }
                    878: #elif defined (TARGET_PPC)
                    879: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                    880:                                     int is_write, sigset_t *old_set,
                    881:                                     void *puc)
                    882: {
                    883:     TranslationBlock *tb;
                    884:     int ret;
1.1.1.6   root      885: 
1.1       root      886:     if (cpu_single_env)
                    887:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
                    888: #if defined(DEBUG_SIGNAL)
1.1.1.6   root      889:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1.1       root      890:            pc, address, is_write, *(unsigned long *)old_set);
                    891: #endif
                    892:     /* XXX: locking issue */
1.1.1.3   root      893:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
1.1       root      894:         return 1;
                    895:     }
                    896: 
                    897:     /* see if it is an MMU fault */
1.1.1.6   root      898:     ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1.1       root      899:     if (ret < 0)
                    900:         return 0; /* not an MMU fault */
                    901:     if (ret == 0)
                    902:         return 1; /* the MMU fault was handled without causing real CPU fault */
                    903: 
                    904:     /* now we have a real cpu fault */
                    905:     tb = tb_find_pc(pc);
                    906:     if (tb) {
                    907:         /* the PC is inside the translated code. It means that we have
                    908:            a virtual CPU fault */
                    909:         cpu_restore_state(tb, env, pc, puc);
                    910:     }
                    911:     if (ret == 1) {
                    912: #if 0
1.1.1.6   root      913:         printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1.1       root      914:                env->nip, env->error_code, tb);
                    915: #endif
                     916:         /* we restore the process signal mask as the sigreturn should
                     917:            do it (XXX: use sigsetjmp) */
                    918:         sigprocmask(SIG_SETMASK, old_set, NULL);
1.1.1.7   root      919:         cpu_loop_exit();
1.1       root      920:     } else {
                    921:         /* activate soft MMU for this block */
                    922:         cpu_resume_from_signal(env, puc);
                    923:     }
                     924:     /* never reached */
                    925:     return 1;
                    926: }
                    927: 
1.1.1.5   root      928: #elif defined(TARGET_M68K)
                    929: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                    930:                                     int is_write, sigset_t *old_set,
                    931:                                     void *puc)
                    932: {
                    933:     TranslationBlock *tb;
                    934:     int ret;
                    935: 
                    936:     if (cpu_single_env)
                    937:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
                    938: #if defined(DEBUG_SIGNAL)
1.1.1.6   root      939:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1.1.1.5   root      940:            pc, address, is_write, *(unsigned long *)old_set);
                    941: #endif
                    942:     /* XXX: locking issue */
                    943:     if (is_write && page_unprotect(address, pc, puc)) {
                    944:         return 1;
                    945:     }
                    946:     /* see if it is an MMU fault */
1.1.1.6   root      947:     ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1.1.1.5   root      948:     if (ret < 0)
                    949:         return 0; /* not an MMU fault */
                    950:     if (ret == 0)
                    951:         return 1; /* the MMU fault was handled without causing real CPU fault */
                    952:     /* now we have a real cpu fault */
                    953:     tb = tb_find_pc(pc);
                    954:     if (tb) {
                    955:         /* the PC is inside the translated code. It means that we have
                    956:            a virtual CPU fault */
                    957:         cpu_restore_state(tb, env, pc, puc);
                    958:     }
                    959:     /* we restore the process signal mask as the sigreturn should
                    960:        do it (XXX: use sigsetjmp) */
                    961:     sigprocmask(SIG_SETMASK, old_set, NULL);
                    962:     cpu_loop_exit();
                     963:     /* never reached */
                    964:     return 1;
                    965: }
                    966: 
1.1       root      967: #elif defined (TARGET_MIPS)
                    968: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                    969:                                     int is_write, sigset_t *old_set,
                    970:                                     void *puc)
                    971: {
                    972:     TranslationBlock *tb;
                    973:     int ret;
1.1.1.6   root      974: 
1.1       root      975:     if (cpu_single_env)
                    976:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
                    977: #if defined(DEBUG_SIGNAL)
1.1.1.6   root      978:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1.1       root      979:            pc, address, is_write, *(unsigned long *)old_set);
                    980: #endif
                    981:     /* XXX: locking issue */
1.1.1.3   root      982:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
1.1       root      983:         return 1;
                    984:     }
                    985: 
                    986:     /* see if it is an MMU fault */
1.1.1.6   root      987:     ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1.1       root      988:     if (ret < 0)
                    989:         return 0; /* not an MMU fault */
                    990:     if (ret == 0)
                    991:         return 1; /* the MMU fault was handled without causing real CPU fault */
                    992: 
                    993:     /* now we have a real cpu fault */
                    994:     tb = tb_find_pc(pc);
                    995:     if (tb) {
                    996:         /* the PC is inside the translated code. It means that we have
                    997:            a virtual CPU fault */
                    998:         cpu_restore_state(tb, env, pc, puc);
                    999:     }
                   1000:     if (ret == 1) {
                   1001: #if 0
1.1.1.6   root     1002:         printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
                   1003:                env->PC, env->error_code, tb);
1.1       root     1004: #endif
                    1005:         /* we restore the process signal mask as the sigreturn should
                    1006:            do it (XXX: use sigsetjmp) */
                   1007:         sigprocmask(SIG_SETMASK, old_set, NULL);
1.1.1.7   root     1008:         cpu_loop_exit();
1.1       root     1009:     } else {
                   1010:         /* activate soft MMU for this block */
                   1011:         cpu_resume_from_signal(env, puc);
                   1012:     }
                    1013:     /* never reached */
                   1014:     return 1;
                   1015: }
                   1016: 
1.1.1.3   root     1017: #elif defined (TARGET_SH4)
                   1018: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                   1019:                                     int is_write, sigset_t *old_set,
                   1020:                                     void *puc)
                   1021: {
                   1022:     TranslationBlock *tb;
                   1023:     int ret;
1.1.1.6   root     1024: 
1.1.1.3   root     1025:     if (cpu_single_env)
                   1026:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
                   1027: #if defined(DEBUG_SIGNAL)
1.1.1.6   root     1028:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1.1.1.3   root     1029:            pc, address, is_write, *(unsigned long *)old_set);
                   1030: #endif
                   1031:     /* XXX: locking issue */
                   1032:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
                   1033:         return 1;
                   1034:     }
                   1035: 
                   1036:     /* see if it is an MMU fault */
1.1.1.6   root     1037:     ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1.1.1.3   root     1038:     if (ret < 0)
                   1039:         return 0; /* not an MMU fault */
                   1040:     if (ret == 0)
                   1041:         return 1; /* the MMU fault was handled without causing real CPU fault */
                   1042: 
                   1043:     /* now we have a real cpu fault */
                   1044:     tb = tb_find_pc(pc);
                   1045:     if (tb) {
                   1046:         /* the PC is inside the translated code. It means that we have
                   1047:            a virtual CPU fault */
                   1048:         cpu_restore_state(tb, env, pc, puc);
                   1049:     }
                    1050: #if 0
1.1.1.6   root     1051:     printf("PF exception: PC=0x%08x %p\n",
1.1.1.3   root     1052:            env->pc, tb);
                    1053: #endif
                   1054:     /* we restore the process signal mask as the sigreturn should
                   1055:        do it (XXX: use sigsetjmp) */
1.1.1.4   root     1056:     sigprocmask(SIG_SETMASK, old_set, NULL);
                   1057:     cpu_loop_exit();
1.1.1.3   root     1058:     /* never reached */
                   1059:     return 1;
                   1060: }
1.1       root     1061: 
1.1.1.6   root     1062: #elif defined (TARGET_ALPHA)
                   1063: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                   1064:                                     int is_write, sigset_t *old_set,
                   1065:                                     void *puc)
                   1066: {
                   1067:     TranslationBlock *tb;
                   1068:     int ret;
1.1.1.5   root     1069: 
1.1.1.6   root     1070:     if (cpu_single_env)
                   1071:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
                   1072: #if defined(DEBUG_SIGNAL)
                   1073:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                   1074:            pc, address, is_write, *(unsigned long *)old_set);
1.1.1.5   root     1075: #endif
1.1.1.6   root     1076:     /* XXX: locking issue */
                   1077:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
                   1078:         return 1;
                   1079:     }
1.1.1.5   root     1080: 
1.1.1.6   root     1081:     /* see if it is an MMU fault */
                   1082:     ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
                   1083:     if (ret < 0)
                   1084:         return 0; /* not an MMU fault */
                   1085:     if (ret == 0)
                   1086:         return 1; /* the MMU fault was handled without causing real CPU fault */
                   1087: 
                   1088:     /* now we have a real cpu fault */
                   1089:     tb = tb_find_pc(pc);
                   1090:     if (tb) {
                   1091:         /* the PC is inside the translated code. It means that we have
                   1092:            a virtual CPU fault */
                   1093:         cpu_restore_state(tb, env, pc, puc);
                   1094:     }
                    1095: #if 0
                    1096:     printf("PF exception: PC=0x" TARGET_FMT_lx " %p\n",
                    1097:            env->pc, tb);
                    1098: #endif
                   1099:     /* we restore the process signal mask as the sigreturn should
                   1100:        do it (XXX: use sigsetjmp) */
                   1101:     sigprocmask(SIG_SETMASK, old_set, NULL);
                   1102:     cpu_loop_exit();
                    1103:     /* never reached */
                   1104:     return 1;
                   1105: }
                   1106: #elif defined (TARGET_CRIS)
                   1107: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                   1108:                                     int is_write, sigset_t *old_set,
                   1109:                                     void *puc)
1.1       root     1110: {
                   1111:     TranslationBlock *tb;
1.1.1.6   root     1112:     int ret;
1.1       root     1113: 
                   1114:     if (cpu_single_env)
                   1115:         env = cpu_single_env; /* XXX: find a correct solution for multithread */
1.1.1.6   root     1116: #if defined(DEBUG_SIGNAL)
                   1117:     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                   1118:            pc, address, is_write, *(unsigned long *)old_set);
                   1119: #endif
                   1120:     /* XXX: locking issue */
                   1121:     if (is_write && page_unprotect(h2g(address), pc, puc)) {
                   1122:         return 1;
                   1123:     }
                   1124: 
                   1125:     /* see if it is an MMU fault */
                   1126:     ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
                   1127:     if (ret < 0)
                   1128:         return 0; /* not an MMU fault */
                   1129:     if (ret == 0)
                   1130:         return 1; /* the MMU fault was handled without causing real CPU fault */
                   1131: 
1.1       root     1132:     /* now we have a real cpu fault */
                   1133:     tb = tb_find_pc(pc);
                   1134:     if (tb) {
                   1135:         /* the PC is inside the translated code. It means that we have
                   1136:            a virtual CPU fault */
1.1.1.6   root     1137:         cpu_restore_state(tb, env, pc, puc);
1.1       root     1138:     }
1.1.1.6   root     1139:     /* we restore the process signal mask as the sigreturn should
                   1140:        do it (XXX: use sigsetjmp) */
                   1141:     sigprocmask(SIG_SETMASK, old_set, NULL);
                   1142:     cpu_loop_exit();
                    1143:     /* never reached */
                   1144:     return 1;
1.1       root     1145: }
1.1.1.6   root     1146: 
                   1147: #else
                   1148: #error unsupported target CPU
1.1       root     1149: #endif
                   1150: 
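                          /* Everything from here down is host-side: one cpu_signal_handler()
                             per host CPU/OS which extracts the fault PC, the faulting address
                             and (where the context provides it) a write flag from the signal
                             context, and forwards them to the target handle_cpu_signal() above.
                             Illustrative sketch only (not part of the original file): such a
                             handler is typically reached from a SIGSEGV/SIGBUS handler installed
                             with SA_SIGINFO, roughly as below.  host_fault_handler and
                             install_fault_handler are placeholder names; QEMU's real setup code
                             lives elsewhere, and a zero return would normally be turned into a
                             guest signal rather than an abort.  */
                          #if 0
                          static void host_fault_handler(int sig, siginfo_t *info, void *puc)
                          {
                              /* forward signal number, siginfo and ucontext to the decoder below */
                              if (!cpu_signal_handler(sig, info, puc))
                                  abort(); /* fault was not caused by translated code */
                          }

                          static void install_fault_handler(void)
                          {
                              struct sigaction act;

                              sigemptyset(&act.sa_mask);
                              act.sa_flags = SA_SIGINFO;
                              act.sa_sigaction = host_fault_handler;
                              sigaction(SIGSEGV, &act, NULL);
                              sigaction(SIGBUS, &act, NULL);
                          }
                          #endif
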
1.1.1.6   root     1151: #if defined(__i386__)
                   1152: 
                   1153: #if defined(__APPLE__)
                   1154: # include <sys/ucontext.h>
                   1155: 
                   1156: # define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
                   1157: # define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
                   1158: # define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
                   1159: #else
                   1160: # define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
                   1161: # define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
                   1162: # define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
                   1163: #endif
                   1164: 
                   1165: int cpu_signal_handler(int host_signum, void *pinfo,
1.1       root     1166:                        void *puc)
                   1167: {
1.1.1.5   root     1168:     siginfo_t *info = pinfo;
1.1       root     1169:     struct ucontext *uc = puc;
                   1170:     unsigned long pc;
                   1171:     int trapno;
                   1172: 
                   1173: #ifndef REG_EIP
                   1174: /* for glibc 2.1 */
                   1175: #define REG_EIP    EIP
                   1176: #define REG_ERR    ERR
                   1177: #define REG_TRAPNO TRAPNO
                   1178: #endif
1.1.1.5   root     1179:     pc = EIP_sig(uc);
                   1180:     trapno = TRAP_sig(uc);
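                              /* trap 0xe is the i386 page fault vector; bit 1 of the page fault
                                 error code is set when the faulting access was a write */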
1.1.1.6   root     1181:     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                   1182:                              trapno == 0xe ?
                   1183:                              (ERROR_sig(uc) >> 1) & 1 : 0,
                   1184:                              &uc->uc_sigmask, puc);
1.1       root     1185: }
                   1186: 
                   1187: #elif defined(__x86_64__)
                   1188: 
1.1.1.7   root     1189: #ifdef __NetBSD__
                   1190: #define REG_ERR _REG_ERR
                   1191: #define REG_TRAPNO _REG_TRAPNO
                   1192: 
                   1193: #define QEMU_UC_MCONTEXT_GREGS(uc, reg)        (uc)->uc_mcontext.__gregs[(reg)]
                   1194: #define QEMU_UC_MACHINE_PC(uc)         _UC_MACHINE_PC(uc)
                   1195: #else
                   1196: #define QEMU_UC_MCONTEXT_GREGS(uc, reg)        (uc)->uc_mcontext.gregs[(reg)]
                   1197: #define QEMU_UC_MACHINE_PC(uc)         QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
                   1198: #endif
                   1199: 
1.1.1.5   root     1200: int cpu_signal_handler(int host_signum, void *pinfo,
1.1       root     1201:                        void *puc)
                   1202: {
1.1.1.5   root     1203:     siginfo_t *info = pinfo;
1.1       root     1204:     unsigned long pc;
1.1.1.7   root     1205: #ifdef __NetBSD__
                   1206:     ucontext_t *uc = puc;
                   1207: #else
                   1208:     struct ucontext *uc = puc;
                   1209: #endif
1.1       root     1210: 
1.1.1.7   root     1211:     pc = QEMU_UC_MACHINE_PC(uc);
1.1.1.6   root     1212:     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1.1.7   root     1213:                              QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                   1214:                              (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
1.1       root     1215:                              &uc->uc_sigmask, puc);
                   1216: }
                   1217: 
1.1.1.7   root     1218: #elif defined(_ARCH_PPC)
1.1       root     1219: 
                   1220: /***********************************************************************
                   1221:  * signal context platform-specific definitions
                   1222:  * From Wine
                   1223:  */
                   1224: #ifdef linux
                   1225: /* All Registers access - only for local access */
                   1226: # define REG_sig(reg_name, context)            ((context)->uc_mcontext.regs->reg_name)
                   1227: /* Gpr Registers access  */
                   1228: # define GPR_sig(reg_num, context)             REG_sig(gpr[reg_num], context)
                   1229: # define IAR_sig(context)                      REG_sig(nip, context)   /* Program counter */
                   1230: # define MSR_sig(context)                      REG_sig(msr, context)   /* Machine State Register (Supervisor) */
                   1231: # define CTR_sig(context)                      REG_sig(ctr, context)   /* Count register */
                   1232: # define XER_sig(context)                      REG_sig(xer, context) /* User's integer exception register */
                   1233: # define LR_sig(context)                       REG_sig(link, context) /* Link register */
                   1234: # define CR_sig(context)                       REG_sig(ccr, context) /* Condition register */
                   1235: /* Float Registers access  */
                   1236: # define FLOAT_sig(reg_num, context)           (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
                   1237: # define FPSCR_sig(context)                    (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
                   1238: /* Exception Registers access */
                   1239: # define DAR_sig(context)                      REG_sig(dar, context)
                   1240: # define DSISR_sig(context)                    REG_sig(dsisr, context)
                   1241: # define TRAP_sig(context)                     REG_sig(trap, context)
                   1242: #endif /* linux */
                   1243: 
                   1244: #ifdef __APPLE__
                   1245: # include <sys/ucontext.h>
                   1246: typedef struct ucontext SIGCONTEXT;
                   1247: /* All Registers access - only for local access */
                   1248: # define REG_sig(reg_name, context)            ((context)->uc_mcontext->ss.reg_name)
                   1249: # define FLOATREG_sig(reg_name, context)       ((context)->uc_mcontext->fs.reg_name)
                   1250: # define EXCEPREG_sig(reg_name, context)       ((context)->uc_mcontext->es.reg_name)
                   1251: # define VECREG_sig(reg_name, context)         ((context)->uc_mcontext->vs.reg_name)
                   1252: /* Gpr Registers access */
                   1253: # define GPR_sig(reg_num, context)             REG_sig(r##reg_num, context)
                   1254: # define IAR_sig(context)                      REG_sig(srr0, context)  /* Program counter */
                   1255: # define MSR_sig(context)                      REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
                   1256: # define CTR_sig(context)                      REG_sig(ctr, context)
                    1257: # define XER_sig(context)                      REG_sig(xer, context) /* User's integer exception register */
                    1258: # define LR_sig(context)                       REG_sig(lr, context)  /* Link register */
                   1259: # define CR_sig(context)                       REG_sig(cr, context)  /* Condition register */
                   1260: /* Float Registers access */
                   1261: # define FLOAT_sig(reg_num, context)           FLOATREG_sig(fpregs[reg_num], context)
                   1262: # define FPSCR_sig(context)                    ((double)FLOATREG_sig(fpscr, context))
                   1263: /* Exception Registers access */
                   1264: # define DAR_sig(context)                      EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
                   1265: # define DSISR_sig(context)                    EXCEPREG_sig(dsisr, context)
                   1266: # define TRAP_sig(context)                     EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
                   1267: #endif /* __APPLE__ */
                   1268: 
1.1.1.6   root     1269: int cpu_signal_handler(int host_signum, void *pinfo,
1.1       root     1270:                        void *puc)
                   1271: {
1.1.1.5   root     1272:     siginfo_t *info = pinfo;
1.1       root     1273:     struct ucontext *uc = puc;
                   1274:     unsigned long pc;
                   1275:     int is_write;
                   1276: 
                   1277:     pc = IAR_sig(uc);
                   1278:     is_write = 0;
                   1279: #if 0
                   1280:     /* ppc 4xx case */
                   1281:     if (DSISR_sig(uc) & 0x00800000)
                   1282:         is_write = 1;
                   1283: #else
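                              /* except for instruction fetch faults (trap 0x400), the DSISR store
                                 bit (0x02000000) tells us whether the faulting access was a write */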
                   1284:     if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
                   1285:         is_write = 1;
                   1286: #endif
1.1.1.6   root     1287:     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1       root     1288:                              is_write, &uc->uc_sigmask, puc);
                   1289: }
                   1290: 
                   1291: #elif defined(__alpha__)
                   1292: 
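                          /* the signal context does not carry a write flag on this host, so the
                             handler fetches the faulting instruction and treats any store opcode
                             as a write access (hence the XXX note about a kernel patch below) */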
1.1.1.6   root     1293: int cpu_signal_handler(int host_signum, void *pinfo,
1.1       root     1294:                            void *puc)
                   1295: {
1.1.1.5   root     1296:     siginfo_t *info = pinfo;
1.1       root     1297:     struct ucontext *uc = puc;
                   1298:     uint32_t *pc = uc->uc_mcontext.sc_pc;
                   1299:     uint32_t insn = *pc;
                   1300:     int is_write = 0;
                   1301: 
                   1302:     /* XXX: need kernel patch to get write flag faster */
                   1303:     switch (insn >> 26) {
                   1304:     case 0x0d: // stw
                   1305:     case 0x0e: // stb
                   1306:     case 0x0f: // stq_u
                   1307:     case 0x24: // stf
                   1308:     case 0x25: // stg
                   1309:     case 0x26: // sts
                   1310:     case 0x27: // stt
                   1311:     case 0x2c: // stl
                   1312:     case 0x2d: // stq
                   1313:     case 0x2e: // stl_c
                   1314:     case 0x2f: // stq_c
                   1315:        is_write = 1;
                   1316:     }
                   1317: 
1.1.1.6   root     1318:     return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
1.1       root     1319:                              is_write, &uc->uc_sigmask, puc);
                   1320: }
                   1321: #elif defined(__sparc__)
                   1322: 
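                          /* as on alpha, no write flag is provided, so the faulting instruction is
                             decoded: format-3 (op == 3) store opcodes are treated as write accesses */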
1.1.1.6   root     1323: int cpu_signal_handler(int host_signum, void *pinfo,
1.1       root     1324:                        void *puc)
                   1325: {
1.1.1.5   root     1326:     siginfo_t *info = pinfo;
1.1       root     1327:     int is_write;
                   1328:     uint32_t insn;
1.1.1.7   root     1329: #if !defined(__arch64__) || defined(HOST_SOLARIS)
                   1330:     uint32_t *regs = (uint32_t *)(info + 1);
                   1331:     void *sigmask = (regs + 20);
1.1       root     1332:     /* XXX: is there a standard glibc define ? */
1.1.1.7   root     1333:     unsigned long pc = regs[1];
                   1334: #else
                   1335: #ifdef __linux__
                   1336:     struct sigcontext *sc = puc;
                   1337:     unsigned long pc = sc->sigc_regs.tpc;
                   1338:     void *sigmask = (void *)sc->sigc_mask;
                   1339: #elif defined(__OpenBSD__)
                   1340:     struct sigcontext *uc = puc;
                   1341:     unsigned long pc = uc->sc_pc;
                   1342:     void *sigmask = (void *)(long)uc->sc_mask;
                   1343: #endif
                   1344: #endif
                   1345: 
1.1       root     1346:     /* XXX: need kernel patch to get write flag faster */
                   1347:     is_write = 0;
                   1348:     insn = *(uint32_t *)pc;
                   1349:     if ((insn >> 30) == 3) {
                   1350:       switch((insn >> 19) & 0x3f) {
                   1351:       case 0x05: // stb
                   1352:       case 0x06: // sth
                   1353:       case 0x04: // st
                   1354:       case 0x07: // std
                   1355:       case 0x24: // stf
                   1356:       case 0x27: // stdf
                   1357:       case 0x25: // stfsr
                   1358:        is_write = 1;
                   1359:        break;
                   1360:       }
                   1361:     }
1.1.1.6   root     1362:     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1       root     1363:                              is_write, sigmask, NULL);
                   1364: }
                   1365: 
                   1366: #elif defined(__arm__)
                   1367: 
1.1.1.6   root     1368: int cpu_signal_handler(int host_signum, void *pinfo,
1.1       root     1369:                        void *puc)
                   1370: {
1.1.1.5   root     1371:     siginfo_t *info = pinfo;
1.1       root     1372:     struct ucontext *uc = puc;
                   1373:     unsigned long pc;
                   1374:     int is_write;
1.1.1.6   root     1375: 
1.1.1.7   root     1376: #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1.1       root     1377:     pc = uc->uc_mcontext.gregs[R15];
1.1.1.7   root     1378: #else
                   1379:     pc = uc->uc_mcontext.arm_pc;
                   1380: #endif
1.1       root     1381:     /* XXX: compute is_write */
                   1382:     is_write = 0;
1.1.1.6   root     1383:     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1       root     1384:                              is_write,
1.1.1.5   root     1385:                              &uc->uc_sigmask, puc);
1.1       root     1386: }
                   1387: 
                   1388: #elif defined(__mc68000)
                   1389: 
1.1.1.6   root     1390: int cpu_signal_handler(int host_signum, void *pinfo,
1.1       root     1391:                        void *puc)
                   1392: {
1.1.1.5   root     1393:     siginfo_t *info = pinfo;
1.1       root     1394:     struct ucontext *uc = puc;
                   1395:     unsigned long pc;
                   1396:     int is_write;
1.1.1.6   root     1397: 
1.1       root     1398:     pc = uc->uc_mcontext.gregs[16];
                   1399:     /* XXX: compute is_write */
                   1400:     is_write = 0;
1.1.1.6   root     1401:     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1       root     1402:                              is_write,
                   1403:                              &uc->uc_sigmask, puc);
                   1404: }
                   1405: 
                   1406: #elif defined(__ia64)
                   1407: 
                   1408: #ifndef __ISR_VALID
                   1409:   /* This ought to be in <bits/siginfo.h>... */
                   1410: # define __ISR_VALID   1
                   1411: #endif
                   1412: 
1.1.1.5   root     1413: int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1.1       root     1414: {
1.1.1.5   root     1415:     siginfo_t *info = pinfo;
1.1       root     1416:     struct ucontext *uc = puc;
                   1417:     unsigned long ip;
                   1418:     int is_write = 0;
                   1419: 
                   1420:     ip = uc->uc_mcontext.sc_ip;
                   1421:     switch (host_signum) {
                   1422:       case SIGILL:
                   1423:       case SIGFPE:
                   1424:       case SIGSEGV:
                   1425:       case SIGBUS:
                   1426:       case SIGTRAP:
1.1.1.3   root     1427:          if (info->si_code && (info->si_segvflags & __ISR_VALID))
1.1       root     1428:              /* ISR.W (write-access) is bit 33:  */
                   1429:              is_write = (info->si_isr >> 33) & 1;
                   1430:          break;
                   1431: 
                   1432:       default:
                   1433:          break;
                   1434:     }
                   1435:     return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                   1436:                              is_write,
                   1437:                              &uc->uc_sigmask, puc);
                   1438: }
                   1439: 
                   1440: #elif defined(__s390__)
                   1441: 
1.1.1.6   root     1442: int cpu_signal_handler(int host_signum, void *pinfo,
1.1       root     1443:                        void *puc)
                   1444: {
1.1.1.5   root     1445:     siginfo_t *info = pinfo;
1.1       root     1446:     struct ucontext *uc = puc;
                   1447:     unsigned long pc;
                   1448:     int is_write;
1.1.1.6   root     1449: 
1.1       root     1450:     pc = uc->uc_mcontext.psw.addr;
                   1451:     /* XXX: compute is_write */
                   1452:     is_write = 0;
1.1.1.6   root     1453:     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                   1454:                              is_write, &uc->uc_sigmask, puc);
                   1455: }
                   1456: 
                   1457: #elif defined(__mips__)
                   1458: 
                   1459: int cpu_signal_handler(int host_signum, void *pinfo,
                   1460:                        void *puc)
                   1461: {
                   1462:     siginfo_t *info = pinfo;
                   1463:     struct ucontext *uc = puc;
                   1464:     greg_t pc = uc->uc_mcontext.pc;
                   1465:     int is_write;
                   1466: 
                   1467:     /* XXX: compute is_write */
                   1468:     is_write = 0;
                   1469:     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                   1470:                              is_write, &uc->uc_sigmask, puc);
1.1       root     1471: }
                   1472: 
1.1.1.7   root     1473: #elif defined(__hppa__)
                   1474: 
                   1475: int cpu_signal_handler(int host_signum, void *pinfo,
                   1476:                        void *puc)
                   1477: {
                    1478:     siginfo_t *info = pinfo;
                   1479:     struct ucontext *uc = puc;
                   1480:     unsigned long pc;
                   1481:     int is_write;
                   1482: 
                   1483:     pc = uc->uc_mcontext.sc_iaoq[0];
                   1484:     /* FIXME: compute is_write */
                   1485:     is_write = 0;
                   1486:     return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
                   1487:                              is_write,
                   1488:                              &uc->uc_sigmask, puc);
                   1489: }
                   1490: 
1.1       root     1491: #else
                   1492: 
                   1493: #error host CPU specific signal handler needed
                   1494: 
                   1495: #endif
                   1496: 
                   1497: #endif /* !defined(CONFIG_SOFTMMU) */
