Annotation of qemu/cpu-exec.c, revision 1.1.1.13
1.1 root 1: /*
2: * i386 emulator main execution loop
1.1.1.6 root 3: *
1.1 root 4: * Copyright (c) 2003-2005 Fabrice Bellard
5: *
6: * This library is free software; you can redistribute it and/or
7: * modify it under the terms of the GNU Lesser General Public
8: * License as published by the Free Software Foundation; either
9: * version 2 of the License, or (at your option) any later version.
10: *
11: * This library is distributed in the hope that it will be useful,
12: * but WITHOUT ANY WARRANTY; without even the implied warranty of
13: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14: * Lesser General Public License for more details.
15: *
16: * You should have received a copy of the GNU Lesser General Public
1.1.1.9 root 17: * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1.1 root 18: */
19: #include "config.h"
20: #include "exec.h"
21: #include "disas.h"
1.1.1.7 root 22: #include "tcg.h"
23: #include "kvm.h"
1.1.1.12 root 24: #include "qemu-barrier.h"
1.1 root 25:
26: #if !defined(CONFIG_SOFTMMU)
27: #undef EAX
28: #undef ECX
29: #undef EDX
30: #undef EBX
31: #undef ESP
32: #undef EBP
33: #undef ESI
34: #undef EDI
35: #undef EIP
36: #include <signal.h>
1.1.1.7 root 37: #ifdef __linux__
1.1 root 38: #include <sys/ucontext.h>
39: #endif
1.1.1.7 root 40: #endif
1.1.1.6 root 41:
1.1.1.10 root 42: #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
1.1.1.6 root 43: // Work around ugly bugs in glibc that mangle global register contents
1.1.1.7 root 44: #undef env
45: #define env cpu_single_env
46: #endif
1.1.1.6 root 47:
1.1.1.7 root 48: int tb_invalidated_flag;
1.1.1.6 root 49:
1.1.1.10 root 50: //#define CONFIG_DEBUG_EXEC
1.1.1.7 root 51: //#define DEBUG_SIGNAL
1.1.1.6 root 52:
1.1.1.9 root 53: int qemu_cpu_has_work(CPUState *env)
54: {
55: return cpu_has_work(env);
56: }
57:
/* Abandon execution of the current translated block and unwind back to
   the setjmp() point inside cpu_exec().  Must only be called while the
   global 'env' refers to the CPU whose loop is being exited. */
void cpu_loop_exit(void)
{
    /* Clear current_tb first: after the longjmp no TB is executing, and
       observers of env->current_tb must not see a stale pointer. */
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
1.1.1.6 root 63:
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* Because we longjmp() out of the handler instead of returning
           through sigreturn, the kernel will not restore the signal
           mask for us — re-install the pre-signal mask by hand.
           XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        /* ia64's uc_sigmask has a different declared type; cast it. */
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    /* No exception pending: cpu_exec() resumes plain TB execution. */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
98:
1.1.1.7 root 99: /* Execute the code without caching the generated code. An interpreter
100: could be used if available. */
101: static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
102: {
103: unsigned long next_tb;
104: TranslationBlock *tb;
105:
106: /* Should never happen.
107: We only end up here when an existing TB is too long. */
108: if (max_cycles > CF_COUNT_MASK)
109: max_cycles = CF_COUNT_MASK;
110:
111: tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
112: max_cycles);
113: env->current_tb = tb;
114: /* execute the generated code */
115: next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
1.1.1.12 root 116: env->current_tb = NULL;
1.1.1.7 root 117:
118: if ((next_tb & 3) == 2) {
119: /* Restore PC. This may happen if async event occurs before
120: the TB starts executing. */
121: cpu_pc_from_tb(env, tb);
122: }
123: tb_phys_invalidate(tb, -1);
124: tb_free(tb);
125: }
1.1.1.2 root 126:
/* Slow-path TB lookup: search the physical-address hash table for a
   translated block matching (pc, cs_base, flags); translate a new one
   if none exists.  Also refreshes the per-virtual-PC jump cache.
   Sets tb_invalidated_flag as a side effect of tb_gen_code() when
   existing TBs had to be flushed. */
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed: a TB spanning two pages only
               matches if the second physical page also agrees */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list.  ptb1 points at
       the link that referenced tb; on the not_found path *ptb1 is the
       NULL chain tail, so the move is skipped (tb_gen_code already
       inserted the new TB). */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
180:
181: static inline TranslationBlock *tb_find_fast(void)
182: {
183: TranslationBlock *tb;
184: target_ulong cs_base, pc;
1.1.1.7 root 185: int flags;
1.1.1.2 root 186:
187: /* we record a subset of the CPU state. It will
188: always be the same before a given translated block
189: is executed. */
1.1.1.7 root 190: cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1.1.1.2 root 191: tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
1.1.1.7 root 192: if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
193: tb->flags != flags)) {
1.1.1.2 root 194: tb = tb_find_slow(pc, cs_base, flags);
195: }
196: return tb;
197: }
198:
1.1.1.7 root 199: static CPUDebugExcpHandler *debug_excp_handler;
200:
201: CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
202: {
203: CPUDebugExcpHandler *old_handler = debug_excp_handler;
204:
205: debug_excp_handler = handler;
206: return old_handler;
207: }
208:
209: static void cpu_handle_debug_exception(CPUState *env)
210: {
211: CPUWatchpoint *wp;
212:
213: if (!env->watchpoint_hit)
1.1.1.10 root 214: QTAILQ_FOREACH(wp, &env->watchpoints, entry)
1.1.1.7 root 215: wp->flags &= ~BP_WATCHPOINT_HIT;
216:
217: if (debug_excp_handler)
218: debug_excp_handler(env);
219: }
1.1.1.2 root 220:
1.1 root 221: /* main execution loop */
222:
1.1.1.12 root 223: volatile sig_atomic_t exit_request;
224:
/* Main execution loop: repeatedly find (or translate) and execute
   translation blocks for 'env1' until an exception index >=
   EXCP_INTERRUPT terminates the loop.  Returns that exception index.
   Pending target exceptions and interrupts are delivered between
   blocks; under KVM, execution is delegated to kvm_cpu_exec(). */
int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    /* Propagate a pending global exit request into this CPU. */
    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
            /* Reload the global-register 'env' after longjmp, working
               around glibc mangling global register contents. */
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                /* Under KVM the kernel runs the guest; loop back to the
                   setjmp point to process whatever state it left. */
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        /* Interrupt delivery below is gated on GIF. */
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occured at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
682:
/* must only be called from the generated code as an exception can be
   generated */
/* Currently a no-op: the intended implementation is kept disabled
   under #if 0 below. */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
695:
696: #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
697:
698: void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
699: {
700: CPUX86State *saved_env;
701:
702: saved_env = env;
703: env = s;
704: if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
705: selector &= 0xffff;
1.1.1.6 root 706: cpu_x86_load_seg_cache(env, seg_reg, selector,
1.1 root 707: (selector << 4), 0xffff, 0);
708: } else {
1.1.1.7 root 709: helper_load_seg(seg_reg, selector);
1.1 root 710: }
711: env = saved_env;
712: }
713:
1.1.1.6 root 714: void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
1.1 root 715: {
716: CPUX86State *saved_env;
717:
718: saved_env = env;
719: env = s;
1.1.1.6 root 720:
721: helper_fsave(ptr, data32);
1.1 root 722:
723: env = saved_env;
724: }
725:
1.1.1.6 root 726: void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
1.1 root 727: {
728: CPUX86State *saved_env;
729:
730: saved_env = env;
731: env = s;
1.1.1.6 root 732:
733: helper_frstor(ptr, data32);
1.1 root 734:
735: env = saved_env;
736: }
737:
738: #endif /* TARGET_I386 */
739:
740: #if !defined(CONFIG_SOFTMMU)
741:
742: #if defined(TARGET_I386)
1.1.1.10 root 743: #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
744: #else
745: #define EXCEPTION_ACTION cpu_loop_exit()
746: #endif
1.1 root 747:
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0'. 'old_set' is the
   signal set which should be restored */
/* Returns 1 when the fault was handled (page unprotected, or MMU fault
   resolved / converted into a guest exception that never returns here);
   returns 0 when it was not an MMU fault and the caller should let the
   host deal with it. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* A write to a page we protected for TB consistency: unprotect and
       retry the access. */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* Raise the guest exception; this longjmp()s out and does not
       return (raise_exception_err on i386, cpu_loop_exit otherwise). */
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
792:
1.1.1.6 root 793: #if defined(__i386__)
794:
795: #if defined(__APPLE__)
796: # include <sys/ucontext.h>
797:
798: # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
799: # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
800: # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1.1.1.9 root 801: # define MASK_sig(context) ((context)->uc_sigmask)
1.1.1.10 root 802: #elif defined (__NetBSD__)
803: # include <ucontext.h>
804:
805: # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
806: # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
807: # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
808: # define MASK_sig(context) ((context)->uc_sigmask)
809: #elif defined (__FreeBSD__) || defined(__DragonFly__)
810: # include <ucontext.h>
811:
812: # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
813: # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
814: # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
815: # define MASK_sig(context) ((context)->uc_sigmask)
1.1.1.9 root 816: #elif defined(__OpenBSD__)
817: # define EIP_sig(context) ((context)->sc_eip)
818: # define TRAP_sig(context) ((context)->sc_trapno)
819: # define ERROR_sig(context) ((context)->sc_err)
820: # define MASK_sig(context) ((context)->sc_mask)
1.1.1.6 root 821: #else
822: # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
823: # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
824: # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1.1.1.9 root 825: # define MASK_sig(context) ((context)->uc_sigmask)
1.1.1.6 root 826: #endif
827:
/* Host-side (i386) signal handler entry: extract the faulting PC,
   trap number and fault address from the per-OS signal context (via
   the EIP_sig/TRAP_sig/ERROR_sig/MASK_sig macros defined above) and
   forward them to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* Trap 0xe is the x86 page fault; bit 1 of its error code is the
       write/read indicator, which becomes is_write. */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
855:
856: #elif defined(__x86_64__)
857:
1.1.1.7 root 858: #ifdef __NetBSD__
1.1.1.9 root 859: #define PC_sig(context) _UC_MACHINE_PC(context)
860: #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
861: #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
862: #define MASK_sig(context) ((context)->uc_sigmask)
863: #elif defined(__OpenBSD__)
864: #define PC_sig(context) ((context)->sc_rip)
865: #define TRAP_sig(context) ((context)->sc_trapno)
866: #define ERROR_sig(context) ((context)->sc_err)
867: #define MASK_sig(context) ((context)->sc_mask)
1.1.1.10 root 868: #elif defined (__FreeBSD__) || defined(__DragonFly__)
869: #include <ucontext.h>
870:
871: #define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
872: #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
873: #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
874: #define MASK_sig(context) ((context)->uc_sigmask)
1.1.1.7 root 875: #else
1.1.1.9 root 876: #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
877: #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
878: #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
879: #define MASK_sig(context) ((context)->uc_sigmask)
1.1.1.7 root 880: #endif
881:
/* Host-side (x86_64) signal handler entry: extract the faulting PC and
   fault information from the per-OS signal context (via the
   PC_sig/TRAP_sig/ERROR_sig/MASK_sig macros defined above) and forward
   them to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    /* Trap 0xe is the x86 page fault; bit 1 of its error code is the
       write/read indicator, which becomes is_write. */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
901:
1.1.1.7 root 902: #elif defined(_ARCH_PPC)
1.1 root 903:
904: /***********************************************************************
905: * signal context platform-specific definitions
906: * From Wine
907: */
908: #ifdef linux
909: /* All Registers access - only for local access */
910: # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
911: /* Gpr Registers access */
912: # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
913: # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
914: # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
915: # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
916: # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
917: # define LR_sig(context) REG_sig(link, context) /* Link register */
918: # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
919: /* Float Registers access */
920: # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
921: # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
922: /* Exception Registers access */
923: # define DAR_sig(context) REG_sig(dar, context)
924: # define DSISR_sig(context) REG_sig(dsisr, context)
925: # define TRAP_sig(context) REG_sig(trap, context)
926: #endif /* linux */
927:
1.1.1.12 root 928: #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
929: #include <ucontext.h>
930: # define IAR_sig(context) ((context)->uc_mcontext.mc_srr0)
931: # define MSR_sig(context) ((context)->uc_mcontext.mc_srr1)
932: # define CTR_sig(context) ((context)->uc_mcontext.mc_ctr)
933: # define XER_sig(context) ((context)->uc_mcontext.mc_xer)
934: # define LR_sig(context) ((context)->uc_mcontext.mc_lr)
935: # define CR_sig(context) ((context)->uc_mcontext.mc_cr)
936: /* Exception Registers access */
937: # define DAR_sig(context) ((context)->uc_mcontext.mc_dar)
938: # define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
939: # define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
940: #endif /* __FreeBSD__|| __FreeBSD_kernel__ */
941:
1.1 root 942: #ifdef __APPLE__
943: # include <sys/ucontext.h>
944: typedef struct ucontext SIGCONTEXT;
945: /* All Registers access - only for local access */
946: # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
947: # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
948: # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
949: # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
950: /* Gpr Registers access */
951: # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
952: # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
953: # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
954: # define CTR_sig(context) REG_sig(ctr, context)
955: # define XER_sig(context) REG_sig(xer, context) /* Link register */
956: # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
957: # define CR_sig(context) REG_sig(cr, context) /* Condition register */
958: /* Float Registers access */
959: # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
960: # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
961: /* Exception Registers access */
962: # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
963: # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
964: # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
965: #endif /* __APPLE__ */
966:
1.1.1.6 root 967: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 968: void *puc)
969: {
1.1.1.5 root 970: siginfo_t *info = pinfo;
1.1.1.12 root 971: #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
972: ucontext_t *uc = puc;
973: #else
1.1 root 974: struct ucontext *uc = puc;
1.1.1.12 root 975: #endif
1.1 root 976: unsigned long pc;
977: int is_write;
978:
979: pc = IAR_sig(uc);
980: is_write = 0;
981: #if 0
982: /* ppc 4xx case */
983: if (DSISR_sig(uc) & 0x00800000)
984: is_write = 1;
985: #else
986: if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
987: is_write = 1;
988: #endif
1.1.1.6 root 989: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1 root 990: is_write, &uc->uc_sigmask, puc);
991: }
992:
993: #elif defined(__alpha__)
994:
1.1.1.6 root 995: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 996: void *puc)
997: {
1.1.1.5 root 998: siginfo_t *info = pinfo;
1.1 root 999: struct ucontext *uc = puc;
1000: uint32_t *pc = uc->uc_mcontext.sc_pc;
1001: uint32_t insn = *pc;
1002: int is_write = 0;
1003:
1004: /* XXX: need kernel patch to get write flag faster */
1005: switch (insn >> 26) {
1006: case 0x0d: // stw
1007: case 0x0e: // stb
1008: case 0x0f: // stq_u
1009: case 0x24: // stf
1010: case 0x25: // stg
1011: case 0x26: // sts
1012: case 0x27: // stt
1013: case 0x2c: // stl
1014: case 0x2d: // stq
1015: case 0x2e: // stl_c
1016: case 0x2f: // stq_c
1017: is_write = 1;
1018: }
1019:
1.1.1.6 root 1020: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1 root 1021: is_write, &uc->uc_sigmask, puc);
1022: }
1023: #elif defined(__sparc__)
1024:
1.1.1.6 root 1025: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 1026: void *puc)
1027: {
1.1.1.5 root 1028: siginfo_t *info = pinfo;
1.1 root 1029: int is_write;
1030: uint32_t insn;
1.1.1.10 root 1031: #if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1.1.1.7 root 1032: uint32_t *regs = (uint32_t *)(info + 1);
1033: void *sigmask = (regs + 20);
1.1 root 1034: /* XXX: is there a standard glibc define ? */
1.1.1.7 root 1035: unsigned long pc = regs[1];
1036: #else
1037: #ifdef __linux__
1038: struct sigcontext *sc = puc;
1039: unsigned long pc = sc->sigc_regs.tpc;
1040: void *sigmask = (void *)sc->sigc_mask;
1041: #elif defined(__OpenBSD__)
1042: struct sigcontext *uc = puc;
1043: unsigned long pc = uc->sc_pc;
1044: void *sigmask = (void *)(long)uc->sc_mask;
1045: #endif
1046: #endif
1047:
1.1 root 1048: /* XXX: need kernel patch to get write flag faster */
1049: is_write = 0;
1050: insn = *(uint32_t *)pc;
1051: if ((insn >> 30) == 3) {
1052: switch((insn >> 19) & 0x3f) {
1053: case 0x05: // stb
1.1.1.9 root 1054: case 0x15: // stba
1.1 root 1055: case 0x06: // sth
1.1.1.9 root 1056: case 0x16: // stha
1.1 root 1057: case 0x04: // st
1.1.1.9 root 1058: case 0x14: // sta
1.1 root 1059: case 0x07: // std
1.1.1.9 root 1060: case 0x17: // stda
1061: case 0x0e: // stx
1062: case 0x1e: // stxa
1.1 root 1063: case 0x24: // stf
1.1.1.9 root 1064: case 0x34: // stfa
1.1 root 1065: case 0x27: // stdf
1.1.1.9 root 1066: case 0x37: // stdfa
1067: case 0x26: // stqf
1068: case 0x36: // stqfa
1.1 root 1069: case 0x25: // stfsr
1.1.1.9 root 1070: case 0x3c: // casa
1071: case 0x3e: // casxa
1.1 root 1072: is_write = 1;
1073: break;
1074: }
1075: }
1.1.1.6 root 1076: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1 root 1077: is_write, sigmask, NULL);
1078: }
1079:
1080: #elif defined(__arm__)
1081:
1.1.1.6 root 1082: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 1083: void *puc)
1084: {
1.1.1.5 root 1085: siginfo_t *info = pinfo;
1.1 root 1086: struct ucontext *uc = puc;
1087: unsigned long pc;
1088: int is_write;
1.1.1.6 root 1089:
1.1.1.7 root 1090: #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1.1 root 1091: pc = uc->uc_mcontext.gregs[R15];
1.1.1.7 root 1092: #else
1093: pc = uc->uc_mcontext.arm_pc;
1094: #endif
1.1 root 1095: /* XXX: compute is_write */
1096: is_write = 0;
1.1.1.6 root 1097: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1 root 1098: is_write,
1.1.1.5 root 1099: &uc->uc_sigmask, puc);
1.1 root 1100: }
1101:
1102: #elif defined(__mc68000)
1103:
1.1.1.6 root 1104: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 1105: void *puc)
1106: {
1.1.1.5 root 1107: siginfo_t *info = pinfo;
1.1 root 1108: struct ucontext *uc = puc;
1109: unsigned long pc;
1110: int is_write;
1.1.1.6 root 1111:
1.1 root 1112: pc = uc->uc_mcontext.gregs[16];
1113: /* XXX: compute is_write */
1114: is_write = 0;
1.1.1.6 root 1115: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1 root 1116: is_write,
1117: &uc->uc_sigmask, puc);
1118: }
1119:
1120: #elif defined(__ia64)
1121:
1122: #ifndef __ISR_VALID
1123: /* This ought to be in <bits/siginfo.h>... */
1124: # define __ISR_VALID 1
1125: #endif
1126:
1.1.1.5 root 1127: int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1.1 root 1128: {
1.1.1.5 root 1129: siginfo_t *info = pinfo;
1.1 root 1130: struct ucontext *uc = puc;
1131: unsigned long ip;
1132: int is_write = 0;
1133:
1134: ip = uc->uc_mcontext.sc_ip;
1135: switch (host_signum) {
1136: case SIGILL:
1137: case SIGFPE:
1138: case SIGSEGV:
1139: case SIGBUS:
1140: case SIGTRAP:
1.1.1.3 root 1141: if (info->si_code && (info->si_segvflags & __ISR_VALID))
1.1 root 1142: /* ISR.W (write-access) is bit 33: */
1143: is_write = (info->si_isr >> 33) & 1;
1144: break;
1145:
1146: default:
1147: break;
1148: }
1149: return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1150: is_write,
1.1.1.12 root 1151: (sigset_t *)&uc->uc_sigmask, puc);
1.1 root 1152: }
1153:
1154: #elif defined(__s390__)
1155:
1.1.1.6 root 1156: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 1157: void *puc)
1158: {
1.1.1.5 root 1159: siginfo_t *info = pinfo;
1.1 root 1160: struct ucontext *uc = puc;
1161: unsigned long pc;
1.1.1.12 root 1162: uint16_t *pinsn;
1163: int is_write = 0;
1.1.1.6 root 1164:
1.1 root 1165: pc = uc->uc_mcontext.psw.addr;
1.1.1.12 root 1166:
1167: /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
1168: of the normal 2 arguments. The 3rd argument contains the "int_code"
1169: from the hardware which does in fact contain the is_write value.
1170: The rt signal handler, as far as I can tell, does not give this value
1171: at all. Not that we could get to it from here even if it were. */
1172: /* ??? This is not even close to complete, since it ignores all
1173: of the read-modify-write instructions. */
1174: pinsn = (uint16_t *)pc;
1175: switch (pinsn[0] >> 8) {
1176: case 0x50: /* ST */
1177: case 0x42: /* STC */
1178: case 0x40: /* STH */
1179: is_write = 1;
1180: break;
1181: case 0xc4: /* RIL format insns */
1182: switch (pinsn[0] & 0xf) {
1183: case 0xf: /* STRL */
1184: case 0xb: /* STGRL */
1185: case 0x7: /* STHRL */
1186: is_write = 1;
1187: }
1188: break;
1189: case 0xe3: /* RXY format insns */
1190: switch (pinsn[2] & 0xff) {
1191: case 0x50: /* STY */
1192: case 0x24: /* STG */
1193: case 0x72: /* STCY */
1194: case 0x70: /* STHY */
1195: case 0x8e: /* STPQ */
1196: case 0x3f: /* STRVH */
1197: case 0x3e: /* STRV */
1198: case 0x2f: /* STRVG */
1199: is_write = 1;
1200: }
1201: break;
1202: }
1.1.1.6 root 1203: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1204: is_write, &uc->uc_sigmask, puc);
1205: }
1206:
1207: #elif defined(__mips__)
1208:
1209: int cpu_signal_handler(int host_signum, void *pinfo,
1210: void *puc)
1211: {
1212: siginfo_t *info = pinfo;
1213: struct ucontext *uc = puc;
1214: greg_t pc = uc->uc_mcontext.pc;
1215: int is_write;
1216:
1217: /* XXX: compute is_write */
1218: is_write = 0;
1219: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1220: is_write, &uc->uc_sigmask, puc);
1.1 root 1221: }
1222:
1.1.1.7 root 1223: #elif defined(__hppa__)
1224:
1225: int cpu_signal_handler(int host_signum, void *pinfo,
1226: void *puc)
1227: {
1228: struct siginfo *info = pinfo;
1229: struct ucontext *uc = puc;
1.1.1.12 root 1230: unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
1231: uint32_t insn = *(uint32_t *)pc;
1232: int is_write = 0;
1233:
1234: /* XXX: need kernel patch to get write flag faster. */
1235: switch (insn >> 26) {
1236: case 0x1a: /* STW */
1237: case 0x19: /* STH */
1238: case 0x18: /* STB */
1239: case 0x1b: /* STWM */
1240: is_write = 1;
1241: break;
1242:
1243: case 0x09: /* CSTWX, FSTWX, FSTWS */
1244: case 0x0b: /* CSTDX, FSTDX, FSTDS */
1245: /* Distinguish from coprocessor load ... */
1246: is_write = (insn >> 9) & 1;
1247: break;
1248:
1249: case 0x03:
1250: switch ((insn >> 6) & 15) {
1251: case 0xa: /* STWS */
1252: case 0x9: /* STHS */
1253: case 0x8: /* STBS */
1254: case 0xe: /* STWAS */
1255: case 0xc: /* STBYS */
1256: is_write = 1;
1257: }
1258: break;
1259: }
1.1.1.7 root 1260:
1261: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1.1.12 root 1262: is_write, &uc->uc_sigmask, puc);
1.1.1.7 root 1263: }
1264:
1.1 root 1265: #else
1266:
1267: #error host CPU specific signal handler needed
1268:
1269: #endif
1270:
1271: #endif /* !defined(CONFIG_SOFTMMU) */
unix.superglobalmegacorp.com