Annotation of qemu/cpu-exec.c, revision 1.1.1.11
1.1 root 1: /*
2: * i386 emulator main execution loop
1.1.1.6 root 3: *
1.1 root 4: * Copyright (c) 2003-2005 Fabrice Bellard
5: *
6: * This library is free software; you can redistribute it and/or
7: * modify it under the terms of the GNU Lesser General Public
8: * License as published by the Free Software Foundation; either
9: * version 2 of the License, or (at your option) any later version.
10: *
11: * This library is distributed in the hope that it will be useful,
12: * but WITHOUT ANY WARRANTY; without even the implied warranty of
13: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14: * Lesser General Public License for more details.
15: *
16: * You should have received a copy of the GNU Lesser General Public
1.1.1.9 root 17: * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1.1 root 18: */
19: #include "config.h"
20: #include "exec.h"
21: #include "disas.h"
1.1.1.7 root 22: #include "tcg.h"
23: #include "kvm.h"
1.1 root 24:
25: #if !defined(CONFIG_SOFTMMU)
26: #undef EAX
27: #undef ECX
28: #undef EDX
29: #undef EBX
30: #undef ESP
31: #undef EBP
32: #undef ESI
33: #undef EDI
34: #undef EIP
35: #include <signal.h>
1.1.1.7 root 36: #ifdef __linux__
1.1 root 37: #include <sys/ucontext.h>
38: #endif
1.1.1.7 root 39: #endif
1.1.1.6 root 40:
1.1.1.10 root 41: #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
1.1.1.6 root 42: // Work around ugly bugs in glibc that mangle global register contents
1.1.1.7 root 43: #undef env
44: #define env cpu_single_env
45: #endif
1.1.1.6 root 46:
1.1.1.7 root 47: int tb_invalidated_flag;
1.1.1.6 root 48:
1.1.1.10 root 49: //#define CONFIG_DEBUG_EXEC
1.1.1.7 root 50: //#define DEBUG_SIGNAL
1.1.1.6 root 51:
/* Public wrapper: forwards to the per-target cpu_has_work() predicate
 * for 'env'; returns its result unchanged. */
1.1.1.9 root 52: int qemu_cpu_has_work(CPUState *env)
53: {
54: return cpu_has_work(env);
55: }
56:
/* Abort execution of the current translation block and longjmp back to
 * the setjmp point installed in cpu_exec().  regs_to_env() must run
 * first: CPU state cached in host registers would otherwise be lost
 * when longjmp restores the host register file.  Never returns. */
1.1 root 57: void cpu_loop_exit(void)
58: {
1.1.1.6 root 59: /* NOTE: the register at this point must be saved by hand because
60: longjmp restore them */
61: regs_to_env();
1.1 root 62: longjmp(env->jmp_env, 1);
63: }
1.1.1.6 root 64:
1.1 root 65: /* exit the current TB from a signal handler. The host registers are
66: restored in a state compatible with the CPU emulator
67: */
/* Exit the current TB from a host signal handler: re-select 'env1' as
 * the active CPU, restore the signal mask saved in the ucontext 'puc'
 * (we longjmp out instead of returning through sigreturn, so the
 * kernel will not restore it for us), clear any pending exception and
 * longjmp back to the setjmp point in cpu_exec().  Never returns.
 * 'puc' may be NULL when not called from a signal context. */
1.1.1.6 root 68: void cpu_resume_from_signal(CPUState *env1, void *puc)
1.1 root 69: {
70: #if !defined(CONFIG_SOFTMMU)
1.1.1.7 root 71: #ifdef __linux__
1.1 root 72: struct ucontext *uc = puc;
1.1.1.7 root 73: #elif defined(__OpenBSD__)
74: struct sigcontext *uc = puc;
75: #endif
1.1 root 76: #endif
77:
78: env = env1;
79:
80: /* XXX: restore cpu registers saved in host registers */
81:
82: #if !defined(CONFIG_SOFTMMU)
83: if (puc) {
84: /* XXX: use siglongjmp ? */
1.1.1.7 root 85: #ifdef __linux__
1.1 root 86: sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
1.1.1.7 root 87: #elif defined(__OpenBSD__)
88: sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
89: #endif
1.1 root 90: }
91: #endif
1.1.1.7 root 92: env->exception_index = -1;
1.1 root 93: longjmp(env->jmp_env, 1);
94: }
95:
1.1.1.7 root 96: /* Execute the code without caching the generated code. An interpreter
97: could be used if available. */
/* Execute at most 'max_cycles' instructions at orig_tb's location by
 * generating a throw-away TB (never entered into the caches), running
 * it once, and freeing it again.  Used when the icount decrementer
 * expires mid-TB and only part of an existing TB must still run. */
98: static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
99: {
100: unsigned long next_tb;
101: TranslationBlock *tb;
102:
103: /* Should never happen.
104: We only end up here when an existing TB is too long. */
105: if (max_cycles > CF_COUNT_MASK)
106: max_cycles = CF_COUNT_MASK;
107:
108: tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
109: max_cycles);
110: env->current_tb = tb;
111: /* execute the generated code */
112: next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
113:
/* low two bits of next_tb encode the exit reason; value 2 means the
 * TB was exited before/during execution by an async event */
114: if ((next_tb & 3) == 2) {
115: /* Restore PC. This may happen if async event occurs before
116: the TB starts executing. */
117: cpu_pc_from_tb(env, tb);
118: }
119: tb_phys_invalidate(tb, -1);
120: tb_free(tb);
121: }
1.1.1.2 root 122:
/* Slow-path TB lookup: walk the physical-address hash chain for a TB
 * matching (pc, cs_base, flags); for TBs spanning two guest pages the
 * second page's physical address must match as well.  On a miss,
 * translate the code now with tb_gen_code().  Either way the result is
 * installed in the per-virtual-PC jump cache before returning. */
123: static TranslationBlock *tb_find_slow(target_ulong pc,
124: target_ulong cs_base,
1.1.1.6 root 125: uint64_t flags)
1.1.1.2 root 126: {
127: TranslationBlock *tb, **ptb1;
128: unsigned int h;
129: target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
130:
131: tb_invalidated_flag = 0;
1.1.1.6 root 132:
1.1.1.2 root 133: regs_to_env(); /* XXX: do it just before cpu_gen_code() */
1.1.1.6 root 134:
1.1.1.2 root 135: /* find translated block using physical mappings */
136: phys_pc = get_phys_addr_code(env, pc);
137: phys_page1 = phys_pc & TARGET_PAGE_MASK;
138: phys_page2 = -1;
139: h = tb_phys_hash_func(phys_pc);
140: ptb1 = &tb_phys_hash[h];
141: for(;;) {
142: tb = *ptb1;
143: if (!tb)
144: goto not_found;
1.1.1.6 root 145: if (tb->pc == pc &&
1.1.1.2 root 146: tb->page_addr[0] == phys_page1 &&
1.1.1.6 root 147: tb->cs_base == cs_base &&
1.1.1.2 root 148: tb->flags == flags) {
149: /* check next page if needed */
150: if (tb->page_addr[1] != -1) {
1.1.1.6 root 151: virt_page2 = (pc & TARGET_PAGE_MASK) +
1.1.1.2 root 152: TARGET_PAGE_SIZE;
153: phys_page2 = get_phys_addr_code(env, virt_page2);
154: if (tb->page_addr[1] == phys_page2)
155: goto found;
156: } else {
157: goto found;
158: }
159: }
160: ptb1 = &tb->phys_hash_next;
161: }
162: not_found:
1.1.1.7 root 163: /* if no translated code available, then translate it now */
164: tb = tb_gen_code(env, pc, cs_base, flags, 0);
1.1.1.6 root 165:
1.1.1.2 root 166: found:
167: /* we add the TB in the virtual pc hash table */
168: env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
169: return tb;
170: }
171:
/* Fast-path TB lookup: read (pc, cs_base, flags) from the global 'env'
 * and probe the direct-mapped tb_jmp_cache; on mismatch or empty slot
 * fall back to tb_find_slow().  Returns the matching TB. */
172: static inline TranslationBlock *tb_find_fast(void)
173: {
174: TranslationBlock *tb;
175: target_ulong cs_base, pc;
1.1.1.7 root 176: int flags;
1.1.1.2 root 177:
178: /* we record a subset of the CPU state. It will
179: always be the same before a given translated block
180: is executed. */
1.1.1.7 root 181: cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1.1.1.2 root 182: tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
1.1.1.7 root 183: if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
184: tb->flags != flags)) {
1.1.1.2 root 185: tb = tb_find_slow(pc, cs_base, flags);
186: }
187: return tb;
188: }
189:
1.1.1.7 root 190: static CPUDebugExcpHandler *debug_excp_handler;
191:
192: CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
193: {
194: CPUDebugExcpHandler *old_handler = debug_excp_handler;
195:
196: debug_excp_handler = handler;
197: return old_handler;
198: }
199:
200: static void cpu_handle_debug_exception(CPUState *env)
201: {
202: CPUWatchpoint *wp;
203:
204: if (!env->watchpoint_hit)
1.1.1.10 root 205: QTAILQ_FOREACH(wp, &env->watchpoints, entry)
1.1.1.7 root 206: wp->flags &= ~BP_WATCHPOINT_HIT;
207:
208: if (debug_excp_handler)
209: debug_excp_handler(env);
210: }
1.1.1.2 root 211:
1.1 root 212: /* main execution loop */
213:
/* Main execution loop: run guest code on 'env1' until an exception or
 * exit request stops it, returning the exception index (e.g. EXCP_HLT,
 * EXCP_DEBUG, EXCP_INTERRUPT).  Structure: save host registers, enter
 * a setjmp-protected outer loop; inside it deliver pending exceptions,
 * then repeatedly (a) service interrupt requests per target, (b) find
 * the next TB, (c) optionally chain it to the previous TB, and
 * (d) execute it via tcg_qemu_tb_exec().  cpu_loop_exit() longjmps
 * back to the setjmp point. */
214: int cpu_exec(CPUState *env1)
215: {
216: #define DECLARE_HOST_REGS 1
217: #include "hostregs_helper.h"
1.1.1.2 root 218: int ret, interrupt_request;
219: TranslationBlock *tb;
1.1 root 220: uint8_t *tc_ptr;
1.1.1.7 root 221: unsigned long next_tb;
1.1.1.2 root 222:
1.1.1.6 root 223: if (cpu_halted(env1) == EXCP_HALTED)
224: return EXCP_HALTED;
1.1.1.2 root 225:
1.1.1.6 root 226: cpu_single_env = env1;
1.1 root 227:
228: /* first we save global registers */
1.1.1.5 root 229: #define SAVE_HOST_REGS 1
230: #include "hostregs_helper.h"
1.1 root 231: env = env1;
232:
233: env_to_regs();
/* per-target setup: move lazily-computed state (i386 eflags/cc,
 * m68k cc) into the internal working representation */
1.1.1.6 root 234: #if defined(TARGET_I386)
1.1.1.11! root 235: if (!kvm_enabled()) {
! 236: /* put eflags in CPU temporary format */
! 237: CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
! 238: DF = 1 - (2 * ((env->eflags >> 10) & 1));
! 239: CC_OP = CC_OP_EFLAGS;
! 240: env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
! 241: }
1.1 root 242: #elif defined(TARGET_SPARC)
1.1.1.5 root 243: #elif defined(TARGET_M68K)
244: env->cc_op = CC_OP_FLAGS;
245: env->cc_dest = env->sr & 0xf;
246: env->cc_x = (env->sr >> 4) & 1;
1.1.1.6 root 247: #elif defined(TARGET_ALPHA)
248: #elif defined(TARGET_ARM)
249: #elif defined(TARGET_PPC)
1.1.1.9 root 250: #elif defined(TARGET_MICROBLAZE)
1.1 root 251: #elif defined(TARGET_MIPS)
1.1.1.3 root 252: #elif defined(TARGET_SH4)
1.1.1.6 root 253: #elif defined(TARGET_CRIS)
1.1.1.10 root 254: #elif defined(TARGET_S390X)
1.1.1.3 root 255: /* XXXXX */
1.1 root 256: #else
257: #error unsupported target CPU
258: #endif
259: env->exception_index = -1;
260:
261: /* prepare setjmp context for exception handling */
262: for(;;) {
263: if (setjmp(env->jmp_env) == 0) {
/* on sparc hosts 'env' lives in a global register that glibc may
 * clobber across setjmp; reload it from cpu_single_env */
1.1.1.10 root 264: #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
1.1.1.9 root 265: #undef env
266: env = cpu_single_env;
267: #define env cpu_single_env
268: #endif
1.1 root 269: env->current_tb = NULL;
270: /* if an exception is pending, we execute it here */
271: if (env->exception_index >= 0) {
272: if (env->exception_index >= EXCP_INTERRUPT) {
273: /* exit request from the cpu execution loop */
274: ret = env->exception_index;
1.1.1.7 root 275: if (ret == EXCP_DEBUG)
276: cpu_handle_debug_exception(env);
1.1 root 277: break;
1.1.1.7 root 278: } else {
279: #if defined(CONFIG_USER_ONLY)
1.1 root 280: /* if user mode only, we simulate a fake exception
1.1.1.5 root 281: which will be handled outside the cpu execution
1.1 root 282: loop */
283: #if defined(TARGET_I386)
1.1.1.6 root 284: do_interrupt_user(env->exception_index,
285: env->exception_is_int,
286: env->error_code,
1.1 root 287: env->exception_next_eip);
1.1.1.7 root 288: /* successfully delivered */
289: env->old_exception = -1;
1.1 root 290: #endif
291: ret = env->exception_index;
292: break;
1.1.1.7 root 293: #else
1.1 root 294: #if defined(TARGET_I386)
295: /* simulate a real cpu exception. On i386, it can
296: trigger new exceptions, but we do not handle
297: double or triple faults yet. */
1.1.1.6 root 298: do_interrupt(env->exception_index,
299: env->exception_is_int,
300: env->error_code,
1.1 root 301: env->exception_next_eip, 0);
1.1.1.6 root 302: /* successfully delivered */
303: env->old_exception = -1;
1.1 root 304: #elif defined(TARGET_PPC)
305: do_interrupt(env);
1.1.1.9 root 306: #elif defined(TARGET_MICROBLAZE)
307: do_interrupt(env);
1.1 root 308: #elif defined(TARGET_MIPS)
309: do_interrupt(env);
310: #elif defined(TARGET_SPARC)
1.1.1.7 root 311: do_interrupt(env);
1.1.1.2 root 312: #elif defined(TARGET_ARM)
313: do_interrupt(env);
1.1.1.3 root 314: #elif defined(TARGET_SH4)
315: do_interrupt(env);
1.1.1.6 root 316: #elif defined(TARGET_ALPHA)
317: do_interrupt(env);
318: #elif defined(TARGET_CRIS)
319: do_interrupt(env);
320: #elif defined(TARGET_M68K)
321: do_interrupt(0);
1.1 root 322: #endif
1.1.1.7 root 323: #endif
1.1 root 324: }
325: env->exception_index = -1;
1.1.1.6 root 326: }
1.1 root 327:
/* under KVM the guest runs in-kernel; kvm_cpu_exec() returns on an
 * exit event and we loop back through the setjmp point */
1.1.1.7 root 328: if (kvm_enabled()) {
329: kvm_cpu_exec(env);
330: longjmp(env->jmp_env, 1);
331: }
332:
333: next_tb = 0; /* force lookup of first TB */
/* inner loop: service interrupts, then find and run TBs */
1.1 root 334: for(;;) {
335: interrupt_request = env->interrupt_request;
1.1.1.7 root 336: if (unlikely(interrupt_request)) {
337: if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
338: /* Mask out external interrupts for this step. */
339: interrupt_request &= ~(CPU_INTERRUPT_HARD |
340: CPU_INTERRUPT_FIQ |
341: CPU_INTERRUPT_SMI |
342: CPU_INTERRUPT_NMI);
343: }
1.1.1.6 root 344: if (interrupt_request & CPU_INTERRUPT_DEBUG) {
345: env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
346: env->exception_index = EXCP_DEBUG;
347: cpu_loop_exit();
348: }
349: #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
1.1.1.9 root 350: defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
351: defined(TARGET_MICROBLAZE)
1.1.1.6 root 352: if (interrupt_request & CPU_INTERRUPT_HALT) {
353: env->interrupt_request &= ~CPU_INTERRUPT_HALT;
354: env->halted = 1;
355: env->exception_index = EXCP_HLT;
356: cpu_loop_exit();
357: }
358: #endif
/* per-target interrupt delivery; each accepted interrupt clears
 * next_tb so the new TB is not chained to the old control flow */
1.1 root 359: #if defined(TARGET_I386)
1.1.1.9 root 360: if (interrupt_request & CPU_INTERRUPT_INIT) {
361: svm_check_intercept(SVM_EXIT_INIT);
362: do_cpu_init(env);
363: env->exception_index = EXCP_HALTED;
364: cpu_loop_exit();
365: } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
366: do_cpu_sipi(env);
367: } else if (env->hflags2 & HF2_GIF_MASK) {
1.1.1.7 root 368: if ((interrupt_request & CPU_INTERRUPT_SMI) &&
369: !(env->hflags & HF_SMM_MASK)) {
370: svm_check_intercept(SVM_EXIT_SMI);
371: env->interrupt_request &= ~CPU_INTERRUPT_SMI;
372: do_smm_enter();
373: next_tb = 0;
374: } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
375: !(env->hflags2 & HF2_NMI_MASK)) {
376: env->interrupt_request &= ~CPU_INTERRUPT_NMI;
377: env->hflags2 |= HF2_NMI_MASK;
378: do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
379: next_tb = 0;
1.1.1.9 root 380: } else if (interrupt_request & CPU_INTERRUPT_MCE) {
381: env->interrupt_request &= ~CPU_INTERRUPT_MCE;
382: do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
383: next_tb = 0;
1.1.1.7 root 384: } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
385: (((env->hflags2 & HF2_VINTR_MASK) &&
386: (env->hflags2 & HF2_HIF_MASK)) ||
387: (!(env->hflags2 & HF2_VINTR_MASK) &&
388: (env->eflags & IF_MASK &&
389: !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
390: int intno;
391: svm_check_intercept(SVM_EXIT_INTR);
392: env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
393: intno = cpu_get_pic_interrupt(env);
394: qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
1.1.1.10 root 395: #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
1.1.1.9 root 396: #undef env
397: env = cpu_single_env;
398: #define env cpu_single_env
399: #endif
1.1.1.7 root 400: do_interrupt(intno, 0, 0, 0, 1);
401: /* ensure that no TB jump will be modified as
402: the program flow was changed */
403: next_tb = 0;
1.1.1.6 root 404: #if !defined(CONFIG_USER_ONLY)
1.1.1.7 root 405: } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
406: (env->eflags & IF_MASK) &&
407: !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
408: int intno;
409: /* FIXME: this should respect TPR */
410: svm_check_intercept(SVM_EXIT_VINTR);
411: intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
412: qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
413: do_interrupt(intno, 0, 0, 0, 1);
414: env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
415: next_tb = 0;
1.1 root 416: #endif
1.1.1.7 root 417: }
1.1 root 418: }
419: #elif defined(TARGET_PPC)
420: #if 0
421: if ((interrupt_request & CPU_INTERRUPT_RESET)) {
1.1.1.10 root 422: cpu_reset(env);
1.1 root 423: }
424: #endif
1.1.1.6 root 425: if (interrupt_request & CPU_INTERRUPT_HARD) {
426: ppc_hw_interrupt(env);
427: if (env->pending_interrupts == 0)
1.1.1.2 root 428: env->interrupt_request &= ~CPU_INTERRUPT_HARD;
1.1.1.7 root 429: next_tb = 0;
1.1 root 430: }
1.1.1.9 root 431: #elif defined(TARGET_MICROBLAZE)
432: if ((interrupt_request & CPU_INTERRUPT_HARD)
433: && (env->sregs[SR_MSR] & MSR_IE)
434: && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
435: && !(env->iflags & (D_FLAG | IMM_FLAG))) {
436: env->exception_index = EXCP_IRQ;
437: do_interrupt(env);
438: next_tb = 0;
439: }
1.1 root 440: #elif defined(TARGET_MIPS)
441: if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1.1.1.6 root 442: (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
1.1 root 443: (env->CP0_Status & (1 << CP0St_IE)) &&
1.1.1.6 root 444: !(env->CP0_Status & (1 << CP0St_EXL)) &&
445: !(env->CP0_Status & (1 << CP0St_ERL)) &&
1.1 root 446: !(env->hflags & MIPS_HFLAG_DM)) {
447: /* Raise it */
448: env->exception_index = EXCP_EXT_INTERRUPT;
449: env->error_code = 0;
450: do_interrupt(env);
1.1.1.7 root 451: next_tb = 0;
1.1 root 452: }
453: #elif defined(TARGET_SPARC)
454: if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1.1.1.9 root 455: cpu_interrupts_enabled(env)) {
1.1 root 456: int pil = env->interrupt_index & 15;
457: int type = env->interrupt_index & 0xf0;
458:
459: if (((type == TT_EXTINT) &&
460: (pil == 15 || pil > env->psrpil)) ||
461: type != TT_EXTINT) {
462: env->interrupt_request &= ~CPU_INTERRUPT_HARD;
1.1.1.7 root 463: env->exception_index = env->interrupt_index;
464: do_interrupt(env);
1.1 root 465: env->interrupt_index = 0;
1.1.1.7 root 466: next_tb = 0;
1.1 root 467: }
468: } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
469: //do_interrupt(0, 0, 0, 0, 0);
470: env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
1.1.1.6 root 471: }
1.1.1.2 root 472: #elif defined(TARGET_ARM)
473: if (interrupt_request & CPU_INTERRUPT_FIQ
474: && !(env->uncached_cpsr & CPSR_F)) {
475: env->exception_index = EXCP_FIQ;
476: do_interrupt(env);
1.1.1.7 root 477: next_tb = 0;
1.1.1.2 root 478: }
1.1.1.6 root 479: /* ARMv7-M interrupt return works by loading a magic value
480: into the PC. On real hardware the load causes the
481: return to occur. The qemu implementation performs the
482: jump normally, then does the exception return when the
483: CPU tries to execute code at the magic address.
484: This will cause the magic PC value to be pushed to
485: the stack if an interrupt occured at the wrong time.
486: We avoid this by disabling interrupts when
487: pc contains a magic address. */
1.1.1.2 root 488: if (interrupt_request & CPU_INTERRUPT_HARD
1.1.1.6 root 489: && ((IS_M(env) && env->regs[15] < 0xfffffff0)
490: || !(env->uncached_cpsr & CPSR_I))) {
1.1.1.2 root 491: env->exception_index = EXCP_IRQ;
492: do_interrupt(env);
1.1.1.7 root 493: next_tb = 0;
1.1.1.2 root 494: }
1.1.1.3 root 495: #elif defined(TARGET_SH4)
1.1.1.6 root 496: if (interrupt_request & CPU_INTERRUPT_HARD) {
497: do_interrupt(env);
1.1.1.7 root 498: next_tb = 0;
1.1.1.6 root 499: }
500: #elif defined(TARGET_ALPHA)
501: if (interrupt_request & CPU_INTERRUPT_HARD) {
502: do_interrupt(env);
1.1.1.7 root 503: next_tb = 0;
1.1.1.6 root 504: }
505: #elif defined(TARGET_CRIS)
1.1.1.7 root 506: if (interrupt_request & CPU_INTERRUPT_HARD
507: && (env->pregs[PR_CCS] & I_FLAG)) {
508: env->exception_index = EXCP_IRQ;
1.1.1.6 root 509: do_interrupt(env);
1.1.1.7 root 510: next_tb = 0;
511: }
512: if (interrupt_request & CPU_INTERRUPT_NMI
513: && (env->pregs[PR_CCS] & M_FLAG)) {
514: env->exception_index = EXCP_NMI;
515: do_interrupt(env);
516: next_tb = 0;
1.1.1.6 root 517: }
518: #elif defined(TARGET_M68K)
519: if (interrupt_request & CPU_INTERRUPT_HARD
520: && ((env->sr & SR_I) >> SR_I_SHIFT)
521: < env->pending_level) {
522: /* Real hardware gets the interrupt vector via an
523: IACK cycle at this point. Current emulated
524: hardware doesn't rely on this, so we
525: provide/save the vector when the interrupt is
526: first signalled. */
527: env->exception_index = env->pending_vector;
528: do_interrupt(1);
1.1.1.7 root 529: next_tb = 0;
1.1.1.6 root 530: }
1.1 root 531: #endif
1.1.1.4 root 532: /* Don't use the cached interupt_request value,
533: do_interrupt may have updated the EXITTB flag. */
1.1.1.2 root 534: if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
1.1 root 535: env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
536: /* ensure that no TB jump will be modified as
537: the program flow was changed */
1.1.1.7 root 538: next_tb = 0;
1.1 root 539: }
1.1.1.8 root 540: }
541: if (unlikely(env->exit_request)) {
542: env->exit_request = 0;
543: env->exception_index = EXCP_INTERRUPT;
544: cpu_loop_exit();
1.1 root 545: }
1.1.1.10 root 546: #ifdef CONFIG_DEBUG_EXEC
1.1.1.7 root 547: if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
1.1 root 548: /* restore flags in standard format */
1.1.1.6 root 549: regs_to_env();
550: #if defined(TARGET_I386)
1.1.1.7 root 551: env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
552: log_cpu_state(env, X86_DUMP_CCOP);
1.1 root 553: env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
554: #elif defined(TARGET_ARM)
1.1.1.7 root 555: log_cpu_state(env, 0);
1.1 root 556: #elif defined(TARGET_SPARC)
1.1.1.7 root 557: log_cpu_state(env, 0);
1.1 root 558: #elif defined(TARGET_PPC)
1.1.1.7 root 559: log_cpu_state(env, 0);
1.1.1.5 root 560: #elif defined(TARGET_M68K)
561: cpu_m68k_flush_flags(env, env->cc_op);
562: env->cc_op = CC_OP_FLAGS;
563: env->sr = (env->sr & 0xffe0)
564: | env->cc_dest | (env->cc_x << 4);
1.1.1.7 root 565: log_cpu_state(env, 0);
1.1.1.9 root 566: #elif defined(TARGET_MICROBLAZE)
567: log_cpu_state(env, 0);
1.1 root 568: #elif defined(TARGET_MIPS)
1.1.1.7 root 569: log_cpu_state(env, 0);
1.1.1.3 root 570: #elif defined(TARGET_SH4)
1.1.1.7 root 571: log_cpu_state(env, 0);
1.1.1.6 root 572: #elif defined(TARGET_ALPHA)
1.1.1.7 root 573: log_cpu_state(env, 0);
1.1.1.6 root 574: #elif defined(TARGET_CRIS)
1.1.1.7 root 575: log_cpu_state(env, 0);
1.1 root 576: #else
1.1.1.6 root 577: #error unsupported target CPU
1.1 root 578: #endif
579: }
580: #endif
1.1.1.7 root 581: spin_lock(&tb_lock);
1.1.1.2 root 582: tb = tb_find_fast();
1.1.1.7 root 583: /* Note: we do it here to avoid a gcc bug on Mac OS X when
584: doing it in tb_find_slow */
585: if (tb_invalidated_flag) {
586: /* as some TB could have been invalidated because
587: of memory exceptions while generating the code, we
588: must recompute the hash index here */
589: next_tb = 0;
590: tb_invalidated_flag = 0;
1.1 root 591: }
1.1.1.10 root 592: #ifdef CONFIG_DEBUG_EXEC
1.1.1.7 root 593: qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
594: (long)tb->tc_ptr, tb->pc,
595: lookup_symbol(tb->pc));
1.1 root 596: #endif
1.1.1.2 root 597: /* see if we can patch the calling TB. When the TB
598: spans two pages, we cannot safely do a direct
599: jump. */
1.1 root 600: {
/* next_tb packs the previous TB pointer in the upper bits and the
 * taken jump slot in the low two bits (see tb_add_jump usage) */
1.1.1.10 root 601: if (next_tb != 0 && tb->page_addr[1] == -1) {
1.1.1.7 root 602: tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
1.1 root 603: }
604: }
1.1.1.7 root 605: spin_unlock(&tb_lock);
1.1 root 606: env->current_tb = tb;
1.1.1.7 root 607:
608: /* cpu_interrupt might be called while translating the
609: TB, but before it is linked into a potentially
610: infinite loop and becomes env->current_tb. Avoid
611: starting execution if there is a pending interrupt. */
1.1.1.8 root 612: if (unlikely (env->exit_request))
1.1.1.7 root 613: env->current_tb = NULL;
614:
615: while (env->current_tb) {
616: tc_ptr = tb->tc_ptr;
1.1 root 617: /* execute the generated code */
1.1.1.10 root 618: #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
1.1.1.7 root 619: #undef env
620: env = cpu_single_env;
621: #define env cpu_single_env
622: #endif
623: next_tb = tcg_qemu_tb_exec(tc_ptr);
624: env->current_tb = NULL;
/* exit-reason code 2 in the low bits: icount decrementer expired */
625: if ((next_tb & 3) == 2) {
626: /* Instruction counter expired. */
627: int insns_left;
628: tb = (TranslationBlock *)(long)(next_tb & ~3);
629: /* Restore PC. */
630: cpu_pc_from_tb(env, tb);
631: insns_left = env->icount_decr.u32;
632: if (env->icount_extra && insns_left >= 0) {
633: /* Refill decrementer and continue execution. */
634: env->icount_extra += insns_left;
635: if (env->icount_extra > 0xffff) {
636: insns_left = 0xffff;
637: } else {
638: insns_left = env->icount_extra;
639: }
640: env->icount_extra -= insns_left;
641: env->icount_decr.u16.low = insns_left;
642: } else {
643: if (insns_left > 0) {
644: /* Execute remaining instructions. */
645: cpu_exec_nocache(insns_left, tb);
646: }
647: env->exception_index = EXCP_INTERRUPT;
648: next_tb = 0;
649: cpu_loop_exit();
650: }
651: }
652: }
1.1 root 653: /* reset soft MMU for next block (it can currently
654: only be set by a memory fault) */
1.1.1.6 root 655: } /* for(;;) */
1.1 root 656: } else {
657: env_to_regs();
658: }
659: } /* for(;;) */
660:
661:
/* per-target teardown: fold internal working state back into the
 * architectural format before returning to the caller */
662: #if defined(TARGET_I386)
663: /* restore flags in standard format */
1.1.1.7 root 664: env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
1.1 root 665: #elif defined(TARGET_ARM)
666: /* XXX: Save/restore host fpu exception state?. */
667: #elif defined(TARGET_SPARC)
668: #elif defined(TARGET_PPC)
1.1.1.5 root 669: #elif defined(TARGET_M68K)
670: cpu_m68k_flush_flags(env, env->cc_op);
671: env->cc_op = CC_OP_FLAGS;
672: env->sr = (env->sr & 0xffe0)
673: | env->cc_dest | (env->cc_x << 4);
1.1.1.9 root 674: #elif defined(TARGET_MICROBLAZE)
1.1 root 675: #elif defined(TARGET_MIPS)
1.1.1.3 root 676: #elif defined(TARGET_SH4)
1.1.1.6 root 677: #elif defined(TARGET_ALPHA)
678: #elif defined(TARGET_CRIS)
1.1.1.10 root 679: #elif defined(TARGET_S390X)
1.1.1.3 root 680: /* XXXXX */
1.1 root 681: #else
682: #error unsupported target CPU
683: #endif
1.1.1.5 root 684:
685: /* restore global registers */
686: #include "hostregs_helper.h"
687:
1.1.1.2 root 688: /* fail safe : never use cpu_single_env outside cpu_exec() */
1.1.1.6 root 689: cpu_single_env = NULL;
1.1 root 690: return ret;
691: }
692:
693: /* must only be called from the generated code as an exception can be
694: generated */
/* Intended to invalidate translated code for [start, end); currently a
 * no-op because the real implementation is compiled out (#if 0) — see
 * the PowerPC NIP issue noted below. */
695: void tb_invalidate_page_range(target_ulong start, target_ulong end)
696: {
697: /* XXX: cannot enable it yet because it yields to MMU exception
698: where NIP != read address on PowerPC */
699: #if 0
700: target_ulong phys_addr;
701: phys_addr = get_phys_addr_code(env, start);
702: tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
703: #endif
704: }
705:
706: #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
707:
/* Load segment register 'seg_reg' with 'selector' on CPU state 's'.
 * In real mode or VM86 mode the segment cache is filled directly
 * (base = selector << 4); in protected mode the full descriptor-load
 * helper is used.  The global 'env' is swapped to 's' for the call
 * and restored afterwards. */
708: void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
709: {
710: CPUX86State *saved_env;
711:
712: saved_env = env;
713: env = s;
714: if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
715: selector &= 0xffff;
1.1.1.6 root 716: cpu_x86_load_seg_cache(env, seg_reg, selector,
1.1 root 717: (selector << 4), 0xffff, 0);
718: } else {
1.1.1.7 root 719: helper_load_seg(seg_reg, selector);
1.1 root 720: }
721: env = saved_env;
722: }
723:
1.1.1.6 root 724: void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
1.1 root 725: {
726: CPUX86State *saved_env;
727:
728: saved_env = env;
729: env = s;
1.1.1.6 root 730:
731: helper_fsave(ptr, data32);
1.1 root 732:
733: env = saved_env;
734: }
735:
1.1.1.6 root 736: void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
1.1 root 737: {
738: CPUX86State *saved_env;
739:
740: saved_env = env;
741: env = s;
1.1.1.6 root 742:
743: helper_frstor(ptr, data32);
1.1 root 744:
745: env = saved_env;
746: }
747:
748: #endif /* TARGET_I386 */
749:
750: #if !defined(CONFIG_SOFTMMU)
751:
752: #if defined(TARGET_I386)
1.1.1.10 root 753: #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
754: #else
755: #define EXCEPTION_ACTION cpu_loop_exit()
756: #endif
1.1 root 757:
758: /* 'pc' is the host PC at which the exception was raised. 'address' is
759: the effective address of the memory exception. 'is_write' is 1 if a
760: write caused the exception and otherwise 0'. 'old_set' is the
761: signal set which should be restored */
/* Returns 1 when the fault was consumed (page unprotected or guest MMU
 * fault delivered), 0 when it is not a guest MMU fault and the caller
 * must treat it as a real host fault.  On a genuine guest CPU fault,
 * EXCEPTION_ACTION longjmps into cpu_exec() and never returns here. */
762: static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1.1.1.6 root 763: int is_write, sigset_t *old_set,
1.1 root 764: void *puc)
765: {
766: TranslationBlock *tb;
767: int ret;
768:
769: if (cpu_single_env)
770: env = cpu_single_env; /* XXX: find a correct solution for multithread */
771: #if defined(DEBUG_SIGNAL)
1.1.1.6 root 772: qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1.1 root 773: pc, address, is_write, *(unsigned long *)old_set);
774: #endif
775: /* XXX: locking issue */
/* write to a page holding translated code: unprotect it and retry */
1.1.1.3 root 776: if (is_write && page_unprotect(h2g(address), pc, puc)) {
1.1 root 777: return 1;
778: }
779:
780: /* see if it is an MMU fault */
1.1.1.10 root 781: ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1.1 root 782: if (ret < 0)
783: return 0; /* not an MMU fault */
784: if (ret == 0)
785: return 1; /* the MMU fault was handled without causing real CPU fault */
786: /* now we have a real cpu fault */
787: tb = tb_find_pc(pc);
788: if (tb) {
789: /* the PC is inside the translated code. It means that we have
790: a virtual CPU fault */
791: cpu_restore_state(tb, env, pc, puc);
792: }
793:
794: /* we restore the process signal mask as the sigreturn should
795: do it (XXX: use sigsetjmp) */
796: sigprocmask(SIG_SETMASK, old_set, NULL);
/* raises the guest exception (target-dependent macro); does not return */
1.1.1.10 root 797: EXCEPTION_ACTION;
1.1 root 798:
799: /* never comes here */
800: return 1;
801: }
802:
1.1.1.6 root 803: #if defined(__i386__)
804:
805: #if defined(__APPLE__)
806: # include <sys/ucontext.h>
807:
808: # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
809: # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
810: # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1.1.1.9 root 811: # define MASK_sig(context) ((context)->uc_sigmask)
1.1.1.10 root 812: #elif defined (__NetBSD__)
813: # include <ucontext.h>
814:
815: # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
816: # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
817: # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
818: # define MASK_sig(context) ((context)->uc_sigmask)
819: #elif defined (__FreeBSD__) || defined(__DragonFly__)
820: # include <ucontext.h>
821:
822: # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
823: # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
824: # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
825: # define MASK_sig(context) ((context)->uc_sigmask)
1.1.1.9 root 826: #elif defined(__OpenBSD__)
827: # define EIP_sig(context) ((context)->sc_eip)
828: # define TRAP_sig(context) ((context)->sc_trapno)
829: # define ERROR_sig(context) ((context)->sc_err)
830: # define MASK_sig(context) ((context)->sc_mask)
1.1.1.6 root 831: #else
832: # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
833: # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
834: # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1.1.1.9 root 835: # define MASK_sig(context) ((context)->uc_sigmask)
1.1.1.6 root 836: #endif
837:
/* i386-host SIGSEGV/SIGBUS handler: extract the faulting host PC and
 * trap number from the OS-specific signal context and forward to
 * handle_cpu_signal().  Trap 0xe is the x86 page fault; bit 1 of its
 * error code indicates a write access. */
838: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 839: void *puc)
840: {
1.1.1.5 root 841: siginfo_t *info = pinfo;
1.1.1.10 root 842: #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
843: ucontext_t *uc = puc;
844: #elif defined(__OpenBSD__)
1.1.1.9 root 845: struct sigcontext *uc = puc;
846: #else
1.1 root 847: struct ucontext *uc = puc;
1.1.1.9 root 848: #endif
1.1 root 849: unsigned long pc;
850: int trapno;
851:
852: #ifndef REG_EIP
853: /* for glibc 2.1 */
854: #define REG_EIP EIP
855: #define REG_ERR ERR
856: #define REG_TRAPNO TRAPNO
857: #endif
1.1.1.5 root 858: pc = EIP_sig(uc);
859: trapno = TRAP_sig(uc);
1.1.1.6 root 860: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
861: trapno == 0xe ?
862: (ERROR_sig(uc) >> 1) & 1 : 0,
1.1.1.9 root 863: &MASK_sig(uc), puc);
1.1 root 864: }
865:
#elif defined(__x86_64__)

/* Accessors for the fields of the host signal context that the handler
   below needs: the faulting PC, the hardware trap number, the page-fault
   error code and the signal mask saved at fault time.  The layout of the
   context differs per host OS, hence the per-OS macro sets. */
#ifdef __NetBSD__
#define PC_sig(context) _UC_MACHINE_PC(context)
#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context) ((context)->sc_rip)
#define TRAP_sig(context) ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context) ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context) ((context)->uc_sigmask)
#else
#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#endif

/* Host fault handler for x86_64 hosts.  Extracts the faulting PC and,
   for page faults (trap 0xe), the write/read flag from bit 1 of the
   error code, then forwards everything to the generic
   handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    /* is_write is only meaningful for page faults; other traps pass 0. */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
911:
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(link, context) /* Link register */
# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
/* Float Registers access.  The FP registers are not named fields of the
   kernel pt_regs; they sit at fixed raw offsets past the 48 GPR-area
   words, hence the hand-computed pointer arithmetic below. */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context) REG_sig(dar, context)
# define DSISR_sig(context) REG_sig(dsisr, context)
# define TRAP_sig(context) REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context)
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(lr, context) /* Link register */
# define CR_sig(context) REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

/* Host fault handler for PowerPC hosts.  A data storage interrupt with
   DSISR bit 0x02000000 set indicates the faulting access was a store
   (0x400 is the instruction storage interrupt, where DSISR does not
   apply).  The result is forwarded to the generic handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
984:
985: #elif defined(__alpha__)
986:
1.1.1.6 root 987: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 988: void *puc)
989: {
1.1.1.5 root 990: siginfo_t *info = pinfo;
1.1 root 991: struct ucontext *uc = puc;
992: uint32_t *pc = uc->uc_mcontext.sc_pc;
993: uint32_t insn = *pc;
994: int is_write = 0;
995:
996: /* XXX: need kernel patch to get write flag faster */
997: switch (insn >> 26) {
998: case 0x0d: // stw
999: case 0x0e: // stb
1000: case 0x0f: // stq_u
1001: case 0x24: // stf
1002: case 0x25: // stg
1003: case 0x26: // sts
1004: case 0x27: // stt
1005: case 0x2c: // stl
1006: case 0x2d: // stq
1007: case 0x2e: // stl_c
1008: case 0x2f: // stq_c
1009: is_write = 1;
1010: }
1011:
1.1.1.6 root 1012: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1 root 1013: is_write, &uc->uc_sigmask, puc);
1014: }
#elif defined(__sparc__)

/* Host fault handler for SPARC hosts.  The kernel does not report
   whether the faulting access was a write, so the store/atomic opcodes
   of the faulting instruction (format 3, insn>>30 == 3) are decoded by
   hand before forwarding to the generic handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    /* 32-bit ABI (and Solaris): the saved registers and the signal mask
       are laid out directly after siginfo in the signal frame.
       NOTE(review): this raw-offset layout (regs at info+1, mask at
       regs+20, PC at regs[1]) is sigframe-dependent -- confirm against
       the host kernel/libc. */
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        /* op3 field (bits 24..19) selects the memory operation. */
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1071:
1072: #elif defined(__arm__)
1073:
1.1.1.6 root 1074: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 1075: void *puc)
1076: {
1.1.1.5 root 1077: siginfo_t *info = pinfo;
1.1 root 1078: struct ucontext *uc = puc;
1079: unsigned long pc;
1080: int is_write;
1.1.1.6 root 1081:
1.1.1.7 root 1082: #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1.1 root 1083: pc = uc->uc_mcontext.gregs[R15];
1.1.1.7 root 1084: #else
1085: pc = uc->uc_mcontext.arm_pc;
1086: #endif
1.1 root 1087: /* XXX: compute is_write */
1088: is_write = 0;
1.1.1.6 root 1089: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1 root 1090: is_write,
1.1.1.5 root 1091: &uc->uc_sigmask, puc);
1.1 root 1092: }
1093:
1094: #elif defined(__mc68000)
1095:
1.1.1.6 root 1096: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 1097: void *puc)
1098: {
1.1.1.5 root 1099: siginfo_t *info = pinfo;
1.1 root 1100: struct ucontext *uc = puc;
1101: unsigned long pc;
1102: int is_write;
1.1.1.6 root 1103:
1.1 root 1104: pc = uc->uc_mcontext.gregs[16];
1105: /* XXX: compute is_write */
1106: is_write = 0;
1.1.1.6 root 1107: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1.1 root 1108: is_write,
1109: &uc->uc_sigmask, puc);
1110: }
1111:
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID	1
#endif

/* Host fault handler for IA-64 hosts.  When the kernel supplies a
   valid ISR word with the siginfo, bit 33 (ISR.W) tells whether the
   faulting access was a write; otherwise 0 is passed on.  Forwards to
   the generic handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        /* si_segvflags/si_isr are IA-64-specific siginfo fields. */
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1145:
1146: #elif defined(__s390__)
1147:
1.1.1.6 root 1148: int cpu_signal_handler(int host_signum, void *pinfo,
1.1 root 1149: void *puc)
1150: {
1.1.1.5 root 1151: siginfo_t *info = pinfo;
1.1 root 1152: struct ucontext *uc = puc;
1153: unsigned long pc;
1154: int is_write;
1.1.1.6 root 1155:
1.1 root 1156: pc = uc->uc_mcontext.psw.addr;
1157: /* XXX: compute is_write */
1158: is_write = 0;
1.1.1.6 root 1159: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1160: is_write, &uc->uc_sigmask, puc);
1161: }
1162:
1163: #elif defined(__mips__)
1164:
1165: int cpu_signal_handler(int host_signum, void *pinfo,
1166: void *puc)
1167: {
1168: siginfo_t *info = pinfo;
1169: struct ucontext *uc = puc;
1170: greg_t pc = uc->uc_mcontext.pc;
1171: int is_write;
1172:
1173: /* XXX: compute is_write */
1174: is_write = 0;
1175: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1176: is_write, &uc->uc_sigmask, puc);
1.1 root 1177: }
1178:
1.1.1.7 root 1179: #elif defined(__hppa__)
1180:
1181: int cpu_signal_handler(int host_signum, void *pinfo,
1182: void *puc)
1183: {
1184: struct siginfo *info = pinfo;
1185: struct ucontext *uc = puc;
1186: unsigned long pc;
1187: int is_write;
1188:
1189: pc = uc->uc_mcontext.sc_iaoq[0];
1190: /* FIXME: compute is_write */
1191: is_write = 0;
1192: return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1193: is_write,
1194: &uc->uc_sigmask, puc);
1195: }
1196:
1.1 root 1197: #else
1198:
1199: #error host CPU specific signal handler needed
1200:
1201: #endif
1202:
1203: #endif /* !defined(CONFIG_SOFTMMU) */
unix.superglobalmegacorp.com