/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
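
/* Exception handling in this file is built on setjmp()/longjmp():
   cpu_exec() arms env->jmp_env with setjmp(), and any helper that must
   abandon the current translated block (guest exception, host signal,
   exit request) longjmps back to the top of the main loop.
   Illustrative sketch of the pattern (run_translated_code() is a
   placeholder, not a real function in this file):

       if (setjmp(env->jmp_env) == 0) {
           run_translated_code();   // may longjmp() out at any point
       } else {
           // resumed here after cpu_loop_exit()
       }
 */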
#ifndef TARGET_SPARC
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1;
#if defined(reg_T2)
    int saved_T2;
#endif
    CPUState *saved_env;
#if defined(TARGET_I386)
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    target_ulong cs_base, pc;
    uint8_t *tc_ptr;
    unsigned int flags;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
#if defined(reg_T2)
    saved_T2 = T2;
#endif
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
#ifdef reg_EAX
    saved_EAX = EAX;
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
#endif

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
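    /* Note: this is the "lazy condition code" scheme.  CC_OP names the
       last flag-setting operation and CC_SRC holds its operands; flags
       are only materialized on demand via cc_table[CC_OP].compute_all().
       CC_OP_EFLAGS means CC_SRC already holds the flag bits verbatim.
       DF is kept as a stride of +1/-1 rather than a bit: eflags bit 10
       set gives DF = 1 - 2*1 = -1, so string ops can simply add DF to
       ESI/EDI. */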
#elif defined(TARGET_ARM)
    {
        unsigned int psr;
        psr = env->cpsr;
        env->CF = (psr >> 29) & 1;
        env->NZF = (psr & 0xc0000000) ^ 0x40000000;
        env->VF = (psr << 3) & 0x80000000;
        env->QF = (psr >> 27) & 1;
        env->cpsr = psr & ~CACHED_CPSR_BITS;
    }
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

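    /* The loop below is a setjmp-based dispatcher: setjmp() returns 0
       on the direct call so the body runs; when generated code or a
       helper raises an exception it longjmps back here with a nonzero
       value, the else branch resynchronizes the register globals and
       the outer for(;;) re-arms the jump buffer. */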
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
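
            /* Note on the kqemu return protocol above: 1 means a guest
               exception was raised (longjmp to deliver it), 2 means the
               access needs the software MMU (fall through and
               translate); any other value with no pending interrupt
               restarts the loop, which will normally re-enter kqemu. */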

            T0 = 0; /* force lookup of first TB */
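            /* T0 doubles as the chaining cookie: a translated block
               that exits through a direct jump leaves (TB pointer |
               jump slot index) in T0 -- see the (T0 & ~3) / (T0 & 3)
               decoding at the tb_add_jump() call below -- so clearing
               T0 is how the loop forbids patching a jump after the
               control flow has changed. */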
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (msr_ee != 0) {
                        if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                            /* Raise it */
                            env->exception_index = EXCP_EXTERNAL;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                            /* Raise it */
                            env->exception_index = EXCP_DECR;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                        }
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                        !(env->hflags & MIPS_HFLAG_EXL) &&
                        !(env->hflags & MIPS_HFLAG_ERL) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
#ifdef reg_EAX
                    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_EBX
                    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ECX
                    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
                    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_ESI
                    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
                    env->regs[R_EDI] = EDI;
#endif
#ifdef reg_EBP
                    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESP
                    env->regs[R_ESP] = ESP;
#endif
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    env->cpsr = compute_cpsr();
                    cpu_dump_state(env, logfile, fprintf, 0);
                    env->cpsr &= ~CACHED_CPSR_BITS;
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
#if defined(TARGET_I386)
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
                flags = env->thumb | (env->vfp.vec_len << 1)
                        | (env->vfp.vec_stride << 4);
                cs_base = 0;
                pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
                flags = (env->pstate << 2) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
                flags = env->psrs | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1);
#endif
                cs_base = env->npc;
                pc = env->pc;
#elif defined(TARGET_PPC)
                flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
                        (msr_se << MSR_SE) | (msr_le << MSR_LE);
                cs_base = 0;
                pc = env->nip;
#elif defined(TARGET_MIPS)
                flags = env->hflags & MIPS_HFLAGS_TMASK;
                cs_base = 0;
                pc = env->PC;
#else
#error unsupported CPU
#endif
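                /* (pc, cs_base, flags) is the lookup key for a
                   translated block: flags must capture every bit of CPU
                   state the code generator depended on (CPL, operand
                   size, MMU mode, ...), otherwise a stale TB compiled
                   under different assumptions could be reused.  Lookup
                   is two-level: a fast virtual-pc hash via tb_find(),
                   then the physical-address hash below. */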
                tb = tb_find(&ptb, pc, cs_base, flags);
                if (!tb) {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed */
                            if (tb->page_addr[1] != -1) {
                                virt_page2 = (pc & TARGET_PAGE_MASK) +
                                    TARGET_PAGE_SIZE;
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc(pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc(pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = cs_base;
                    tb->flags = flags;
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

                    /* check next page if needed */
                    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);
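                    /* a TB whose guest code straddles a page boundary
                       records both physical pages in page_addr[0] and
                       page_addr[1]; a write to either page must
                       invalidate it, which is why the match loop above
                       re-checks the second page as well */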

                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. */
                {
                    if (T0 != 0
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
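                /* this is direct block chaining: the previous TB exited
                   through one of its jump slots (T0 & 3) with its own
                   address in (T0 & ~3); tb_add_jump() patches that slot
                   to branch straight to the new block's code so the
                   next pass skips this lookup path entirely */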
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
                        asm(".globl exec_loop\n"
                            "\n"
                            "debug1:\n"
                            "    pushl %%ebp\n"
                            "    fs movl %10, %9\n"
                            "    fs movl %11, %%eax\n"
                            "    andl $0x400, %%eax\n"
                            "    fs orl %8, %%eax\n"
                            "    pushl %%eax\n"
                            "    popf\n"
                            "    fs movl %%esp, %12\n"
                            "    fs movl %0, %%eax\n"
                            "    fs movl %1, %%ecx\n"
                            "    fs movl %2, %%edx\n"
                            "    fs movl %3, %%ebx\n"
                            "    fs movl %4, %%esp\n"
                            "    fs movl %5, %%ebp\n"
                            "    fs movl %6, %%esi\n"
                            "    fs movl %7, %%edi\n"
                            "    fs jmp *%9\n"
                            "exec_loop:\n"
                            "    fs movl %%esp, %4\n"
                            "    fs movl %12, %%esp\n"
                            "    fs movl %%eax, %0\n"
                            "    fs movl %%ecx, %1\n"
                            "    fs movl %%edx, %2\n"
                            "    fs movl %%ebx, %3\n"
                            "    fs movl %%ebp, %5\n"
                            "    fs movl %%esi, %6\n"
                            "    fs movl %%edi, %7\n"
                            "    pushf\n"
                            "    popl %%eax\n"
                            "    movl %%eax, %%ecx\n"
                            "    andl $0x400, %%ecx\n"
                            "    shrl $9, %%ecx\n"
                            "    andl $0x8d5, %%eax\n"
                            "    fs movl %%eax, %8\n"
                            "    movl $1, %%eax\n"
                            "    subl %%ecx, %%eax\n"
                            "    fs movl %%eax, %11\n"
                            "    fs movl %9, %%ebx\n" /* get T0 value */
                            "    popl %%ebp\n"
                            :
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "a" (gen_func),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                            : "%ecx", "%edx"
                            );
                    }
                }
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
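                /* per-host dispatch notes: on sparc the call is made in
                   inline asm so %o7/%i0 stay under our control; ia64
                   function "pointers" are really (entry, gp)
                   descriptors, so a temporary struct fptr is forged
                   around tc_ptr before the indirect call; other hosts
                   can call the code buffer through a plain function
                   pointer */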
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
#if defined(reg_T2)
    T2 = saved_T2;
#endif
    env = saved_env;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}
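
/* The real mode / vm86 branch above implements the classic x86 rule
   that a segment's linear base is simply selector << 4 with a 64K
   limit: e.g. loading a segment register with 0xf000 yields a base of
   0xf0000.  In protected mode the descriptor tables must be consulted
   instead, hence load_seg(). */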

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
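
/* The contract of handle_cpu_signal(), shared by every per-target copy
   below: return 0 to let the host deal with the signal, return 1 if
   the fault was fully absorbed; when the fault must become a guest
   exception the function does not actually return, it longjmps back
   into cpu_exec() instead. */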

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault (this !CONFIG_SOFTMMU path is user
       mode only, so is_user is always 1) */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x%08x error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
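
/* Trap number 0xe is the x86 page fault; its pushed error code encodes
   the cause bitwise and bit 1 (W/R) is set when the faulting access was
   a write, hence the (REG_ERR >> 1) & 1 above.  For any other trap,
   is_write is conservatively reported as 0. */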

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
# define si_flags _sifields._sigfault._si_pad0
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
{
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_flags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */