Annotation of qemu/cpu-exec.c, revision 1.1.1.14
1.1 root 1: /*
2: * i386 emulator main execution loop
1.1.1.6 root 3: *
1.1 root 4: * Copyright (c) 2003-2005 Fabrice Bellard
5: *
6: * This library is free software; you can redistribute it and/or
7: * modify it under the terms of the GNU Lesser General Public
8: * License as published by the Free Software Foundation; either
9: * version 2 of the License, or (at your option) any later version.
10: *
11: * This library is distributed in the hope that it will be useful,
12: * but WITHOUT ANY WARRANTY; without even the implied warranty of
13: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14: * Lesser General Public License for more details.
15: *
16: * You should have received a copy of the GNU Lesser General Public
1.1.1.9 root 17: * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1.1 root 18: */
19: #include "config.h"
1.1.1.14! root 20: #include "cpu.h"
1.1 root 21: #include "disas.h"
1.1.1.7 root 22: #include "tcg.h"
1.1.1.12 root 23: #include "qemu-barrier.h"
1.1 root 24:
1.1.1.7 root 25: int tb_invalidated_flag;
1.1.1.6 root 26:
1.1.1.10 root 27: //#define CONFIG_DEBUG_EXEC
1.1.1.6 root 28:
/* Public wrapper: report whether the CPU has pending work.  Simply
   forwards to the per-target cpu_has_work() predicate. */
bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
33:
/* Abandon execution of the current TB and jump back to the setjmp
   point inside cpu_exec().  Callers set env->exception_index before
   calling so the main loop knows why execution stopped. */
void cpu_loop_exit(CPUState *env)
{
    /* Clear current_tb first: after the longjmp we are no longer
       executing translated code. */
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
1.1.1.6 root 39:
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* No exception is pending: clear the index and return to the
       setjmp point in cpu_exec().  puc (the host signal context) is
       currently unused in the softmmu case. */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
1.1 root 52:
1.1.1.7 root 53: /* Execute the code without caching the generated code. An interpreter
54: could be used if available. */
1.1.1.14! root 55: static void cpu_exec_nocache(CPUState *env, int max_cycles,
! 56: TranslationBlock *orig_tb)
1.1.1.7 root 57: {
58: unsigned long next_tb;
59: TranslationBlock *tb;
60:
61: /* Should never happen.
62: We only end up here when an existing TB is too long. */
63: if (max_cycles > CF_COUNT_MASK)
64: max_cycles = CF_COUNT_MASK;
65:
66: tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
67: max_cycles);
68: env->current_tb = tb;
69: /* execute the generated code */
1.1.1.14! root 70: next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
1.1.1.12 root 71: env->current_tb = NULL;
1.1.1.7 root 72:
73: if ((next_tb & 3) == 2) {
74: /* Restore PC. This may happen if async event occurs before
75: the TB starts executing. */
76: cpu_pc_from_tb(env, tb);
77: }
78: tb_phys_invalidate(tb, -1);
79: tb_free(tb);
80: }
1.1.1.2 root 81:
/* Slow-path TB lookup: search the physical-address hash table for a TB
   matching (pc, cs_base, flags); translate a new one if none is found.
   The result is also inserted into env->tb_jmp_cache so that the next
   tb_find_fast() for this pc hits. */
static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    /* ptb1 tracks the link that points at the current candidate, so a
       hit can be unlinked from the chain below. */
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                /* TB spans two guest pages: the second page must still
                   map to the same physical page for the TB to be valid. */
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list.  *ptb1 is NULL
       when we arrived via not_found (tb is freshly generated and
       already linked by tb_gen_code). */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
136:
1.1.1.14! root 137: static inline TranslationBlock *tb_find_fast(CPUState *env)
1.1.1.2 root 138: {
139: TranslationBlock *tb;
140: target_ulong cs_base, pc;
1.1.1.7 root 141: int flags;
1.1.1.2 root 142:
143: /* we record a subset of the CPU state. It will
144: always be the same before a given translated block
145: is executed. */
1.1.1.7 root 146: cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1.1.1.2 root 147: tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
1.1.1.7 root 148: if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
149: tb->flags != flags)) {
1.1.1.14! root 150: tb = tb_find_slow(env, pc, cs_base, flags);
1.1.1.2 root 151: }
152: return tb;
153: }
154:
1.1.1.7 root 155: static CPUDebugExcpHandler *debug_excp_handler;
156:
157: CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
158: {
159: CPUDebugExcpHandler *old_handler = debug_excp_handler;
160:
161: debug_excp_handler = handler;
162: return old_handler;
163: }
164:
165: static void cpu_handle_debug_exception(CPUState *env)
166: {
167: CPUWatchpoint *wp;
168:
1.1.1.14! root 169: if (!env->watchpoint_hit) {
! 170: QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1.1.1.7 root 171: wp->flags &= ~BP_WATCHPOINT_HIT;
1.1.1.14! root 172: }
! 173: }
! 174: if (debug_excp_handler) {
1.1.1.7 root 175: debug_excp_handler(env);
1.1.1.14! root 176: }
1.1.1.7 root 177: }
1.1.1.2 root 178:
/* main execution loop */

/* Global exit flag, mirrored into env->exit_request on entry to
   cpu_exec().  sig_atomic_t suggests it is written from signal
   context -- NOTE(review): confirm the writers. */
volatile sig_atomic_t exit_request;

/* Run translated code for @env until an exception or interrupt forces
   an exit.  Returns an EXCP_* code taken from env->exception_index
   (or EXCP_HALTED immediately if the CPU is halted with no work).
   Non-local exits from generated code arrive via longjmp to
   env->jmp_env (see cpu_loop_exit()). */
int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    /* next_tb: pointer to the previously executed TB with status bits
       in the low 2 bits (used as the jump-slot index for TB chaining;
       value 2 means the instruction counter expired).  0 forces a
       fresh lookup with no chaining. */
    unsigned long next_tb;

    /* A halted CPU only resumes when it has pending work. */
    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    /* System emulation: deliver the exception to the
                       guest and keep running. */
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
                    /* Per-target hardware interrupt delivery.  Each
                       branch checks the target's interrupt-enable
                       state before calling do_interrupt(), and zeroes
                       next_tb so no stale TB chaining survives the
                       control-flow change. */
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(env, SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            /* The 16-bit decrementer holds at most 0xffff
                               instructions per refill. */
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
unix.superglobalmegacorp.com