--- version 1.1.1.6	2018/04/24 16:47:11
+++ version 1.1.1.7	2018/04/24 16:50:25
@@ -15 +15 @@
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  */
 #include "config.h"
+#define CPU_NO_GLOBAL_REGS
 #include "exec.h"
 #include "disas.h"
+#include "tcg.h"
+#include "kvm.h"
 
 #if !defined(CONFIG_SOFTMMU)
 #undef EAX
@@ -32 +35 @@
 #undef EDI
 #undef EIP
 #include <signal.h>
+#ifdef __linux__
 #include <sys/ucontext.h>
 #endif
+#endif
-
-int tb_invalidated_flag;
-
-//#define DEBUG_EXEC
-//#define DEBUG_SIGNAL
-
-#define SAVE_GLOBALS()
-#define RESTORE_GLOBALS()
 
 #if defined(__sparc__) && !defined(HOST_SOLARIS)
-#include <features.h>
-#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
-                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
 // Work around ugly bugs in glibc that mangle global register contents
+#undef env
+#define env cpu_single_env
+#endif
+
+int tb_invalidated_flag;
+
+//#define DEBUG_EXEC
+//#define DEBUG_SIGNAL
-
-static volatile void *saved_env;
-static volatile unsigned long saved_t0, saved_i7;
-#undef SAVE_GLOBALS
-#define SAVE_GLOBALS() do {                                     \
-        saved_env = env;                                        \
-        saved_t0 = T0;                                          \
-        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
-    } while(0)
-
-#undef RESTORE_GLOBALS
-#define RESTORE_GLOBALS() do {                                  \
-        env = (void *)saved_env;                                \
-        T0 = saved_t0;                                          \
-        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
-    } while(0)
-
-static int sparc_setjmp(jmp_buf buf)
-{
-    int ret;
-
-    SAVE_GLOBALS();
-    ret = setjmp(buf);
-    RESTORE_GLOBALS();
-    return ret;
-}
-#undef setjmp
-#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
-
-static void sparc_longjmp(jmp_buf buf, int val)
-{
-    SAVE_GLOBALS();
-    longjmp(buf, val);
-}
-#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
-#endif
-#endif
 
 void cpu_loop_exit(void)
 {
@@ -94 +59 @@ void cpu_loop_exit(void)
     longjmp(env->jmp_env, 1);
 }
 
-#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
-#define reg_T2
-#endif
-
 /* exit the current TB from a signal handler. The host registers are
    restored in a state compatible with the CPU emulator
  */
 void cpu_resume_from_signal(CPUState *env1, void *puc)
 {
 #if !defined(CONFIG_SOFTMMU)
+#ifdef __linux__
     struct ucontext *uc = puc;
+#elif defined(__OpenBSD__)
+    struct sigcontext *uc = puc;
+#endif
 #endif
 
     env = env1;
@@ -114 +79 @@ void cpu_resume_from_signal(CPUState *env1, void *puc)
 #if !defined(CONFIG_SOFTMMU)
     if (puc) {
         /* XXX: use siglongjmp ? */
+#ifdef __linux__
         sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+#elif defined(__OpenBSD__)
+        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
+#endif
     }
 #endif
+    env->exception_index = -1;
     longjmp(env->jmp_env, 1);
 }
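
A note on the pattern above: cpu_loop_exit() and cpu_resume_from_signal() both unwind
back into cpu_exec() with longjmp(), however deep inside generated code the exception
was raised. A minimal, self-contained sketch of that control flow (toy names and
values, not QEMU's actual API):

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf jmp_env;
    static int exception_index = -1;

    static void toy_loop_exit(int index)
    {
        exception_index = index;        /* record why we are leaving */
        longjmp(jmp_env, 1);            /* unwind straight back to the loop */
    }

    static void toy_tb(int step)
    {
        if (step == 3)
            toy_loop_exit(42);          /* "exception" deep inside generated code */
    }

    int main(void)
    {
        if (setjmp(jmp_env) == 0) {
            for (int step = 0; ; step++)    /* the execution loop */
                toy_tb(step);
        }
        printf("left the loop, exception_index=%d\n", exception_index);
        return 0;
    }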
|
|
|
+/* Execute the code without caching the generated code. An interpreter
+   could be used if available. */
+static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
+{
+    unsigned long next_tb;
+    TranslationBlock *tb;
+
+    /* Should never happen.
+       We only end up here when an existing TB is too long.  */
+    if (max_cycles > CF_COUNT_MASK)
+        max_cycles = CF_COUNT_MASK;
+
+    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
+                     max_cycles);
+    env->current_tb = tb;
+    /* execute the generated code */
+    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
+
+    if ((next_tb & 3) == 2) {
+        /* Restore PC.  This may happen if async event occurs before
+           the TB starts executing.  */
+        cpu_pc_from_tb(env, tb);
+    }
+    tb_phys_invalidate(tb, -1);
+    tb_free(tb);
+}
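
The new cpu_exec_nocache() tests (next_tb & 3) == 2: tcg_qemu_tb_exec() hands back the
last TB as a pointer with status packed into its two low (alignment) bits. The tagging
idiom in isolation (the struct layout and the flag value 2 are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct TB { const char *name; } TB;

    int main(void)
    {
        static TB tb = { "toy-tb" };            /* statics are suitably aligned */
        uintptr_t next_tb = (uintptr_t)&tb | 2; /* 2: "TB interrupted before it ran" */

        TB *ptb = (TB *)(next_tb & ~(uintptr_t)3);  /* strip the tag bits */
        unsigned flags = (unsigned)(next_tb & 3);

        printf("tb=%s flags=%u\n", ptb->name, flags);
        if (flags == 2)
            puts("would restore the guest PC from the TB here");
        return 0;
    }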
|
|
 static TranslationBlock *tb_find_slow(target_ulong pc,
                                       target_ulong cs_base,
                                       uint64_t flags)
 {
     TranslationBlock *tb, **ptb1;
-    int code_gen_size;
     unsigned int h;
     target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
-    uint8_t *tc_ptr;
-
-    spin_lock(&tb_lock);
 
     tb_invalidated_flag = 0;
 
@@ -165 +157 @@ static TranslationBlock *tb_find_slow(target_ulong pc,
         ptb1 = &tb->phys_hash_next;
     }
 not_found:
     /* if no translated code available, then translate it now */
-    tb = tb_alloc(pc);
-    if (!tb) {
-        /* flush must be done */
-        tb_flush(env);
-        /* cannot fail at this point */
-        tb = tb_alloc(pc);
-        /* don't forget to invalidate previous TB info */
-        tb_invalidated_flag = 1;
-    }
-    tc_ptr = code_gen_ptr;
-    tb->tc_ptr = tc_ptr;
-    tb->cs_base = cs_base;
-    tb->flags = flags;
-    SAVE_GLOBALS();
-    cpu_gen_code(env, tb, &code_gen_size);
-    RESTORE_GLOBALS();
-    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
-
-    /* check next page if needed */
-    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
-    phys_page2 = -1;
-    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
-        phys_page2 = get_phys_addr_code(env, virt_page2);
-    }
-    tb_link_phys(tb, phys_pc, phys_page2);
+    tb = tb_gen_code(env, pc, cs_base, flags, 0);
 
 found:
     /* we add the TB in the virtual pc hash table */
     env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
-    spin_unlock(&tb_lock);
     return tb;
 }
|
|
@@ -203 +170 @@ static inline TranslationBlock *tb_find_fast(void)
 {
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint64_t flags;
+    int flags;
 
     /* we record a subset of the CPU state. It will
        always be the same before a given translated block
        is executed. */
-#if defined(TARGET_I386)
-    flags = env->hflags;
-    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
-    flags |= env->intercept;
-    cs_base = env->segs[R_CS].base;
-    pc = cs_base + env->eip;
-#elif defined(TARGET_ARM)
-    flags = env->thumb | (env->vfp.vec_len << 1)
-            | (env->vfp.vec_stride << 4);
-    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
-        flags |= (1 << 6);
-    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
-        flags |= (1 << 7);
-    flags |= (env->condexec_bits << 8);
-    cs_base = 0;
-    pc = env->regs[15];
-#elif defined(TARGET_SPARC)
-#ifdef TARGET_SPARC64
-    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
-    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
-        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
-#else
-    // FPU enable . Supervisor
-    flags = (env->psref << 4) | env->psrs;
-#endif
-    cs_base = env->npc;
-    pc = env->pc;
-#elif defined(TARGET_PPC)
-    flags = env->hflags;
-    cs_base = 0;
-    pc = env->nip;
-#elif defined(TARGET_MIPS)
-    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
-    cs_base = 0;
-    pc = env->PC[env->current_tc];
-#elif defined(TARGET_M68K)
-    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
-            | (env->sr & SR_S)            /* Bit  13 */
-            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
-    cs_base = 0;
-    pc = env->pc;
-#elif defined(TARGET_SH4)
-    flags = env->flags;
-    cs_base = 0;
-    pc = env->pc;
-#elif defined(TARGET_ALPHA)
-    flags = env->ps;
-    cs_base = 0;
-    pc = env->pc;
-#elif defined(TARGET_CRIS)
-    flags = 0;
-    cs_base = 0;
-    pc = env->pc;
-#else
-#error unsupported CPU
-#endif
+    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
     tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
-    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
-                         tb->flags != flags, 0)) {
+    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
+                 tb->flags != flags)) {
         tb = tb_find_slow(pc, cs_base, flags);
-        /* Note: we do it here to avoid a gcc bug on Mac OS X when
-           doing it in tb_find_slow */
-        if (tb_invalidated_flag) {
-            /* as some TB could have been invalidated because
-               of memory exceptions while generating the code, we
-               must recompute the hash index here */
-            T0 = 0;
-        }
     }
     return tb;
 }
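
tb_find_fast() consults the one-entry-per-slot tb_jmp_cache before falling back to
tb_find_slow(). The lookup/refill flow as a toy direct-mapped cache (the hash width,
types and the fallback function are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_BITS 4
    #define CACHE_SIZE (1 << CACHE_BITS)

    typedef struct TB { uint32_t pc; int id; } TB;

    static TB *jmp_cache[CACHE_SIZE];
    static TB pool[2] = { { 0x1000, 1 }, { 0x2000, 2 } };

    static unsigned hash(uint32_t pc) { return pc & (CACHE_SIZE - 1); }

    static TB *slow_lookup(uint32_t pc)          /* stands in for tb_find_slow() */
    {
        return pc == 0x1000 ? &pool[0] : &pool[1];
    }

    static TB *find_fast(uint32_t pc)
    {
        TB *tb = jmp_cache[hash(pc)];
        if (!tb || tb->pc != pc) {               /* miss: refill the slot */
            tb = slow_lookup(pc);
            jmp_cache[hash(pc)] = tb;
        }
        return tb;
    }

    int main(void)
    {
        printf("first lookup:  id=%d (slow path)\n", find_fast(0x1000)->id);
        printf("second lookup: id=%d (cache hit)\n", find_fast(0x1000)->id);
        return 0;
    }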
|
|
-#define BREAK_CHAIN T0 = 0
+static CPUDebugExcpHandler *debug_excp_handler;
+
+CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
+{
+    CPUDebugExcpHandler *old_handler = debug_excp_handler;
+
+    debug_excp_handler = handler;
+    return old_handler;
+}
+
+static void cpu_handle_debug_exception(CPUState *env)
+{
+    CPUWatchpoint *wp;
+
+    if (!env->watchpoint_hit)
+        TAILQ_FOREACH(wp, &env->watchpoints, entry)
+            wp->flags &= ~BP_WATCHPOINT_HIT;
+
+    if (debug_excp_handler)
+        debug_excp_handler(env);
+}
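
cpu_set_debug_excp_handler() uses the usual register-and-return-the-previous-handler
convention, so a caller can chain to or later restore the old handler. Reduced to a
standalone toy (the handler type and names are placeholders, not QEMU's):

    #include <stdio.h>

    typedef void DebugHandler(int excp);

    static DebugHandler *debug_handler;

    static DebugHandler *set_debug_handler(DebugHandler *h)
    {
        DebugHandler *old = debug_handler;
        debug_handler = h;
        return old;                     /* caller may chain or restore */
    }

    static void gdb_handler(int excp) { printf("debug handler sees excp=%d\n", excp); }

    int main(void)
    {
        DebugHandler *old = set_debug_handler(gdb_handler);
        if (debug_handler)
            debug_handler(1);           /* EXCP_DEBUG-style dispatch */
        set_debug_handler(old);         /* restore on teardown */
        return 0;
    }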
|
|
 /* main execution loop */
 
@@ -288 +212 @@ int cpu_exec(CPUState *env1)
 {
 #define DECLARE_HOST_REGS 1
 #include "hostregs_helper.h"
-#if defined(TARGET_SPARC)
-#if defined(reg_REGWPTR)
-    uint32_t *saved_regwptr;
-#endif
-#endif
     int ret, interrupt_request;
-    void (*gen_func)(void);
     TranslationBlock *tb;
     uint8_t *tc_ptr;
+    unsigned long next_tb;
 
     if (cpu_halted(env1) == EXCP_HALTED)
         return EXCP_HALTED;
@@ -307 +226 @@ int cpu_exec(CPUState *env1)
 #define SAVE_HOST_REGS 1
 #include "hostregs_helper.h"
     env = env1;
-    SAVE_GLOBALS();
 
     env_to_regs();
 #if defined(TARGET_I386)
@@ -317 +235 @@ int cpu_exec(CPUState *env1)
     CC_OP = CC_OP_EFLAGS;
     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_SPARC)
-#if defined(reg_REGWPTR)
-    saved_regwptr = REGWPTR;
-#endif
 #elif defined(TARGET_M68K)
     env->cc_op = CC_OP_FLAGS;
     env->cc_dest = env->sr & 0xf;
@@ -345 +260 @@ int cpu_exec(CPUState *env1)
             if (env->exception_index >= EXCP_INTERRUPT) {
                 /* exit request from the cpu execution loop */
                 ret = env->exception_index;
+                if (ret == EXCP_DEBUG)
+                    cpu_handle_debug_exception(env);
                 break;
-            } else if (env->user_mode_only) {
+            } else {
+#if defined(CONFIG_USER_ONLY)
                 /* if user mode only, we simulate a fake exception
                    which will be handled outside the cpu execution
                    loop */
@@ -355 +273 @@ int cpu_exec(CPUState *env1)
                                       env->exception_is_int,
                                       env->error_code,
                                       env->exception_next_eip);
+                /* successfully delivered */
+                env->old_exception = -1;
 #endif
                 ret = env->exception_index;
                 break;
-            } else {
+#else
 #if defined(TARGET_I386)
                 /* simulate a real cpu exception. On i386, it can
                    trigger new exceptions, but we do not handle
@@ -374 +294 @@ int cpu_exec(CPUState *env1)
 #elif defined(TARGET_MIPS)
                 do_interrupt(env);
 #elif defined(TARGET_SPARC)
-                do_interrupt(env->exception_index);
+                do_interrupt(env);
 #elif defined(TARGET_ARM)
                 do_interrupt(env);
 #elif defined(TARGET_SH4)
@@ -386 +306 @@ int cpu_exec(CPUState *env1)
 #elif defined(TARGET_M68K)
                 do_interrupt(0);
 #endif
+#endif
             }
             env->exception_index = -1;
         }
 #ifdef USE_KQEMU
         if (kqemu_is_ok(env) && env->interrupt_request == 0) {
             int ret;
-            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
+            env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
             ret = kqemu_cpu_exec(env);
             /* put eflags in CPU temporary format */
             CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
@@ -415 +336 @@ int cpu_exec(CPUState *env1)
         }
 #endif
 
-        T0 = 0; /* force lookup of first TB */
+        if (kvm_enabled()) {
+            kvm_cpu_exec(env);
+            longjmp(env->jmp_env, 1);
+        }
+
+        next_tb = 0; /* force lookup of first TB */
         for(;;) {
-            SAVE_GLOBALS();
             interrupt_request = env->interrupt_request;
-            if (__builtin_expect(interrupt_request, 0)
-#if defined(TARGET_I386)
-                && env->hflags & HF_GIF_MASK
-#endif
-                ) {
+            if (unlikely(interrupt_request)) {
+                if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+                    /* Mask out external interrupts for this step. */
+                    interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                           CPU_INTERRUPT_FIQ |
+                                           CPU_INTERRUPT_SMI |
+                                           CPU_INTERRUPT_NMI);
+                }
                 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                     env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                     env->exception_index = EXCP_DEBUG;
@@ -439 +367 @@ int cpu_exec(CPUState *env1)
                 }
 #endif
 #if defined(TARGET_I386)
-                if ((interrupt_request & CPU_INTERRUPT_SMI) &&
-                    !(env->hflags & HF_SMM_MASK)) {
-                    svm_check_intercept(SVM_EXIT_SMI);
-                    env->interrupt_request &= ~CPU_INTERRUPT_SMI;
-                    do_smm_enter();
-                    BREAK_CHAIN;
-                } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                           (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
-                           !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
-                    int intno;
-                    svm_check_intercept(SVM_EXIT_INTR);
-                    env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
-                    intno = cpu_get_pic_interrupt(env);
-                    if (loglevel & CPU_LOG_TB_IN_ASM) {
-                        fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
-                    }
-                    do_interrupt(intno, 0, 0, 0, 1);
-                    /* ensure that no TB jump will be modified as
-                       the program flow was changed */
-                    BREAK_CHAIN;
+                if (env->hflags2 & HF2_GIF_MASK) {
+                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+                        !(env->hflags & HF_SMM_MASK)) {
+                        svm_check_intercept(SVM_EXIT_SMI);
+                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
+                        do_smm_enter();
+                        next_tb = 0;
+                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                               !(env->hflags2 & HF2_NMI_MASK)) {
+                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
+                        env->hflags2 |= HF2_NMI_MASK;
+                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
+                        next_tb = 0;
+                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                               (((env->hflags2 & HF2_VINTR_MASK) &&
+                                 (env->hflags2 & HF2_HIF_MASK)) ||
+                                (!(env->hflags2 & HF2_VINTR_MASK) &&
+                                 (env->eflags & IF_MASK &&
+                                  !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+                        int intno;
+                        svm_check_intercept(SVM_EXIT_INTR);
+                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
+                        intno = cpu_get_pic_interrupt(env);
+                        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
+                        do_interrupt(intno, 0, 0, 0, 1);
+                        /* ensure that no TB jump will be modified as
+                           the program flow was changed */
+                        next_tb = 0;
 #if !defined(CONFIG_USER_ONLY)
                     } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
-                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
-                        int intno;
-                        /* FIXME: this should respect TPR */
-                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
-                        svm_check_intercept(SVM_EXIT_VINTR);
+                               (env->eflags & IF_MASK) &&
+                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+                        int intno;
+                        /* FIXME: this should respect TPR */
+                        svm_check_intercept(SVM_EXIT_VINTR);
                         intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
-                        if (loglevel & CPU_LOG_TB_IN_ASM)
-                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
-                        do_interrupt(intno, 0, 0, -1, 1);
-                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
-                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
-                        BREAK_CHAIN;
+                        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
+                        do_interrupt(intno, 0, 0, 0, 1);
+                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+                        next_tb = 0;
 #endif
+                    }
                 }
 #elif defined(TARGET_PPC)
 #if 0
@@ -485 +420 @@ int cpu_exec(CPUState *env1)
                     ppc_hw_interrupt(env);
                     if (env->pending_interrupts == 0)
                         env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_MIPS)
                 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -498 +433 @@ int cpu_exec(CPUState *env1)
                     env->exception_index = EXCP_EXT_INTERRUPT;
                     env->error_code = 0;
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_SPARC)
                 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -510 +445 @@ int cpu_exec(CPUState *env1)
                          (pil == 15 || pil > env->psrpil)) ||
                         type != TT_EXTINT) {
                         env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-                        do_interrupt(env->interrupt_index);
+                        env->exception_index = env->interrupt_index;
+                        do_interrupt(env);
                         env->interrupt_index = 0;
 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                         cpu_check_irqs(env);
 #endif
-                        BREAK_CHAIN;
+                        next_tb = 0;
                     }
                 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                     //do_interrupt(0, 0, 0, 0, 0);
@@ -526 +462 @@ int cpu_exec(CPUState *env1)
                     && !(env->uncached_cpsr & CPSR_F)) {
                     env->exception_index = EXCP_FIQ;
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
                 /* ARMv7-M interrupt return works by loading a magic value
                    into the PC.  On real hardware the load causes the
@@ -542 +478 @@ int cpu_exec(CPUState *env1)
                     || !(env->uncached_cpsr & CPSR_I))) {
                     env->exception_index = EXCP_IRQ;
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_SH4)
                 if (interrupt_request & CPU_INTERRUPT_HARD) {
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_ALPHA)
                 if (interrupt_request & CPU_INTERRUPT_HARD) {
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_CRIS)
-                if (interrupt_request & CPU_INTERRUPT_HARD) {
+                if (interrupt_request & CPU_INTERRUPT_HARD
+                    && (env->pregs[PR_CCS] & I_FLAG)) {
+                    env->exception_index = EXCP_IRQ;
                     do_interrupt(env);
-                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-                    BREAK_CHAIN;
+                    next_tb = 0;
+                }
+                if (interrupt_request & CPU_INTERRUPT_NMI
+                    && (env->pregs[PR_CCS] & M_FLAG)) {
+                    env->exception_index = EXCP_NMI;
+                    do_interrupt(env);
+                    next_tb = 0;
                 }
 #elif defined(TARGET_M68K)
                 if (interrupt_request & CPU_INTERRUPT_HARD
@@ -571 +514 @@ int cpu_exec(CPUState *env1)
                        first signalled. */
                     env->exception_index = env->pending_vector;
                     do_interrupt(1);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #endif
                 /* Don't use the cached interupt_request value,
@@ -580 +523 @@ int cpu_exec(CPUState *env1)
                     env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                     /* ensure that no TB jump will be modified as
                        the program flow was changed */
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
                 if (interrupt_request & CPU_INTERRUPT_EXIT) {
                     env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
@@ -589 +532 @@ int cpu_exec(CPUState *env1)
             }
         }
 #ifdef DEBUG_EXEC
-        if ((loglevel & CPU_LOG_TB_CPU)) {
+        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
             /* restore flags in standard format */
             regs_to_env();
 #if defined(TARGET_I386)
-            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
-            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+            env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
+            log_cpu_state(env, X86_DUMP_CCOP);
             env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_ARM)
-            cpu_dump_state(env, logfile, fprintf, 0);
+            log_cpu_state(env, 0);
 #elif defined(TARGET_SPARC)
-            REGWPTR = env->regbase + (env->cwp * 16);
-            env->regwptr = REGWPTR;
-            cpu_dump_state(env, logfile, fprintf, 0);
+            log_cpu_state(env, 0);
 #elif defined(TARGET_PPC)
-            cpu_dump_state(env, logfile, fprintf, 0);
+            log_cpu_state(env, 0);
 #elif defined(TARGET_M68K)
             cpu_m68k_flush_flags(env, env->cc_op);
             env->cc_op = CC_OP_FLAGS;
             env->sr = (env->sr & 0xffe0)
                       | env->cc_dest | (env->cc_x << 4);
-            cpu_dump_state(env, logfile, fprintf, 0);
+            log_cpu_state(env, 0);
 #elif defined(TARGET_MIPS)
-            cpu_dump_state(env, logfile, fprintf, 0);
+            log_cpu_state(env, 0);
 #elif defined(TARGET_SH4)
-            cpu_dump_state(env, logfile, fprintf, 0);
+            log_cpu_state(env, 0);
 #elif defined(TARGET_ALPHA)
-            cpu_dump_state(env, logfile, fprintf, 0);
+            log_cpu_state(env, 0);
 #elif defined(TARGET_CRIS)
-            cpu_dump_state(env, logfile, fprintf, 0);
+            log_cpu_state(env, 0);
 #else
 #error unsupported target CPU
 #endif
         }
 #endif
+        spin_lock(&tb_lock);
         tb = tb_find_fast();
-#ifdef DEBUG_EXEC
-        if ((loglevel & CPU_LOG_EXEC)) {
-            fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
-                    (long)tb->tc_ptr, tb->pc,
-                    lookup_symbol(tb->pc));
-        }
+        /* Note: we do it here to avoid a gcc bug on Mac OS X when
+           doing it in tb_find_slow */
+        if (tb_invalidated_flag) {
+            /* as some TB could have been invalidated because
+               of memory exceptions while generating the code, we
+               must recompute the hash index here */
+            next_tb = 0;
+            tb_invalidated_flag = 0;
+        }
+#ifdef DEBUG_EXEC
+        qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
+                      (long)tb->tc_ptr, tb->pc,
+                      lookup_symbol(tb->pc));
 #endif
-        RESTORE_GLOBALS();
         /* see if we can patch the calling TB. When the TB
            spans two pages, we cannot safely do a direct
            jump. */
         {
-            if (T0 != 0 &&
-#if USE_KQEMU
+            if (next_tb != 0 &&
+#ifdef USE_KQEMU
                 (env->kqemu_enabled != 2) &&
 #endif
                 tb->page_addr[1] == -1) {
-                spin_lock(&tb_lock);
-                tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
-                spin_unlock(&tb_lock);
+                tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
             }
         }
-        tc_ptr = tb->tc_ptr;
+        spin_unlock(&tb_lock);
         env->current_tb = tb;
+
+        /* cpu_interrupt might be called while translating the
+           TB, but before it is linked into a potentially
+           infinite loop and becomes env->current_tb. Avoid
+           starting execution if there is a pending interrupt. */
+        if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
+            env->current_tb = NULL;
+
+        while (env->current_tb) {
+            tc_ptr = tb->tc_ptr;
             /* execute the generated code */
-            gen_func = (void *)tc_ptr;
-#if defined(__sparc__)
-            __asm__ __volatile__("call %0\n\t"
-                                 "mov %%o7,%%i0"
-                                 : /* no outputs */
-                                 : "r" (gen_func)
-                                 : "i0", "i1", "i2", "i3", "i4", "i5",
-                                   "o0", "o1", "o2", "o3", "o4", "o5",
-                                   "l0", "l1", "l2", "l3", "l4", "l5",
-                                   "l6", "l7");
-#elif defined(__arm__)
-            asm volatile ("mov pc, %0\n\t"
-                          ".global exec_loop\n\t"
-                          "exec_loop:\n\t"
-                          : /* no outputs */
-                          : "r" (gen_func)
-                          : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
-#elif defined(__ia64)
-            struct fptr {
-                void *ip;
-                void *gp;
-            } fp;
-
-            fp.ip = tc_ptr;
-            fp.gp = code_gen_buffer + 2 * (1 << 20);
-            (*(void (*)(void)) &fp)();
-#else
-            gen_func();
-#endif
-            env->current_tb = NULL;
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
+#undef env
+            env = cpu_single_env;
+#define env cpu_single_env
+#endif
+            next_tb = tcg_qemu_tb_exec(tc_ptr);
+            env->current_tb = NULL;
+            if ((next_tb & 3) == 2) {
+                /* Instruction counter expired.  */
+                int insns_left;
+                tb = (TranslationBlock *)(long)(next_tb & ~3);
+                /* Restore PC.  */
+                cpu_pc_from_tb(env, tb);
+                insns_left = env->icount_decr.u32;
+                if (env->icount_extra && insns_left >= 0) {
+                    /* Refill decrementer and continue execution.  */
+                    env->icount_extra += insns_left;
+                    if (env->icount_extra > 0xffff) {
+                        insns_left = 0xffff;
+                    } else {
+                        insns_left = env->icount_extra;
+                    }
+                    env->icount_extra -= insns_left;
+                    env->icount_decr.u16.low = insns_left;
+                } else {
+                    if (insns_left > 0) {
+                        /* Execute remaining instructions.  */
+                        cpu_exec_nocache(insns_left, tb);
+                    }
+                    env->exception_index = EXCP_INTERRUPT;
+                    next_tb = 0;
+                    cpu_loop_exit();
+                }
+            }
+        }
         /* reset soft MMU for next block (it can currently
            only be set by a memory fault) */
-#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
-        if (env->hflags & HF_SOFTMMU_MASK) {
-            env->hflags &= ~HF_SOFTMMU_MASK;
-            /* do not allow linking to another block */
-            T0 = 0;
-        }
-#endif
 #if defined(USE_KQEMU)
 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
         if (kqemu_is_ok(env) &&
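
The icount branch added above refills a 16-bit decrementer from icount_extra whenever
it expires, so a TB run never gets more than 0xffff instructions of budget at once.
The refill arithmetic as a runnable toy (the driving loop and totals are invented):

    #include <stdio.h>

    static int decr;        /* mirrors icount_decr.u16.low, the per-run budget */
    static long extra;      /* mirrors icount_extra, instructions beyond the budget */

    static void refill(void)
    {
        long left = extra + decr;                     /* instructions still owed */
        decr = left > 0xffff ? 0xffff : (int)left;    /* clamp to 16 bits */
        extra = left - decr;
    }

    int main(void)
    {
        extra = 200000;
        while (extra + decr > 0) {
            refill();
            printf("run %d insns (extra now %ld)\n", decr, extra);
            decr = 0;                  /* pretend the TBs consumed the budget */
        }
        return 0;
    }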
@@ -704 +658 @@ int cpu_exec(CPUState *env1)
 
 #if defined(TARGET_I386)
     /* restore flags in standard format */
-    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
+    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
 #elif defined(TARGET_ARM)
     /* XXX: Save/restore host fpu exception state?.  */
 #elif defined(TARGET_SPARC)
-#if defined(reg_REGWPTR)
-    REGWPTR = saved_regwptr;
-#endif
 #elif defined(TARGET_PPC)
 #elif defined(TARGET_M68K)
     cpu_m68k_flush_flags(env, env->cc_op);
@@ -727 +678 @@ int cpu_exec(CPUState *env1)
 #endif
 
     /* restore global registers */
-    RESTORE_GLOBALS();
 #include "hostregs_helper.h"
 
     /* fail safe : never use cpu_single_env outside cpu_exec() */
@@ -761 +711 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
         cpu_x86_load_seg_cache(env, seg_reg, selector,
                                (selector << 4), 0xffff, 0);
     } else {
-        load_seg(seg_reg, selector);
+        helper_load_seg(seg_reg, selector);
     }
     env = saved_env;
 }
@@ -884 +834 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
            do it (XXX: use sigsetjmp) */
         sigprocmask(SIG_SETMASK, old_set, NULL);
         cpu_loop_exit();
+        /* never comes here */
+        return 1;
     }
 }
 #elif defined(TARGET_SPARC)
 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
@@ -920 +872 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
            do it (XXX: use sigsetjmp) */
         sigprocmask(SIG_SETMASK, old_set, NULL);
         cpu_loop_exit();
+        /* never comes here */
+        return 1;
     }
 }
 #elif defined (TARGET_PPC)
 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
@@ -962 +916 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         /* we restore the process signal mask as the sigreturn should
            do it (XXX: use sigsetjmp) */
         sigprocmask(SIG_SETMASK, old_set, NULL);
-        do_raise_exception_err(env->exception_index, env->error_code);
+        cpu_loop_exit();
     } else {
         /* activate soft MMU for this block */
         cpu_resume_from_signal(env, puc);
@@ -1051 +1005 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         /* we restore the process signal mask as the sigreturn should
            do it (XXX: use sigsetjmp) */
         sigprocmask(SIG_SETMASK, old_set, NULL);
-        do_raise_exception_err(env->exception_index, env->error_code);
+        cpu_loop_exit();
     } else {
         /* activate soft MMU for this block */
         cpu_resume_from_signal(env, puc);
@@ -1182 +1136 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
            a virtual CPU fault */
         cpu_restore_state(tb, env, pc, puc);
     }
-#if 0
-    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
-           env->nip, env->error_code, tb);
-#endif
     /* we restore the process signal mask as the sigreturn should
        do it (XXX: use sigsetjmp) */
     sigprocmask(SIG_SETMASK, old_set, NULL);
@@ -1236 +1186 @@ int cpu_signal_handler(int host_signum, void *pinfo,
 
 #elif defined(__x86_64__)
 
+#ifdef __NetBSD__
+#define REG_ERR _REG_ERR
+#define REG_TRAPNO _REG_TRAPNO
+
+#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
+#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
+#else
+#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
+#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
+#endif
+
 int cpu_signal_handler(int host_signum, void *pinfo,
                        void *puc)
 {
     siginfo_t *info = pinfo;
-    struct ucontext *uc = puc;
     unsigned long pc;
+#ifdef __NetBSD__
+    ucontext_t *uc = puc;
+#else
+    struct ucontext *uc = puc;
+#endif
 
-    pc = uc->uc_mcontext.gregs[REG_RIP];
+    pc = QEMU_UC_MACHINE_PC(uc);
     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
-                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
-                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
+                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
+                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                              &uc->uc_sigmask, puc);
 }
 
-#elif defined(__powerpc__)
+#elif defined(_ARCH_PPC)
 
 /***********************************************************************
  * signal context platform-specific definitions
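
The NetBSD hunk above hides two incompatible mcontext layouts behind a single macro
spelling. The same idiom, shrunk to a compilable toy with fake structures (these types
do not match any real libc):

    #include <stdio.h>

    struct fake_mcontext { long gregs[32]; };
    struct fake_ucontext { struct fake_mcontext uc_mcontext; };

    #define REG_RIP 16
    #define UC_GREGS(uc, reg) ((uc)->uc_mcontext.gregs[(reg)])
    #define UC_PC(uc)         UC_GREGS(uc, REG_RIP)

    int main(void)
    {
        struct fake_ucontext uc = {0};
        UC_GREGS(&uc, REG_RIP) = 0x400000;
        printf("pc = 0x%lx\n", UC_PC(&uc));   /* one spelling for every platform */
        return 0;
    }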
@@ -1359 +1324 @@ int cpu_signal_handler(int host_signum, void *pinfo,
                        void *puc)
 {
     siginfo_t *info = pinfo;
-    uint32_t *regs = (uint32_t *)(info + 1);
-    void *sigmask = (regs + 20);
-    unsigned long pc;
     int is_write;
     uint32_t insn;
+#if !defined(__arch64__) || defined(HOST_SOLARIS)
+    uint32_t *regs = (uint32_t *)(info + 1);
+    void *sigmask = (regs + 20);
     /* XXX: is there a standard glibc define ? */
-    pc = regs[1];
+    unsigned long pc = regs[1];
+#else
+#ifdef __linux__
+    struct sigcontext *sc = puc;
+    unsigned long pc = sc->sigc_regs.tpc;
+    void *sigmask = (void *)sc->sigc_mask;
+#elif defined(__OpenBSD__)
+    struct sigcontext *uc = puc;
+    unsigned long pc = uc->sc_pc;
+    void *sigmask = (void *)(long)uc->sc_mask;
+#endif
+#endif
 
     /* XXX: need kernel patch to get write flag faster */
     is_write = 0;
     insn = *(uint32_t *)pc;
@@ -1397 +1373 @@ int cpu_signal_handler(int host_signum, void *pinfo,
     unsigned long pc;
     int is_write;
 
+#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
     pc = uc->uc_mcontext.gregs[R15];
+#else
+    pc = uc->uc_mcontext.arm_pc;
+#endif
     /* XXX: compute is_write */
     is_write = 0;
     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
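
Every cpu_signal_handler() variant in this file exists to dig the faulting PC and
address out of a SIGSEGV context. The underlying POSIX mechanism, demonstrated
standalone and independent of QEMU (Linux-style mmap/mprotect; printf in a signal
handler is not async-signal-safe, but is fine for a demo):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static char *page;
    static size_t page_size;

    static void segv_handler(int sig, siginfo_t *info, void *puc)
    {
        (void)sig; (void)puc;
        printf("fault at %p\n", info->si_addr);
        /* make the page writable so the faulting store restarts and succeeds */
        mprotect(page, page_size, PROT_READ | PROT_WRITE);
    }

    int main(void)
    {
        struct sigaction act;

        page_size = (size_t)sysconf(_SC_PAGESIZE);
        page = mmap(NULL, page_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
            return 1;

        memset(&act, 0, sizeof(act));
        act.sa_sigaction = segv_handler;
        act.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &act, NULL);

        page[0] = 42;      /* faults once, handler unprotects, the store retries */
        printf("wrote %d\n", page[0]);
        return 0;
    }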
@@ -1490 +1470 @@ int cpu_signal_handler(int host_signum, void *pinfo,
                              is_write, &uc->uc_sigmask, puc);
 }
 
+#elif defined(__hppa__)
+
+int cpu_signal_handler(int host_signum, void *pinfo,
+                       void *puc)
+{
+    struct siginfo *info = pinfo;
+    struct ucontext *uc = puc;
+    unsigned long pc;
+    int is_write;
+
+    pc = uc->uc_mcontext.sc_iaoq[0];
+    /* FIXME: compute is_write */
+    is_write = 0;
+    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+                             is_write,
+                             &uc->uc_sigmask, puc);
+}
+
 #else
 
 #error host CPU specific signal handler needed