qemu/exec-all.h, revision 1.1.1.3

/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* allow inspection of the translation results - the slowdown should be
   negligible, so we leave it enabled */
#define DEBUG_DISAS

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s)   tostring(s)
#define tostring(s)    #s
#endif
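
/* For example, glue(ldl, MEMSUFFIX) pastes to ldl_code when MEMSUFFIX is
   defined as _code. The extra level of indirection makes macro arguments
   expand first: with "#define N 4", stringify(N) yields "4", whereas a
   direct #s would yield "N". */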

#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif
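
/* This fallback lets __builtin_expect() be used unconditionally (see
   get_phys_addr_code() below); on pre-3.x GCC it simply evaluates the
   expression with no branch hint. */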

#ifdef __i386__
#define REGPARM(n) __attribute__((regparm(n)))
#else
#define REGPARM(n)
#endif

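/* Illustrative use of REGPARM (sketch only; example_helper is a
   hypothetical name, not declared anywhere in QEMU): on an x86 host the
   first three integer arguments travel in registers, elsewhere the
   attribute expands to nothing. */
#if 0
int REGPARM(3) example_helper(int a, int b, int c);
#endif
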
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
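/* OPC_MAX_SIZE leaves MAX_OP_PER_INSTR slots of headroom, so a translator
   can check for opcode buffer overflow once per target instruction
   instead of after every generated micro-op. */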

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif

extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu);
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int is_user, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
}
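
/* Note that tlb_set_page() preserves the historical behaviour in which
   any readable page is also executable; callers that need to control
   execute permission separately use tlb_set_page_exec() directly. */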

#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/
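
/* For example, a signed 26 bit displacement (arm) reaches +/- 32 MB
   (2^25 bytes) while a signed 23 bit one (alpha) reaches only +/- 4 MB,
   which is why the alpha buffer below is limited to 2 MB. */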

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE     (4 * 1024 * 1024)     /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (16 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

typedef struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
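
/* Illustrative sketch (not part of the original header): decoding the
   tagged pointers of the jmp_first/jmp_next list described above. Each
   entry stores a TB pointer with the jump slot in its two low bits; the
   tag value 2 marks the jmp_first sentinel that closes the circle. */
#if 0
static void example_walk_jmp_list(TranslationBlock *tb)
{
    TranslationBlock *ptb = tb->jmp_first;
    for (;;) {
        int n = (long)ptb & 3;                      /* extract the tag */
        ptb = (TranslationBlock *)((long)ptb & ~3); /* strip it */
        if (n == 2)
            break;             /* back at the sentinel: list exhausted */
        /* here ptb is a TB whose jump slot n targets tb */
        ptb = ptb->jmp_next[n];
    }
}
#endif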

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
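
/* Illustrative lookup (sketch under the simplifying assumption that
   matching on pc alone is enough; the real lookup in exec.c also
   compares cs_base, flags and the physical page): */
#if 0
static TranslationBlock *example_find_tb(target_ulong pc,
                                         unsigned long phys_pc)
{
    TranslationBlock *tb = tb_phys_hash[tb_phys_hash_func(phys_pc)];
    while (tb != NULL && tb->pc != pc)
        tb = tb->phys_hash_next;
    return tb;
}
#endif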

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;

#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
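
/* The i386 version stores a rel32 operand: the displacement of a
   "jmp rel32" is relative to the end of the instruction, and jmp_addr
   points just past the opcode byte, hence the "+ 4". x86 also keeps the
   instruction cache coherent with data writes for self-modifying code,
   so no flush sequence is needed, unlike on PowerPC above. */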

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
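
/* Illustrative chaining call (sketch modelled on the main execution
   loop; T0 here stands for the value left behind by the generated code,
   i.e. the source TB pointer with the jump slot in its two low bits): */
#if 0
    if (T0 != 0)
        tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
#endif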

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)

#endif
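
/* The portable GOTO_TB relies on two GCC extensions: labels as values
   ("&&label") and computed goto. A minimal standalone sketch of the
   idiom (hypothetical function, not used by QEMU): */
#if 0
static int example_computed_goto(int n)
{
    static void *targets[2] = { &&t0, &&t1 };
    goto *targets[n & 1];
t0: return 0;
t1: return 1;
}
#endif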

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __x86_64__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0:  mov 1,%2\n"
                          "    ldl_l %0,%1\n"
                          "    stl_c %2,%1\n"
                          "    beq %2,1f\n"
                          ".subsection 2\n"
                          "1:  br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub    [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    /* the tas must operate on the lock word *p itself, not on the
       storage of the pointer variable p */
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret), "+m" (*p)
                         :
                         : "cc", "memory");
    return ret;
}
#endif

#ifdef __ia64
#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
/* full system emulation runs in a single thread, so the locks can be
   no-ops */
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
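
/* Illustrative usage (sketch; example_locked_work is a hypothetical
   function): */
#if 0
static void example_locked_work(void)
{
    static spinlock_t lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&lock);       /* spins on testandset() in user-only mode */
    /* ... critical section ... */
    spin_unlock(&lock);
}
#endif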

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

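/* Each inclusion of softmmu_header.h below instantiates the memory
   access helpers for one operand size; with MEMSUFFIX set to _code this
   generates the ldub_code()/lduw_code()/ldl_code()/ldq_code() family
   used for instruction fetch (see get_phys_addr_code() below).
   ACCESS_TYPE 3 marks these as code accesses, and the temporary
   "#define env cpu_single_env" makes the helpers operate on the global
   current-CPU pointer. */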
#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
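/* On a TLB miss, the ldub_code() fetch below fills the entry through
   tlb_fill(), so when it returns the addr_code and addend fields are
   valid (or an exception was raised). */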
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
    is_user = ((env->sr & SR_MD) == 0);
#else
#error unimplemented CPU
#endif
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM) {
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
    }
    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif
                    579: 
                    580: 
                    581: #ifdef USE_KQEMU
1.1.1.3 ! root      582: #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
        !           583: 
1.1       root      584: int kqemu_init(CPUState *env);
                    585: int kqemu_cpu_exec(CPUState *env);
                    586: void kqemu_flush_page(CPUState *env, target_ulong addr);
                    587: void kqemu_flush(CPUState *env, int global);
                    588: void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
1.1.1.3 ! root      589: void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
1.1       root      590: void kqemu_cpu_interrupt(CPUState *env);
1.1.1.3 ! root      591: void kqemu_record_dump(void);
1.1       root      592: 
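/* kqemu can only take over execution for protected mode code, outside
   virtual-8086 mode and an interrupt shadow, with interrupts enabled;
   unless kqemu is enabled in full (kernel) mode (kqemu_enabled == 2),
   only user code (CPL 3 without I/O privilege) is eligible. */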
static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK) &&
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif
