Diff for /qemu/exec-all.h between versions 1.1.1.14 and 1.1.1.15

version 1.1.1.14 (2018/04/24 19:16:37)  |  version 1.1.1.15 (2018/04/24 19:33:34)
Line 76: extern uint16_t gen_opc_icount[OPC_BUF_S…  |  Line 76: extern uint16_t gen_opc_icount[OPC_BUF_S…
   
 #include "qemu-log.h"  #include "qemu-log.h"
   
 void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);  void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
 void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);  void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
 void restore_state_to_opc(CPUState *env, struct TranslationBlock *tb,  void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                           int pc_pos);                            int pc_pos);
   
 void cpu_gen_init(void);  void cpu_gen_init(void);
 int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,  int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                  int *gen_code_size_ptr);                   int *gen_code_size_ptr);
 int cpu_restore_state(struct TranslationBlock *tb,  int cpu_restore_state(struct TranslationBlock *tb,
                       CPUState *env, unsigned long searched_pc);                        CPUArchState *env, uintptr_t searched_pc);
 void cpu_resume_from_signal(CPUState *env1, void *puc);  void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
 void cpu_io_recompile(CPUState *env, void *retaddr);  void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
 TranslationBlock *tb_gen_code(CPUState *env,   TranslationBlock *tb_gen_code(CPUArchState *env, 
                               target_ulong pc, target_ulong cs_base, int flags,                                target_ulong pc, target_ulong cs_base, int flags,
                               int cflags);                                int cflags);
 void cpu_exec_init(CPUState *env);  void cpu_exec_init(CPUArchState *env);
 void QEMU_NORETURN cpu_loop_exit(CPUState *env1);  void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
 int page_unprotect(target_ulong address, unsigned long pc, void *puc);  int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,  void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access);                                     int is_cpu_write_access);
 void tlb_flush_page(CPUState *env, target_ulong addr);  void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
 void tlb_flush(CPUState *env, int flush_global);                                int is_cpu_write_access);
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
 void tlb_set_page(CPUState *env, target_ulong vaddr,  /* cputlb.c */
   void tlb_flush_page(CPUArchState *env, target_ulong addr);
   void tlb_flush(CPUArchState *env, int flush_global);
   void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                   target_phys_addr_t paddr, int prot,                    target_phys_addr_t paddr, int prot,
                   int mmu_idx, target_ulong size);                    int mmu_idx, target_ulong size);
   void tb_invalidate_phys_addr(target_phys_addr_t addr);
   #else
   static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
   {
   }
   
   static inline void tlb_flush(CPUArchState *env, int flush_global)
   {
   }
 #endif  #endif
   
 #define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */  #define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */
Line 150  struct TranslationBlock { Line 162  struct TranslationBlock {
 #ifdef USE_DIRECT_JUMP  #ifdef USE_DIRECT_JUMP
     uint16_t tb_jmp_offset[2]; /* offset of jump instruction */      uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
 #else  #else
     unsigned long tb_next[2]; /* address of jump generated code */      uintptr_t tb_next[2]; /* address of jump generated code */
 #endif  #endif
     /* list of TBs jumping to this one. This is a circular list using      /* list of TBs jumping to this one. This is a circular list using
        the two least significant bits of the pointers to tell what is         the two least significant bits of the pointers to tell what is
Line 182  static inline unsigned int tb_phys_hash_ Line 194  static inline unsigned int tb_phys_hash_
 }  }
   
 void tb_free(TranslationBlock *tb);  void tb_free(TranslationBlock *tb);
 void tb_flush(CPUState *env);  void tb_flush(CPUArchState *env);
 void tb_link_page(TranslationBlock *tb,  void tb_link_page(TranslationBlock *tb,
                   tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);                    tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);  void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
Line 202  static inline void tb_set_jmp_target1(ui Line 214  static inline void tb_set_jmp_target1(ui
 void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);  void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
 #define tb_set_jmp_target1 ppc_tb_set_jmp_target  #define tb_set_jmp_target1 ppc_tb_set_jmp_target
 #elif defined(__i386__) || defined(__x86_64__)  #elif defined(__i386__) || defined(__x86_64__)
 static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)  static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
 {  {
     /* patch the branch destination */      /* patch the branch destination */
     *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);      *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
     /* no need to flush icache explicitly */      /* no need to flush icache explicitly */
 }  }
 #elif defined(__arm__)  #elif defined(__arm__)
 static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)  static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
 {  {
 #if !QEMU_GNUC_PREREQ(4, 1)  #if !QEMU_GNUC_PREREQ(4, 1)
     register unsigned long _beg __asm ("a1");      register unsigned long _beg __asm ("a1");
Line 237  static inline void tb_set_jmp_target1(un Line 249  static inline void tb_set_jmp_target1(un
 #endif  #endif
   
 static inline void tb_set_jmp_target(TranslationBlock *tb,  static inline void tb_set_jmp_target(TranslationBlock *tb,
                                      int n, unsigned long addr)                                       int n, uintptr_t addr)
 {  {
     unsigned long offset;      uint16_t offset = tb->tb_jmp_offset[n];
       tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
     offset = tb->tb_jmp_offset[n];  
     tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);  
 }  }
   
 #else  #else
   
 /* set the jump target */  /* set the jump target */
 static inline void tb_set_jmp_target(TranslationBlock *tb,  static inline void tb_set_jmp_target(TranslationBlock *tb,
                                      int n, unsigned long addr)                                       int n, uintptr_t addr)
 {  {
     tb->tb_next[n] = addr;      tb->tb_next[n] = addr;
 }  }
Line 262  static inline void tb_add_jump(Translati Line 272  static inline void tb_add_jump(Translati
     /* NOTE: this test is only needed for thread safety */      /* NOTE: this test is only needed for thread safety */
     if (!tb->jmp_next[n]) {      if (!tb->jmp_next[n]) {
         /* patch the native jump address */          /* patch the native jump address */
         tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);          tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
   
         /* add in TB jmp circular list */          /* add in TB jmp circular list */
         tb->jmp_next[n] = tb_next->jmp_first;          tb->jmp_next[n] = tb_next->jmp_first;
         tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));          tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
     }      }
 }  }
   
 TranslationBlock *tb_find_pc(unsigned long pc_ptr);  TranslationBlock *tb_find_pc(uintptr_t pc_ptr);
   
 #include "qemu-lock.h"  #include "qemu-lock.h"
   
Line 284  extern int tb_invalidated_flag; Line 294  extern int tb_invalidated_flag;
 /* Alpha and SH4 user mode emulations and Softmmu call GETPC().  /* Alpha and SH4 user mode emulations and Softmmu call GETPC().
    For all others, GETPC remains undefined (which makes TCI a little faster. */     For all others, GETPC remains undefined (which makes TCI a little faster. */
 # if defined(CONFIG_SOFTMMU) || defined(TARGET_ALPHA) || defined(TARGET_SH4)  # if defined(CONFIG_SOFTMMU) || defined(TARGET_ALPHA) || defined(TARGET_SH4)
 extern void *tci_tb_ptr;  extern uintptr_t tci_tb_ptr;
 #  define GETPC() tci_tb_ptr  #  define GETPC() tci_tb_ptr
 # endif  # endif
 #elif defined(__s390__) && !defined(__s390x__)  #elif defined(__s390__) && !defined(__s390x__)
 # define GETPC() ((void*)(((unsigned long)__builtin_return_address(0) & 0x7fffffffUL) - 1))  # define GETPC() \
       (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
 #elif defined(__arm__)  #elif defined(__arm__)
 /* Thumb return addresses have the low bit set, so we need to subtract two.  /* Thumb return addresses have the low bit set, so we need to subtract two.
    This is still safe in ARM mode because instructions are 4 bytes.  */     This is still safe in ARM mode because instructions are 4 bytes.  */
 # define GETPC() ((void *)((unsigned long)__builtin_return_address(0) - 2))  # define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
 #else  #else
 # define GETPC() ((void *)((unsigned long)__builtin_return_address(0) - 1))  # define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
 #endif  #endif
   
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
   
 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];  struct MemoryRegion *iotlb_to_region(target_phys_addr_t index);
 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];  uint64_t io_mem_read(struct MemoryRegion *mr, target_phys_addr_t addr,
 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];                       unsigned size);
   void io_mem_write(struct MemoryRegion *mr, target_phys_addr_t addr,
                     uint64_t value, unsigned size);
   
 void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,  void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
               void *retaddr);                uintptr_t retaddr);
   
 #include "softmmu_defs.h"  #include "softmmu_defs.h"
   
 #define ACCESS_TYPE (NB_MMU_MODES + 1)  #define ACCESS_TYPE (NB_MMU_MODES + 1)
 #define MEMSUFFIX _code  #define MEMSUFFIX _code
   #ifndef CONFIG_TCG_PASS_AREG0
 #define env cpu_single_env  #define env cpu_single_env
   #endif
   
 #define DATA_SIZE 1  #define DATA_SIZE 1
 #include "softmmu_header.h"  #include "softmmu_header.h"
Line 331  void tlb_fill(CPUState *env1, target_ulo Line 346  void tlb_fill(CPUState *env1, target_ulo
 #endif  #endif
   
 #if defined(CONFIG_USER_ONLY)  #if defined(CONFIG_USER_ONLY)
 static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)  static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 {  {
     return addr;      return addr;
 }  }
 #else  #else
 /* NOTE: this function can trigger an exception */  /* cputlb.c */
 /* NOTE2: the returned address is not exactly the physical address: it  tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
    is the offset relative to phys_ram_base */  
 static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)  
 {  
     int mmu_idx, page_index, pd;  
     void *p;  
   
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);  
     mmu_idx = cpu_mmu_index(env1);  
     if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=  
                  (addr & TARGET_PAGE_MASK))) {  
         ldub_code(addr);  
     }  
     pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;  
     if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {  
 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)  
         cpu_unassigned_access(env1, addr, 0, 1, 0, 4);  
 #else  
         cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);  
 #endif  
     }  
     p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);  
     return qemu_ram_addr_from_host_nofail(p);  
 }  
 #endif  #endif
   
 typedef void (CPUDebugExcpHandler)(CPUState *env);  typedef void (CPUDebugExcpHandler)(CPUArchState *env);
   
 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);  CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
   
Line 375  extern volatile sig_atomic_t exit_reques Line 367  extern volatile sig_atomic_t exit_reques
   
 /* Deterministic execution requires that IO only be performed on the last  /* Deterministic execution requires that IO only be performed on the last
    instruction of a TB so that interrupts take effect immediately.  */     instruction of a TB so that interrupts take effect immediately.  */
 static inline int can_do_io(CPUState *env)  static inline int can_do_io(CPUArchState *env)
 {  {
     if (!use_icount) {      if (!use_icount) {
         return 1;          return 1;

Legend: left column shows lines removed from v.1.1.1.14; right column shows lines added in v.1.1.1.15; lines present in both columns are unchanged or changed in place.


unix.superglobalmegacorp.com