Diff for /qemu/exec.c between versions 1.1.1.6 and 1.1.1.7

version 1.1.1.6, 2018/04/24 16:47:27 version 1.1.1.7, 2018/04/24 16:50:47
Line 15 Line 15
  *   *
  * You should have received a copy of the GNU Lesser General Public   * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software   * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
  */   */
 #include "config.h"  #include "config.h"
 #ifdef _WIN32  #ifdef _WIN32
Line 35 Line 35
   
 #include "cpu.h"  #include "cpu.h"
 #include "exec-all.h"  #include "exec-all.h"
   #include "qemu-common.h"
   #include "tcg.h"
   #include "hw/hw.h"
   #include "osdep.h"
   #include "kvm.h"
 #if defined(CONFIG_USER_ONLY)  #if defined(CONFIG_USER_ONLY)
 #include <qemu.h>  #include <qemu.h>
 #endif  #endif
Line 56 Line 61
 #undef DEBUG_TB_CHECK  #undef DEBUG_TB_CHECK
 #endif  #endif
   
 /* threshold to flush the translated code buffer */  
 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())  
   
 #define SMC_BITMAP_USE_THRESHOLD 10  #define SMC_BITMAP_USE_THRESHOLD 10
   
 #define MMAP_AREA_START        0x00000000  #define MMAP_AREA_START        0x00000000
Line 73 Line 75
 #define TARGET_VIRT_ADDR_SPACE_BITS 42  #define TARGET_VIRT_ADDR_SPACE_BITS 42
 #elif defined(TARGET_PPC64)  #elif defined(TARGET_PPC64)
 #define TARGET_PHYS_ADDR_SPACE_BITS 42  #define TARGET_PHYS_ADDR_SPACE_BITS 42
   #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
   #define TARGET_PHYS_ADDR_SPACE_BITS 42
   #elif defined(TARGET_I386) && !defined(USE_KQEMU)
   #define TARGET_PHYS_ADDR_SPACE_BITS 36
 #else  #else
 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */  /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
 #define TARGET_PHYS_ADDR_SPACE_BITS 32  #define TARGET_PHYS_ADDR_SPACE_BITS 32
 #endif  #endif
   
 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];  static TranslationBlock *tbs;
   int code_gen_max_blocks;
 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];  TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 int nb_tbs;  static int nb_tbs;
 /* any access to the tbs or the page table must use this lock */  /* any access to the tbs or the page table must use this lock */
 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;  spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
   
 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));  #if defined(__arm__) || defined(__sparc_v9__)
   /* The prologue must be reachable with a direct jump. ARM and Sparc64
     have limited branch ranges (possibly also PPC), so place it in a
     section close to the code segment. */
   #define code_gen_section                                \
       __attribute__((__section__(".gen_code")))           \
       __attribute__((aligned (32)))
   #else
   #define code_gen_section                                \
       __attribute__((aligned (32)))
   #endif
   
   uint8_t code_gen_prologue[1024] code_gen_section;
   static uint8_t *code_gen_buffer;
   static unsigned long code_gen_buffer_size;
   /* threshold to flush the translated code buffer */
   static unsigned long code_gen_buffer_max_size;
 uint8_t *code_gen_ptr;  uint8_t *code_gen_ptr;
   
 int phys_ram_size;  #if !defined(CONFIG_USER_ONLY)
   ram_addr_t phys_ram_size;
 int phys_ram_fd;  int phys_ram_fd;
 uint8_t *phys_ram_base;  uint8_t *phys_ram_base;
 uint8_t *phys_ram_dirty;  uint8_t *phys_ram_dirty;
   static int in_migration;
 static ram_addr_t phys_ram_alloc_offset = 0;  static ram_addr_t phys_ram_alloc_offset = 0;
   #endif
   
 CPUState *first_cpu;  CPUState *first_cpu;
 /* current CPU in the current thread. It is only valid inside  /* current CPU in the current thread. It is only valid inside
    cpu_exec() */     cpu_exec() */
 CPUState *cpu_single_env;  CPUState *cpu_single_env;
   /* 0 = Do not count executed instructions.
      1 = Precise instruction counting.
      2 = Adaptive rate instruction counting.  */
   int use_icount = 0;
   /* Current instruction counter.  While executing translated code this may
      include some instructions that have not yet been executed.  */
   int64_t qemu_icount;
   
 typedef struct PageDesc {  typedef struct PageDesc {
     /* list of TBs intersecting this ram page */      /* list of TBs intersecting this ram page */
Line 111  typedef struct PageDesc { Line 144  typedef struct PageDesc {
 } PageDesc;  } PageDesc;
   
 typedef struct PhysPageDesc {  typedef struct PhysPageDesc {
     /* offset in host memory of the page + io_index in the low 12 bits */      /* offset in host memory of the page + io_index in the low bits */
     uint32_t phys_offset;      ram_addr_t phys_offset;
       ram_addr_t region_offset;
 } PhysPageDesc;  } PhysPageDesc;
   
 #define L2_BITS 10  #define L2_BITS 10
Line 129  typedef struct PhysPageDesc { Line 163  typedef struct PhysPageDesc {
 #define L1_SIZE (1 << L1_BITS)  #define L1_SIZE (1 << L1_BITS)
 #define L2_SIZE (1 << L2_BITS)  #define L2_SIZE (1 << L2_BITS)
   
 static void io_mem_init(void);  
   
 unsigned long qemu_real_host_page_size;  unsigned long qemu_real_host_page_size;
 unsigned long qemu_host_page_bits;  unsigned long qemu_host_page_bits;
 unsigned long qemu_host_page_size;  unsigned long qemu_host_page_size;
Line 138  unsigned long qemu_host_page_mask; Line 170  unsigned long qemu_host_page_mask;
   
 /* XXX: for system emulation, it could just be an array */  /* XXX: for system emulation, it could just be an array */
 static PageDesc *l1_map[L1_SIZE];  static PageDesc *l1_map[L1_SIZE];
 PhysPageDesc **l1_phys_map;  static PhysPageDesc **l1_phys_map;
   
   #if !defined(CONFIG_USER_ONLY)
   static void io_mem_init(void);
   
 /* io memory support */  /* io memory support */
 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];  CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];  CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
 void *io_mem_opaque[IO_MEM_NB_ENTRIES];  void *io_mem_opaque[IO_MEM_NB_ENTRIES];
 static int io_mem_nb;  char io_mem_used[IO_MEM_NB_ENTRIES];
 #if defined(CONFIG_SOFTMMU)  
 static int io_mem_watch;  static int io_mem_watch;
 #endif  #endif
   
 /* log support */  /* log support */
 char *logfilename = "/tmp/qemu.log";  static const char *logfilename = "/tmp/qemu.log";
 FILE *logfile;  FILE *logfile;
 int loglevel;  int loglevel;
 static int log_append = 0;  static int log_append = 0;
Line 166  typedef struct subpage_t { Line 200  typedef struct subpage_t {
     CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];      CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
     CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];      CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
     void *opaque[TARGET_PAGE_SIZE][2][4];      void *opaque[TARGET_PAGE_SIZE][2][4];
       ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
 } subpage_t;  } subpage_t;
   
   #ifdef _WIN32
   static void map_exec(void *addr, long size)
   {
       DWORD old_protect;
       VirtualProtect(addr, size,
                      PAGE_EXECUTE_READWRITE, &old_protect);
       
   }
   #else
   static void map_exec(void *addr, long size)
   {
       unsigned long start, end, page_size;
       
       page_size = getpagesize();
       start = (unsigned long)addr;
       start &= ~(page_size - 1);
       
       end = (unsigned long)addr + size;
       end += page_size - 1;
       end &= ~(page_size - 1);
       
       mprotect((void *)start, end - start,
                PROT_READ | PROT_WRITE | PROT_EXEC);
   }
   #endif
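
A worked example of the rounding above: with a 4 KB host page size, a call such as map_exec(buf, 0x2000) where buf == (void *)0x804a010 (addresses purely illustrative) computes start = 0x804a000 and end = 0x804d000, so the three pages covering [0x804a000, 0x804d000) are remapped PROT_READ | PROT_WRITE | PROT_EXEC.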
   
 static void page_init(void)  static void page_init(void)
 {  {
     /* NOTE: we can always suppose that qemu_host_page_size >=      /* NOTE: we can always suppose that qemu_host_page_size >=
Line 175  static void page_init(void) Line 236  static void page_init(void)
 #ifdef _WIN32  #ifdef _WIN32
     {      {
         SYSTEM_INFO system_info;          SYSTEM_INFO system_info;
         DWORD old_protect;  
   
         GetSystemInfo(&system_info);          GetSystemInfo(&system_info);
         qemu_real_host_page_size = system_info.dwPageSize;          qemu_real_host_page_size = system_info.dwPageSize;
   
         VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),  
                        PAGE_EXECUTE_READWRITE, &old_protect);  
     }      }
 #else  #else
     qemu_real_host_page_size = getpagesize();      qemu_real_host_page_size = getpagesize();
     {  
         unsigned long start, end;  
   
         start = (unsigned long)code_gen_buffer;  
         start &= ~(qemu_real_host_page_size - 1);  
   
         end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);  
         end += qemu_real_host_page_size - 1;  
         end &= ~(qemu_real_host_page_size - 1);  
   
         mprotect((void *)start, end - start,  
                  PROT_READ | PROT_WRITE | PROT_EXEC);  
     }  
 #endif  #endif
   
     if (qemu_host_page_size == 0)      if (qemu_host_page_size == 0)
         qemu_host_page_size = qemu_real_host_page_size;          qemu_host_page_size = qemu_real_host_page_size;
     if (qemu_host_page_size < TARGET_PAGE_SIZE)      if (qemu_host_page_size < TARGET_PAGE_SIZE)
Line 217  static void page_init(void) Line 260  static void page_init(void)
         FILE *f;          FILE *f;
         int n;          int n;
   
           mmap_lock();
           last_brk = (unsigned long)sbrk(0);
         f = fopen("/proc/self/maps", "r");          f = fopen("/proc/self/maps", "r");
         if (f) {          if (f) {
             do {              do {
                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);                  n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                 if (n == 2) {                  if (n == 2) {
                     page_set_flags(TARGET_PAGE_ALIGN(startaddr),                      startaddr = MIN(startaddr,
                                       (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                       endaddr = MIN(endaddr,
                                       (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                       page_set_flags(startaddr & TARGET_PAGE_MASK,
                                    TARGET_PAGE_ALIGN(endaddr),                                     TARGET_PAGE_ALIGN(endaddr),
                                    PAGE_RESERVED);                                      PAGE_RESERVED); 
                 }                  }
             } while (!feof(f));              } while (!feof(f));
             fclose(f);              fclose(f);
         }          }
           mmap_unlock();
     }      }
 #endif  #endif
 }  }
   
 static inline PageDesc *page_find_alloc(unsigned int index)  static inline PageDesc **page_l1_map(target_ulong index)
   {
   #if TARGET_LONG_BITS > 32
       /* Host memory outside guest VM.  For 32-bit targets we have already
          excluded high addresses.  */
       if (index > ((target_ulong)L2_SIZE * L1_SIZE))
           return NULL;
   #endif
       return &l1_map[index >> L2_BITS];
   }
   
   static inline PageDesc *page_find_alloc(target_ulong index)
 {  {
     PageDesc **lp, *p;      PageDesc **lp, *p;
       lp = page_l1_map(index);
       if (!lp)
           return NULL;
   
     lp = &l1_map[index >> L2_BITS];  
     p = *lp;      p = *lp;
     if (!p) {      if (!p) {
         /* allocate if not found */          /* allocate if not found */
         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);  #if defined(CONFIG_USER_ONLY)
         memset(p, 0, sizeof(PageDesc) * L2_SIZE);          size_t len = sizeof(PageDesc) * L2_SIZE;
           /* Don't use qemu_malloc because it may recurse.  */
           p = mmap(0, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         *lp = p;          *lp = p;
           if (h2g_valid(p)) {
               unsigned long addr = h2g(p);
               page_set_flags(addr & TARGET_PAGE_MASK,
                              TARGET_PAGE_ALIGN(addr + len),
                              PAGE_RESERVED); 
           }
   #else
           p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
           *lp = p;
   #endif
     }      }
     return p + (index & (L2_SIZE - 1));      return p + (index & (L2_SIZE - 1));
 }  }
   
 static inline PageDesc *page_find(unsigned int index)  static inline PageDesc *page_find(target_ulong index)
 {  {
     PageDesc *p;      PageDesc **lp, *p;
       lp = page_l1_map(index);
       if (!lp)
           return NULL;
   
     p = l1_map[index >> L2_BITS];      p = *lp;
     if (!p)      if (!p)
         return 0;          return 0;
     return p + (index & (L2_SIZE - 1));      return p + (index & (L2_SIZE - 1));
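
A worked example of the two-level lookup used by page_find()/page_find_alloc(): with L2_BITS = 10 (L2_SIZE = 1024), a page index of 0x12345 selects l1_map[0x12345 >> 10] = l1_map[0x48], and the PageDesc is entry 0x12345 & 0x3ff = 0x345 of that second-level array; the second-level array itself is only allocated on the first page_find_alloc() for any index in that 1024-page window.
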
Line 289  static PhysPageDesc *phys_page_find_allo Line 368  static PhysPageDesc *phys_page_find_allo
             return NULL;              return NULL;
         pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);          pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
         *lp = pd;          *lp = pd;
         for (i = 0; i < L2_SIZE; i++)          for (i = 0; i < L2_SIZE; i++) {
           pd[i].phys_offset = IO_MEM_UNASSIGNED;            pd[i].phys_offset = IO_MEM_UNASSIGNED;
             pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
           }
     }      }
     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));      return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
 }  }
Line 304  static inline PhysPageDesc *phys_page_fi Line 385  static inline PhysPageDesc *phys_page_fi
 static void tlb_protect_code(ram_addr_t ram_addr);  static void tlb_protect_code(ram_addr_t ram_addr);
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,  static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr);                                      target_ulong vaddr);
   #define mmap_lock() do { } while(0)
   #define mmap_unlock() do { } while(0)
   #endif
   
   #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
   
   #if defined(CONFIG_USER_ONLY)
    /* Currently it is not recommended to allocate big chunks of data in
       user mode. This will change when a dedicated libc is used. */
   #define USE_STATIC_CODE_GEN_BUFFER
   #endif
   
   #ifdef USE_STATIC_CODE_GEN_BUFFER
   static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
   #endif
   
   static void code_gen_alloc(unsigned long tb_size)
   {
   #ifdef USE_STATIC_CODE_GEN_BUFFER
       code_gen_buffer = static_code_gen_buffer;
       code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
       map_exec(code_gen_buffer, code_gen_buffer_size);
   #else
       code_gen_buffer_size = tb_size;
       if (code_gen_buffer_size == 0) {
   #if defined(CONFIG_USER_ONLY)
           /* in user mode, phys_ram_size is not meaningful */
           code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
   #else
        /* XXX: needs adjustments */
           code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
   #endif
       }
       if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
           code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
       /* The code gen buffer location may have constraints depending on
          the host cpu and OS */
   #if defined(__linux__) 
       {
           int flags;
           void *start = NULL;
   
           flags = MAP_PRIVATE | MAP_ANONYMOUS;
   #if defined(__x86_64__)
           flags |= MAP_32BIT;
           /* Cannot map more than that */
           if (code_gen_buffer_size > (800 * 1024 * 1024))
               code_gen_buffer_size = (800 * 1024 * 1024);
   #elif defined(__sparc_v9__)
           // Map the buffer below 2G, so we can use direct calls and branches
           flags |= MAP_FIXED;
           start = (void *) 0x60000000UL;
           if (code_gen_buffer_size > (512 * 1024 * 1024))
               code_gen_buffer_size = (512 * 1024 * 1024);
   #elif defined(__arm__)
           /* Map the buffer below 32M, so we can use direct calls and branches */
           flags |= MAP_FIXED;
           start = (void *) 0x01000000UL;
           if (code_gen_buffer_size > 16 * 1024 * 1024)
               code_gen_buffer_size = 16 * 1024 * 1024;
   #endif
           code_gen_buffer = mmap(start, code_gen_buffer_size,
                                  PROT_WRITE | PROT_READ | PROT_EXEC,
                                  flags, -1, 0);
           if (code_gen_buffer == MAP_FAILED) {
               fprintf(stderr, "Could not allocate dynamic translator buffer\n");
               exit(1);
           }
       }
   #elif defined(__FreeBSD__)
       {
           int flags;
           void *addr = NULL;
           flags = MAP_PRIVATE | MAP_ANONYMOUS;
   #if defined(__x86_64__)
           /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
            * 0x40000000 is free */
           flags |= MAP_FIXED;
           addr = (void *)0x40000000;
           /* Cannot map more than that */
           if (code_gen_buffer_size > (800 * 1024 * 1024))
               code_gen_buffer_size = (800 * 1024 * 1024);
   #endif
           code_gen_buffer = mmap(addr, code_gen_buffer_size,
                                  PROT_WRITE | PROT_READ | PROT_EXEC, 
                                  flags, -1, 0);
           if (code_gen_buffer == MAP_FAILED) {
               fprintf(stderr, "Could not allocate dynamic translator buffer\n");
               exit(1);
           }
       }
   #else
       code_gen_buffer = qemu_malloc(code_gen_buffer_size);
       map_exec(code_gen_buffer, code_gen_buffer_size);
   #endif
   #endif /* !USE_STATIC_CODE_GEN_BUFFER */
       map_exec(code_gen_prologue, sizeof(code_gen_prologue));
       code_gen_buffer_max_size = code_gen_buffer_size - 
           code_gen_max_block_size();
       code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
       tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
   }
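
A rough sizing example (taking CODE_GEN_AVG_BLOCK_SIZE as 128 bytes, which is illustrative rather than quoted from this diff): the default 32 MB buffer gives code_gen_max_blocks = 32 MB / 128 = 262144 TranslationBlock slots in the dynamically allocated tbs array, while code_gen_buffer_max_size is the buffer size minus code_gen_max_block_size(), so the block currently being translated can never run past the end of the buffer.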
   
   /* Must be called before using the QEMU cpus. 'tb_size' is the size
      (in bytes) allocated to the translation buffer. Zero means default
      size. */
   void cpu_exec_init_all(unsigned long tb_size)
   {
       cpu_gen_init();
       code_gen_alloc(tb_size);
       code_gen_ptr = code_gen_buffer;
       page_init();
   #if !defined(CONFIG_USER_ONLY)
       io_mem_init();
   #endif
   }
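
A minimal sketch of the startup order this introduces; the CPU model string, the cpu_init() wrapper, and the error handling are illustrative and not part of this diff:

    static void example_startup(const char *cpu_model)
    {
        CPUState *env;

        cpu_exec_init_all(0);           /* 0 selects the default code buffer size */
        env = cpu_init(cpu_model);      /* registers the CPU via cpu_exec_init() */
        if (!env) {
            fprintf(stderr, "unable to find CPU definition\n");
            exit(1);
        }
    }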
   
   #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
   
   #define CPU_COMMON_SAVE_VERSION 1
   
   static void cpu_common_save(QEMUFile *f, void *opaque)
   {
       CPUState *env = opaque;
   
       qemu_put_be32s(f, &env->halted);
       qemu_put_be32s(f, &env->interrupt_request);
   }
   
   static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
   {
       CPUState *env = opaque;
   
       if (version_id != CPU_COMMON_SAVE_VERSION)
           return -EINVAL;
   
       qemu_get_be32s(f, &env->halted);
       qemu_get_be32s(f, &env->interrupt_request);
       tlb_flush(env, 1);
   
       return 0;
   }
 #endif  #endif
   
 void cpu_exec_init(CPUState *env)  void cpu_exec_init(CPUState *env)
Line 311  void cpu_exec_init(CPUState *env) Line 534  void cpu_exec_init(CPUState *env)
     CPUState **penv;      CPUState **penv;
     int cpu_index;      int cpu_index;
   
     if (!code_gen_ptr) {  
         code_gen_ptr = code_gen_buffer;  
         page_init();  
         io_mem_init();  
     }  
     env->next_cpu = NULL;      env->next_cpu = NULL;
     penv = &first_cpu;      penv = &first_cpu;
     cpu_index = 0;      cpu_index = 0;
Line 324  void cpu_exec_init(CPUState *env) Line 542  void cpu_exec_init(CPUState *env)
         cpu_index++;          cpu_index++;
     }      }
     env->cpu_index = cpu_index;      env->cpu_index = cpu_index;
     env->nb_watchpoints = 0;      TAILQ_INIT(&env->breakpoints);
       TAILQ_INIT(&env->watchpoints);
     *penv = env;      *penv = env;
   #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
       register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                       cpu_common_save, cpu_common_load, env);
       register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                       cpu_save, cpu_load, env);
   #endif
 }  }
   
 static inline void invalidate_page_bitmap(PageDesc *p)  static inline void invalidate_page_bitmap(PageDesc *p)
Line 366  void tb_flush(CPUState *env1) Line 591  void tb_flush(CPUState *env1)
            nb_tbs, nb_tbs > 0 ?             nb_tbs, nb_tbs > 0 ?
            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);             ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
 #endif  #endif
       if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
           cpu_abort(env1, "Internal error: code buffer overflow\n");
   
     nb_tbs = 0;      nb_tbs = 0;
   
     for(env = first_cpu; env != NULL; env = env->next_cpu) {      for(env = first_cpu; env != NULL; env = env->next_cpu) {
Line 417  static void tb_page_check(void) Line 645  static void tb_page_check(void)
     }      }
 }  }
   
 void tb_jmp_check(TranslationBlock *tb)  static void tb_jmp_check(TranslationBlock *tb)
 {  {
     TranslationBlock *tb1;      TranslationBlock *tb1;
     unsigned int n1;      unsigned int n1;
Line 506  static inline void tb_reset_jump(Transla Line 734  static inline void tb_reset_jump(Transla
     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));      tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
 }  }
   
 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)  void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
 {  {
     CPUState *env;      CPUState *env;
     PageDesc *p;      PageDesc *p;
     unsigned int h, n1;      unsigned int h, n1;
     target_ulong phys_pc;      target_phys_addr_t phys_pc;
     TranslationBlock *tb1, *tb2;      TranslationBlock *tb1, *tb2;
   
     /* remove the TB from the hash list */      /* remove the TB from the hash list */
Line 594  static void build_page_bitmap(PageDesc * Line 822  static void build_page_bitmap(PageDesc *
     int n, tb_start, tb_end;      int n, tb_start, tb_end;
     TranslationBlock *tb;      TranslationBlock *tb;
   
     p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);      p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
     if (!p->code_bitmap)  
         return;  
     memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);  
   
     tb = p->first_tb;      tb = p->first_tb;
     while (tb != NULL) {      while (tb != NULL) {
Line 620  static void build_page_bitmap(PageDesc * Line 845  static void build_page_bitmap(PageDesc *
     }      }
 }  }
   
 #ifdef TARGET_HAS_PRECISE_SMC  TranslationBlock *tb_gen_code(CPUState *env,
                                 target_ulong pc, target_ulong cs_base,
 static void tb_gen_code(CPUState *env,                                int flags, int cflags)
                         target_ulong pc, target_ulong cs_base, int flags,  
                         int cflags)  
 {  {
     TranslationBlock *tb;      TranslationBlock *tb;
     uint8_t *tc_ptr;      uint8_t *tc_ptr;
Line 638  static void tb_gen_code(CPUState *env, Line 861  static void tb_gen_code(CPUState *env,
         tb_flush(env);          tb_flush(env);
         /* cannot fail at this point */          /* cannot fail at this point */
         tb = tb_alloc(pc);          tb = tb_alloc(pc);
           /* Don't forget to invalidate previous TB info.  */
           tb_invalidated_flag = 1;
     }      }
     tc_ptr = code_gen_ptr;      tc_ptr = code_gen_ptr;
     tb->tc_ptr = tc_ptr;      tb->tc_ptr = tc_ptr;
Line 654  static void tb_gen_code(CPUState *env, Line 879  static void tb_gen_code(CPUState *env,
         phys_page2 = get_phys_addr_code(env, virt_page2);          phys_page2 = get_phys_addr_code(env, virt_page2);
     }      }
     tb_link_phys(tb, phys_pc, phys_page2);      tb_link_phys(tb, phys_pc, phys_page2);
       return tb;
 }  }
 #endif  
   
 /* invalidate all TBs which intersect with the target physical page  /* invalidate all TBs which intersect with the target physical page
    starting in range [start;end[. NOTE: start and end must refer to     starting in range [start;end[. NOTE: start and end must refer to
    the same physical page. 'is_cpu_write_access' should be true if called     the same physical page. 'is_cpu_write_access' should be true if called
    from a real cpu write access: the virtual CPU will exit the current     from a real cpu write access: the virtual CPU will exit the current
    TB if code is modified inside this TB. */     TB if code is modified inside this TB. */
 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,  void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                    int is_cpu_write_access)                                     int is_cpu_write_access)
 {  {
     int n, current_tb_modified, current_tb_not_found, current_flags;      TranslationBlock *tb, *tb_next, *saved_tb;
     CPUState *env = cpu_single_env;      CPUState *env = cpu_single_env;
     PageDesc *p;  
     TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;  
     target_ulong tb_start, tb_end;      target_ulong tb_start, tb_end;
     target_ulong current_pc, current_cs_base;      PageDesc *p;
       int n;
   #ifdef TARGET_HAS_PRECISE_SMC
       int current_tb_not_found = is_cpu_write_access;
       TranslationBlock *current_tb = NULL;
       int current_tb_modified = 0;
       target_ulong current_pc = 0;
       target_ulong current_cs_base = 0;
       int current_flags = 0;
   #endif /* TARGET_HAS_PRECISE_SMC */
   
     p = page_find(start >> TARGET_PAGE_BITS);      p = page_find(start >> TARGET_PAGE_BITS);
     if (!p)      if (!p)
Line 684  void tb_invalidate_phys_page_range(targe Line 916  void tb_invalidate_phys_page_range(targe
   
     /* we remove all the TBs in the range [start, end[ */      /* we remove all the TBs in the range [start, end[ */
     /* XXX: see if in some cases it could be faster to invalidate all the code */      /* XXX: see if in some cases it could be faster to invalidate all the code */
     current_tb_not_found = is_cpu_write_access;  
     current_tb_modified = 0;  
     current_tb = NULL; /* avoid warning */  
     current_pc = 0; /* avoid warning */  
     current_cs_base = 0; /* avoid warning */  
     current_flags = 0; /* avoid warning */  
     tb = p->first_tb;      tb = p->first_tb;
     while (tb != NULL) {      while (tb != NULL) {
         n = (long)tb & 3;          n = (long)tb & 3;
Line 710  void tb_invalidate_phys_page_range(targe Line 936  void tb_invalidate_phys_page_range(targe
             if (current_tb_not_found) {              if (current_tb_not_found) {
                 current_tb_not_found = 0;                  current_tb_not_found = 0;
                 current_tb = NULL;                  current_tb = NULL;
                 if (env->mem_write_pc) {                  if (env->mem_io_pc) {
                     /* now we have a real cpu fault */                      /* now we have a real cpu fault */
                     current_tb = tb_find_pc(env->mem_write_pc);                      current_tb = tb_find_pc(env->mem_io_pc);
                 }                  }
             }              }
             if (current_tb == tb &&              if (current_tb == tb &&
                 !(current_tb->cflags & CF_SINGLE_INSN)) {                  (current_tb->cflags & CF_COUNT_MASK) != 1) {
                 /* If we are modifying the current TB, we must stop                  /* If we are modifying the current TB, we must stop
                 its execution. We could be more precise by checking                  its execution. We could be more precise by checking
                 that the modification is after the current PC, but it                  that the modification is after the current PC, but it
Line 725  void tb_invalidate_phys_page_range(targe Line 951  void tb_invalidate_phys_page_range(targe
   
                 current_tb_modified = 1;                  current_tb_modified = 1;
                 cpu_restore_state(current_tb, env,                  cpu_restore_state(current_tb, env,
                                   env->mem_write_pc, NULL);                                    env->mem_io_pc, NULL);
 #if defined(TARGET_I386)                  cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                 current_flags = env->hflags;                                       &current_flags);
                 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));  
                 current_cs_base = (target_ulong)env->segs[R_CS].base;  
                 current_pc = current_cs_base + env->eip;  
 #else  
 #error unsupported CPU  
 #endif  
             }              }
 #endif /* TARGET_HAS_PRECISE_SMC */  #endif /* TARGET_HAS_PRECISE_SMC */
             /* we need to do that to handle the case where a signal              /* we need to do that to handle the case where a signal
Line 757  void tb_invalidate_phys_page_range(targe Line 977  void tb_invalidate_phys_page_range(targe
     if (!p->first_tb) {      if (!p->first_tb) {
         invalidate_page_bitmap(p);          invalidate_page_bitmap(p);
         if (is_cpu_write_access) {          if (is_cpu_write_access) {
             tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);              tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
         }          }
     }      }
 #endif  #endif
Line 767  void tb_invalidate_phys_page_range(targe Line 987  void tb_invalidate_phys_page_range(targe
            modifying the memory. It will ensure that it cannot modify             modifying the memory. It will ensure that it cannot modify
            itself */             itself */
         env->current_tb = NULL;          env->current_tb = NULL;
         tb_gen_code(env, current_pc, current_cs_base, current_flags,          tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
                     CF_SINGLE_INSN);  
         cpu_resume_from_signal(env, NULL);          cpu_resume_from_signal(env, NULL);
     }      }
 #endif  #endif
 }  }
   
 /* len must be <= 8 and start must be a multiple of len */  /* len must be <= 8 and start must be a multiple of len */
 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)  static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
 {  {
     PageDesc *p;      PageDesc *p;
     int offset, b;      int offset, b;
 #if 0  #if 0
     if (1) {      if (1) {
         if (loglevel) {          qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",                    cpu_single_env->mem_io_vaddr, len,
                    cpu_single_env->mem_write_vaddr, len,                    cpu_single_env->eip,
                    cpu_single_env->eip,                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);  
         }  
     }      }
 #endif  #endif
     p = page_find(start >> TARGET_PAGE_BITS);      p = page_find(start >> TARGET_PAGE_BITS);
Line 804  static inline void tb_invalidate_phys_pa Line 1021  static inline void tb_invalidate_phys_pa
 }  }
   
 #if !defined(CONFIG_SOFTMMU)  #if !defined(CONFIG_SOFTMMU)
 static void tb_invalidate_phys_page(target_ulong addr,  static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                     unsigned long pc, void *puc)                                      unsigned long pc, void *puc)
 {  {
     int n, current_flags, current_tb_modified;      TranslationBlock *tb;
     target_ulong current_pc, current_cs_base;  
     PageDesc *p;      PageDesc *p;
     TranslationBlock *tb, *current_tb;      int n;
 #ifdef TARGET_HAS_PRECISE_SMC  #ifdef TARGET_HAS_PRECISE_SMC
       TranslationBlock *current_tb = NULL;
     CPUState *env = cpu_single_env;      CPUState *env = cpu_single_env;
       int current_tb_modified = 0;
       target_ulong current_pc = 0;
       target_ulong current_cs_base = 0;
       int current_flags = 0;
 #endif  #endif
   
     addr &= TARGET_PAGE_MASK;      addr &= TARGET_PAGE_MASK;
Line 820  static void tb_invalidate_phys_page(targ Line 1041  static void tb_invalidate_phys_page(targ
     if (!p)      if (!p)
         return;          return;
     tb = p->first_tb;      tb = p->first_tb;
     current_tb_modified = 0;  
     current_tb = NULL;  
     current_pc = 0; /* avoid warning */  
     current_cs_base = 0; /* avoid warning */  
     current_flags = 0; /* avoid warning */  
 #ifdef TARGET_HAS_PRECISE_SMC  #ifdef TARGET_HAS_PRECISE_SMC
     if (tb && pc != 0) {      if (tb && pc != 0) {
         current_tb = tb_find_pc(pc);          current_tb = tb_find_pc(pc);
Line 835  static void tb_invalidate_phys_page(targ Line 1051  static void tb_invalidate_phys_page(targ
         tb = (TranslationBlock *)((long)tb & ~3);          tb = (TranslationBlock *)((long)tb & ~3);
 #ifdef TARGET_HAS_PRECISE_SMC  #ifdef TARGET_HAS_PRECISE_SMC
         if (current_tb == tb &&          if (current_tb == tb &&
             !(current_tb->cflags & CF_SINGLE_INSN)) {              (current_tb->cflags & CF_COUNT_MASK) != 1) {
                 /* If we are modifying the current TB, we must stop                  /* If we are modifying the current TB, we must stop
                    its execution. We could be more precise by checking                     its execution. We could be more precise by checking
                    that the modification is after the current PC, but it                     that the modification is after the current PC, but it
Line 844  static void tb_invalidate_phys_page(targ Line 1060  static void tb_invalidate_phys_page(targ
   
             current_tb_modified = 1;              current_tb_modified = 1;
             cpu_restore_state(current_tb, env, pc, puc);              cpu_restore_state(current_tb, env, pc, puc);
 #if defined(TARGET_I386)              cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
             current_flags = env->hflags;                                   &current_flags);
             current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));  
             current_cs_base = (target_ulong)env->segs[R_CS].base;  
             current_pc = current_cs_base + env->eip;  
 #else  
 #error unsupported CPU  
 #endif  
         }          }
 #endif /* TARGET_HAS_PRECISE_SMC */  #endif /* TARGET_HAS_PRECISE_SMC */
         tb_phys_invalidate(tb, addr);          tb_phys_invalidate(tb, addr);
Line 864  static void tb_invalidate_phys_page(targ Line 1074  static void tb_invalidate_phys_page(targ
            modifying the memory. It will ensure that it cannot modify             modifying the memory. It will ensure that it cannot modify
            itself */             itself */
         env->current_tb = NULL;          env->current_tb = NULL;
         tb_gen_code(env, current_pc, current_cs_base, current_flags,          tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
                     CF_SINGLE_INSN);  
         cpu_resume_from_signal(env, puc);          cpu_resume_from_signal(env, puc);
     }      }
 #endif  #endif
Line 933  TranslationBlock *tb_alloc(target_ulong  Line 1142  TranslationBlock *tb_alloc(target_ulong 
 {  {
     TranslationBlock *tb;      TranslationBlock *tb;
   
     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||      if (nb_tbs >= code_gen_max_blocks ||
         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)          (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
         return NULL;          return NULL;
     tb = &tbs[nb_tbs++];      tb = &tbs[nb_tbs++];
     tb->pc = pc;      tb->pc = pc;
Line 942  TranslationBlock *tb_alloc(target_ulong  Line 1151  TranslationBlock *tb_alloc(target_ulong 
     return tb;      return tb;
 }  }
   
   void tb_free(TranslationBlock *tb)
   {
    /* In practice this is mostly used for single-use temporary TBs.
          Ignore the hard cases and just back up if this TB happens to
          be the last one generated.  */
       if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
           code_gen_ptr = tb->tc_ptr;
           nb_tbs--;
       }
   }
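
A hedged sketch of the single-shot pattern tb_free() is designed for (the execution step is elided; the cflags value of 1 requests a one-instruction, one-use block under the new CF_COUNT_MASK scheme):

    static void example_run_once(CPUState *env, target_ulong pc,
                                 target_ulong cs_base, int flags)
    {
        TranslationBlock *tb;

        tb = tb_gen_code(env, pc, cs_base, flags, 1);
        /* ... execute the block exactly once ... */
        tb_free(tb);   /* rewinds code_gen_ptr if tb is still the newest block */
    }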
   
 /* add a new TB and link it to the physical page tables. phys_page2 is  /* add a new TB and link it to the physical page tables. phys_page2 is
    (-1) to indicate that only one page contains the TB. */     (-1) to indicate that only one page contains the TB. */
 void tb_link_phys(TranslationBlock *tb,  void tb_link_phys(TranslationBlock *tb,
Line 950  void tb_link_phys(TranslationBlock *tb, Line 1170  void tb_link_phys(TranslationBlock *tb,
     unsigned int h;      unsigned int h;
     TranslationBlock **ptb;      TranslationBlock **ptb;
   
       /* Grab the mmap lock to stop another thread invalidating this TB
          before we are done.  */
       mmap_lock();
     /* add in the physical hash table */      /* add in the physical hash table */
     h = tb_phys_hash_func(phys_pc);      h = tb_phys_hash_func(phys_pc);
     ptb = &tb_phys_hash[h];      ptb = &tb_phys_hash[h];
Line 976  void tb_link_phys(TranslationBlock *tb, Line 1199  void tb_link_phys(TranslationBlock *tb,
 #ifdef DEBUG_TB_CHECK  #ifdef DEBUG_TB_CHECK
     tb_page_check();      tb_page_check();
 #endif  #endif
       mmap_unlock();
 }  }
   
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <  /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
Line 1077  static void breakpoint_invalidate(CPUSta Line 1301  static void breakpoint_invalidate(CPUSta
 #endif  #endif
   
 /* Add a watchpoint.  */  /* Add a watchpoint.  */
 int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)  int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                             int flags, CPUWatchpoint **watchpoint)
 {  {
     int i;      target_ulong len_mask = ~(len - 1);
       CPUWatchpoint *wp;
   
       /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
       if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
           fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                   TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
           return -EINVAL;
       }
       wp = qemu_malloc(sizeof(*wp));
   
       wp->vaddr = addr;
       wp->len_mask = len_mask;
       wp->flags = flags;
   
       /* keep all GDB-injected watchpoints in front */
       if (flags & BP_GDB)
           TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
       else
           TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
   
       tlb_flush_page(env, addr);
   
       if (watchpoint)
           *watchpoint = wp;
       return 0;
   }
   
     for (i = 0; i < env->nb_watchpoints; i++) {  /* Remove a specific watchpoint.  */
         if (addr == env->watchpoint[i].vaddr)  int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                             int flags)
   {
       target_ulong len_mask = ~(len - 1);
       CPUWatchpoint *wp;
   
       TAILQ_FOREACH(wp, &env->watchpoints, entry) {
           if (addr == wp->vaddr && len_mask == wp->len_mask
                   && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
               cpu_watchpoint_remove_by_ref(env, wp);
             return 0;              return 0;
           }
     }      }
     if (env->nb_watchpoints >= MAX_WATCHPOINTS)      return -ENOENT;
         return -1;  }
   
     i = env->nb_watchpoints++;  /* Remove a specific watchpoint by reference.  */
     env->watchpoint[i].vaddr = addr;  void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
     tlb_flush_page(env, addr);  {
     /* FIXME: This flush is needed because of the hack to make memory ops      TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
        terminate the TB.  It can be removed once the proper IO trap and  
        re-execute bits are in.  */      tlb_flush_page(env, watchpoint->vaddr);
     tb_flush(env);  
     return i;      qemu_free(watchpoint);
 }  }
   
 /* Remove a watchpoint.  */  /* Remove all matching watchpoints.  */
 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)  void cpu_watchpoint_remove_all(CPUState *env, int mask)
 {  {
     int i;      CPUWatchpoint *wp, *next;
   
     for (i = 0; i < env->nb_watchpoints; i++) {      TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
         if (addr == env->watchpoint[i].vaddr) {          if (wp->flags & mask)
             env->nb_watchpoints--;              cpu_watchpoint_remove_by_ref(env, wp);
             env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];  
             tlb_flush_page(env, addr);  
             return 0;  
         }  
     }      }
     return -1;  
 }  }
   
 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a  /* Add a breakpoint.  */
    breakpoint is reached */  int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)                            CPUBreakpoint **breakpoint)
 {  {
 #if defined(TARGET_HAS_ICE)  #if defined(TARGET_HAS_ICE)
     int i;      CPUBreakpoint *bp;
   
     for(i = 0; i < env->nb_breakpoints; i++) {      bp = qemu_malloc(sizeof(*bp));
         if (env->breakpoints[i] == pc)  
             return 0;  
     }  
   
     if (env->nb_breakpoints >= MAX_BREAKPOINTS)      bp->pc = pc;
         return -1;      bp->flags = flags;
     env->breakpoints[env->nb_breakpoints++] = pc;  
       /* keep all GDB-injected breakpoints in front */
       if (flags & BP_GDB)
           TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
       else
           TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
   
     breakpoint_invalidate(env, pc);      breakpoint_invalidate(env, pc);
   
       if (breakpoint)
           *breakpoint = bp;
     return 0;      return 0;
 #else  #else
     return -1;      return -ENOSYS;
 #endif  #endif
 }  }
   
 /* remove a breakpoint */  /* Remove a specific breakpoint.  */
 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)  int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
 {  {
 #if defined(TARGET_HAS_ICE)  #if defined(TARGET_HAS_ICE)
     int i;      CPUBreakpoint *bp;
     for(i = 0; i < env->nb_breakpoints; i++) {  
         if (env->breakpoints[i] == pc)  
             goto found;  
     }  
     return -1;  
  found:  
     env->nb_breakpoints--;  
     if (i < env->nb_breakpoints)  
       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];  
   
     breakpoint_invalidate(env, pc);      TAILQ_FOREACH(bp, &env->breakpoints, entry) {
     return 0;          if (bp->pc == pc && bp->flags == flags) {
               cpu_breakpoint_remove_by_ref(env, bp);
               return 0;
           }
       }
       return -ENOENT;
 #else  #else
     return -1;      return -ENOSYS;
   #endif
   }
   
   /* Remove a specific breakpoint by reference.  */
   void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
   {
   #if defined(TARGET_HAS_ICE)
       TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
   
       breakpoint_invalidate(env, breakpoint->pc);
   
       qemu_free(breakpoint);
   #endif
   }
   
   /* Remove all matching breakpoints. */
   void cpu_breakpoint_remove_all(CPUState *env, int mask)
   {
   #if defined(TARGET_HAS_ICE)
       CPUBreakpoint *bp, *next;
   
       TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
           if (bp->flags & mask)
               cpu_breakpoint_remove_by_ref(env, bp);
       }
 #endif  #endif
 }  }
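
A minimal usage sketch of the reworked breakpoint/watchpoint API; the BP_MEM_WRITE flag is assumed from the matching cpu-defs.h change and is not shown in this diff:

    static void example_debug_points(CPUState *env, target_ulong addr,
                                     target_ulong pc)
    {
        CPUWatchpoint *wp;
        CPUBreakpoint *bp;

        /* watch 4 bytes at 'addr' for writes, on behalf of the gdb stub */
        if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0)
            return;
        if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) < 0) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return;
        }
        /* ... run the guest; EXCP_DEBUG is raised when either one triggers ... */
        cpu_breakpoint_remove_by_ref(env, bp);
        cpu_watchpoint_remove_by_ref(env, wp);
    }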
   
Line 1186  void cpu_set_log(int log_flags) Line 1468  void cpu_set_log(int log_flags)
 #if !defined(CONFIG_SOFTMMU)  #if !defined(CONFIG_SOFTMMU)
         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */          /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
         {          {
             static uint8_t logfile_buf[4096];              static char logfile_buf[4096];
             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));              setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
         }          }
 #else  #else
Line 1213  void cpu_set_log_filename(const char *fi Line 1495  void cpu_set_log_filename(const char *fi
 /* mask must never be zero, except for A20 change call */  /* mask must never be zero, except for A20 change call */
 void cpu_interrupt(CPUState *env, int mask)  void cpu_interrupt(CPUState *env, int mask)
 {  {
   #if !defined(USE_NPTL)
     TranslationBlock *tb;      TranslationBlock *tb;
     static int interrupt_lock;      static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
   #endif
       int old_mask;
   
       old_mask = env->interrupt_request;
       /* FIXME: This is probably not threadsafe.  A different thread could
          be in the middle of a read-modify-write operation.  */
     env->interrupt_request |= mask;      env->interrupt_request |= mask;
     /* if the cpu is currently executing code, we must unlink it and  #if defined(USE_NPTL)
        all the potentially executing TB */      /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
     tb = env->current_tb;         problem and hope the cpu will stop of its own accord.  For userspace
     if (tb && !testandset(&interrupt_lock)) {         emulation this often isn't actually as bad as it sounds.  Often
         env->current_tb = NULL;         signals are used primarily to interrupt blocking syscalls.  */
         tb_reset_jump_recursive(tb);  #else
         interrupt_lock = 0;      if (use_icount) {
           env->icount_decr.u16.high = 0xffff;
   #ifndef CONFIG_USER_ONLY
           /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
              an async event happened and we need to process it.  */
           if (!can_do_io(env)
               && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
               cpu_abort(env, "Raised interrupt while not in I/O function");
           }
   #endif
       } else {
           tb = env->current_tb;
           /* if the cpu is currently executing code, we must unlink it and
              all the potentially executing TB */
           if (tb && !testandset(&interrupt_lock)) {
               env->current_tb = NULL;
               tb_reset_jump_recursive(tb);
               resetlock(&interrupt_lock);
           }
     }      }
   #endif
 }  }
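
A note on the icount path above: icount_decr is a union in which the 16-bit 'high' half overlays the sign bits of the 32-bit counter that translated code loads at each TB entry, so storing 0xffff there makes the counter read as negative and the next TB entry check drops straight back to the main loop; this replaces TB unlinking while instruction counting is enabled (assuming the endian-dependent union layout in cpu-defs.h).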
   
 void cpu_reset_interrupt(CPUState *env, int mask)  void cpu_reset_interrupt(CPUState *env, int mask)
Line 1232  void cpu_reset_interrupt(CPUState *env,  Line 1539  void cpu_reset_interrupt(CPUState *env, 
     env->interrupt_request &= ~mask;      env->interrupt_request &= ~mask;
 }  }
   
 CPULogItem cpu_log_items[] = {  const CPULogItem cpu_log_items[] = {
     { CPU_LOG_TB_OUT_ASM, "out_asm",      { CPU_LOG_TB_OUT_ASM, "out_asm",
       "show generated host assembly code for each compiled TB" },        "show generated host assembly code for each compiled TB" },
     { CPU_LOG_TB_IN_ASM, "in_asm",      { CPU_LOG_TB_IN_ASM, "in_asm",
       "show target assembly code for each compiled TB" },        "show target assembly code for each compiled TB" },
     { CPU_LOG_TB_OP, "op",      { CPU_LOG_TB_OP, "op",
       "show micro ops for each compiled TB (only usable if 'in_asm' used)" },        "show micro ops for each compiled TB" },
 #ifdef TARGET_I386  
     { CPU_LOG_TB_OP_OPT, "op_opt",      { CPU_LOG_TB_OP_OPT, "op_opt",
       "show micro ops after optimization for each compiled TB" },        "show micro ops "
   #ifdef TARGET_I386
         "before eflags optimization and "
 #endif  #endif
         "after liveness analysis" },
     { CPU_LOG_INT, "int",      { CPU_LOG_INT, "int",
       "show interrupts/exceptions in short format" },        "show interrupts/exceptions in short format" },
     { CPU_LOG_EXEC, "exec",      { CPU_LOG_EXEC, "exec",
Line 1252  CPULogItem cpu_log_items[] = { Line 1561  CPULogItem cpu_log_items[] = {
 #ifdef TARGET_I386  #ifdef TARGET_I386
     { CPU_LOG_PCALL, "pcall",      { CPU_LOG_PCALL, "pcall",
       "show protected mode far calls/returns/exceptions" },        "show protected mode far calls/returns/exceptions" },
       { CPU_LOG_RESET, "cpu_reset",
         "show CPU state before CPU resets" },
 #endif  #endif
 #ifdef DEBUG_IOPORT  #ifdef DEBUG_IOPORT
     { CPU_LOG_IOPORT, "ioport",      { CPU_LOG_IOPORT, "ioport",
Line 1270  static int cmp1(const char *s1, int n, c Line 1581  static int cmp1(const char *s1, int n, c
 /* takes a comma separated list of log masks. Return 0 if error. */  /* takes a comma separated list of log masks. Return 0 if error. */
 int cpu_str_to_log_mask(const char *str)  int cpu_str_to_log_mask(const char *str)
 {  {
     CPULogItem *item;      const CPULogItem *item;
     int mask;      int mask;
     const char *p, *p1;      const char *p, *p1;
   
Line 1311  void cpu_abort(CPUState *env, const char Line 1622  void cpu_abort(CPUState *env, const char
     vfprintf(stderr, fmt, ap);      vfprintf(stderr, fmt, ap);
     fprintf(stderr, "\n");      fprintf(stderr, "\n");
 #ifdef TARGET_I386  #ifdef TARGET_I386
     if(env->intercept & INTERCEPT_SVM_MASK) {  
         /* most probably the virtual machine should not  
            be shut down but rather caught by the VMM */  
         vmexit(SVM_EXIT_SHUTDOWN, 0);  
     }  
     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);      cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
 #else  #else
     cpu_dump_state(env, stderr, fprintf, 0);      cpu_dump_state(env, stderr, fprintf, 0);
 #endif  #endif
     if (logfile) {      if (qemu_log_enabled()) {
         fprintf(logfile, "qemu: fatal: ");          qemu_log("qemu: fatal: ");
         vfprintf(logfile, fmt, ap2);          qemu_log_vprintf(fmt, ap2);
         fprintf(logfile, "\n");          qemu_log("\n");
 #ifdef TARGET_I386  #ifdef TARGET_I386
         cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);          log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
 #else  #else
         cpu_dump_state(env, logfile, fprintf, 0);          log_cpu_state(env, 0);
 #endif  #endif
         fflush(logfile);          qemu_log_flush();
         fclose(logfile);          qemu_log_close();
     }      }
     va_end(ap2);      va_end(ap2);
     va_end(ap);      va_end(ap);
Line 1340  void cpu_abort(CPUState *env, const char Line 1646  void cpu_abort(CPUState *env, const char
 CPUState *cpu_copy(CPUState *env)  CPUState *cpu_copy(CPUState *env)
 {  {
     CPUState *new_env = cpu_init(env->cpu_model_str);      CPUState *new_env = cpu_init(env->cpu_model_str);
     /* preserve chaining and index */  
     CPUState *next_cpu = new_env->next_cpu;      CPUState *next_cpu = new_env->next_cpu;
     int cpu_index = new_env->cpu_index;      int cpu_index = new_env->cpu_index;
   #if defined(TARGET_HAS_ICE)
       CPUBreakpoint *bp;
       CPUWatchpoint *wp;
   #endif
   
     memcpy(new_env, env, sizeof(CPUState));      memcpy(new_env, env, sizeof(CPUState));
   
       /* Preserve chaining and index. */
     new_env->next_cpu = next_cpu;      new_env->next_cpu = next_cpu;
     new_env->cpu_index = cpu_index;      new_env->cpu_index = cpu_index;
   
       /* Clone all break/watchpoints.
          Note: Once we support ptrace with hw-debug register access, make sure
          BP_CPU break/watchpoints are handled correctly on clone. */
       TAILQ_INIT(&env->breakpoints);
       TAILQ_INIT(&env->watchpoints);
   #if defined(TARGET_HAS_ICE)
       TAILQ_FOREACH(bp, &env->breakpoints, entry) {
           cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
       }
       TAILQ_FOREACH(wp, &env->watchpoints, entry) {
           cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                                 wp->flags, NULL);
       }
   #endif
   
     return new_env;      return new_env;
 }  }
   
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
   
   static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
   {
       unsigned int i;
   
       /* Discard jump cache entries for any tb which might potentially
          overlap the flushed page.  */
       i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
       memset (&env->tb_jmp_cache[i], 0, 
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
   
       i = tb_jmp_cache_hash_page(addr);
       memset (&env->tb_jmp_cache[i], 0, 
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
   }
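
The two probes above exist because a TB may straddle a page boundary: a block whose pc is near the end of page P can extend into page P+1, yet it is hashed in tb_jmp_cache under P. Flushing page 'addr' therefore also clears the cache lines for addr - TARGET_PAGE_SIZE; e.g. with 4 KB target pages a flush of 0x2000 also drops entries for blocks starting in [0x1000, 0x2000).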
   
 /* NOTE: if flush_global is true, also flush global entries (not  /* NOTE: if flush_global is true, also flush global entries (not
    implemented yet) */     implemented yet) */
 void tlb_flush(CPUState *env, int flush_global)  void tlb_flush(CPUState *env, int flush_global)
Line 1385  void tlb_flush(CPUState *env, int flush_ Line 1728  void tlb_flush(CPUState *env, int flush_
   
     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));      memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
   
 #if !defined(CONFIG_SOFTMMU)  
     munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);  
 #endif  
 #ifdef USE_KQEMU  #ifdef USE_KQEMU
     if (env->kqemu_enabled) {      if (env->kqemu_enabled) {
         kqemu_flush(env, flush_global);          kqemu_flush(env, flush_global);
Line 1413  static inline void tlb_flush_entry(CPUTL Line 1753  static inline void tlb_flush_entry(CPUTL
 void tlb_flush_page(CPUState *env, target_ulong addr)  void tlb_flush_page(CPUState *env, target_ulong addr)
 {  {
     int i;      int i;
     TranslationBlock *tb;  
   
 #if defined(DEBUG_TLB)  #if defined(DEBUG_TLB)
     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);      printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
Line 1433  void tlb_flush_page(CPUState *env, targe Line 1772  void tlb_flush_page(CPUState *env, targe
 #endif  #endif
 #endif  #endif
   
     /* Discard jump cache entries for any tb which might potentially      tlb_flush_jmp_cache(env, addr);
        overlap the flushed page.  */  
     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);  
     memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));  
   
     i = tb_jmp_cache_hash_page(addr);  
     memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));  
   
 #if !defined(CONFIG_SOFTMMU)  
     if (addr < MMAP_AREA_END)  
         munmap((void *)addr, TARGET_PAGE_SIZE);  
 #endif  
 #ifdef USE_KQEMU  #ifdef USE_KQEMU
     if (env->kqemu_enabled) {      if (env->kqemu_enabled) {
         kqemu_flush_page(env, addr);          kqemu_flush_page(env, addr);
Line 1476  static inline void tlb_reset_dirty_range Line 1805  static inline void tlb_reset_dirty_range
     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {      if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;          addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
         if ((addr - start) < length) {          if ((addr - start) < length) {
             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;              tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
         }          }
     }      }
 }  }
Line 1530  void cpu_physical_memory_reset_dirty(ram Line 1859  void cpu_physical_memory_reset_dirty(ram
 #endif  #endif
 #endif  #endif
     }      }
   }
   
 #if !defined(CONFIG_SOFTMMU)  int cpu_physical_memory_set_dirty_tracking(int enable)
     /* XXX: this is expensive */  {
     {      in_migration = enable;
         VirtPageDesc *p;      return 0;
         int j;  }
         target_ulong addr;  
   
         for(i = 0; i < L1_SIZE; i++) {  int cpu_physical_memory_get_dirty_tracking(void)
             p = l1_virt_map[i];  {
             if (p) {      return in_migration;
                 addr = i << (TARGET_PAGE_BITS + L2_BITS);  }
                 for(j = 0; j < L2_SIZE; j++) {  
                     if (p->valid_tag == virt_valid_tag &&  void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
                         p->phys_addr >= start && p->phys_addr < end &&  {
                         (p->prot & PROT_WRITE)) {      if (kvm_enabled())
                         if (addr < MMAP_AREA_END) {          kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
                             mprotect((void *)addr, TARGET_PAGE_SIZE,  
                                      p->prot & ~PROT_WRITE);  
                         }  
                     }  
                     addr += TARGET_PAGE_SIZE;  
                     p++;  
                 }  
             }  
         }  
     }  
 #endif  
 }  }
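
The three hooks added above are thin wrappers (the sync call simply forwards to KVM when it is enabled). As a rough, illustrative sketch only, a RAM migration pass might drive them as follows; ram_save_one_page() is a made-up helper, and the cpu_physical_memory_is_dirty() test is a stand-in for whatever dirty flag the real migration code checks:

static void ram_save_one_page(ram_addr_t addr);   /* hypothetical, defined elsewhere */

static void ram_save_pass(void)                   /* hypothetical caller */
{
    ram_addr_t addr;

    if (!cpu_physical_memory_get_dirty_tracking())
        cpu_physical_memory_set_dirty_tracking(1);

    /* pull the latest dirty log (e.g. from KVM) before scanning */
    cpu_physical_sync_dirty_bitmap(0, phys_ram_size);

    for (addr = 0; addr < phys_ram_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_dirty(addr))
            ram_save_one_page(addr);
    }
}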
   
 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)  static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
Line 1568  static inline void tlb_update_dirty(CPUT Line 1886  static inline void tlb_update_dirty(CPUT
         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +          ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
             tlb_entry->addend - (unsigned long)phys_ram_base;              tlb_entry->addend - (unsigned long)phys_ram_base;
         if (!cpu_physical_memory_is_dirty(ram_addr)) {          if (!cpu_physical_memory_is_dirty(ram_addr)) {
             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;              tlb_entry->addr_write |= TLB_NOTDIRTY;
         }          }
     }      }
 }  }
Line 1591  void cpu_tlb_update_dirty(CPUState *env) Line 1909  void cpu_tlb_update_dirty(CPUState *env)
 #endif  #endif
 }  }
   
 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,  static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
                                   unsigned long start)  
 {  {
     unsigned long addr;      if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {          tlb_entry->addr_write = vaddr;
         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;  
         if (addr == start) {  
             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;  
         }  
     }  
 }  }
   
 /* update the TLB corresponding to virtual page vaddr and phys addr  /* update the TLB corresponding to virtual page vaddr
    addr so that it is no longer dirty */     so that it is no longer dirty */
 static inline void tlb_set_dirty(CPUState *env,  static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
                                  unsigned long addr, target_ulong vaddr)  
 {  {
     int i;      int i;
   
     addr &= TARGET_PAGE_MASK;      vaddr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);      i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     tlb_set_dirty1(&env->tlb_table[0][i], addr);      tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
     tlb_set_dirty1(&env->tlb_table[1][i], addr);      tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
 #if (NB_MMU_MODES >= 3)  #if (NB_MMU_MODES >= 3)
     tlb_set_dirty1(&env->tlb_table[2][i], addr);      tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
 #if (NB_MMU_MODES == 4)  #if (NB_MMU_MODES == 4)
     tlb_set_dirty1(&env->tlb_table[3][i], addr);      tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
 #endif  #endif
 #endif  #endif
 }  }
Line 1634  int tlb_set_page_exec(CPUState *env, tar Line 1945  int tlb_set_page_exec(CPUState *env, tar
     unsigned long pd;      unsigned long pd;
     unsigned int index;      unsigned int index;
     target_ulong address;      target_ulong address;
       target_ulong code_address;
     target_phys_addr_t addend;      target_phys_addr_t addend;
     int ret;      int ret;
     CPUTLBEntry *te;      CPUTLBEntry *te;
     int i;      CPUWatchpoint *wp;
       target_phys_addr_t iotlb;
   
     p = phys_page_find(paddr >> TARGET_PAGE_BITS);      p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     if (!p) {      if (!p) {
Line 1651  int tlb_set_page_exec(CPUState *env, tar Line 1964  int tlb_set_page_exec(CPUState *env, tar
 #endif  #endif
   
     ret = 0;      ret = 0;
 #if !defined(CONFIG_SOFTMMU)      address = vaddr;
     if (is_softmmu)      if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
 #endif          /* IO memory case (romd handled later) */
     {          address |= TLB_MMIO;
         if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {      }
             /* IO memory case */      addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
             address = vaddr | pd;      if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
             addend = paddr;          /* Normal RAM.  */
           iotlb = pd & TARGET_PAGE_MASK;
           if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
               iotlb |= IO_MEM_NOTDIRTY;
           else
               iotlb |= IO_MEM_ROM;
       } else {
          /* IO handlers are currently passed a physical address.
              It would be nice to pass an offset from the base address
              of that region.  This would avoid having to special case RAM,
              and avoid full address decoding in every device.
              We can't use the high bits of pd for this because
              IO_MEM_ROMD uses these as a ram address.  */
           iotlb = (pd & ~TARGET_PAGE_MASK);
           if (p) {
               iotlb += p->region_offset;
         } else {          } else {
             /* standard memory */              iotlb += paddr;
             address = vaddr;  
             addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);  
         }          }
       }
   
         /* Make accesses to pages with watchpoints go via the      code_address = address;
            watchpoint trap routines.  */      /* Make accesses to pages with watchpoints go via the
         for (i = 0; i < env->nb_watchpoints; i++) {         watchpoint trap routines.  */
             if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {      TAILQ_FOREACH(wp, &env->watchpoints, entry) {
                 if (address & ~TARGET_PAGE_MASK) {          if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
                     env->watchpoint[i].addend = 0;              iotlb = io_mem_watch + paddr;
                     address = vaddr | io_mem_watch;              /* TODO: The memory case can be optimized by not trapping
                 } else {                 reads of pages with a write breakpoint.  */
                     env->watchpoint[i].addend = pd - paddr +              address |= TLB_MMIO;
                         (unsigned long) phys_ram_base;  
                     /* TODO: Figure out how to make read watchpoints coexist  
                        with code.  */  
                     pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;  
                 }  
             }  
         }          }
       }
   
         index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);      index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         addend -= vaddr;      env->iotlb[mmu_idx][index] = iotlb - vaddr;
         te = &env->tlb_table[mmu_idx][index];      te = &env->tlb_table[mmu_idx][index];
         te->addend = addend;      te->addend = addend - vaddr;
         if (prot & PAGE_READ) {      if (prot & PAGE_READ) {
             te->addr_read = address;          te->addr_read = address;
         } else {      } else {
             te->addr_read = -1;          te->addr_read = -1;
         }  
         if (prot & PAGE_EXEC) {  
             te->addr_code = address;  
         } else {  
             te->addr_code = -1;  
         }  
         if (prot & PAGE_WRITE) {  
             if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||  
                 (pd & IO_MEM_ROMD)) {  
                 /* write access calls the I/O callback */  
                 te->addr_write = vaddr |  
                     (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));  
             } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&  
                        !cpu_physical_memory_is_dirty(pd)) {  
                 te->addr_write = vaddr | IO_MEM_NOTDIRTY;  
             } else {  
                 te->addr_write = address;  
             }  
         } else {  
             te->addr_write = -1;  
         }  
     }      }
 #if !defined(CONFIG_SOFTMMU)  
     else {  
         if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {  
             /* IO access: no mapping is done as it will be handled by the  
                soft MMU */  
             if (!(env->hflags & HF_SOFTMMU_MASK))  
                 ret = 2;  
         } else {  
             void *map_addr;  
   
             if (vaddr >= MMAP_AREA_END) {      if (prot & PAGE_EXEC) {
                 ret = 2;          te->addr_code = code_address;
             } else {      } else {
                 if (prot & PROT_WRITE) {          te->addr_code = -1;
                     if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||      }
 #if defined(TARGET_HAS_SMC) || 1      if (prot & PAGE_WRITE) {
                         first_tb ||          if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
 #endif              (pd & IO_MEM_ROMD)) {
                         ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&              /* Write access calls the I/O callback.  */
                          !cpu_physical_memory_is_dirty(pd))) {              te->addr_write = address | TLB_MMIO;
                         /* ROM: we do as if code was inside */          } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         /* if code is present, we only map as read only and save the                     !cpu_physical_memory_is_dirty(pd)) {
                            original mapping */              te->addr_write = address | TLB_NOTDIRTY;
                         VirtPageDesc *vp;          } else {
               te->addr_write = address;
                         vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);  
                         vp->phys_addr = pd;  
                         vp->prot = prot;  
                         vp->valid_tag = virt_valid_tag;  
                         prot &= ~PAGE_WRITE;  
                     }  
                 }  
                 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,  
                                 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));  
                 if (map_addr == MAP_FAILED) {  
                     cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",  
                               paddr, vaddr);  
                 }  
             }  
         }          }
       } else {
           te->addr_write = -1;
     }      }
 #endif  
     return ret;      return ret;
 }  }
   
 /* called from signal handler: invalidate the code and unprotect the  
     page. Return TRUE if the fault was successfully handled. */  
 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)  
 {  
 #if !defined(CONFIG_SOFTMMU)  
     VirtPageDesc *vp;  
   
 #if defined(DEBUG_TLB)  
     printf("page_unprotect: addr=0x%08x\n", addr);  
 #endif  
     addr &= TARGET_PAGE_MASK;  
   
     /* if it is not mapped, no need to worry here */  
     if (addr >= MMAP_AREA_END)  
         return 0;  
     vp = virt_page_find(addr >> TARGET_PAGE_BITS);  
     if (!vp)  
         return 0;  
     /* NOTE: in this case, validate_tag is _not_ tested as it  
        validates only the code TLB */  
     if (vp->valid_tag != virt_valid_tag)  
         return 0;  
     if (!(vp->prot & PAGE_WRITE))  
         return 0;  
 #if defined(DEBUG_TLB)  
     printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",  
            addr, vp->phys_addr, vp->prot);  
 #endif  
     if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)  
         cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",  
                   (unsigned long)addr, vp->prot);  
     /* set the dirty bit */  
     phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;  
     /* flush the code inside */  
     tb_invalidate_phys_page(vp->phys_addr, pc, puc);  
     return 1;  
 #else  
     return 0;  
 #endif  
 }  
   
 #else  #else
   
 void tlb_flush(CPUState *env, int flush_global)  void tlb_flush(CPUState *env, int flush_global)
Line 1876  void page_set_flags(target_ulong start,  Line 2114  void page_set_flags(target_ulong start, 
     PageDesc *p;      PageDesc *p;
     target_ulong addr;      target_ulong addr;
   
       /* mmap_lock should already be held.  */
     start = start & TARGET_PAGE_MASK;      start = start & TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);      end = TARGET_PAGE_ALIGN(end);
     if (flags & PAGE_WRITE)      if (flags & PAGE_WRITE)
         flags |= PAGE_WRITE_ORG;          flags |= PAGE_WRITE_ORG;
     spin_lock(&tb_lock);  
     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {      for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
         p = page_find_alloc(addr >> TARGET_PAGE_BITS);          p = page_find_alloc(addr >> TARGET_PAGE_BITS);
           /* We may be called for host regions that are outside guest
              address space.  */
           if (!p)
               return;
         /* if the write protection is set, then we invalidate the code          /* if the write protection is set, then we invalidate the code
            inside */             inside */
         if (!(p->flags & PAGE_WRITE) &&          if (!(p->flags & PAGE_WRITE) &&
Line 1892  void page_set_flags(target_ulong start,  Line 2134  void page_set_flags(target_ulong start, 
         }          }
         p->flags = flags;          p->flags = flags;
     }      }
     spin_unlock(&tb_lock);  
 }  }
   
 int page_check_range(target_ulong start, target_ulong len, int flags)  int page_check_range(target_ulong start, target_ulong len, int flags)
Line 1901  int page_check_range(target_ulong start, Line 2142  int page_check_range(target_ulong start,
     target_ulong end;      target_ulong end;
     target_ulong addr;      target_ulong addr;
   
       if (start + len < start)
           /* we've wrapped around */
           return -1;
   
      end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */      end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
      start = start & TARGET_PAGE_MASK;      start = start & TARGET_PAGE_MASK;
   
     if( end < start )  
         /* we've wrapped around */  
         return -1;  
     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {      for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
         p = page_find(addr >> TARGET_PAGE_BITS);          p = page_find(addr >> TARGET_PAGE_BITS);
         if( !p )          if( !p )
Line 1939  int page_unprotect(target_ulong address, Line 2181  int page_unprotect(target_ulong address,
     PageDesc *p, *p1;      PageDesc *p, *p1;
     target_ulong host_start, host_end, addr;      target_ulong host_start, host_end, addr;
   
       /* Technically this isn't safe inside a signal handler.  However we
          know this only ever happens in a synchronous SEGV handler, so in
          practice it seems to be ok.  */
       mmap_lock();
   
     host_start = address & qemu_host_page_mask;      host_start = address & qemu_host_page_mask;
     page_index = host_start >> TARGET_PAGE_BITS;      page_index = host_start >> TARGET_PAGE_BITS;
     p1 = page_find(page_index);      p1 = page_find(page_index);
     if (!p1)      if (!p1) {
           mmap_unlock();
         return 0;          return 0;
       }
     host_end = host_start + qemu_host_page_size;      host_end = host_start + qemu_host_page_size;
     p = p1;      p = p1;
     prot = 0;      prot = 0;
Line 1965  int page_unprotect(target_ulong address, Line 2214  int page_unprotect(target_ulong address,
 #ifdef DEBUG_TB_CHECK  #ifdef DEBUG_TB_CHECK
             tb_invalidate_check(address);              tb_invalidate_check(address);
 #endif  #endif
               mmap_unlock();
             return 1;              return 1;
         }          }
     }      }
       mmap_unlock();
     return 0;      return 0;
 }  }
   
Line 1977  static inline void tlb_set_dirty(CPUStat Line 2228  static inline void tlb_set_dirty(CPUStat
 }  }
 #endif /* defined(CONFIG_USER_ONLY) */  #endif /* defined(CONFIG_USER_ONLY) */
   
   #if !defined(CONFIG_USER_ONLY)
   
 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,  static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                              int memory);                               ram_addr_t memory, ram_addr_t region_offset);
 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,  static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                            int orig_memory);                             ram_addr_t orig_memory, ram_addr_t region_offset);
 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \  #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                       need_subpage)                                     \                        need_subpage)                                     \
     do {                                                                \      do {                                                                \
Line 2003  static void *subpage_init (target_phys_a Line 2256  static void *subpage_init (target_phys_a
   
 /* register physical memory. 'size' must be a multiple of the target  /* register physical memory. 'size' must be a multiple of the target
    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an     page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
    io memory page */     io memory page.  The address used when calling the IO function is
 void cpu_register_physical_memory(target_phys_addr_t start_addr,     the offset from the start of the region, plus region_offset.  Both
                                    unsigned long size,     start_addr and region_offset are rounded down to a page boundary
                                   unsigned long phys_offset)     before calculating this offset.  This should not be a problem unless
      the low bits of start_addr and region_offset differ.  */
   void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                            ram_addr_t size,
                                            ram_addr_t phys_offset,
                                            ram_addr_t region_offset)
 {  {
     target_phys_addr_t addr, end_addr;      target_phys_addr_t addr, end_addr;
     PhysPageDesc *p;      PhysPageDesc *p;
     CPUState *env;      CPUState *env;
     unsigned long orig_size = size;      ram_addr_t orig_size = size;
     void *subpage;      void *subpage;
   
   #ifdef USE_KQEMU
       /* XXX: should not depend on cpu context */
       env = first_cpu;
       if (env->kqemu_enabled) {
           kqemu_set_phys_mem(start_addr, size, phys_offset);
       }
   #endif
       if (kvm_enabled())
           kvm_set_phys_mem(start_addr, size, phys_offset);
   
       if (phys_offset == IO_MEM_UNASSIGNED) {
           region_offset = start_addr;
       }
       region_offset &= TARGET_PAGE_MASK;
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;      size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
     end_addr = start_addr + (target_phys_addr_t)size;      end_addr = start_addr + (target_phys_addr_t)size;
     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {      for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
         p = phys_page_find(addr >> TARGET_PAGE_BITS);          p = phys_page_find(addr >> TARGET_PAGE_BITS);
         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {          if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
             unsigned long orig_memory = p->phys_offset;              ram_addr_t orig_memory = p->phys_offset;
             target_phys_addr_t start_addr2, end_addr2;              target_phys_addr_t start_addr2, end_addr2;
             int need_subpage = 0;              int need_subpage = 0;
   
Line 2028  void cpu_register_physical_memory(target Line 2300  void cpu_register_physical_memory(target
             if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {              if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                 if (!(orig_memory & IO_MEM_SUBPAGE)) {                  if (!(orig_memory & IO_MEM_SUBPAGE)) {
                     subpage = subpage_init((addr & TARGET_PAGE_MASK),                      subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                            &p->phys_offset, orig_memory);                                             &p->phys_offset, orig_memory,
                                              p->region_offset);
                 } else {                  } else {
                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)                      subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                             >> IO_MEM_SHIFT];                                              >> IO_MEM_SHIFT];
                 }                  }
                 subpage_register(subpage, start_addr2, end_addr2, phys_offset);                  subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                    region_offset);
                   p->region_offset = 0;
             } else {              } else {
                 p->phys_offset = phys_offset;                  p->phys_offset = phys_offset;
                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||                  if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
Line 2043  void cpu_register_physical_memory(target Line 2318  void cpu_register_physical_memory(target
         } else {          } else {
             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);              p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
             p->phys_offset = phys_offset;              p->phys_offset = phys_offset;
               p->region_offset = region_offset;
             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||              if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                 (phys_offset & IO_MEM_ROMD))                  (phys_offset & IO_MEM_ROMD)) {
                 phys_offset += TARGET_PAGE_SIZE;                  phys_offset += TARGET_PAGE_SIZE;
             else {              } else {
                 target_phys_addr_t start_addr2, end_addr2;                  target_phys_addr_t start_addr2, end_addr2;
                 int need_subpage = 0;                  int need_subpage = 0;
   
Line 2055  void cpu_register_physical_memory(target Line 2331  void cpu_register_physical_memory(target
   
                 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {                  if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                     subpage = subpage_init((addr & TARGET_PAGE_MASK),                      subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                            &p->phys_offset, IO_MEM_UNASSIGNED);                                             &p->phys_offset, IO_MEM_UNASSIGNED,
                                              addr & TARGET_PAGE_MASK);
                     subpage_register(subpage, start_addr2, end_addr2,                      subpage_register(subpage, start_addr2, end_addr2,
                                      phys_offset);                                       phys_offset, region_offset);
                       p->region_offset = 0;
                 }                  }
             }              }
         }          }
           region_offset += TARGET_PAGE_SIZE;
     }      }
   
     /* since each CPU stores ram addresses in its TLB cache, we must      /* since each CPU stores ram addresses in its TLB cache, we must
Line 2072  void cpu_register_physical_memory(target Line 2351  void cpu_register_physical_memory(target
 }  }
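
Illustrative sketch of how a device model could use the new interface; none of this is part of the diff, and the mydev_* names and the 0x10000000 base address are invented. With region_offset set to 0, the callbacks receive the offset from start_addr rather than an absolute physical address, as described in the comment above:

/* Hypothetical device model, for illustration only. */
static uint32_t mydev_mmio_read(void *opaque, target_phys_addr_t addr)
{
    /* 'addr' is the offset within the registered region (region_offset
       was 0 below), not an absolute physical address */
    return 0;
}

static void mydev_mmio_write(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    /* handle the store at region-relative offset 'addr' */
}

/* one handler reused for byte/word/long accesses, as a shortcut */
static CPUReadMemoryFunc *mydev_mmio_read_fn[3] = {
    mydev_mmio_read, mydev_mmio_read, mydev_mmio_read,
};
static CPUWriteMemoryFunc *mydev_mmio_write_fn[3] = {
    mydev_mmio_write, mydev_mmio_write, mydev_mmio_write,
};

static void mydev_map(void)
{
    int io;

    /* allocate a dynamic io-memory slot and back one guest page with it */
    io = cpu_register_io_memory(0, mydev_mmio_read_fn, mydev_mmio_write_fn,
                                NULL);
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        io, 0 /* region_offset */);
}

Passing start_addr itself as region_offset appears to reproduce the old behaviour of handing callbacks absolute physical addresses, which is also what the function does internally for IO_MEM_UNASSIGNED pages.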
   
 /* XXX: temporary until new memory mapping API */  /* XXX: temporary until new memory mapping API */
 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)  ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
 {  {
     PhysPageDesc *p;      PhysPageDesc *p;
   
Line 2082  uint32_t cpu_get_physical_page_desc(targ Line 2361  uint32_t cpu_get_physical_page_desc(targ
     return p->phys_offset;      return p->phys_offset;
 }  }
   
   void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
   {
       if (kvm_enabled())
           kvm_coalesce_mmio_region(addr, size);
   }
   
   void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
   {
       if (kvm_enabled())
           kvm_uncoalesce_mmio_region(addr, size);
   }
   
 /* XXX: better than nothing */  /* XXX: better than nothing */
 ram_addr_t qemu_ram_alloc(unsigned int size)  ram_addr_t qemu_ram_alloc(ram_addr_t size)
 {  {
     ram_addr_t addr;      ram_addr_t addr;
     if ((phys_ram_alloc_offset + size) >= phys_ram_size) {      if ((phys_ram_alloc_offset + size) > phys_ram_size) {
         fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",          fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                 size, phys_ram_size);                  (uint64_t)size, (uint64_t)phys_ram_size);
         abort();          abort();
     }      }
     addr = phys_ram_alloc_offset;      addr = phys_ram_alloc_offset;
Line 2105  static uint32_t unassigned_mem_readb(voi Line 2396  static uint32_t unassigned_mem_readb(voi
 #ifdef DEBUG_UNASSIGNED  #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);      printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
 #endif  #endif
 #ifdef TARGET_SPARC  #if defined(TARGET_SPARC)
     do_unassigned_access(addr, 0, 0, 0);      do_unassigned_access(addr, 0, 0, 0, 1);
 #elif TARGET_CRIS  #endif
     do_unassigned_access(addr, 0, 0, 0);      return 0;
   }
   
   static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
   {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 0, 0, 0, 2);
   #endif
       return 0;
   }
   
   static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
   {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 0, 0, 0, 4);
 #endif  #endif
     return 0;      return 0;
 }  }
Line 2118  static void unassigned_mem_writeb(void * Line 2429  static void unassigned_mem_writeb(void *
 #ifdef DEBUG_UNASSIGNED  #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);      printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
 #endif  #endif
 #ifdef TARGET_SPARC  #if defined(TARGET_SPARC)
     do_unassigned_access(addr, 1, 0, 0);      do_unassigned_access(addr, 1, 0, 0, 1);
 #elif TARGET_CRIS  #endif
     do_unassigned_access(addr, 1, 0, 0);  }
   
   static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
   {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 1, 0, 0, 2);
   #endif
   }
   
   static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
   {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 1, 0, 0, 4);
 #endif  #endif
 }  }
   
 static CPUReadMemoryFunc *unassigned_mem_read[3] = {  static CPUReadMemoryFunc *unassigned_mem_read[3] = {
     unassigned_mem_readb,      unassigned_mem_readb,
     unassigned_mem_readb,      unassigned_mem_readw,
     unassigned_mem_readb,      unassigned_mem_readl,
 };  };
   
 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {  static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
     unassigned_mem_writeb,      unassigned_mem_writeb,
     unassigned_mem_writeb,      unassigned_mem_writew,
     unassigned_mem_writeb,      unassigned_mem_writel,
 };  };
   
 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)  static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                   uint32_t val)
 {  {
     unsigned long ram_addr;  
     int dirty_flags;      int dirty_flags;
     ram_addr = addr - (unsigned long)phys_ram_base;  
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];      dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {      if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
Line 2149  static void notdirty_mem_writeb(void *op Line 2477  static void notdirty_mem_writeb(void *op
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];          dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif  #endif
     }      }
     stb_p((uint8_t *)(long)addr, val);      stb_p(phys_ram_base + ram_addr, val);
 #ifdef USE_KQEMU  #ifdef USE_KQEMU
     if (cpu_single_env->kqemu_enabled &&      if (cpu_single_env->kqemu_enabled &&
         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)          (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
Line 2160  static void notdirty_mem_writeb(void *op Line 2488  static void notdirty_mem_writeb(void *op
     /* we remove the notdirty callback only if the code has been      /* we remove the notdirty callback only if the code has been
        flushed */         flushed */
     if (dirty_flags == 0xff)      if (dirty_flags == 0xff)
         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);          tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }  }
   
 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)  static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                   uint32_t val)
 {  {
     unsigned long ram_addr;  
     int dirty_flags;      int dirty_flags;
     ram_addr = addr - (unsigned long)phys_ram_base;  
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];      dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {      if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
Line 2175  static void notdirty_mem_writew(void *op Line 2502  static void notdirty_mem_writew(void *op
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];          dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif  #endif
     }      }
     stw_p((uint8_t *)(long)addr, val);      stw_p(phys_ram_base + ram_addr, val);
 #ifdef USE_KQEMU  #ifdef USE_KQEMU
     if (cpu_single_env->kqemu_enabled &&      if (cpu_single_env->kqemu_enabled &&
         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)          (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
Line 2186  static void notdirty_mem_writew(void *op Line 2513  static void notdirty_mem_writew(void *op
     /* we remove the notdirty callback only if the code has been      /* we remove the notdirty callback only if the code has been
        flushed */         flushed */
     if (dirty_flags == 0xff)      if (dirty_flags == 0xff)
         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);          tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }  }
   
 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)  static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                   uint32_t val)
 {  {
     unsigned long ram_addr;  
     int dirty_flags;      int dirty_flags;
     ram_addr = addr - (unsigned long)phys_ram_base;  
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];      dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {      if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
Line 2201  static void notdirty_mem_writel(void *op Line 2527  static void notdirty_mem_writel(void *op
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];          dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif  #endif
     }      }
     stl_p((uint8_t *)(long)addr, val);      stl_p(phys_ram_base + ram_addr, val);
 #ifdef USE_KQEMU  #ifdef USE_KQEMU
     if (cpu_single_env->kqemu_enabled &&      if (cpu_single_env->kqemu_enabled &&
         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)          (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
Line 2212  static void notdirty_mem_writel(void *op Line 2538  static void notdirty_mem_writel(void *op
     /* we remove the notdirty callback only if the code has been      /* we remove the notdirty callback only if the code has been
        flushed */         flushed */
     if (dirty_flags == 0xff)      if (dirty_flags == 0xff)
         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);          tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }  }
   
 static CPUReadMemoryFunc *error_mem_read[3] = {  static CPUReadMemoryFunc *error_mem_read[3] = {
Line 2227  static CPUWriteMemoryFunc *notdirty_mem_ Line 2553  static CPUWriteMemoryFunc *notdirty_mem_
     notdirty_mem_writel,      notdirty_mem_writel,
 };  };
   
 #if defined(CONFIG_SOFTMMU)  /* Generate a debug exception if a watchpoint has been hit.  */
   static void check_watchpoint(int offset, int len_mask, int flags)
   {
       CPUState *env = cpu_single_env;
       target_ulong pc, cs_base;
       TranslationBlock *tb;
       target_ulong vaddr;
       CPUWatchpoint *wp;
       int cpu_flags;
   
       if (env->watchpoint_hit) {
           /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
            * current instruction. */
           cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
           return;
       }
       vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
       TAILQ_FOREACH(wp, &env->watchpoints, entry) {
           if ((vaddr == (wp->vaddr & len_mask) ||
                (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
               wp->flags |= BP_WATCHPOINT_HIT;
               if (!env->watchpoint_hit) {
                   env->watchpoint_hit = wp;
                   tb = tb_find_pc(env->mem_io_pc);
                   if (!tb) {
                       cpu_abort(env, "check_watchpoint: could not find TB for "
                                 "pc=%p", (void *)env->mem_io_pc);
                   }
                   cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                   tb_phys_invalidate(tb, -1);
                   if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                       env->exception_index = EXCP_DEBUG;
                   } else {
                       cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                       tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                   }
                   cpu_resume_from_signal(env, NULL);
               }
           } else {
               wp->flags &= ~BP_WATCHPOINT_HIT;
           }
       }
   }
   
 /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,  /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
    so these check for a hit then pass through to the normal out-of-line     so these check for a hit then pass through to the normal out-of-line
    phys routines.  */     phys routines.  */
 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)  static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
 {  {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
     return ldub_phys(addr);      return ldub_phys(addr);
 }  }
   
 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)  static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
 {  {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
     return lduw_phys(addr);      return lduw_phys(addr);
 }  }
   
 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)  static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
 {  {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
     return ldl_phys(addr);      return ldl_phys(addr);
 }  }
   
 /* Generate a debug exception if a watchpoint has been hit.  
    Returns the real physical address of the access.  addr will be a host  
    address in case of a RAM location.  */  
 static target_ulong check_watchpoint(target_phys_addr_t addr)  
 {  
     CPUState *env = cpu_single_env;  
     target_ulong watch;  
     target_ulong retaddr;  
     int i;  
   
     retaddr = addr;  
     for (i = 0; i < env->nb_watchpoints; i++) {  
         watch = env->watchpoint[i].vaddr;  
         if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {  
             retaddr = addr - env->watchpoint[i].addend;  
             if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {  
                 cpu_single_env->watchpoint_hit = i + 1;  
                 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);  
                 break;  
             }  
         }  
     }  
     return retaddr;  
 }  
   
 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,  static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                              uint32_t val)                               uint32_t val)
 {  {
     addr = check_watchpoint(addr);      check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
     stb_phys(addr, val);      stb_phys(addr, val);
 }  }
   
 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,  static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                              uint32_t val)                               uint32_t val)
 {  {
     addr = check_watchpoint(addr);      check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
     stw_phys(addr, val);      stw_phys(addr, val);
 }  }
   
 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,  static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                              uint32_t val)                               uint32_t val)
 {  {
     addr = check_watchpoint(addr);      check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
     stl_phys(addr, val);      stl_phys(addr, val);
 }  }
   
Line 2303  static CPUWriteMemoryFunc *watch_mem_wri Line 2651  static CPUWriteMemoryFunc *watch_mem_wri
     watch_mem_writew,      watch_mem_writew,
     watch_mem_writel,      watch_mem_writel,
 };  };
 #endif  
   
 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,  static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                  unsigned int len)                                   unsigned int len)
Line 2311  static inline uint32_t subpage_readlen ( Line 2658  static inline uint32_t subpage_readlen (
     uint32_t ret;      uint32_t ret;
     unsigned int idx;      unsigned int idx;
   
     idx = SUBPAGE_IDX(addr - mmio->base);      idx = SUBPAGE_IDX(addr);
 #if defined(DEBUG_SUBPAGE)  #if defined(DEBUG_SUBPAGE)
     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,      printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
            mmio, len, addr, idx);             mmio, len, addr, idx);
 #endif  #endif
     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);      ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                          addr + mmio->region_offset[idx][0][len]);
   
     return ret;      return ret;
 }  }
Line 2326  static inline void subpage_writelen (sub Line 2674  static inline void subpage_writelen (sub
 {  {
     unsigned int idx;      unsigned int idx;
   
     idx = SUBPAGE_IDX(addr - mmio->base);      idx = SUBPAGE_IDX(addr);
 #if defined(DEBUG_SUBPAGE)  #if defined(DEBUG_SUBPAGE)
     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,      printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
            mmio, len, addr, idx, value);             mmio, len, addr, idx, value);
 #endif  #endif
     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);      (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                     addr + mmio->region_offset[idx][1][len],
                                     value);
 }  }
   
 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)  static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
Line 2401  static CPUWriteMemoryFunc *subpage_write Line 2751  static CPUWriteMemoryFunc *subpage_write
 };  };
   
 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,  static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                              int memory)                               ram_addr_t memory, ram_addr_t region_offset)
 {  {
     int idx, eidx;      int idx, eidx;
     unsigned int i;      unsigned int i;
Line 2420  static int subpage_register (subpage_t * Line 2770  static int subpage_register (subpage_t *
             if (io_mem_read[memory][i]) {              if (io_mem_read[memory][i]) {
                 mmio->mem_read[idx][i] = &io_mem_read[memory][i];                  mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                 mmio->opaque[idx][0][i] = io_mem_opaque[memory];                  mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                   mmio->region_offset[idx][0][i] = region_offset;
             }              }
             if (io_mem_write[memory][i]) {              if (io_mem_write[memory][i]) {
                 mmio->mem_write[idx][i] = &io_mem_write[memory][i];                  mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                 mmio->opaque[idx][1][i] = io_mem_opaque[memory];                  mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                   mmio->region_offset[idx][1][i] = region_offset;
             }              }
         }          }
     }      }
Line 2431  static int subpage_register (subpage_t * Line 2783  static int subpage_register (subpage_t *
     return 0;      return 0;
 }  }
   
 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,  static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                            int orig_memory)                             ram_addr_t orig_memory, ram_addr_t region_offset)
 {  {
     subpage_t *mmio;      subpage_t *mmio;
     int subpage_memory;      int subpage_memory;
   
     mmio = qemu_mallocz(sizeof(subpage_t));      mmio = qemu_mallocz(sizeof(subpage_t));
     if (mmio != NULL) {  
         mmio->base = base;      mmio->base = base;
         subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);      subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
 #if defined(DEBUG_SUBPAGE)  #if defined(DEBUG_SUBPAGE)
         printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,      printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
                mmio, base, TARGET_PAGE_SIZE, subpage_memory);             mmio, base, TARGET_PAGE_SIZE, subpage_memory);
 #endif  #endif
         *phys = subpage_memory | IO_MEM_SUBPAGE;      *phys = subpage_memory | IO_MEM_SUBPAGE;
         subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);      subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
     }                           region_offset);
   
     return mmio;      return mmio;
 }  }
   
   static int get_free_io_mem_idx(void)
   {
       int i;
   
       for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
           if (!io_mem_used[i]) {
               io_mem_used[i] = 1;
               return i;
           }
   
       return -1;
   }
   
 static void io_mem_init(void)  static void io_mem_init(void)
 {  {
       int i;
   
     cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);      cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
     cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);      cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
     cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);      cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
     io_mem_nb = 5;      for (i=0; i<5; i++)
           io_mem_used[i] = 1;
   
 #if defined(CONFIG_SOFTMMU)      io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
     io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,  
                                           watch_mem_write, NULL);                                            watch_mem_write, NULL);
 #endif  
     /* alloc dirty bits array */      /* alloc dirty bits array */
     phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);      phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
     memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);      memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
Line 2484  int cpu_register_io_memory(int io_index, Line 2850  int cpu_register_io_memory(int io_index,
     int i, subwidth = 0;      int i, subwidth = 0;
   
     if (io_index <= 0) {      if (io_index <= 0) {
         if (io_mem_nb >= IO_MEM_NB_ENTRIES)          io_index = get_free_io_mem_idx();
             return -1;          if (io_index == -1)
         io_index = io_mem_nb++;              return io_index;
     } else {      } else {
         if (io_index >= IO_MEM_NB_ENTRIES)          if (io_index >= IO_MEM_NB_ENTRIES)
             return -1;              return -1;
Line 2502  int cpu_register_io_memory(int io_index, Line 2868  int cpu_register_io_memory(int io_index,
     return (io_index << IO_MEM_SHIFT) | subwidth;      return (io_index << IO_MEM_SHIFT) | subwidth;
 }  }
   
   void cpu_unregister_io_memory(int io_table_address)
   {
       int i;
       int io_index = io_table_address >> IO_MEM_SHIFT;
   
       for (i=0;i < 3; i++) {
           io_mem_read[io_index][i] = unassigned_mem_read[i];
           io_mem_write[io_index][i] = unassigned_mem_write[i];
       }
       io_mem_opaque[io_index] = NULL;
       io_mem_used[io_index] = 0;
   }
   
 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)  CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
 {  {
     return io_mem_write[io_index >> IO_MEM_SHIFT];      return io_mem_write[io_index >> IO_MEM_SHIFT];
Line 2512  CPUReadMemoryFunc **cpu_get_io_memory_re Line 2891  CPUReadMemoryFunc **cpu_get_io_memory_re
     return io_mem_read[io_index >> IO_MEM_SHIFT];      return io_mem_read[io_index >> IO_MEM_SHIFT];
 }  }
   
   #endif /* !defined(CONFIG_USER_ONLY) */
   
 /* physical memory access (slow version, mainly for debug) */  /* physical memory access (slow version, mainly for debug) */
 #if defined(CONFIG_USER_ONLY)  #if defined(CONFIG_USER_ONLY)
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,  void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
Line 2533  void cpu_physical_memory_rw(target_phys_ Line 2914  void cpu_physical_memory_rw(target_phys_
             if (!(flags & PAGE_WRITE))              if (!(flags & PAGE_WRITE))
                 return;                  return;
             /* XXX: this code should not depend on lock_user */              /* XXX: this code should not depend on lock_user */
             if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))              if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                 /* FIXME - should this return an error rather than just fail? */                  /* FIXME - should this return an error rather than just fail? */
                 return;                  return;
             memcpy(p, buf, len);              memcpy(p, buf, l);
             unlock_user(p, addr, len);              unlock_user(p, addr, l);
         } else {          } else {
             if (!(flags & PAGE_READ))              if (!(flags & PAGE_READ))
                 return;                  return;
             /* XXX: this code should not depend on lock_user */              /* XXX: this code should not depend on lock_user */
             if (!(p = lock_user(VERIFY_READ, addr, len, 1)))              if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                 /* FIXME - should this return an error rather than just fail? */                  /* FIXME - should this return an error rather than just fail? */
                 return;                  return;
             memcpy(buf, p, len);              memcpy(buf, p, l);
             unlock_user(p, addr, 0);              unlock_user(p, addr, 0);
         }          }
         len -= l;          len -= l;
Line 2579  void cpu_physical_memory_rw(target_phys_ Line 2960  void cpu_physical_memory_rw(target_phys_
   
         if (is_write) {          if (is_write) {
             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {              if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                   target_phys_addr_t addr1 = addr;
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);                  io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                   if (p)
                       addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                 /* XXX: could force cpu_single_env to NULL to avoid                  /* XXX: could force cpu_single_env to NULL to avoid
                    potential bugs */                     potential bugs */
                 if (l >= 4 && ((addr & 3) == 0)) {                  if (l >= 4 && ((addr1 & 3) == 0)) {
                     /* 32 bit write access */                      /* 32 bit write access */
                     val = ldl_p(buf);                      val = ldl_p(buf);
                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);                      io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                     l = 4;                      l = 4;
                 } else if (l >= 2 && ((addr & 1) == 0)) {                  } else if (l >= 2 && ((addr1 & 1) == 0)) {
                     /* 16 bit write access */                      /* 16 bit write access */
                     val = lduw_p(buf);                      val = lduw_p(buf);
                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);                      io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                     l = 2;                      l = 2;
                 } else {                  } else {
                     /* 8 bit write access */                      /* 8 bit write access */
                     val = ldub_p(buf);                      val = ldub_p(buf);
                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);                      io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                     l = 1;                      l = 1;
                 }                  }
             } else {              } else {
Line 2615  void cpu_physical_memory_rw(target_phys_ Line 2999  void cpu_physical_memory_rw(target_phys_
         } else {          } else {
             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&              if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                 !(pd & IO_MEM_ROMD)) {                  !(pd & IO_MEM_ROMD)) {
                   target_phys_addr_t addr1 = addr;
                 /* I/O case */                  /* I/O case */
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);                  io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                 if (l >= 4 && ((addr & 3) == 0)) {                  if (p)
                       addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                   if (l >= 4 && ((addr1 & 3) == 0)) {
                     /* 32 bit read access */                      /* 32 bit read access */
                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);                      val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                     stl_p(buf, val);                      stl_p(buf, val);
                     l = 4;                      l = 4;
                 } else if (l >= 2 && ((addr & 1) == 0)) {                  } else if (l >= 2 && ((addr1 & 1) == 0)) {
                     /* 16 bit read access */                      /* 16 bit read access */
                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);                      val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                     stw_p(buf, val);                      stw_p(buf, val);
                     l = 2;                      l = 2;
                 } else {                  } else {
                     /* 8 bit read access */                      /* 8 bit read access */
                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);                      val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                     stb_p(buf, val);                      stb_p(buf, val);
                     l = 1;                      l = 1;
                 }                  }
Line 2685  void cpu_physical_memory_write_rom(targe Line 3072  void cpu_physical_memory_write_rom(targe
     }      }
 }  }
   
   typedef struct {
       void *buffer;
       target_phys_addr_t addr;
       target_phys_addr_t len;
   } BounceBuffer;
   
   static BounceBuffer bounce;
   
   typedef struct MapClient {
       void *opaque;
       void (*callback)(void *opaque);
       LIST_ENTRY(MapClient) link;
   } MapClient;
   
   static LIST_HEAD(map_client_list, MapClient) map_client_list
       = LIST_HEAD_INITIALIZER(map_client_list);
   
   void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
   {
       MapClient *client = qemu_malloc(sizeof(*client));
   
       client->opaque = opaque;
       client->callback = callback;
       LIST_INSERT_HEAD(&map_client_list, client, link);
       return client;
   }
   
   void cpu_unregister_map_client(void *_client)
   {
       MapClient *client = (MapClient *)_client;
   
       LIST_REMOVE(client, link);
   }
   
   static void cpu_notify_map_clients(void)
   {
       MapClient *client;
   
       while (!LIST_EMPTY(&map_client_list)) {
           client = LIST_FIRST(&map_client_list);
           client->callback(client->opaque);
           LIST_REMOVE(client, link);
       }
   }
   
   /* Map a physical memory region into a host virtual address.
    * May map a subset of the requested range, given by and returned in *plen.
    * May return NULL if resources needed to perform the mapping are exhausted.
    * Use only for reads OR writes - not for read-modify-write operations.
    * Use cpu_register_map_client() to know when retrying the map operation is
    * likely to succeed.
    */
   void *cpu_physical_memory_map(target_phys_addr_t addr,
                                 target_phys_addr_t *plen,
                                 int is_write)
   {
       target_phys_addr_t len = *plen;
       target_phys_addr_t done = 0;
       int l;
       uint8_t *ret = NULL;
       uint8_t *ptr;
       target_phys_addr_t page;
       unsigned long pd;
       PhysPageDesc *p;
       unsigned long addr1;
   
       while (len > 0) {
           page = addr & TARGET_PAGE_MASK;
           l = (page + TARGET_PAGE_SIZE) - addr;
           if (l > len)
               l = len;
           p = phys_page_find(page >> TARGET_PAGE_BITS);
           if (!p) {
               pd = IO_MEM_UNASSIGNED;
           } else {
               pd = p->phys_offset;
           }
   
           if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
               if (done || bounce.buffer) {
                   break;
               }
               bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
               bounce.addr = addr;
               bounce.len = l;
               if (!is_write) {
                   cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
               }
               ptr = bounce.buffer;
           } else {
               addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
               ptr = phys_ram_base + addr1;
           }
           if (!done) {
               ret = ptr;
           } else if (ret + done != ptr) {
               break;
           }
   
           len -= l;
           addr += l;
           done += l;
       }
       *plen = done;
       return ret;
   }
   
   /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
    * Will also mark the memory as dirty if is_write == 1.  access_len gives
    * the amount of memory that was actually read or written by the caller.
    */
   void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                                  int is_write, target_phys_addr_t access_len)
   {
       if (buffer != bounce.buffer) {
           if (is_write) {
               unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
               while (access_len) {
                   unsigned l;
                   l = TARGET_PAGE_SIZE;
                   if (l > access_len)
                       l = access_len;
                   if (!cpu_physical_memory_is_dirty(addr1)) {
                       /* invalidate code */
                       tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                       /* set dirty bit */
                       phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                           (0xff & ~CODE_DIRTY_FLAG);
                   }
                   addr1 += l;
                   access_len -= l;
               }
           }
           return;
       }
       if (is_write) {
           cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
       }
       qemu_free(bounce.buffer);
       bounce.buffer = NULL;
       cpu_notify_map_clients();
   }
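
Taken together, the two functions above form a map/use/unmap pattern: a single call may cover less than the requested length (the actual length comes back in *plen), so callers loop, and the access_len passed to the unmap call is what drives dirty marking and code invalidation for writes. A hedged usage sketch for the read direction, with consume_bytes() as an assumed placeholder consumer, not part of exec.c:

    /* Illustrative only: walk a guest-physical range through the map API. */
    static void read_guest_range(target_phys_addr_t addr, target_phys_addr_t size,
                                 void (*consume_bytes)(const void *buf,
                                                       target_phys_addr_t len))
    {
        while (size > 0) {
            target_phys_addr_t len = size;
            void *host = cpu_physical_memory_map(addr, &len, 0 /* is_write */);

            if (!host) {
                /* Bounce buffer busy or region unmappable: a real caller
                 * would register a map client (see above) or fall back to
                 * cpu_physical_memory_rw(). */
                break;
            }
            consume_bytes(host, len);
            /* is_write == 0, so unmap only releases the mapping; access_len
             * reports how much was actually read. */
            cpu_physical_memory_unmap(host, len, 0, len);
            addr += len;
            size -= len;
        }
    }
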
   
 /* warning: addr must be aligned */  /* warning: addr must be aligned */
 uint32_t ldl_phys(target_phys_addr_t addr)  uint32_t ldl_phys(target_phys_addr_t addr)
Line 2706  uint32_t ldl_phys(target_phys_addr_t add Line 3235  uint32_t ldl_phys(target_phys_addr_t add
         !(pd & IO_MEM_ROMD)) {          !(pd & IO_MEM_ROMD)) {
         /* I/O case */          /* I/O case */
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);          io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);          val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
     } else {      } else {
         /* RAM case */          /* RAM case */
Line 2736  uint64_t ldq_phys(target_phys_addr_t add Line 3267  uint64_t ldq_phys(target_phys_addr_t add
         !(pd & IO_MEM_ROMD)) {          !(pd & IO_MEM_ROMD)) {
         /* I/O case */          /* I/O case */
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);          io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
 #ifdef TARGET_WORDS_BIGENDIAN  #ifdef TARGET_WORDS_BIGENDIAN
         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;          val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);          val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
Line 2787  void stl_phys_notdirty(target_phys_addr_ Line 3320  void stl_phys_notdirty(target_phys_addr_
   
     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {      if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);          io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);          io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
     } else {      } else {
         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +          unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
             (addr & ~TARGET_PAGE_MASK);          ptr = phys_ram_base + addr1;
         stl_p(ptr, val);          stl_p(ptr, val);
   
           if (unlikely(in_migration)) {
               if (!cpu_physical_memory_is_dirty(addr1)) {
                   /* invalidate code */
                   tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                   /* set dirty bit */
                   phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                       (0xff & ~CODE_DIRTY_FLAG);
               }
           }
     }      }
 }  }
   
Line 2811  void stq_phys_notdirty(target_phys_addr_ Line 3356  void stq_phys_notdirty(target_phys_addr_
   
     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {      if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);          io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
 #ifdef TARGET_WORDS_BIGENDIAN  #ifdef TARGET_WORDS_BIGENDIAN
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);          io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);          io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
Line 2842  void stl_phys(target_phys_addr_t addr, u Line 3389  void stl_phys(target_phys_addr_t addr, u
   
     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {      if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);          io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);          io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
     } else {      } else {
         unsigned long addr1;          unsigned long addr1;
Line 2908  int cpu_memory_rw_debug(CPUState *env, t Line 3457  int cpu_memory_rw_debug(CPUState *env, t
     return 0;      return 0;
 }  }
   
   /* in deterministic execution mode, instructions doing device I/Os
      must be at the end of the TB */
   void cpu_io_recompile(CPUState *env, void *retaddr)
   {
       TranslationBlock *tb;
       uint32_t n, cflags;
       target_ulong pc, cs_base;
       uint64_t flags;
   
       tb = tb_find_pc((unsigned long)retaddr);
       if (!tb) {
           cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                     retaddr);
       }
       n = env->icount_decr.u16.low + tb->icount;
       cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
       /* Calculate how many instructions had been executed before the fault
          occurred.  */
       n = n - env->icount_decr.u16.low;
       /* Generate a new TB ending on the I/O insn.  */
       n++;
       /* On MIPS and SH, delay slot instructions can only be restarted if
          they were already the first instruction in the TB.  If this is not
          the first instruction in a TB then re-execute the preceding
          branch.  */
   #if defined(TARGET_MIPS)
       if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
           env->active_tc.PC -= 4;
           env->icount_decr.u16.low++;
           env->hflags &= ~MIPS_HFLAG_BMASK;
       }
   #elif defined(TARGET_SH4)
       if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
               && n > 1) {
           env->pc -= 2;
           env->icount_decr.u16.low++;
           env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
       }
   #endif
       /* This should never happen.  */
       if (n > CF_COUNT_MASK)
           cpu_abort(env, "TB too big during recompile");
   
       cflags = n | CF_LAST_IO;
       pc = tb->pc;
       cs_base = tb->cs_base;
       flags = tb->flags;
       tb_phys_invalidate(tb, -1);
       /* FIXME: In theory this could raise an exception.  In practice
          we have already translated the block once so it's probably ok.  */
       tb_gen_code(env, pc, cs_base, flags, cflags);
       /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
          the first in the TB) then we end up generating a whole new TB and
          repeating the fault, which is horribly inefficient.
          Better would be to execute just this insn uncached, or generate a
          second new TB.  */
       cpu_resume_from_signal(env, NULL);
   }
   
 void dump_exec_info(FILE *f,  void dump_exec_info(FILE *f,
                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))                      int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
 {  {
Line 2935  void dump_exec_info(FILE *f, Line 3543  void dump_exec_info(FILE *f,
         }          }
     }      }
     /* XXX: avoid using doubles ? */      /* XXX: avoid using doubles ? */
     cpu_fprintf(f, "TB count            %d\n", nb_tbs);      cpu_fprintf(f, "Translation buffer state:\n");
       cpu_fprintf(f, "gen code size       %ld/%ld\n",
                   code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
       cpu_fprintf(f, "TB count            %d/%d\n", 
                   nb_tbs, code_gen_max_blocks);
     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",      cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                 nb_tbs ? target_code_size / nb_tbs : 0,                  nb_tbs ? target_code_size / nb_tbs : 0,
                 max_target_code_size);                  max_target_code_size);
Line 2950  void dump_exec_info(FILE *f, Line 3562  void dump_exec_info(FILE *f,
                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,                  nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                 direct_jmp2_count,                  direct_jmp2_count,
                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);                  nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
       cpu_fprintf(f, "\nStatistics:\n");
     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);      cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);      cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);      cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
       tcg_dump_info(f, cpu_fprintf);
 }  }
   
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
