Diff for /qemu/exec.c between versions 1.1.1.1 and 1.1.1.9

version 1.1.1.1, 2018/04/24 16:37:52 version 1.1.1.9, 2018/04/24 17:06:55
Line 1 Line 1
 /*  /*
  *  virtual page mapping and translated block handling   *  virtual page mapping and translated block handling
  *    *
  *  Copyright (c) 2003 Fabrice Bellard   *  Copyright (c) 2003 Fabrice Bellard
  *   *
  * This library is free software; you can redistribute it and/or   * This library is free software; you can redistribute it and/or
Line 15 Line 15
  *   *
  * You should have received a copy of the GNU Lesser General Public   * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software   * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
  */   */
 #include "config.h"  #include "config.h"
 #ifdef _WIN32  #ifdef _WIN32
   #define WIN32_LEAN_AND_MEAN
 #include <windows.h>  #include <windows.h>
 #else  #else
 #include <sys/types.h>  #include <sys/types.h>
Line 34 Line 35
   
 #include "cpu.h"  #include "cpu.h"
 #include "exec-all.h"  #include "exec-all.h"
   #include "qemu-common.h"
   #include "tcg.h"
   #include "hw/hw.h"
   #include "osdep.h"
   #include "kvm.h"
   #if defined(CONFIG_USER_ONLY)
   #include <qemu.h>
   #endif
   
 //#define DEBUG_TB_INVALIDATE  //#define DEBUG_TB_INVALIDATE
 //#define DEBUG_FLUSH  //#define DEBUG_FLUSH
 //#define DEBUG_TLB  //#define DEBUG_TLB
   //#define DEBUG_UNASSIGNED
   
 /* make various TB consistency checks */  /* make various TB consistency checks */
 //#define DEBUG_TB_CHECK   //#define DEBUG_TB_CHECK
 //#define DEBUG_TLB_CHECK   //#define DEBUG_TLB_CHECK
   
 /* threshold to flush the translated code buffer */  //#define DEBUG_IOPORT
 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)  //#define DEBUG_SUBPAGE
   
   #if !defined(CONFIG_USER_ONLY)
   /* TB consistency checks only implemented for usermode emulation.  */
   #undef DEBUG_TB_CHECK
   #endif
   
 #define SMC_BITMAP_USE_THRESHOLD 10  #define SMC_BITMAP_USE_THRESHOLD 10
   
Line 53 Line 68
   
 #if defined(TARGET_SPARC64)  #if defined(TARGET_SPARC64)
 #define TARGET_PHYS_ADDR_SPACE_BITS 41  #define TARGET_PHYS_ADDR_SPACE_BITS 41
   #elif defined(TARGET_SPARC)
   #define TARGET_PHYS_ADDR_SPACE_BITS 36
   #elif defined(TARGET_ALPHA)
   #define TARGET_PHYS_ADDR_SPACE_BITS 42
   #define TARGET_VIRT_ADDR_SPACE_BITS 42
 #elif defined(TARGET_PPC64)  #elif defined(TARGET_PPC64)
 #define TARGET_PHYS_ADDR_SPACE_BITS 42  #define TARGET_PHYS_ADDR_SPACE_BITS 42
   #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
   #define TARGET_PHYS_ADDR_SPACE_BITS 42
   #elif defined(TARGET_I386) && !defined(USE_KQEMU)
   #define TARGET_PHYS_ADDR_SPACE_BITS 36
 #else  #else
 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */  /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
 #define TARGET_PHYS_ADDR_SPACE_BITS 32  #define TARGET_PHYS_ADDR_SPACE_BITS 32
 #endif  #endif
   
 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];  static TranslationBlock *tbs;
 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];  int code_gen_max_blocks;
 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];  TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 int nb_tbs;  static int nb_tbs;
 /* any access to the tbs or the page table must use this lock */  /* any access to the tbs or the page table must use this lock */
 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;  spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
   
 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));  #if defined(__arm__) || defined(__sparc_v9__)
   /* The prologue must be reachable with a direct jump. ARM and Sparc64
    have limited branch ranges (possibly also PPC) so place it in a
    section close to code segment. */
   #define code_gen_section                                \
       __attribute__((__section__(".gen_code")))           \
       __attribute__((aligned (32)))
   #else
   #define code_gen_section                                \
       __attribute__((aligned (32)))
   #endif
   
   uint8_t code_gen_prologue[1024] code_gen_section;
   static uint8_t *code_gen_buffer;
   static unsigned long code_gen_buffer_size;
   /* threshold to flush the translated code buffer */
   static unsigned long code_gen_buffer_max_size;
 uint8_t *code_gen_ptr;  uint8_t *code_gen_ptr;
   
 int phys_ram_size;  #if !defined(CONFIG_USER_ONLY)
   ram_addr_t phys_ram_size;
 int phys_ram_fd;  int phys_ram_fd;
 uint8_t *phys_ram_base;  uint8_t *phys_ram_base;
 uint8_t *phys_ram_dirty;  uint8_t *phys_ram_dirty;
   static int in_migration;
   static ram_addr_t phys_ram_alloc_offset = 0;
   #endif
   
   CPUState *first_cpu;
   /* current CPU in the current thread. It is only valid inside
      cpu_exec() */
   CPUState *cpu_single_env;
   /* 0 = Do not count executed instructions.
      1 = Precise instruction counting.
      2 = Adaptive rate instruction counting.  */
   int use_icount = 0;
   /* Current instruction counter.  While executing translated code this may
      include some instructions that have not yet been executed.  */
   int64_t qemu_icount;
   
 typedef struct PageDesc {  typedef struct PageDesc {
     /* list of TBs intersecting this ram page */      /* list of TBs intersecting this ram page */
Line 88  typedef struct PageDesc { Line 144  typedef struct PageDesc {
 } PageDesc;  } PageDesc;
   
 typedef struct PhysPageDesc {  typedef struct PhysPageDesc {
     /* offset in host memory of the page + io_index in the low 12 bits */      /* offset in host memory of the page + io_index in the low bits */
     uint32_t phys_offset;      ram_addr_t phys_offset;
       ram_addr_t region_offset;
 } PhysPageDesc;  } PhysPageDesc;
   
 /* Note: the VirtPage handling is absolete and will be suppressed  
    ASAP */  
 typedef struct VirtPageDesc {  
     /* physical address of code page. It is valid only if 'valid_tag'  
        matches 'virt_valid_tag' */   
     target_ulong phys_addr;   
     unsigned int valid_tag;  
 #if !defined(CONFIG_SOFTMMU)  
     /* original page access rights. It is valid only if 'valid_tag'  
        matches 'virt_valid_tag' */  
     unsigned int prot;  
 #endif  
 } VirtPageDesc;  
   
 #define L2_BITS 10  #define L2_BITS 10
   #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
   /* XXX: this is a temporary hack for alpha target.
    *      In the future, this is to be replaced by a multi-level table
    *      to actually be able to handle the complete 64 bits address space.
    */
   #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
   #else
 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)  #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
   #endif
   
 #define L1_SIZE (1 << L1_BITS)  #define L1_SIZE (1 << L1_BITS)
 #define L2_SIZE (1 << L2_BITS)  #define L2_SIZE (1 << L2_BITS)
   
 static void io_mem_init(void);  
   
 unsigned long qemu_real_host_page_size;  unsigned long qemu_real_host_page_size;
 unsigned long qemu_host_page_bits;  unsigned long qemu_host_page_bits;
 unsigned long qemu_host_page_size;  unsigned long qemu_host_page_size;
Line 121  unsigned long qemu_host_page_mask; Line 170  unsigned long qemu_host_page_mask;
   
 /* XXX: for system emulation, it could just be an array */  /* XXX: for system emulation, it could just be an array */
 static PageDesc *l1_map[L1_SIZE];  static PageDesc *l1_map[L1_SIZE];
 PhysPageDesc **l1_phys_map;  static PhysPageDesc **l1_phys_map;
   
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
 #if TARGET_LONG_BITS > 32  static void io_mem_init(void);
 #define VIRT_L_BITS 9  
 #define VIRT_L_SIZE (1 << VIRT_L_BITS)  
 static void *l1_virt_map[VIRT_L_SIZE];  
 #else  
 static VirtPageDesc *l1_virt_map[L1_SIZE];  
 #endif  
 static unsigned int virt_valid_tag;  
 #endif  
   
 /* io memory support */  /* io memory support */
 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];  CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];  CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
 void *io_mem_opaque[IO_MEM_NB_ENTRIES];  void *io_mem_opaque[IO_MEM_NB_ENTRIES];
 static int io_mem_nb;  char io_mem_used[IO_MEM_NB_ENTRIES];
   static int io_mem_watch;
   #endif
   
 /* log support */  /* log support */
 char *logfilename = "/tmp/qemu.log";  static const char *logfilename = "/tmp/qemu.log";
 FILE *logfile;  FILE *logfile;
 int loglevel;  int loglevel;
   static int log_append = 0;
   
 /* statistics */  /* statistics */
 static int tlb_flush_count;  static int tlb_flush_count;
 static int tb_flush_count;  static int tb_flush_count;
 static int tb_phys_invalidate_count;  static int tb_phys_invalidate_count;
   
   #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
   typedef struct subpage_t {
       target_phys_addr_t base;
       CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
       CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
       void *opaque[TARGET_PAGE_SIZE][2][4];
       ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
   } subpage_t;
   
   #ifdef _WIN32
   static void map_exec(void *addr, long size)
   {
       DWORD old_protect;
       VirtualProtect(addr, size,
                      PAGE_EXECUTE_READWRITE, &old_protect);
       
   }
   #else
   static void map_exec(void *addr, long size)
   {
       unsigned long start, end, page_size;
       
       page_size = getpagesize();
       start = (unsigned long)addr;
       start &= ~(page_size - 1);
       
       end = (unsigned long)addr + size;
       end += page_size - 1;
       end &= ~(page_size - 1);
       
       mprotect((void *)start, end - start,
                PROT_READ | PROT_WRITE | PROT_EXEC);
   }
   #endif
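
The new map_exec() helper factors out the page-protection work that page_init() used to do inline on the static code_gen_buffer (see the removed VirtualProtect/mprotect lines further down). Below is a minimal, hedged POSIX sketch of why the translator needs it: round the range out to host page boundaries, make it RWX, then run one byte of "generated" code. The buffer name and the x86-only 'ret' byte are demo assumptions, not part of exec.c.

/* Build: gcc demo.c && ./a.out  (x86/x86-64 Linux hosts) */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void demo_map_exec(void *addr, long size)
{
    unsigned long start, end, page_size = getpagesize();

    start = (unsigned long)addr & ~(page_size - 1);
    end = ((unsigned long)addr + size + page_size - 1) & ~(page_size - 1);
    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}

static unsigned char buf[64] __attribute__((aligned(32))); /* stand-in for code_gen_buffer */

int main(void)
{
    demo_map_exec(buf, sizeof(buf));
#if defined(__i386__) || defined(__x86_64__)
    buf[0] = 0xc3;                /* x86 'ret': the smallest possible block */
    ((void (*)(void))buf)();      /* would fault without the mprotect above */
#endif
    printf("code buffer at %p is executable\n", (void *)buf);
    return 0;
}
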
   
 static void page_init(void)  static void page_init(void)
 {  {
     /* NOTE: we can always suppose that qemu_host_page_size >=      /* NOTE: we can always suppose that qemu_host_page_size >=
Line 157  static void page_init(void) Line 236  static void page_init(void)
 #ifdef _WIN32  #ifdef _WIN32
     {      {
         SYSTEM_INFO system_info;          SYSTEM_INFO system_info;
         DWORD old_protect;  
           
         GetSystemInfo(&system_info);          GetSystemInfo(&system_info);
         qemu_real_host_page_size = system_info.dwPageSize;          qemu_real_host_page_size = system_info.dwPageSize;
           
         VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),  
                        PAGE_EXECUTE_READWRITE, &old_protect);  
     }      }
 #else  #else
     qemu_real_host_page_size = getpagesize();      qemu_real_host_page_size = getpagesize();
     {  
         unsigned long start, end;  
   
         start = (unsigned long)code_gen_buffer;  
         start &= ~(qemu_real_host_page_size - 1);  
           
         end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);  
         end += qemu_real_host_page_size - 1;  
         end &= ~(qemu_real_host_page_size - 1);  
           
         mprotect((void *)start, end - start,   
                  PROT_READ | PROT_WRITE | PROT_EXEC);  
     }  
 #endif  #endif
   
     if (qemu_host_page_size == 0)      if (qemu_host_page_size == 0)
         qemu_host_page_size = qemu_real_host_page_size;          qemu_host_page_size = qemu_real_host_page_size;
     if (qemu_host_page_size < TARGET_PAGE_SIZE)      if (qemu_host_page_size < TARGET_PAGE_SIZE)
Line 190  static void page_init(void) Line 251  static void page_init(void)
     while ((1 << qemu_host_page_bits) < qemu_host_page_size)      while ((1 << qemu_host_page_bits) < qemu_host_page_size)
         qemu_host_page_bits++;          qemu_host_page_bits++;
     qemu_host_page_mask = ~(qemu_host_page_size - 1);      qemu_host_page_mask = ~(qemu_host_page_size - 1);
 #if !defined(CONFIG_USER_ONLY)  
     virt_valid_tag = 1;  
 #endif  
     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));      l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));      memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
   
   #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
       {
           long long startaddr, endaddr;
           FILE *f;
           int n;
   
           mmap_lock();
           last_brk = (unsigned long)sbrk(0);
           f = fopen("/proc/self/maps", "r");
           if (f) {
               do {
                   n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                   if (n == 2) {
                       startaddr = MIN(startaddr,
                                       (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                       endaddr = MIN(endaddr,
                                       (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                       page_set_flags(startaddr & TARGET_PAGE_MASK,
                                      TARGET_PAGE_ALIGN(endaddr),
                                      PAGE_RESERVED); 
                   }
               } while (!feof(f));
               fclose(f);
           }
           mmap_unlock();
       }
   #endif
 }  }
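
For user-mode emulation, page_init() now scans /proc/self/maps and flags every existing host mapping as PAGE_RESERVED so the guest is never handed an address the host libc already occupies. A reduced, Linux-only sketch of the same scan follows, printing the ranges instead of calling page_set_flags() and omitting the clamp to TARGET_PHYS_ADDR_SPACE_BITS.

#include <stdio.h>

int main(void)
{
    long long startaddr, endaddr;
    FILE *f = fopen("/proc/self/maps", "r");
    int n;

    if (!f)
        return 1;
    do {
        /* same pattern as above: two hex addresses, then skip the rest of the line */
        n = fscanf(f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
        if (n == 2)
            printf("host mapping 0x%llx-0x%llx would be marked PAGE_RESERVED\n",
                   startaddr, endaddr);
    } while (!feof(f));
    fclose(f);
    return 0;
}
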
   
 static inline PageDesc *page_find_alloc(unsigned int index)  static inline PageDesc **page_l1_map(target_ulong index)
   {
   #if TARGET_LONG_BITS > 32
       /* Host memory outside guest VM.  For 32-bit targets we have already
          excluded high addresses.  */
       if (index > ((target_ulong)L2_SIZE * L1_SIZE))
           return NULL;
   #endif
       return &l1_map[index >> L2_BITS];
   }
   
   static inline PageDesc *page_find_alloc(target_ulong index)
 {  {
     PageDesc **lp, *p;      PageDesc **lp, *p;
       lp = page_l1_map(index);
       if (!lp)
           return NULL;
   
     lp = &l1_map[index >> L2_BITS];  
     p = *lp;      p = *lp;
     if (!p) {      if (!p) {
         /* allocate if not found */          /* allocate if not found */
         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);  #if defined(CONFIG_USER_ONLY)
         memset(p, 0, sizeof(PageDesc) * L2_SIZE);          size_t len = sizeof(PageDesc) * L2_SIZE;
           /* Don't use qemu_malloc because it may recurse.  */
           p = mmap(0, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         *lp = p;          *lp = p;
           if (h2g_valid(p)) {
               unsigned long addr = h2g(p);
               page_set_flags(addr & TARGET_PAGE_MASK,
                              TARGET_PAGE_ALIGN(addr + len),
                              PAGE_RESERVED); 
           }
   #else
           p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
           *lp = p;
   #endif
     }      }
     return p + (index & (L2_SIZE - 1));      return p + (index & (L2_SIZE - 1));
 }  }
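
page_find_alloc() and page_find() still implement the classic two-level lookup: the upper bits of the target page index select an l1_map slot, the low L2_BITS select the PageDesc inside an L2 array allocated lazily; the new page_l1_map() helper only adds the out-of-range check for 64-bit guests. A minimal sketch of that split, with assumed sizes and PageDesc reduced to one field:

#include <stdio.h>
#include <stdlib.h>

#define TARGET_PAGE_BITS 12              /* assumed 4 KiB target pages */
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

typedef struct { int flags; } DemoPageDesc;   /* stand-in for PageDesc */

static DemoPageDesc *l1_map_demo[L1_SIZE];

static DemoPageDesc *demo_page_find_alloc(unsigned long index)
{
    DemoPageDesc **lp = &l1_map_demo[index >> L2_BITS];

    if (!*lp)                                 /* second level allocated lazily */
        *lp = calloc(L2_SIZE, sizeof(DemoPageDesc));
    return *lp + (index & (L2_SIZE - 1));
}

int main(void)
{
    DemoPageDesc *p = demo_page_find_alloc(0x12345);

    p->flags = 1;
    printf("page index 0x12345 -> l1[%lu], l2[%lu]\n",
           0x12345UL >> L2_BITS, 0x12345UL & (L2_SIZE - 1));
    return 0;
}
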
   
 static inline PageDesc *page_find(unsigned int index)  static inline PageDesc *page_find(target_ulong index)
 {  {
     PageDesc *p;      PageDesc **lp, *p;
       lp = page_l1_map(index);
       if (!lp)
           return NULL;
   
     p = l1_map[index >> L2_BITS];      p = *lp;
     if (!p)      if (!p)
         return 0;          return 0;
     return p + (index & (L2_SIZE - 1));      return p + (index & (L2_SIZE - 1));
Line 225  static inline PageDesc *page_find(unsign Line 340  static inline PageDesc *page_find(unsign
 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)  static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
 {  {
     void **lp, **p;      void **lp, **p;
       PhysPageDesc *pd;
   
     p = (void **)l1_phys_map;      p = (void **)l1_phys_map;
 #if TARGET_PHYS_ADDR_SPACE_BITS > 32  #if TARGET_PHYS_ADDR_SPACE_BITS > 32
Line 244  static PhysPageDesc *phys_page_find_allo Line 360  static PhysPageDesc *phys_page_find_allo
     }      }
 #endif  #endif
     lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));      lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
     p = *lp;      pd = *lp;
     if (!p) {      if (!pd) {
           int i;
         /* allocate if not found */          /* allocate if not found */
         if (!alloc)          if (!alloc)
             return NULL;              return NULL;
         p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);          pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
         memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);          *lp = pd;
         *lp = p;          for (i = 0; i < L2_SIZE; i++) {
             pd[i].phys_offset = IO_MEM_UNASSIGNED;
             pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
           }
     }      }
     return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));      return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
 }  }
   
 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)  static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
Line 262  static inline PhysPageDesc *phys_page_fi Line 382  static inline PhysPageDesc *phys_page_fi
 }  }
   
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
 static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,   static void tlb_protect_code(ram_addr_t ram_addr);
                              target_ulong vaddr);  static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,   
                                     target_ulong vaddr);                                      target_ulong vaddr);
   #define mmap_lock() do { } while(0)
   #define mmap_unlock() do { } while(0)
   #endif
   
 static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)  #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
 {  
 #if TARGET_LONG_BITS > 32  
     void **p, **lp;  
   
     p = l1_virt_map;  #if defined(CONFIG_USER_ONLY)
     lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));  /* Currently it is not recommanded to allocate big chunks of data in
     p = *lp;     user mode. It will change when a dedicated libc will be used */
     if (!p) {  #define USE_STATIC_CODE_GEN_BUFFER
         if (!alloc)  #endif
             return NULL;  
         p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);  
         *lp = p;  
     }  
     lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));  
     p = *lp;  
     if (!p) {  
         if (!alloc)  
             return NULL;  
         p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);  
         *lp = p;  
     }  
     lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));  
     p = *lp;  
     if (!p) {  
         if (!alloc)  
             return NULL;  
         p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);  
         *lp = p;  
     }  
     lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));  
     p = *lp;  
     if (!p) {  
         if (!alloc)  
             return NULL;  
         p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);  
         *lp = p;  
     }  
     lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));  
     p = *lp;  
     if (!p) {  
         if (!alloc)  
             return NULL;  
         p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);  
         *lp = p;  
     }  
     return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));  
 #else  
     VirtPageDesc *p, **lp;  
   
     lp = &l1_virt_map[index >> L2_BITS];  #ifdef USE_STATIC_CODE_GEN_BUFFER
     p = *lp;  static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
     if (!p) {  
         /* allocate if not found */  
         if (!alloc)  
             return NULL;  
         p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);  
         *lp = p;  
     }  
     return p + (index & (L2_SIZE - 1));  
 #endif  #endif
 }  
   
 static inline VirtPageDesc *virt_page_find(target_ulong index)  static void code_gen_alloc(unsigned long tb_size)
 {  {
     return virt_page_find_alloc(index, 0);  #ifdef USE_STATIC_CODE_GEN_BUFFER
 }      code_gen_buffer = static_code_gen_buffer;
       code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
       map_exec(code_gen_buffer, code_gen_buffer_size);
   #else
       code_gen_buffer_size = tb_size;
       if (code_gen_buffer_size == 0) {
   #if defined(CONFIG_USER_ONLY)
           /* in user mode, phys_ram_size is not meaningful */
           code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
   #else
           /* XXX: needs ajustments */
           code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
   #endif
       }
       if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
           code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
       /* The code gen buffer location may have constraints depending on
          the host cpu and OS */
   #if defined(__linux__) 
       {
           int flags;
           void *start = NULL;
   
 #if TARGET_LONG_BITS > 32          flags = MAP_PRIVATE | MAP_ANONYMOUS;
 static void virt_page_flush_internal(void **p, int level)  #if defined(__x86_64__)
 {          flags |= MAP_32BIT;
     int i;           /* Cannot map more than that */
     if (level == 0) {          if (code_gen_buffer_size > (800 * 1024 * 1024))
         VirtPageDesc *q = (VirtPageDesc *)p;              code_gen_buffer_size = (800 * 1024 * 1024);
         for(i = 0; i < VIRT_L_SIZE; i++)  #elif defined(__sparc_v9__)
             q[i].valid_tag = 0;          // Map the buffer below 2G, so we can use direct calls and branches
     } else {          flags |= MAP_FIXED;
         level--;          start = (void *) 0x60000000UL;
         for(i = 0; i < VIRT_L_SIZE; i++) {          if (code_gen_buffer_size > (512 * 1024 * 1024))
             if (p[i])              code_gen_buffer_size = (512 * 1024 * 1024);
                 virt_page_flush_internal(p[i], level);  #elif defined(__arm__)
           /* Map the buffer below 32M, so we can use direct calls and branches */
           flags |= MAP_FIXED;
           start = (void *) 0x01000000UL;
           if (code_gen_buffer_size > 16 * 1024 * 1024)
               code_gen_buffer_size = 16 * 1024 * 1024;
   #endif
           code_gen_buffer = mmap(start, code_gen_buffer_size,
                                  PROT_WRITE | PROT_READ | PROT_EXEC,
                                  flags, -1, 0);
           if (code_gen_buffer == MAP_FAILED) {
               fprintf(stderr, "Could not allocate dynamic translator buffer\n");
               exit(1);
         }          }
     }      }
 }  #elif defined(__FreeBSD__)
       {
           int flags;
           void *addr = NULL;
           flags = MAP_PRIVATE | MAP_ANONYMOUS;
   #if defined(__x86_64__)
           /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
            * 0x40000000 is free */
           flags |= MAP_FIXED;
           addr = (void *)0x40000000;
           /* Cannot map more than that */
           if (code_gen_buffer_size > (800 * 1024 * 1024))
               code_gen_buffer_size = (800 * 1024 * 1024);
   #endif
           code_gen_buffer = mmap(addr, code_gen_buffer_size,
                                  PROT_WRITE | PROT_READ | PROT_EXEC, 
                                  flags, -1, 0);
           if (code_gen_buffer == MAP_FAILED) {
               fprintf(stderr, "Could not allocate dynamic translator buffer\n");
               exit(1);
           }
       }
   #else
       code_gen_buffer = qemu_malloc(code_gen_buffer_size);
       map_exec(code_gen_buffer, code_gen_buffer_size);
 #endif  #endif
   #endif /* !USE_STATIC_CODE_GEN_BUFFER */
       map_exec(code_gen_prologue, sizeof(code_gen_prologue));
       code_gen_buffer_max_size = code_gen_buffer_size - 
           code_gen_max_block_size();
       code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
       tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
   }
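
With the statically sized code_gen_buffer gone, code_gen_alloc() derives the flush threshold and the size of the tbs[] array from whatever buffer it actually obtained. The arithmetic in isolation, with illustrative numbers (the average and maximum block sizes below are assumptions, not the real per-target constants):

#include <stdio.h>

int main(void)
{
    unsigned long buffer_size = 32 * 1024 * 1024; /* DEFAULT_CODE_GEN_BUFFER_SIZE */
    unsigned long max_block   = 64 * 1024;        /* assumed code_gen_max_block_size() */
    unsigned long avg_block   = 128;              /* assumed CODE_GEN_AVG_BLOCK_SIZE */

    /* flush threshold: keep room for one worst-case block at the end */
    unsigned long buffer_max_size = buffer_size - max_block;
    /* number of TranslationBlock descriptors allocated up front */
    unsigned long max_blocks = buffer_size / avg_block;

    printf("flush once code_gen_ptr passes %lu bytes\n", buffer_max_size);
    printf("tbs[] gets %lu entries\n", max_blocks);
    return 0;
}

cpu_exec_init_all() below then runs cpu_gen_init(), code_gen_alloc(), page_init() and, for system emulation, io_mem_init() exactly once at start-up, replacing the lazy first-call initialisation the old cpu_exec_init() performed.
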
   
   /* Must be called before using the QEMU cpus. 'tb_size' is the size
      (in bytes) allocated to the translation buffer. Zero means default
      size. */
   void cpu_exec_init_all(unsigned long tb_size)
   {
       cpu_gen_init();
       code_gen_alloc(tb_size);
       code_gen_ptr = code_gen_buffer;
       page_init();
   #if !defined(CONFIG_USER_ONLY)
       io_mem_init();
   #endif
   }
   
   #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
   
 static void virt_page_flush(void)  #define CPU_COMMON_SAVE_VERSION 1
   
   static void cpu_common_save(QEMUFile *f, void *opaque)
 {  {
     virt_valid_tag++;      CPUState *env = opaque;
   
     if (virt_valid_tag == 0) {      qemu_put_be32s(f, &env->halted);
         virt_valid_tag = 1;      qemu_put_be32s(f, &env->interrupt_request);
 #if TARGET_LONG_BITS > 32  
         virt_page_flush_internal(l1_virt_map, 5);  
 #else  
         {  
             int i, j;  
             VirtPageDesc *p;  
             for(i = 0; i < L1_SIZE; i++) {  
                 p = l1_virt_map[i];  
                 if (p) {  
                     for(j = 0; j < L2_SIZE; j++)  
                         p[j].valid_tag = 0;  
                 }  
             }  
         }  
 #endif  
     }  
 }  }
 #else  
 static void virt_page_flush(void)  static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
 {  {
       CPUState *env = opaque;
   
       if (version_id != CPU_COMMON_SAVE_VERSION)
           return -EINVAL;
   
       qemu_get_be32s(f, &env->halted);
       qemu_get_be32s(f, &env->interrupt_request);
       env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
       tlb_flush(env, 1);
   
       return 0;
 }  }
 #endif  #endif
   
 void cpu_exec_init(void)  void cpu_exec_init(CPUState *env)
 {  {
     if (!code_gen_ptr) {      CPUState **penv;
         code_gen_ptr = code_gen_buffer;      int cpu_index;
         page_init();  
         io_mem_init();      env->next_cpu = NULL;
     }      penv = &first_cpu;
       cpu_index = 0;
       while (*penv != NULL) {
           penv = (CPUState **)&(*penv)->next_cpu;
           cpu_index++;
       }
       env->cpu_index = cpu_index;
       TAILQ_INIT(&env->breakpoints);
       TAILQ_INIT(&env->watchpoints);
       *penv = env;
   #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
       register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                       cpu_common_save, cpu_common_load, env);
       register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                       cpu_save, cpu_load, env);
   #endif
 }  }
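
cpu_exec_init() no longer does global setup; it appends the new CPUState to the first_cpu singly-linked list and numbers it, and that cpu_index doubles as the savevm instance id. A stripped-down sketch of the append-and-number pattern, with DemoCPU as a stand-in struct:

#include <stdio.h>
#include <stdlib.h>

typedef struct DemoCPU {
    int cpu_index;
    struct DemoCPU *next_cpu;
} DemoCPU;                                  /* stand-in for CPUState */

static DemoCPU *first_cpu;

static void demo_cpu_exec_init(DemoCPU *env)
{
    DemoCPU **penv = &first_cpu;
    int cpu_index = 0;

    env->next_cpu = NULL;
    while (*penv != NULL) {                 /* walk to the tail, counting */
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;             /* also used as the savevm instance id */
    *penv = env;                            /* append at the tail */
}

int main(void)
{
    int i;
    DemoCPU *env;

    for (i = 0; i < 3; i++)
        demo_cpu_exec_init(calloc(1, sizeof(DemoCPU)));
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        printf("cpu %d\n", env->cpu_index);
    return 0;
}
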
   
 static inline void invalidate_page_bitmap(PageDesc *p)  static inline void invalidate_page_bitmap(PageDesc *p)
Line 420  static void page_flush_tb(void) Line 583  static void page_flush_tb(void)
   
 /* flush all the translation blocks */  /* flush all the translation blocks */
 /* XXX: tb_flush is currently not thread safe */  /* XXX: tb_flush is currently not thread safe */
 void tb_flush(CPUState *env)  void tb_flush(CPUState *env1)
 {  {
       CPUState *env;
 #if defined(DEBUG_FLUSH)  #if defined(DEBUG_FLUSH)
     printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",       printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
            code_gen_ptr - code_gen_buffer,              (unsigned long)(code_gen_ptr - code_gen_buffer),
            nb_tbs,              nb_tbs, nb_tbs > 0 ?
            nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);             ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
 #endif  #endif
       if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
           cpu_abort(env1, "Internal error: code buffer overflow\n");
   
     nb_tbs = 0;      nb_tbs = 0;
     memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));  
     virt_page_flush();      for(env = first_cpu; env != NULL; env = env->next_cpu) {
           memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
       }
   
     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));      memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
     page_flush_tb();      page_flush_tb();
Line 443  void tb_flush(CPUState *env) Line 612  void tb_flush(CPUState *env)
   
 #ifdef DEBUG_TB_CHECK  #ifdef DEBUG_TB_CHECK
   
 static void tb_invalidate_check(unsigned long address)  static void tb_invalidate_check(target_ulong address)
 {  {
     TranslationBlock *tb;      TranslationBlock *tb;
     int i;      int i;
     address &= TARGET_PAGE_MASK;      address &= TARGET_PAGE_MASK;
     for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {      for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
         for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {          for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||              if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                   address >= tb->pc + tb->size)) {                    address >= tb->pc + tb->size)) {
                 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",                  printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                        address, tb->pc, tb->size);                         address, (long)tb->pc, tb->size);
             }              }
         }          }
     }      }
Line 464  static void tb_page_check(void) Line 633  static void tb_page_check(void)
 {  {
     TranslationBlock *tb;      TranslationBlock *tb;
     int i, flags1, flags2;      int i, flags1, flags2;
       
     for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {      for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
         for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {          for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
             flags1 = page_get_flags(tb->pc);              flags1 = page_get_flags(tb->pc);
             flags2 = page_get_flags(tb->pc + tb->size - 1);              flags2 = page_get_flags(tb->pc + tb->size - 1);
             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {              if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",                  printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                        tb->pc, tb->size, flags1, flags2);                         (long)tb->pc, tb->size, flags1, flags2);
             }              }
         }          }
     }      }
 }  }
   
 void tb_jmp_check(TranslationBlock *tb)  static void tb_jmp_check(TranslationBlock *tb)
 {  {
     TranslationBlock *tb1;      TranslationBlock *tb1;
     unsigned int n1;      unsigned int n1;
Line 566  static inline void tb_reset_jump(Transla Line 735  static inline void tb_reset_jump(Transla
     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));      tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
 }  }
   
 static inline void tb_invalidate(TranslationBlock *tb)  void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
 {  {
       CPUState *env;
       PageDesc *p;
     unsigned int h, n1;      unsigned int h, n1;
     TranslationBlock *tb1, *tb2, **ptb;      target_phys_addr_t phys_pc;
           TranslationBlock *tb1, *tb2;
   
       /* remove the TB from the hash list */
       phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
       h = tb_phys_hash_func(phys_pc);
       tb_remove(&tb_phys_hash[h], tb,
                 offsetof(TranslationBlock, phys_hash_next));
   
       /* remove the TB from the page list */
       if (tb->page_addr[0] != page_addr) {
           p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
           tb_page_remove(&p->first_tb, tb);
           invalidate_page_bitmap(p);
       }
       if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
           p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
           tb_page_remove(&p->first_tb, tb);
           invalidate_page_bitmap(p);
       }
   
     tb_invalidated_flag = 1;      tb_invalidated_flag = 1;
   
     /* remove the TB from the hash list */      /* remove the TB from the hash list */
     h = tb_hash_func(tb->pc);      h = tb_jmp_cache_hash_func(tb->pc);
     ptb = &tb_hash[h];      for(env = first_cpu; env != NULL; env = env->next_cpu) {
     for(;;) {          if (env->tb_jmp_cache[h] == tb)
         tb1 = *ptb;              env->tb_jmp_cache[h] = NULL;
         /* NOTE: the TB is not necessarily linked in the hash. It  
            indicates that it is not currently used */  
         if (tb1 == NULL)  
             return;  
         if (tb1 == tb) {  
             *ptb = tb1->hash_next;  
             break;  
         }  
         ptb = &tb1->hash_next;  
     }      }
   
     /* suppress this TB from the two jump lists */      /* suppress this TB from the two jump lists */
Line 606  static inline void tb_invalidate(Transla Line 787  static inline void tb_invalidate(Transla
         tb1 = tb2;          tb1 = tb2;
     }      }
     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */      tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
 }  
   
 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)  
 {  
     PageDesc *p;  
     unsigned int h;  
     target_ulong phys_pc;  
       
     /* remove the TB from the hash list */  
     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);  
     h = tb_phys_hash_func(phys_pc);  
     tb_remove(&tb_phys_hash[h], tb,   
               offsetof(TranslationBlock, phys_hash_next));  
   
     /* remove the TB from the page list */  
     if (tb->page_addr[0] != page_addr) {  
         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);  
         tb_page_remove(&p->first_tb, tb);  
         invalidate_page_bitmap(p);  
     }  
     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {  
         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);  
         tb_page_remove(&p->first_tb, tb);  
         invalidate_page_bitmap(p);  
     }  
   
     tb_invalidate(tb);  
     tb_phys_invalidate_count++;      tb_phys_invalidate_count++;
 }  }
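
Where the old tb_invalidate() unlinked the TB from the global tb_hash chain, tb_phys_invalidate() now walks every CPU and clears its direct-mapped tb_jmp_cache slot if it still points at the dying TB. A toy version of that invalidation, using a single cache and a placeholder hash rather than QEMU's tb_jmp_cache_hash_func:

#include <stddef.h>
#include <stdio.h>

#define TB_JMP_CACHE_SIZE 4096              /* illustrative size */

typedef struct DemoTB { unsigned long pc; } DemoTB;

static DemoTB *tb_jmp_cache_demo[TB_JMP_CACHE_SIZE];

/* placeholder hash: the real tb_jmp_cache_hash_func mixes page and
   in-page bits of the PC */
static unsigned int demo_hash(unsigned long pc)
{
    return (pc >> 2) & (TB_JMP_CACHE_SIZE - 1);
}

static void demo_invalidate(DemoTB *tb)
{
    unsigned int h = demo_hash(tb->pc);

    if (tb_jmp_cache_demo[h] == tb)         /* only this one slot can hold it */
        tb_jmp_cache_demo[h] = NULL;
}

int main(void)
{
    DemoTB tb = { 0x400123 };

    tb_jmp_cache_demo[demo_hash(tb.pc)] = &tb;   /* filled by the fast lookup path */
    demo_invalidate(&tb);
    printf("slot now %s\n", tb_jmp_cache_demo[demo_hash(tb.pc)] ? "stale" : "empty");
    return 0;
}
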
   
Line 667  static void build_page_bitmap(PageDesc * Line 822  static void build_page_bitmap(PageDesc *
 {  {
     int n, tb_start, tb_end;      int n, tb_start, tb_end;
     TranslationBlock *tb;      TranslationBlock *tb;
       
     p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);      p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
     if (!p->code_bitmap)  
         return;  
     memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);  
   
     tb = p->first_tb;      tb = p->first_tb;
     while (tb != NULL) {      while (tb != NULL) {
Line 694  static void build_page_bitmap(PageDesc * Line 846  static void build_page_bitmap(PageDesc *
     }      }
 }  }
   
 #ifdef TARGET_HAS_PRECISE_SMC  TranslationBlock *tb_gen_code(CPUState *env,
                                 target_ulong pc, target_ulong cs_base,
 static void tb_gen_code(CPUState *env,                                 int flags, int cflags)
                         target_ulong pc, target_ulong cs_base, int flags,  
                         int cflags)  
 {  {
     TranslationBlock *tb;      TranslationBlock *tb;
     uint8_t *tc_ptr;      uint8_t *tc_ptr;
Line 712  static void tb_gen_code(CPUState *env,  Line 862  static void tb_gen_code(CPUState *env, 
         tb_flush(env);          tb_flush(env);
         /* cannot fail at this point */          /* cannot fail at this point */
         tb = tb_alloc(pc);          tb = tb_alloc(pc);
           /* Don't forget to invalidate previous TB info.  */
           tb_invalidated_flag = 1;
     }      }
     tc_ptr = code_gen_ptr;      tc_ptr = code_gen_ptr;
     tb->tc_ptr = tc_ptr;      tb->tc_ptr = tc_ptr;
     tb->cs_base = cs_base;      tb->cs_base = cs_base;
     tb->flags = flags;      tb->flags = flags;
     tb->cflags = cflags;      tb->cflags = cflags;
     cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);      cpu_gen_code(env, tb, &code_gen_size);
     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));      code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
       
     /* check next page if needed */      /* check next page if needed */
     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;      virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
     phys_page2 = -1;      phys_page2 = -1;
Line 728  static void tb_gen_code(CPUState *env,  Line 880  static void tb_gen_code(CPUState *env, 
         phys_page2 = get_phys_addr_code(env, virt_page2);          phys_page2 = get_phys_addr_code(env, virt_page2);
     }      }
     tb_link_phys(tb, phys_pc, phys_page2);      tb_link_phys(tb, phys_pc, phys_page2);
       return tb;
 }  }
 #endif  
       
 /* invalidate all TBs which intersect with the target physical page  /* invalidate all TBs which intersect with the target physical page
    starting in range [start;end[. NOTE: start and end must refer to     starting in range [start;end[. NOTE: start and end must refer to
    the same physical page. 'is_cpu_write_access' should be true if called     the same physical page. 'is_cpu_write_access' should be true if called
    from a real cpu write access: the virtual CPU will exit the current     from a real cpu write access: the virtual CPU will exit the current
    TB if code is modified inside this TB. */     TB if code is modified inside this TB. */
 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,   void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                    int is_cpu_write_access)                                     int is_cpu_write_access)
 {  {
     int n, current_tb_modified, current_tb_not_found, current_flags;      TranslationBlock *tb, *tb_next, *saved_tb;
     CPUState *env = cpu_single_env;      CPUState *env = cpu_single_env;
     PageDesc *p;  
     TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;  
     target_ulong tb_start, tb_end;      target_ulong tb_start, tb_end;
     target_ulong current_pc, current_cs_base;      PageDesc *p;
       int n;
   #ifdef TARGET_HAS_PRECISE_SMC
       int current_tb_not_found = is_cpu_write_access;
       TranslationBlock *current_tb = NULL;
       int current_tb_modified = 0;
       target_ulong current_pc = 0;
       target_ulong current_cs_base = 0;
       int current_flags = 0;
   #endif /* TARGET_HAS_PRECISE_SMC */
   
     p = page_find(start >> TARGET_PAGE_BITS);      p = page_find(start >> TARGET_PAGE_BITS);
     if (!p)       if (!p)
         return;          return;
     if (!p->code_bitmap &&       if (!p->code_bitmap &&
         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&          ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
         is_cpu_write_access) {          is_cpu_write_access) {
         /* build code bitmap */          /* build code bitmap */
Line 758  void tb_invalidate_phys_page_range(targe Line 917  void tb_invalidate_phys_page_range(targe
   
     /* we remove all the TBs in the range [start, end[ */      /* we remove all the TBs in the range [start, end[ */
     /* XXX: see if in some cases it could be faster to invalidate all the code */      /* XXX: see if in some cases it could be faster to invalidate all the code */
     current_tb_not_found = is_cpu_write_access;  
     current_tb_modified = 0;  
     current_tb = NULL; /* avoid warning */  
     current_pc = 0; /* avoid warning */  
     current_cs_base = 0; /* avoid warning */  
     current_flags = 0; /* avoid warning */  
     tb = p->first_tb;      tb = p->first_tb;
     while (tb != NULL) {      while (tb != NULL) {
         n = (long)tb & 3;          n = (long)tb & 3;
Line 784  void tb_invalidate_phys_page_range(targe Line 937  void tb_invalidate_phys_page_range(targe
             if (current_tb_not_found) {              if (current_tb_not_found) {
                 current_tb_not_found = 0;                  current_tb_not_found = 0;
                 current_tb = NULL;                  current_tb = NULL;
                 if (env->mem_write_pc) {                  if (env->mem_io_pc) {
                     /* now we have a real cpu fault */                      /* now we have a real cpu fault */
                     current_tb = tb_find_pc(env->mem_write_pc);                      current_tb = tb_find_pc(env->mem_io_pc);
                 }                  }
             }              }
             if (current_tb == tb &&              if (current_tb == tb &&
                 !(current_tb->cflags & CF_SINGLE_INSN)) {                  (current_tb->cflags & CF_COUNT_MASK) != 1) {
                 /* If we are modifying the current TB, we must stop                  /* If we are modifying the current TB, we must stop
                 its execution. We could be more precise by checking                  its execution. We could be more precise by checking
                 that the modification is after the current PC, but it                  that the modification is after the current PC, but it
                 would require a specialized function to partially                  would require a specialized function to partially
                 restore the CPU state */                  restore the CPU state */
                   
                 current_tb_modified = 1;                  current_tb_modified = 1;
                 cpu_restore_state(current_tb, env,                   cpu_restore_state(current_tb, env,
                                   env->mem_write_pc, NULL);                                    env->mem_io_pc, NULL);
 #if defined(TARGET_I386)                  cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                 current_flags = env->hflags;                                       &current_flags);
                 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));  
                 current_cs_base = (target_ulong)env->segs[R_CS].base;  
                 current_pc = current_cs_base + env->eip;  
 #else  
 #error unsupported CPU  
 #endif  
             }              }
 #endif /* TARGET_HAS_PRECISE_SMC */  #endif /* TARGET_HAS_PRECISE_SMC */
             saved_tb = env->current_tb;              /* we need to do that to handle the case where a signal
             env->current_tb = NULL;                 occurs while doing tb_phys_invalidate() */
               saved_tb = NULL;
               if (env) {
                   saved_tb = env->current_tb;
                   env->current_tb = NULL;
               }
             tb_phys_invalidate(tb, -1);              tb_phys_invalidate(tb, -1);
             env->current_tb = saved_tb;              if (env) {
             if (env->interrupt_request && env->current_tb)                  env->current_tb = saved_tb;
                 cpu_interrupt(env, env->interrupt_request);                  if (env->interrupt_request && env->current_tb)
                       cpu_interrupt(env, env->interrupt_request);
               }
         }          }
         tb = tb_next;          tb = tb_next;
     }      }
Line 824  void tb_invalidate_phys_page_range(targe Line 978  void tb_invalidate_phys_page_range(targe
     if (!p->first_tb) {      if (!p->first_tb) {
         invalidate_page_bitmap(p);          invalidate_page_bitmap(p);
         if (is_cpu_write_access) {          if (is_cpu_write_access) {
             tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);              tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
         }          }
     }      }
 #endif  #endif
Line 834  void tb_invalidate_phys_page_range(targe Line 988  void tb_invalidate_phys_page_range(targe
            modifying the memory. It will ensure that it cannot modify             modifying the memory. It will ensure that it cannot modify
            itself */             itself */
         env->current_tb = NULL;          env->current_tb = NULL;
         tb_gen_code(env, current_pc, current_cs_base, current_flags,           tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
                     CF_SINGLE_INSN);  
         cpu_resume_from_signal(env, NULL);          cpu_resume_from_signal(env, NULL);
     }      }
 #endif  #endif
 }  }
   
 /* len must be <= 8 and start must be a multiple of len */  /* len must be <= 8 and start must be a multiple of len */
 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)  static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
 {  {
     PageDesc *p;      PageDesc *p;
     int offset, b;      int offset, b;
 #if 0  #if 0
     if (1) {      if (1) {
         if (loglevel) {          qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",                     cpu_single_env->mem_io_vaddr, len,
                    cpu_single_env->mem_write_vaddr, len,                     cpu_single_env->eip,
                    cpu_single_env->eip,                     cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);  
         }  
     }      }
 #endif  #endif
     p = page_find(start >> TARGET_PAGE_BITS);      p = page_find(start >> TARGET_PAGE_BITS);
     if (!p)       if (!p)
         return;          return;
     if (p->code_bitmap) {      if (p->code_bitmap) {
         offset = start & ~TARGET_PAGE_MASK;          offset = start & ~TARGET_PAGE_MASK;
Line 871  static inline void tb_invalidate_phys_pa Line 1022  static inline void tb_invalidate_phys_pa
 }  }
   
 #if !defined(CONFIG_SOFTMMU)  #if !defined(CONFIG_SOFTMMU)
 static void tb_invalidate_phys_page(target_ulong addr,   static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                     unsigned long pc, void *puc)                                      unsigned long pc, void *puc)
 {  {
     int n, current_flags, current_tb_modified;      TranslationBlock *tb;
     target_ulong current_pc, current_cs_base;  
     PageDesc *p;      PageDesc *p;
     TranslationBlock *tb, *current_tb;      int n;
 #ifdef TARGET_HAS_PRECISE_SMC  #ifdef TARGET_HAS_PRECISE_SMC
       TranslationBlock *current_tb = NULL;
     CPUState *env = cpu_single_env;      CPUState *env = cpu_single_env;
       int current_tb_modified = 0;
       target_ulong current_pc = 0;
       target_ulong current_cs_base = 0;
       int current_flags = 0;
 #endif  #endif
   
     addr &= TARGET_PAGE_MASK;      addr &= TARGET_PAGE_MASK;
     p = page_find(addr >> TARGET_PAGE_BITS);      p = page_find(addr >> TARGET_PAGE_BITS);
     if (!p)       if (!p)
         return;          return;
     tb = p->first_tb;      tb = p->first_tb;
     current_tb_modified = 0;  
     current_tb = NULL;  
     current_pc = 0; /* avoid warning */  
     current_cs_base = 0; /* avoid warning */  
     current_flags = 0; /* avoid warning */  
 #ifdef TARGET_HAS_PRECISE_SMC  #ifdef TARGET_HAS_PRECISE_SMC
     if (tb && pc != 0) {      if (tb && pc != 0) {
         current_tb = tb_find_pc(pc);          current_tb = tb_find_pc(pc);
Line 902  static void tb_invalidate_phys_page(targ Line 1052  static void tb_invalidate_phys_page(targ
         tb = (TranslationBlock *)((long)tb & ~3);          tb = (TranslationBlock *)((long)tb & ~3);
 #ifdef TARGET_HAS_PRECISE_SMC  #ifdef TARGET_HAS_PRECISE_SMC
         if (current_tb == tb &&          if (current_tb == tb &&
             !(current_tb->cflags & CF_SINGLE_INSN)) {              (current_tb->cflags & CF_COUNT_MASK) != 1) {
                 /* If we are modifying the current TB, we must stop                  /* If we are modifying the current TB, we must stop
                    its execution. We could be more precise by checking                     its execution. We could be more precise by checking
                    that the modification is after the current PC, but it                     that the modification is after the current PC, but it
                    would require a specialized function to partially                     would require a specialized function to partially
                    restore the CPU state */                     restore the CPU state */
               
             current_tb_modified = 1;              current_tb_modified = 1;
             cpu_restore_state(current_tb, env, pc, puc);              cpu_restore_state(current_tb, env, pc, puc);
 #if defined(TARGET_I386)              cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
             current_flags = env->hflags;                                   &current_flags);
             current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));  
             current_cs_base = (target_ulong)env->segs[R_CS].base;  
             current_pc = current_cs_base + env->eip;  
 #else  
 #error unsupported CPU  
 #endif  
         }          }
 #endif /* TARGET_HAS_PRECISE_SMC */  #endif /* TARGET_HAS_PRECISE_SMC */
         tb_phys_invalidate(tb, addr);          tb_phys_invalidate(tb, addr);
Line 931  static void tb_invalidate_phys_page(targ Line 1075  static void tb_invalidate_phys_page(targ
            modifying the memory. It will ensure that it cannot modify             modifying the memory. It will ensure that it cannot modify
            itself */             itself */
         env->current_tb = NULL;          env->current_tb = NULL;
         tb_gen_code(env, current_pc, current_cs_base, current_flags,           tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
                     CF_SINGLE_INSN);  
         cpu_resume_from_signal(env, puc);          cpu_resume_from_signal(env, puc);
     }      }
 #endif  #endif
Line 940  static void tb_invalidate_phys_page(targ Line 1083  static void tb_invalidate_phys_page(targ
 #endif  #endif
   
 /* add the tb in the target page and protect it if necessary */  /* add the tb in the target page and protect it if necessary */
 static inline void tb_alloc_page(TranslationBlock *tb,   static inline void tb_alloc_page(TranslationBlock *tb,
                                  unsigned int n, unsigned int page_addr)                                   unsigned int n, target_ulong page_addr)
 {  {
     PageDesc *p;      PageDesc *p;
     TranslationBlock *last_first_tb;      TranslationBlock *last_first_tb;
Line 957  static inline void tb_alloc_page(Transla Line 1100  static inline void tb_alloc_page(Transla
   
 #if defined(CONFIG_USER_ONLY)  #if defined(CONFIG_USER_ONLY)
     if (p->flags & PAGE_WRITE) {      if (p->flags & PAGE_WRITE) {
         unsigned long host_start, host_end, addr;          target_ulong addr;
           PageDesc *p2;
         int prot;          int prot;
   
         /* force the host page as non writable (writes will have a          /* force the host page as non writable (writes will have a
            page fault + mprotect overhead) */             page fault + mprotect overhead) */
         host_start = page_addr & qemu_host_page_mask;          page_addr &= qemu_host_page_mask;
         host_end = host_start + qemu_host_page_size;  
         prot = 0;          prot = 0;
         for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)          for(addr = page_addr; addr < page_addr + qemu_host_page_size;
             prot |= page_get_flags(addr);              addr += TARGET_PAGE_SIZE) {
         mprotect((void *)host_start, qemu_host_page_size,   
               p2 = page_find (addr >> TARGET_PAGE_BITS);
               if (!p2)
                   continue;
               prot |= p2->flags;
               p2->flags &= ~PAGE_WRITE;
               page_get_flags(addr);
             }
           mprotect(g2h(page_addr), qemu_host_page_size,
                  (prot & PAGE_BITS) & ~PAGE_WRITE);                   (prot & PAGE_BITS) & ~PAGE_WRITE);
 #ifdef DEBUG_TB_INVALIDATE  #ifdef DEBUG_TB_INVALIDATE
         printf("protecting code page: 0x%08lx\n",           printf("protecting code page: 0x" TARGET_FMT_lx "\n",
                host_start);                 page_addr);
 #endif  #endif
         p->flags &= ~PAGE_WRITE;  
     }      }
 #else  #else
     /* if some code is already present, then the pages are already      /* if some code is already present, then the pages are already
        protected. So we handle the case where only the first TB is         protected. So we handle the case where only the first TB is
        allocated in a physical page */         allocated in a physical page */
     if (!last_first_tb) {      if (!last_first_tb) {
         target_ulong virt_addr;          tlb_protect_code(page_addr);
   
         virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);  
         tlb_protect_code(cpu_single_env, page_addr, virt_addr);  
     }      }
 #endif  #endif
   
Line 996  TranslationBlock *tb_alloc(target_ulong  Line 1143  TranslationBlock *tb_alloc(target_ulong 
 {  {
     TranslationBlock *tb;      TranslationBlock *tb;
   
     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||       if (nb_tbs >= code_gen_max_blocks ||
         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)          (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
         return NULL;          return NULL;
     tb = &tbs[nb_tbs++];      tb = &tbs[nb_tbs++];
     tb->pc = pc;      tb->pc = pc;
Line 1005  TranslationBlock *tb_alloc(target_ulong  Line 1152  TranslationBlock *tb_alloc(target_ulong 
     return tb;      return tb;
 }  }
   
   void tb_free(TranslationBlock *tb)
   {
       /* In practice this is mostly used for single use temporary TB
          Ignore the hard cases and just back up if this TB happens to
          be the last one generated.  */
       if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
           code_gen_ptr = tb->tc_ptr;
           nb_tbs--;
       }
   }
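
tb_alloc() hands out TranslationBlock slots and code space linearly from code_gen_buffer, and the new tb_free() can only return memory when the TB being freed is the most recently generated one, by rewinding code_gen_ptr; anything else is simply left for the next tb_flush(). A reduced sketch of that bump-allocate / last-in-free behaviour:

#include <stdio.h>

#define DEMO_MAX_BLOCKS 4
#define DEMO_BUF_SIZE   1024

typedef struct { unsigned char *tc_ptr; } DemoTB;  /* stand-in for TranslationBlock */

static DemoTB demo_tbs[DEMO_MAX_BLOCKS];
static int demo_nb_tbs;
static unsigned char demo_buffer[DEMO_BUF_SIZE];
static unsigned char *demo_code_ptr = demo_buffer;

static DemoTB *demo_tb_alloc(int code_size)
{
    DemoTB *tb;

    if (demo_nb_tbs >= DEMO_MAX_BLOCKS ||
        (demo_code_ptr - demo_buffer) + code_size > DEMO_BUF_SIZE)
        return NULL;                       /* caller has to flush everything */
    tb = &demo_tbs[demo_nb_tbs++];
    tb->tc_ptr = demo_code_ptr;
    demo_code_ptr += code_size;
    return tb;
}

static void demo_tb_free(DemoTB *tb)
{
    /* only the most recently generated TB can really be reclaimed */
    if (demo_nb_tbs > 0 && tb == &demo_tbs[demo_nb_tbs - 1]) {
        demo_code_ptr = tb->tc_ptr;
        demo_nb_tbs--;
    }
}

int main(void)
{
    DemoTB *a = demo_tb_alloc(100);
    DemoTB *b = demo_tb_alloc(200);

    demo_tb_free(a);                       /* ignored: a is not the newest TB */
    demo_tb_free(b);                       /* reclaimed: code_ptr rewinds by 200 */
    printf("%d TB live, %ld bytes of code used\n",
           demo_nb_tbs, (long)(demo_code_ptr - demo_buffer));
    return 0;
}
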
   
 /* add a new TB and link it to the physical page tables. phys_page2 is  /* add a new TB and link it to the physical page tables. phys_page2 is
    (-1) to indicate that only one page contains the TB. */     (-1) to indicate that only one page contains the TB. */
 void tb_link_phys(TranslationBlock *tb,   void tb_link_phys(TranslationBlock *tb,
                   target_ulong phys_pc, target_ulong phys_page2)                    target_ulong phys_pc, target_ulong phys_page2)
 {  {
     unsigned int h;      unsigned int h;
     TranslationBlock **ptb;      TranslationBlock **ptb;
   
       /* Grab the mmap lock to stop another thread invalidating this TB
          before we are done.  */
       mmap_lock();
     /* add in the physical hash table */      /* add in the physical hash table */
     h = tb_phys_hash_func(phys_pc);      h = tb_phys_hash_func(phys_pc);
     ptb = &tb_phys_hash[h];      ptb = &tb_phys_hash[h];
Line 1025  void tb_link_phys(TranslationBlock *tb,  Line 1186  void tb_link_phys(TranslationBlock *tb, 
         tb_alloc_page(tb, 1, phys_page2);          tb_alloc_page(tb, 1, phys_page2);
     else      else
         tb->page_addr[1] = -1;          tb->page_addr[1] = -1;
 #ifdef DEBUG_TB_CHECK  
     tb_page_check();  
 #endif  
 }  
   
 /* link the tb with the other TBs */  
 void tb_link(TranslationBlock *tb)  
 {  
 #if !defined(CONFIG_USER_ONLY)  
     {  
         VirtPageDesc *vp;  
         target_ulong addr;  
           
         /* save the code memory mappings (needed to invalidate the code) */  
         addr = tb->pc & TARGET_PAGE_MASK;  
         vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);  
 #ifdef DEBUG_TLB_CHECK   
         if (vp->valid_tag == virt_valid_tag &&  
             vp->phys_addr != tb->page_addr[0]) {  
             printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",  
                    addr, tb->page_addr[0], vp->phys_addr);  
         }  
 #endif  
         vp->phys_addr = tb->page_addr[0];  
         if (vp->valid_tag != virt_valid_tag) {  
             vp->valid_tag = virt_valid_tag;  
 #if !defined(CONFIG_SOFTMMU)  
             vp->prot = 0;  
 #endif  
         }  
           
         if (tb->page_addr[1] != -1) {  
             addr += TARGET_PAGE_SIZE;  
             vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);  
 #ifdef DEBUG_TLB_CHECK   
             if (vp->valid_tag == virt_valid_tag &&  
                 vp->phys_addr != tb->page_addr[1]) {   
                 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",  
                        addr, tb->page_addr[1], vp->phys_addr);  
             }  
 #endif  
             vp->phys_addr = tb->page_addr[1];  
             if (vp->valid_tag != virt_valid_tag) {  
                 vp->valid_tag = virt_valid_tag;  
 #if !defined(CONFIG_SOFTMMU)  
                 vp->prot = 0;  
 #endif  
             }  
         }  
     }  
 #endif  
   
     tb->jmp_first = (TranslationBlock *)((long)tb | 2);      tb->jmp_first = (TranslationBlock *)((long)tb | 2);
     tb->jmp_next[0] = NULL;      tb->jmp_next[0] = NULL;
     tb->jmp_next[1] = NULL;      tb->jmp_next[1] = NULL;
 #ifdef USE_CODE_COPY  
     tb->cflags &= ~CF_FP_USED;  
     if (tb->cflags & CF_TB_FP_USED)  
         tb->cflags |= CF_FP_USED;  
 #endif  
   
     /* init original jump addresses */      /* init original jump addresses */
     if (tb->tb_next_offset[0] != 0xffff)      if (tb->tb_next_offset[0] != 0xffff)
         tb_reset_jump(tb, 0);          tb_reset_jump(tb, 0);
     if (tb->tb_next_offset[1] != 0xffff)      if (tb->tb_next_offset[1] != 0xffff)
         tb_reset_jump(tb, 1);          tb_reset_jump(tb, 1);
   
   #ifdef DEBUG_TB_CHECK
       tb_page_check();
   #endif
       mmap_unlock();
 }  }
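
    The "(long)tb | 2" stored into jmp_first above keeps a small tag in the low
    bits of an aligned pointer. A minimal standalone sketch of that pointer-tagging
    idiom (not QEMU code; hypothetical names, assumes at least 4-byte alignment):

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        struct node { int value; };

        static uintptr_t tag_ptr(struct node *p, unsigned tag)
        {
            assert(((uintptr_t)p & 3) == 0 && tag < 4);
            return (uintptr_t)p | tag;          /* tag lives in the low 2 bits */
        }

        static struct node *untag_ptr(uintptr_t v, unsigned *tag)
        {
            *tag = v & 3;
            return (struct node *)(v & ~(uintptr_t)3);
        }

        int main(void)
        {
            struct node n = { 42 };
            unsigned tag;
            uintptr_t tagged = tag_ptr(&n, 2);
            struct node *p = untag_ptr(tagged, &tag);
            printf("value=%d tag=%u\n", p->value, tag);
            return 0;
        }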
   
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <  /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
Line 1120  TranslationBlock *tb_find_pc(unsigned lo Line 1230  TranslationBlock *tb_find_pc(unsigned lo
         } else {          } else {
             m_min = m + 1;              m_min = m + 1;
         }          }
     }       }
     return &tbs[m_max];      return &tbs[m_max];
 }  }
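
    tb_find_pc narrows m_min/m_max with a binary search until it lands on the
    block whose tc_ptr is the largest one not above the target host PC. A
    standalone sketch of that "largest element <= key" search (not QEMU code;
    hypothetical names):

        #include <stdio.h>

        /* returns the index of the largest starts[i] <= key;
           assumes n > 0 and starts[0] <= key */
        static int find_le(const unsigned long *starts, int n, unsigned long key)
        {
            int lo = 0, hi = n - 1;
            while (lo < hi) {
                int mid = (lo + hi + 1) >> 1;   /* bias up so lo always advances */
                if (starts[mid] <= key)
                    lo = mid;
                else
                    hi = mid - 1;
            }
            return lo;
        }

        int main(void)
        {
            unsigned long starts[] = { 0x1000, 0x1040, 0x10a0 };
            printf("%d\n", find_le(starts, 3, 0x1050));   /* prints 1 */
            return 0;
        }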
   
Line 1156  static inline void tb_reset_jump_recursi Line 1266  static inline void tb_reset_jump_recursi
         }          }
         *ptb = tb->jmp_next[n];          *ptb = tb->jmp_next[n];
         tb->jmp_next[n] = NULL;          tb->jmp_next[n] = NULL;
           
         /* suppress the jump to next tb in generated code */          /* suppress the jump to next tb in generated code */
         tb_reset_jump(tb, n);          tb_reset_jump(tb, n);
   
Line 1174  static void tb_reset_jump_recursive(Tran Line 1284  static void tb_reset_jump_recursive(Tran
 #if defined(TARGET_HAS_ICE)  #if defined(TARGET_HAS_ICE)
 static void breakpoint_invalidate(CPUState *env, target_ulong pc)  static void breakpoint_invalidate(CPUState *env, target_ulong pc)
 {  {
     target_ulong phys_addr;      target_phys_addr_t addr;
       target_ulong pd;
       ram_addr_t ram_addr;
       PhysPageDesc *p;
   
     phys_addr = cpu_get_phys_page_debug(env, pc);      addr = cpu_get_phys_page_debug(env, pc);
     tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);      p = phys_page_find(addr >> TARGET_PAGE_BITS);
       if (!p) {
           pd = IO_MEM_UNASSIGNED;
       } else {
           pd = p->phys_offset;
       }
       ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
       tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
 }  }
 #endif  #endif
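
    The new breakpoint_invalidate builds ram_addr by combining the physical page
    frame from the page descriptor with the in-page offset of pc. A tiny sketch of
    that mask arithmetic (not QEMU code; hypothetical names, 4 KB pages assumed):

        #include <stdio.h>

        #define PAGE_BITS 12
        #define PAGE_MASK (~((1ul << PAGE_BITS) - 1))

        int main(void)
        {
            unsigned long virt  = 0x400123;           /* guest virtual address */
            unsigned long frame = 0x9000 & PAGE_MASK; /* frame from a lookup */
            unsigned long ram   = frame | (virt & ~PAGE_MASK);
            printf("ram addr = 0x%lx\n", ram);        /* prints 0x9123 */
            return 0;
        }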
   
 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a  /* Add a watchpoint.  */
    breakpoint is reached */  int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)                            int flags, CPUWatchpoint **watchpoint)
 {  {
 #if defined(TARGET_HAS_ICE)      target_ulong len_mask = ~(len - 1);
     int i;      CPUWatchpoint *wp;
       
     for(i = 0; i < env->nb_breakpoints; i++) {      /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
         if (env->breakpoints[i] == pc)      if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
           fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                   TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
           return -EINVAL;
       }
       wp = qemu_malloc(sizeof(*wp));
   
       wp->vaddr = addr;
       wp->len_mask = len_mask;
       wp->flags = flags;
   
       /* keep all GDB-injected watchpoints in front */
       if (flags & BP_GDB)
           TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
       else
           TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
   
       tlb_flush_page(env, addr);
   
       if (watchpoint)
           *watchpoint = wp;
       return 0;
   }
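
    The sanity check above relies on len_mask = ~(len - 1): for a power-of-two
    length this masks off exactly the offset bits, so "addr & ~len_mask" is
    non-zero precisely when the address is not aligned to the length. A standalone
    sketch of the same check (not QEMU code; hypothetical names):

        #include <stdio.h>

        static int valid_watch(unsigned long addr, unsigned long len)
        {
            unsigned long len_mask = ~(len - 1);
            if ((len != 1 && len != 2 && len != 4 && len != 8) ||
                (addr & ~len_mask))
                return 0;        /* bad length or addr not len-aligned */
            return 1;
        }

        int main(void)
        {
            printf("%d %d %d\n",
                   valid_watch(0x1000, 4),   /* 1: aligned, power of two */
                   valid_watch(0x1002, 4),   /* 0: misaligned for len 4 */
                   valid_watch(0x1000, 3));  /* 0: unsupported length */
            return 0;
        }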
   
   /* Remove a specific watchpoint.  */
   int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                             int flags)
   {
       target_ulong len_mask = ~(len - 1);
       CPUWatchpoint *wp;
   
       TAILQ_FOREACH(wp, &env->watchpoints, entry) {
           if (addr == wp->vaddr && len_mask == wp->len_mask
                   && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
               cpu_watchpoint_remove_by_ref(env, wp);
             return 0;              return 0;
           }
       }
       return -ENOENT;
   }
   
   /* Remove a specific watchpoint by reference.  */
   void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
   {
       TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
   
       tlb_flush_page(env, watchpoint->vaddr);
   
       qemu_free(watchpoint);
   }
   
   /* Remove all matching watchpoints.  */
   void cpu_watchpoint_remove_all(CPUState *env, int mask)
   {
       CPUWatchpoint *wp, *next;
   
       TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
           if (wp->flags & mask)
               cpu_watchpoint_remove_by_ref(env, wp);
     }      }
   }
   
   /* Add a breakpoint.  */
   int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                             CPUBreakpoint **breakpoint)
   {
   #if defined(TARGET_HAS_ICE)
       CPUBreakpoint *bp;
   
       bp = qemu_malloc(sizeof(*bp));
   
       bp->pc = pc;
       bp->flags = flags;
   
       /* keep all GDB-injected breakpoints in front */
       if (flags & BP_GDB)
           TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
       else
           TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
   
     if (env->nb_breakpoints >= MAX_BREAKPOINTS)  
         return -1;  
     env->breakpoints[env->nb_breakpoints++] = pc;  
       
     breakpoint_invalidate(env, pc);      breakpoint_invalidate(env, pc);
   
       if (breakpoint)
           *breakpoint = bp;
     return 0;      return 0;
 #else  #else
     return -1;      return -ENOSYS;
 #endif  #endif
 }  }
   
 /* remove a breakpoint */  /* Remove a specific breakpoint.  */
 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)  int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
 {  {
 #if defined(TARGET_HAS_ICE)  #if defined(TARGET_HAS_ICE)
     int i;      CPUBreakpoint *bp;
     for(i = 0; i < env->nb_breakpoints; i++) {  
         if (env->breakpoints[i] == pc)  
             goto found;  
     }  
     return -1;  
  found:  
     env->nb_breakpoints--;  
     if (i < env->nb_breakpoints)  
       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];  
   
     breakpoint_invalidate(env, pc);      TAILQ_FOREACH(bp, &env->breakpoints, entry) {
     return 0;          if (bp->pc == pc && bp->flags == flags) {
               cpu_breakpoint_remove_by_ref(env, bp);
               return 0;
           }
       }
       return -ENOENT;
 #else  #else
     return -1;      return -ENOSYS;
   #endif
   }
   
   /* Remove a specific breakpoint by reference.  */
   void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
   {
   #if defined(TARGET_HAS_ICE)
       TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
   
       breakpoint_invalidate(env, breakpoint->pc);
   
       qemu_free(breakpoint);
   #endif
   }
   
   /* Remove all matching breakpoints. */
   void cpu_breakpoint_remove_all(CPUState *env, int mask)
   {
   #if defined(TARGET_HAS_ICE)
       CPUBreakpoint *bp, *next;
   
       TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
           if (bp->flags & mask)
               cpu_breakpoint_remove_by_ref(env, bp);
       }
 #endif  #endif
 }  }
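
    The break/watchpoint lists above use the BSD TAILQ macros: GDB-injected
    entries go to the head, others to the tail, and removal walks the list while
    holding the next pointer. A self-contained sketch of that pattern with
    <sys/queue.h> (not QEMU code; all names hypothetical, and the explicit
    first/next loop stands in for TAILQ_FOREACH_SAFE):

        #include <stdio.h>
        #include <stdlib.h>
        #include <sys/queue.h>

        #define FLAG_GDB 1

        struct bp {
            unsigned long pc;
            int flags;
            TAILQ_ENTRY(bp) entry;
        };

        TAILQ_HEAD(bp_list, bp);

        int main(void)
        {
            struct bp_list list = TAILQ_HEAD_INITIALIZER(list);
            struct bp *b, *next;

            for (int i = 0; i < 4; i++) {
                b = malloc(sizeof(*b));
                b->pc = 0x1000 + i;
                b->flags = (i & 1) ? FLAG_GDB : 0;
                if (b->flags & FLAG_GDB)
                    TAILQ_INSERT_HEAD(&list, b, entry);  /* debugger entries first */
                else
                    TAILQ_INSERT_TAIL(&list, b, entry);
            }

            /* remove all FLAG_GDB entries; saving 'next' keeps the walk safe
               across removal, which is what the _SAFE iterator does above */
            for (b = TAILQ_FIRST(&list); b != NULL; b = next) {
                next = TAILQ_NEXT(b, entry);
                if (b->flags & FLAG_GDB) {
                    TAILQ_REMOVE(&list, b, entry);
                    free(b);
                }
            }

            TAILQ_FOREACH(b, &list, entry)
                printf("pc=0x%lx\n", b->pc);
            return 0;
        }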
   
Line 1245  void cpu_set_log(int log_flags) Line 1461  void cpu_set_log(int log_flags)
 {  {
     loglevel = log_flags;      loglevel = log_flags;
     if (loglevel && !logfile) {      if (loglevel && !logfile) {
         logfile = fopen(logfilename, "w");          logfile = fopen(logfilename, log_append ? "a" : "w");
         if (!logfile) {          if (!logfile) {
             perror(logfilename);              perror(logfilename);
             _exit(1);              _exit(1);
Line 1253  void cpu_set_log(int log_flags) Line 1469  void cpu_set_log(int log_flags)
 #if !defined(CONFIG_SOFTMMU)  #if !defined(CONFIG_SOFTMMU)
         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */          /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
         {          {
             static uint8_t logfile_buf[4096];              static char logfile_buf[4096];
             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));              setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
         }          }
 #else  #else
         setvbuf(logfile, NULL, _IOLBF, 0);          setvbuf(logfile, NULL, _IOLBF, 0);
 #endif  #endif
           log_append = 1;
       }
       if (!loglevel && logfile) {
           fclose(logfile);
           logfile = NULL;
     }      }
 }  }
   
 void cpu_set_log_filename(const char *filename)  void cpu_set_log_filename(const char *filename)
 {  {
     logfilename = strdup(filename);      logfilename = strdup(filename);
       if (logfile) {
           fclose(logfile);
           logfile = NULL;
       }
       cpu_set_log(loglevel);
 }  }
   
 /* mask must never be zero, except for A20 change call */  /* mask must never be zero, except for A20 change call */
 void cpu_interrupt(CPUState *env, int mask)  void cpu_interrupt(CPUState *env, int mask)
 {  {
   #if !defined(USE_NPTL)
     TranslationBlock *tb;      TranslationBlock *tb;
     static int interrupt_lock;      static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
   #endif
       int old_mask;
   
       if (mask & CPU_INTERRUPT_EXIT) {
           env->exit_request = 1;
           mask &= ~CPU_INTERRUPT_EXIT;
       }
   
       old_mask = env->interrupt_request;
     env->interrupt_request |= mask;      env->interrupt_request |= mask;
     /* if the cpu is currently executing code, we must unlink it and  #if defined(USE_NPTL)
        all the potentially executing TB */      /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
     tb = env->current_tb;         problem and hope the cpu will stop of its own accord.  For userspace
     if (tb && !testandset(&interrupt_lock)) {         emulation this often isn't actually as bad as it sounds.  Often
         env->current_tb = NULL;         signals are used primarily to interrupt blocking syscalls.  */
         tb_reset_jump_recursive(tb);  #else
         interrupt_lock = 0;      if (use_icount) {
           env->icount_decr.u16.high = 0xffff;
   #ifndef CONFIG_USER_ONLY
           if (!can_do_io(env)
               && (mask & ~old_mask) != 0) {
               cpu_abort(env, "Raised interrupt while not in I/O function");
           }
   #endif
       } else {
           tb = env->current_tb;
           /* if the cpu is currently executing code, we must unlink it and
              all the potentially executing TB */
           if (tb && !testandset(&interrupt_lock)) {
               env->current_tb = NULL;
               tb_reset_jump_recursive(tb);
               resetlock(&interrupt_lock);
           }
     }      }
   #endif
 }  }
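
    The non-NPTL path guards TB unchaining with testandset()/resetlock() on a
    spinlock. A minimal sketch of that test-and-set idiom written with the GCC
    __sync builtins (not the QEMU primitives themselves; hypothetical names):

        #include <stdio.h>

        static volatile int interrupt_lock;

        static int try_lock(volatile int *lock)
        {
            /* returns the previous value: 0 means we acquired the lock */
            return __sync_lock_test_and_set(lock, 1);
        }

        static void unlock(volatile int *lock)
        {
            __sync_lock_release(lock);
        }

        int main(void)
        {
            if (!try_lock(&interrupt_lock)) {
                /* ... unlink the currently executing block here ... */
                puts("acquired");
                unlock(&interrupt_lock);
            }
            return 0;
        }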
   
 void cpu_reset_interrupt(CPUState *env, int mask)  void cpu_reset_interrupt(CPUState *env, int mask)
Line 1289  void cpu_reset_interrupt(CPUState *env,  Line 1541  void cpu_reset_interrupt(CPUState *env, 
     env->interrupt_request &= ~mask;      env->interrupt_request &= ~mask;
 }  }
   
 CPULogItem cpu_log_items[] = {  const CPULogItem cpu_log_items[] = {
     { CPU_LOG_TB_OUT_ASM, "out_asm",       { CPU_LOG_TB_OUT_ASM, "out_asm",
       "show generated host assembly code for each compiled TB" },        "show generated host assembly code for each compiled TB" },
     { CPU_LOG_TB_IN_ASM, "in_asm",      { CPU_LOG_TB_IN_ASM, "in_asm",
       "show target assembly code for each compiled TB" },        "show target assembly code for each compiled TB" },
     { CPU_LOG_TB_OP, "op",       { CPU_LOG_TB_OP, "op",
       "show micro ops for each compiled TB (only usable if 'in_asm' used)" },        "show micro ops for each compiled TB" },
 #ifdef TARGET_I386  
     { CPU_LOG_TB_OP_OPT, "op_opt",      { CPU_LOG_TB_OP_OPT, "op_opt",
       "show micro ops after optimization for each compiled TB" },        "show micro ops "
   #ifdef TARGET_I386
         "before eflags optimization and "
 #endif  #endif
         "after liveness analysis" },
     { CPU_LOG_INT, "int",      { CPU_LOG_INT, "int",
       "show interrupts/exceptions in short format" },        "show interrupts/exceptions in short format" },
     { CPU_LOG_EXEC, "exec",      { CPU_LOG_EXEC, "exec",
       "show trace before each executed TB (lots of logs)" },        "show trace before each executed TB (lots of logs)" },
     { CPU_LOG_TB_CPU, "cpu",      { CPU_LOG_TB_CPU, "cpu",
       "show CPU state before bloc translation" },        "show CPU state before block translation" },
 #ifdef TARGET_I386  #ifdef TARGET_I386
     { CPU_LOG_PCALL, "pcall",      { CPU_LOG_PCALL, "pcall",
       "show protected mode far calls/returns/exceptions" },        "show protected mode far calls/returns/exceptions" },
       { CPU_LOG_RESET, "cpu_reset",
         "show CPU state before CPU resets" },
 #endif  #endif
 #ifdef DEBUG_IOPORT  #ifdef DEBUG_IOPORT
     { CPU_LOG_IOPORT, "ioport",      { CPU_LOG_IOPORT, "ioport",
Line 1323  static int cmp1(const char *s1, int n, c Line 1579  static int cmp1(const char *s1, int n, c
         return 0;          return 0;
     return memcmp(s1, s2, n) == 0;      return memcmp(s1, s2, n) == 0;
 }  }
         
 /* takes a comma separated list of log masks. Return 0 if error. */  /* takes a comma separated list of log masks. Return 0 if error. */
 int cpu_str_to_log_mask(const char *str)  int cpu_str_to_log_mask(const char *str)
 {  {
     CPULogItem *item;      const CPULogItem *item;
     int mask;      int mask;
     const char *p, *p1;      const char *p, *p1;
   
Line 1360  int cpu_str_to_log_mask(const char *str) Line 1616  int cpu_str_to_log_mask(const char *str)
 void cpu_abort(CPUState *env, const char *fmt, ...)  void cpu_abort(CPUState *env, const char *fmt, ...)
 {  {
     va_list ap;      va_list ap;
       va_list ap2;
   
     va_start(ap, fmt);      va_start(ap, fmt);
       va_copy(ap2, ap);
     fprintf(stderr, "qemu: fatal: ");      fprintf(stderr, "qemu: fatal: ");
     vfprintf(stderr, fmt, ap);      vfprintf(stderr, fmt, ap);
     fprintf(stderr, "\n");      fprintf(stderr, "\n");
Line 1370  void cpu_abort(CPUState *env, const char Line 1628  void cpu_abort(CPUState *env, const char
 #else  #else
     cpu_dump_state(env, stderr, fprintf, 0);      cpu_dump_state(env, stderr, fprintf, 0);
 #endif  #endif
       if (qemu_log_enabled()) {
           qemu_log("qemu: fatal: ");
           qemu_log_vprintf(fmt, ap2);
           qemu_log("\n");
   #ifdef TARGET_I386
           log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
   #else
           log_cpu_state(env, 0);
   #endif
           qemu_log_flush();
           qemu_log_close();
       }
       va_end(ap2);
     va_end(ap);      va_end(ap);
     abort();      abort();
 }  }
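
    cpu_abort now duplicates the argument list with va_copy() so the same varargs
    can be formatted twice, once to stderr and once to the log. A small standalone
    sketch of that pattern (not QEMU code; hypothetical names):

        #include <stdarg.h>
        #include <stdio.h>

        static void report(FILE *log, const char *fmt, ...)
        {
            va_list ap, ap2;

            va_start(ap, fmt);
            va_copy(ap2, ap);
            vfprintf(stderr, fmt, ap);
            if (log)
                vfprintf(log, fmt, ap2);   /* ap2 is still valid; ap is consumed */
            va_end(ap2);
            va_end(ap);
        }

        int main(void)
        {
            report(stdout, "fatal: code=%d\n", 7);
            return 0;
        }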
   
   CPUState *cpu_copy(CPUState *env)
   {
       CPUState *new_env = cpu_init(env->cpu_model_str);
       CPUState *next_cpu = new_env->next_cpu;
       int cpu_index = new_env->cpu_index;
   #if defined(TARGET_HAS_ICE)
       CPUBreakpoint *bp;
       CPUWatchpoint *wp;
   #endif
   
       memcpy(new_env, env, sizeof(CPUState));
   
       /* Preserve chaining and index. */
       new_env->next_cpu = next_cpu;
       new_env->cpu_index = cpu_index;
   
       /* Clone all break/watchpoints.
          Note: Once we support ptrace with hw-debug register access, make sure
          BP_CPU break/watchpoints are handled correctly on clone. */
       TAILQ_INIT(&env->breakpoints);
       TAILQ_INIT(&env->watchpoints);
   #if defined(TARGET_HAS_ICE)
       TAILQ_FOREACH(bp, &env->breakpoints, entry) {
           cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
       }
       TAILQ_FOREACH(wp, &env->watchpoints, entry) {
           cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                                 wp->flags, NULL);
       }
   #endif
   
       return new_env;
   }
   
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
   
   static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
   {
       unsigned int i;
   
       /* Discard jump cache entries for any tb which might potentially
          overlap the flushed page.  */
       i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
       memset (&env->tb_jmp_cache[i], 0, 
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
   
       i = tb_jmp_cache_hash_page(addr);
       memset (&env->tb_jmp_cache[i], 0, 
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
   }
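
    tlb_flush_jmp_cache clears the jump-cache buckets for the flushed page and for
    the page before it, since a translated block starting on the previous page may
    spill into the flushed one. A sketch of the same "clear this page and the
    previous page" idea over a toy cache whose slots are grouped by page index
    (not QEMU code; sizes and names are hypothetical):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define PAGE_BITS        12
        #define ENTRIES_PER_PAGE 16            /* slots sharing one page index */
        #define NUM_PAGES        64
        #define CACHE_SIZE       (NUM_PAGES * ENTRIES_PER_PAGE)

        static void *cache[CACHE_SIZE];

        static unsigned page_index(uint64_t addr)
        {
            return (addr >> PAGE_BITS) & (NUM_PAGES - 1);
        }

        static void flush_page(uint64_t addr)
        {
            /* a block may start on the previous page and spill into this one,
               so clear both pages' buckets */
            memset(&cache[page_index(addr - (1 << PAGE_BITS)) * ENTRIES_PER_PAGE],
                   0, ENTRIES_PER_PAGE * sizeof(void *));
            memset(&cache[page_index(addr) * ENTRIES_PER_PAGE],
                   0, ENTRIES_PER_PAGE * sizeof(void *));
        }

        int main(void)
        {
            cache[page_index(0x5000) * ENTRIES_PER_PAGE] = (void *)1;
            flush_page(0x5000);
            printf("%p\n", cache[page_index(0x5000) * ENTRIES_PER_PAGE]);
            return 0;
        }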
   
 /* NOTE: if flush_global is true, also flush global entries (not  /* NOTE: if flush_global is true, also flush global entries (not
    implemented yet) */     implemented yet) */
 void tlb_flush(CPUState *env, int flush_global)  void tlb_flush(CPUState *env, int flush_global)
Line 1390  void tlb_flush(CPUState *env, int flush_ Line 1710  void tlb_flush(CPUState *env, int flush_
     env->current_tb = NULL;      env->current_tb = NULL;
   
     for(i = 0; i < CPU_TLB_SIZE; i++) {      for(i = 0; i < CPU_TLB_SIZE; i++) {
         env->tlb_read[0][i].address = -1;          env->tlb_table[0][i].addr_read = -1;
         env->tlb_write[0][i].address = -1;          env->tlb_table[0][i].addr_write = -1;
         env->tlb_read[1][i].address = -1;          env->tlb_table[0][i].addr_code = -1;
         env->tlb_write[1][i].address = -1;          env->tlb_table[1][i].addr_read = -1;
           env->tlb_table[1][i].addr_write = -1;
           env->tlb_table[1][i].addr_code = -1;
   #if (NB_MMU_MODES >= 3)
           env->tlb_table[2][i].addr_read = -1;
           env->tlb_table[2][i].addr_write = -1;
           env->tlb_table[2][i].addr_code = -1;
   #if (NB_MMU_MODES == 4)
           env->tlb_table[3][i].addr_read = -1;
           env->tlb_table[3][i].addr_write = -1;
           env->tlb_table[3][i].addr_code = -1;
   #endif
   #endif
     }      }
   
     virt_page_flush();      memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
     memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));  
   
 #if !defined(CONFIG_SOFTMMU)  
     munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);  
 #endif  
 #ifdef USE_KQEMU  #ifdef USE_KQEMU
     if (env->kqemu_enabled) {      if (env->kqemu_enabled) {
         kqemu_flush(env, flush_global);          kqemu_flush(env, flush_global);
Line 1412  void tlb_flush(CPUState *env, int flush_ Line 1740  void tlb_flush(CPUState *env, int flush_
   
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)  static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {  {
     if (addr == (tlb_entry->address &       if (addr == (tlb_entry->addr_read &
                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)))                   (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
         tlb_entry->address = -1;          addr == (tlb_entry->addr_write &
                    (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
           addr == (tlb_entry->addr_code &
                    (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
           tlb_entry->addr_read = -1;
           tlb_entry->addr_write = -1;
           tlb_entry->addr_code = -1;
       }
 }  }
   
 void tlb_flush_page(CPUState *env, target_ulong addr)  void tlb_flush_page(CPUState *env, target_ulong addr)
 {  {
     int i, n;      int i;
     VirtPageDesc *vp;  
     PageDesc *p;  
     TranslationBlock *tb;  
   
 #if defined(DEBUG_TLB)  #if defined(DEBUG_TLB)
     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);      printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
Line 1433  void tlb_flush_page(CPUState *env, targe Line 1765  void tlb_flush_page(CPUState *env, targe
   
     addr &= TARGET_PAGE_MASK;      addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);      i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     tlb_flush_entry(&env->tlb_read[0][i], addr);      tlb_flush_entry(&env->tlb_table[0][i], addr);
     tlb_flush_entry(&env->tlb_write[0][i], addr);      tlb_flush_entry(&env->tlb_table[1][i], addr);
     tlb_flush_entry(&env->tlb_read[1][i], addr);  #if (NB_MMU_MODES >= 3)
     tlb_flush_entry(&env->tlb_write[1][i], addr);      tlb_flush_entry(&env->tlb_table[2][i], addr);
   #if (NB_MMU_MODES == 4)
       tlb_flush_entry(&env->tlb_table[3][i], addr);
   #endif
   #endif
   
     /* remove from the virtual pc hash table all the TB at this      tlb_flush_jmp_cache(env, addr);
        virtual address */  
       
     vp = virt_page_find(addr >> TARGET_PAGE_BITS);  
     if (vp && vp->valid_tag == virt_valid_tag) {  
         p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);  
         if (p) {  
             /* we remove all the links to the TBs in this virtual page */  
             tb = p->first_tb;  
             while (tb != NULL) {  
                 n = (long)tb & 3;  
                 tb = (TranslationBlock *)((long)tb & ~3);  
                 if ((tb->pc & TARGET_PAGE_MASK) == addr ||  
                     ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {  
                     tb_invalidate(tb);  
                 }  
                 tb = tb->page_next[n];  
             }  
         }  
         vp->valid_tag = 0;  
     }  
   
 #if !defined(CONFIG_SOFTMMU)  
     if (addr < MMAP_AREA_END)  
         munmap((void *)addr, TARGET_PAGE_SIZE);  
 #endif  
 #ifdef USE_KQEMU  #ifdef USE_KQEMU
     if (env->kqemu_enabled) {      if (env->kqemu_enabled) {
         kqemu_flush_page(env, addr);          kqemu_flush_page(env, addr);
Line 1471  void tlb_flush_page(CPUState *env, targe Line 1783  void tlb_flush_page(CPUState *env, targe
 #endif  #endif
 }  }
   
 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)  
 {  
     if (addr == (tlb_entry->address &   
                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&  
         (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {  
         tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;  
     }  
 }  
   
 /* update the TLBs so that writes to code in the virtual page 'addr'  /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */     can be detected */
 static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,   static void tlb_protect_code(ram_addr_t ram_addr)
                              target_ulong vaddr)  
 {  {
     int i;      cpu_physical_memory_reset_dirty(ram_addr,
                                       ram_addr + TARGET_PAGE_SIZE,
     vaddr &= TARGET_PAGE_MASK;                                      CODE_DIRTY_FLAG);
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);  
     tlb_protect_code1(&env->tlb_write[0][i], vaddr);  
     tlb_protect_code1(&env->tlb_write[1][i], vaddr);  
   
 #ifdef USE_KQEMU  
     if (env->kqemu_enabled) {  
         kqemu_set_notdirty(env, ram_addr);  
     }  
 #endif  
     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;  
       
 #if !defined(CONFIG_SOFTMMU)  
     /* NOTE: as we generated the code for this page, it is already at  
        least readable */  
     if (vaddr < MMAP_AREA_END)  
         mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);  
 #endif  
 }  }
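
    tlb_protect_code now works purely through the dirty bitmap: clearing
    CODE_DIRTY_FLAG for a page forces later writes back through the slow path,
    and tlb_unprotect_code_phys sets the flag again. A toy sketch of a per-page
    flag byte used that way (not QEMU code; hypothetical names and flag values):

        #include <stdio.h>
        #include <string.h>

        #define PAGE_BITS  12
        #define NUM_PAGES  256
        #define CODE_DIRTY 0x02
        #define VGA_DIRTY  0x01   /* several consumers can share the byte */

        static unsigned char dirty[NUM_PAGES];

        static void protect_code(unsigned long ram_addr)
        {
            dirty[ram_addr >> PAGE_BITS] &= ~CODE_DIRTY;  /* writes must now trap */
        }

        static void unprotect_code(unsigned long ram_addr)
        {
            dirty[ram_addr >> PAGE_BITS] |= CODE_DIRTY;   /* writes go fast again */
        }

        int main(void)
        {
            memset(dirty, 0xff, sizeof(dirty));
            protect_code(0x3000);
            printf("page 3 flags: 0x%x\n", dirty[3]);     /* 0xfd */
            unprotect_code(0x3000);
            printf("page 3 flags: 0x%x\n", dirty[3]);     /* 0xff */
            return 0;
        }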
   
 /* update the TLB so that writes in physical page 'phys_addr' are no longer  /* update the TLB so that writes in physical page 'phys_addr' are no longer
    tested for self modifying code */     tested for self modifying code */
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,   static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr)                                      target_ulong vaddr)
 {  {
     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;      phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
 }  }
   
 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,   static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                          unsigned long start, unsigned long length)                                           unsigned long start, unsigned long length)
 {  {
     unsigned long addr;      unsigned long addr;
     if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {      if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
         addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;          addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
         if ((addr - start) < length) {          if ((addr - start) < length) {
             tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;              tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
         }          }
     }      }
 }  }
Line 1542  void cpu_physical_memory_reset_dirty(ram Line 1827  void cpu_physical_memory_reset_dirty(ram
     if (length == 0)      if (length == 0)
         return;          return;
     len = length >> TARGET_PAGE_BITS;      len = length >> TARGET_PAGE_BITS;
     env = cpu_single_env;  
 #ifdef USE_KQEMU  #ifdef USE_KQEMU
       /* XXX: should not depend on cpu context */
       env = first_cpu;
     if (env->kqemu_enabled) {      if (env->kqemu_enabled) {
         ram_addr_t addr;          ram_addr_t addr;
         addr = start;          addr = start;
Line 1561  void cpu_physical_memory_reset_dirty(ram Line 1847  void cpu_physical_memory_reset_dirty(ram
     /* we modify the TLB cache so that the dirty bit will be set again      /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */         when accessing the range */
     start1 = start + (unsigned long)phys_ram_base;      start1 = start + (unsigned long)phys_ram_base;
     for(i = 0; i < CPU_TLB_SIZE; i++)      for(env = first_cpu; env != NULL; env = env->next_cpu) {
         tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);          for(i = 0; i < CPU_TLB_SIZE; i++)
     for(i = 0; i < CPU_TLB_SIZE; i++)              tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
         tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);          for(i = 0; i < CPU_TLB_SIZE; i++)
               tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
   #if (NB_MMU_MODES >= 3)
           for(i = 0; i < CPU_TLB_SIZE; i++)
               tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
   #if (NB_MMU_MODES == 4)
           for(i = 0; i < CPU_TLB_SIZE; i++)
               tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
   #endif
   #endif
       }
   }
   
 #if !defined(CONFIG_SOFTMMU)  int cpu_physical_memory_set_dirty_tracking(int enable)
     /* XXX: this is expensive */  {
     {      in_migration = enable;
         VirtPageDesc *p;      return 0;
         int j;  }
         target_ulong addr;  
   
         for(i = 0; i < L1_SIZE; i++) {  int cpu_physical_memory_get_dirty_tracking(void)
             p = l1_virt_map[i];  {
             if (p) {      return in_migration;
                 addr = i << (TARGET_PAGE_BITS + L2_BITS);  }
                 for(j = 0; j < L2_SIZE; j++) {  
                     if (p->valid_tag == virt_valid_tag &&  void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
                         p->phys_addr >= start && p->phys_addr < end &&  {
                         (p->prot & PROT_WRITE)) {      if (kvm_enabled())
                         if (addr < MMAP_AREA_END) {          kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
                             mprotect((void *)addr, TARGET_PAGE_SIZE,   
                                      p->prot & ~PROT_WRITE);  
                         }  
                     }  
                     addr += TARGET_PAGE_SIZE;  
                     p++;  
                 }  
             }  
         }  
     }  
 #endif  
 }  }
   
 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)  static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 {  {
     ram_addr_t ram_addr;      ram_addr_t ram_addr;
   
     if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {      if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
         ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +           ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
             tlb_entry->addend - (unsigned long)phys_ram_base;              tlb_entry->addend - (unsigned long)phys_ram_base;
         if (!cpu_physical_memory_is_dirty(ram_addr)) {          if (!cpu_physical_memory_is_dirty(ram_addr)) {
             tlb_entry->address |= IO_MEM_NOTDIRTY;              tlb_entry->addr_write |= TLB_NOTDIRTY;
         }          }
     }      }
 }  }
Line 1613  void cpu_tlb_update_dirty(CPUState *env) Line 1898  void cpu_tlb_update_dirty(CPUState *env)
 {  {
     int i;      int i;
     for(i = 0; i < CPU_TLB_SIZE; i++)      for(i = 0; i < CPU_TLB_SIZE; i++)
         tlb_update_dirty(&env->tlb_write[0][i]);          tlb_update_dirty(&env->tlb_table[0][i]);
     for(i = 0; i < CPU_TLB_SIZE; i++)      for(i = 0; i < CPU_TLB_SIZE; i++)
         tlb_update_dirty(&env->tlb_write[1][i]);          tlb_update_dirty(&env->tlb_table[1][i]);
   #if (NB_MMU_MODES >= 3)
       for(i = 0; i < CPU_TLB_SIZE; i++)
           tlb_update_dirty(&env->tlb_table[2][i]);
   #if (NB_MMU_MODES == 4)
       for(i = 0; i < CPU_TLB_SIZE; i++)
           tlb_update_dirty(&env->tlb_table[3][i]);
   #endif
   #endif
 }  }
   
 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,   static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
                                   unsigned long start)  
 {  {
     unsigned long addr;      if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
     if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {          tlb_entry->addr_write = vaddr;
         addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;  
         if (addr == start) {  
             tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;  
         }  
     }  
 }  }
   
 /* update the TLB corresponding to virtual page vaddr and phys addr  /* update the TLB corresponding to virtual page vaddr
    addr so that it is no longer dirty */     so that it is no longer dirty */
 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)  static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
 {  {
     CPUState *env = cpu_single_env;  
     int i;      int i;
   
     addr &= TARGET_PAGE_MASK;      vaddr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);      i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     tlb_set_dirty1(&env->tlb_write[0][i], addr);      tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
     tlb_set_dirty1(&env->tlb_write[1][i], addr);      tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
   #if (NB_MMU_MODES >= 3)
       tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
   #if (NB_MMU_MODES == 4)
       tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
   #endif
   #endif
 }  }
   
 /* add a new TLB entry. At most one entry for a given virtual address  /* add a new TLB entry. At most one entry for a given virtual address
    is permitted. Return 0 if OK or 2 if the page could not be mapped     is permitted. Return 0 if OK or 2 if the page could not be mapped
    (can only happen in non SOFTMMU mode for I/O pages or pages     (can only happen in non SOFTMMU mode for I/O pages or pages
    conflicting with the host address space). */     conflicting with the host address space). */
 int tlb_set_page(CPUState *env, target_ulong vaddr,   int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,                         target_phys_addr_t paddr, int prot,
                  int is_user, int is_softmmu)                        int mmu_idx, int is_softmmu)
 {  {
     PhysPageDesc *p;      PhysPageDesc *p;
     unsigned long pd;      unsigned long pd;
     unsigned int index;      unsigned int index;
     target_ulong address;      target_ulong address;
       target_ulong code_address;
     target_phys_addr_t addend;      target_phys_addr_t addend;
     int ret;      int ret;
       CPUTLBEntry *te;
       CPUWatchpoint *wp;
       target_phys_addr_t iotlb;
   
     p = phys_page_find(paddr >> TARGET_PAGE_BITS);      p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     if (!p) {      if (!p) {
Line 1665  int tlb_set_page(CPUState *env, target_u Line 1961  int tlb_set_page(CPUState *env, target_u
         pd = p->phys_offset;          pd = p->phys_offset;
     }      }
 #if defined(DEBUG_TLB)  #if defined(DEBUG_TLB)
     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",      printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
            vaddr, paddr, prot, is_user, is_softmmu, pd);             vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
 #endif  #endif
   
     ret = 0;      ret = 0;
 #if !defined(CONFIG_SOFTMMU)      address = vaddr;
     if (is_softmmu)       if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
 #endif          /* IO memory case (romd handled later) */
     {          address |= TLB_MMIO;
         if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {      }
             /* IO memory case */      addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
             address = vaddr | pd;      if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
             addend = paddr;          /* Normal RAM.  */
         } else {          iotlb = pd & TARGET_PAGE_MASK;
             /* standard memory */          if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
             address = vaddr;              iotlb |= IO_MEM_NOTDIRTY;
             addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);          else
         }              iotlb |= IO_MEM_ROM;
               } else {
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);          /* IO handlers are currently passed a physical address.
         addend -= vaddr;             It would be nice to pass an offset from the base address
         if (prot & PAGE_READ) {             of that region.  This would avoid having to special case RAM,
             env->tlb_read[is_user][index].address = address;             and avoid full address decoding in every device.
             env->tlb_read[is_user][index].addend = addend;             We can't use the high bits of pd for this because
         } else {             IO_MEM_ROMD uses these as a ram address.  */
             env->tlb_read[is_user][index].address = -1;          iotlb = (pd & ~TARGET_PAGE_MASK);
             env->tlb_read[is_user][index].addend = -1;          if (p) {
         }              iotlb += p->region_offset;
         if (prot & PAGE_WRITE) {  
             if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {  
                 /* ROM: access is ignored (same as unassigned) */  
                 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;  
                 env->tlb_write[is_user][index].addend = addend;  
             } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&   
                        !cpu_physical_memory_is_dirty(pd)) {  
                 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;  
                 env->tlb_write[is_user][index].addend = addend;  
             } else {  
                 env->tlb_write[is_user][index].address = address;  
                 env->tlb_write[is_user][index].addend = addend;  
             }  
         } else {          } else {
             env->tlb_write[is_user][index].address = -1;              iotlb += paddr;
             env->tlb_write[is_user][index].addend = -1;  
         }          }
     }      }
 #if !defined(CONFIG_SOFTMMU)  
     else {  
         if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {  
             /* IO access: no mapping is done as it will be handled by the  
                soft MMU */  
             if (!(env->hflags & HF_SOFTMMU_MASK))  
                 ret = 2;  
         } else {  
             void *map_addr;  
   
             if (vaddr >= MMAP_AREA_END) {      code_address = address;
                 ret = 2;      /* Make accesses to pages with watchpoints go via the
             } else {         watchpoint trap routines.  */
                 if (prot & PROT_WRITE) {      TAILQ_FOREACH(wp, &env->watchpoints, entry) {
                     if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||           if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
 #if defined(TARGET_HAS_SMC) || 1              iotlb = io_mem_watch + paddr;
                         first_tb ||              /* TODO: The memory case can be optimized by not trapping
 #endif                 reads of pages with a write breakpoint.  */
                         ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&               address |= TLB_MMIO;
                          !cpu_physical_memory_is_dirty(pd))) {  
                         /* ROM: we do as if code was inside */  
                         /* if code is present, we only map as read only and save the  
                            original mapping */  
                         VirtPageDesc *vp;  
                           
                         vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);  
                         vp->phys_addr = pd;  
                         vp->prot = prot;  
                         vp->valid_tag = virt_valid_tag;  
                         prot &= ~PAGE_WRITE;  
                     }  
                 }  
                 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,   
                                 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));  
                 if (map_addr == MAP_FAILED) {  
                     cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",  
                               paddr, vaddr);  
                 }  
             }  
         }          }
     }      }
 #endif  
     return ret;  
 }  
   
 /* called from signal handler: invalidate the code and unprotect the  
   page. Return TRUE if the fault was successfully handled. */  
 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)  
 {  
 #if !defined(CONFIG_SOFTMMU)  
     VirtPageDesc *vp;  
   
 #if defined(DEBUG_TLB)      index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     printf("page_unprotect: addr=0x%08x\n", addr);      env->iotlb[mmu_idx][index] = iotlb - vaddr;
 #endif      te = &env->tlb_table[mmu_idx][index];
     addr &= TARGET_PAGE_MASK;      te->addend = addend - vaddr;
       if (prot & PAGE_READ) {
           te->addr_read = address;
       } else {
           te->addr_read = -1;
       }
   
     /* if it is not mapped, no need to worry here */      if (prot & PAGE_EXEC) {
     if (addr >= MMAP_AREA_END)          te->addr_code = code_address;
         return 0;      } else {
     vp = virt_page_find(addr >> TARGET_PAGE_BITS);          te->addr_code = -1;
     if (!vp)      }
         return 0;      if (prot & PAGE_WRITE) {
     /* NOTE: in this case, validate_tag is _not_ tested as it          if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
        validates only the code TLB */              (pd & IO_MEM_ROMD)) {
     if (vp->valid_tag != virt_valid_tag)              /* Write access calls the I/O callback.  */
         return 0;              te->addr_write = address | TLB_MMIO;
     if (!(vp->prot & PAGE_WRITE))          } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
         return 0;                     !cpu_physical_memory_is_dirty(pd)) {
 #if defined(DEBUG_TLB)              te->addr_write = address | TLB_NOTDIRTY;
     printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",           } else {
            addr, vp->phys_addr, vp->prot);              te->addr_write = address;
 #endif          }
     if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)      } else {
         cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",          te->addr_write = -1;
                   (unsigned long)addr, vp->prot);      }
     /* set the dirty bit */      return ret;
     phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;  
     /* flush the code inside */  
     tb_invalidate_phys_page(vp->phys_addr, pc, puc);  
     return 1;  
 #else  
     return 0;  
 #endif  
 }  }
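
    The addend stored above is host_base minus vaddr, so on a TLB hit a guest
    address becomes a host pointer with a single addition. A self-contained sketch
    of that scheme over a toy direct-mapped TLB (not QEMU code; hypothetical
    names, and a real fill path would consult page tables instead of being handed
    the host page):

        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define PAGE_BITS 12
        #define PAGE_SIZE (1u << PAGE_BITS)
        #define TLB_SIZE  256

        struct tlb_entry {
            uint64_t vaddr;      /* guest page address */
            intptr_t addend;     /* host address minus guest address */
        };

        static struct tlb_entry tlb[TLB_SIZE];

        static void tlb_fill(uint64_t vaddr, void *host_page)
        {
            unsigned idx = (vaddr >> PAGE_BITS) & (TLB_SIZE - 1);
            tlb[idx].vaddr  = vaddr & ~(uint64_t)(PAGE_SIZE - 1);
            tlb[idx].addend = (intptr_t)host_page - (intptr_t)tlb[idx].vaddr;
        }

        static void *translate(uint64_t vaddr)
        {
            unsigned idx = (vaddr >> PAGE_BITS) & (TLB_SIZE - 1);
            if (tlb[idx].vaddr != (vaddr & ~(uint64_t)(PAGE_SIZE - 1)))
                return NULL;                 /* miss: would refill the entry */
            return (void *)(vaddr + tlb[idx].addend);
        }

        int main(void)
        {
            void *page = malloc(PAGE_SIZE);
            tlb_fill(0x40001000, page);
            printf("host for 0x40001234 = %p (page %p)\n",
                   translate(0x40001234), page);
            free(page);
            return 0;
        }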
   
 #else  #else
Line 1807  void tlb_flush_page(CPUState *env, targe Line 2048  void tlb_flush_page(CPUState *env, targe
 {  {
 }  }
   
 int tlb_set_page(CPUState *env, target_ulong vaddr,   int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,                         target_phys_addr_t paddr, int prot,
                  int is_user, int is_softmmu)                        int mmu_idx, int is_softmmu)
 {  {
     return 0;      return 0;
 }  }
Line 1840  void page_dump(FILE *f) Line 2081  void page_dump(FILE *f)
                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);                  end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                 if (start != -1) {                  if (start != -1) {
                     fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",                      fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                             start, end, end - start,                               start, end, end - start,
                             prot & PAGE_READ ? 'r' : '-',                              prot & PAGE_READ ? 'r' : '-',
                             prot & PAGE_WRITE ? 'w' : '-',                              prot & PAGE_WRITE ? 'w' : '-',
                             prot & PAGE_EXEC ? 'x' : '-');                              prot & PAGE_EXEC ? 'x' : '-');
Line 1857  void page_dump(FILE *f) Line 2098  void page_dump(FILE *f)
     }      }
 }  }
   
 int page_get_flags(unsigned long address)  int page_get_flags(target_ulong address)
 {  {
     PageDesc *p;      PageDesc *p;
   
Line 1870  int page_get_flags(unsigned long address Line 2111  int page_get_flags(unsigned long address
 /* modify the flags of a page and invalidate the code if  /* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically     necessary. The flag PAGE_WRITE_ORG is positioned automatically
    depending on PAGE_WRITE */     depending on PAGE_WRITE */
 void page_set_flags(unsigned long start, unsigned long end, int flags)  void page_set_flags(target_ulong start, target_ulong end, int flags)
 {  {
     PageDesc *p;      PageDesc *p;
     unsigned long addr;      target_ulong addr;
   
       /* mmap_lock should already be held.  */
     start = start & TARGET_PAGE_MASK;      start = start & TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);      end = TARGET_PAGE_ALIGN(end);
     if (flags & PAGE_WRITE)      if (flags & PAGE_WRITE)
         flags |= PAGE_WRITE_ORG;          flags |= PAGE_WRITE_ORG;
     spin_lock(&tb_lock);  
     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {      for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
         p = page_find_alloc(addr >> TARGET_PAGE_BITS);          p = page_find_alloc(addr >> TARGET_PAGE_BITS);
           /* We may be called for host regions that are outside guest
              address space.  */
           if (!p)
               return;
         /* if the write protection is set, then we invalidate the code          /* if the write protection is set, then we invalidate the code
            inside */             inside */
         if (!(p->flags & PAGE_WRITE) &&           if (!(p->flags & PAGE_WRITE) &&
             (flags & PAGE_WRITE) &&              (flags & PAGE_WRITE) &&
             p->first_tb) {              p->first_tb) {
             tb_invalidate_phys_page(addr, 0, NULL);              tb_invalidate_phys_page(addr, 0, NULL);
         }          }
         p->flags = flags;          p->flags = flags;
     }      }
     spin_unlock(&tb_lock);  
 }  }
   
 /* called from signal handler: invalidate the code and unprotect the  int page_check_range(target_ulong start, target_ulong len, int flags)
    page. Return TRUE if the fault was succesfully handled. */  
 int page_unprotect(unsigned long address, unsigned long pc, void *puc)  
 {  {
     unsigned int page_index, prot, pindex;      PageDesc *p;
     PageDesc *p, *p1;      target_ulong end;
     unsigned long host_start, host_end, addr;      target_ulong addr;
   
     host_start = address & qemu_host_page_mask;      if (start + len < start)
     page_index = host_start >> TARGET_PAGE_BITS;          /* we've wrapped around */
     p1 = page_find(page_index);          return -1;
     if (!p1)  
         return 0;      end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
     host_end = host_start + qemu_host_page_size;      start = start & TARGET_PAGE_MASK;
   
       for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
           p = page_find(addr >> TARGET_PAGE_BITS);
           if( !p )
               return -1;
           if( !(p->flags & PAGE_VALID) )
               return -1;
   
           if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
               return -1;
           if (flags & PAGE_WRITE) {
               if (!(p->flags & PAGE_WRITE_ORG))
                   return -1;
               /* unprotect the page if it was put read-only because it
                  contains translated code */
               if (!(p->flags & PAGE_WRITE)) {
                   if (!page_unprotect(addr, 0, NULL))
                       return -1;
               }
               return 0;
           }
       }
       return 0;
   }
   
   /* called from signal handler: invalidate the code and unprotect the
     page. Return TRUE if the fault was successfully handled. */
   int page_unprotect(target_ulong address, unsigned long pc, void *puc)
   {
       unsigned int page_index, prot, pindex;
       PageDesc *p, *p1;
       target_ulong host_start, host_end, addr;
   
       /* Technically this isn't safe inside a signal handler.  However we
          know this only ever happens in a synchronous SEGV handler, so in
          practice it seems to be ok.  */
       mmap_lock();
   
       host_start = address & qemu_host_page_mask;
       page_index = host_start >> TARGET_PAGE_BITS;
       p1 = page_find(page_index);
       if (!p1) {
           mmap_unlock();
           return 0;
       }
       host_end = host_start + qemu_host_page_size;
     p = p1;      p = p1;
     prot = 0;      prot = 0;
     for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {      for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
Line 1919  int page_unprotect(unsigned long address Line 2207  int page_unprotect(unsigned long address
     if (prot & PAGE_WRITE_ORG) {      if (prot & PAGE_WRITE_ORG) {
         pindex = (address - host_start) >> TARGET_PAGE_BITS;          pindex = (address - host_start) >> TARGET_PAGE_BITS;
         if (!(p1[pindex].flags & PAGE_WRITE)) {          if (!(p1[pindex].flags & PAGE_WRITE)) {
             mprotect((void *)host_start, qemu_host_page_size,               mprotect((void *)g2h(host_start), qemu_host_page_size,
                      (prot & PAGE_BITS) | PAGE_WRITE);                       (prot & PAGE_BITS) | PAGE_WRITE);
             p1[pindex].flags |= PAGE_WRITE;              p1[pindex].flags |= PAGE_WRITE;
             /* and since the content will be modified, we must invalidate              /* and since the content will be modified, we must invalidate
Line 1928  int page_unprotect(unsigned long address Line 2216  int page_unprotect(unsigned long address
 #ifdef DEBUG_TB_CHECK  #ifdef DEBUG_TB_CHECK
             tb_invalidate_check(address);              tb_invalidate_check(address);
 #endif  #endif
               mmap_unlock();
             return 1;              return 1;
         }          }
     }      }
       mmap_unlock();
     return 0;      return 0;
 }  }
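
    page_unprotect restores PROT_WRITE on the host page that faulted, after the
    translations it contains have been invalidated. The mprotect() mechanics it
    relies on look like this in isolation (a hypothetical standalone example, not
    QEMU code):

        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
            size_t pagesize = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                return 1;

            strcpy(p, "before");
            mprotect(p, pagesize, PROT_READ);   /* protect: translated code here */
            /* ... a write would now fault; the handler would invalidate the
               translations for this page and then do: */
            mprotect(p, pagesize, PROT_READ | PROT_WRITE);   /* unprotect, retry */
            strcpy(p, "after");

            puts(p);
            munmap(p, pagesize);
            return 0;
        }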
   
 /* call this function when system calls directly modify a memory area */  static inline void tlb_set_dirty(CPUState *env,
 void page_unprotect_range(uint8_t *data, unsigned long data_size)                                   unsigned long addr, target_ulong vaddr)
 {  
     unsigned long start, end, addr;  
   
     start = (unsigned long)data;  
     end = start + data_size;  
     start &= TARGET_PAGE_MASK;  
     end = TARGET_PAGE_ALIGN(end);  
     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {  
         page_unprotect(addr, 0, NULL);  
     }  
 }  
   
 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)  
 {  {
 }  }
 #endif /* defined(CONFIG_USER_ONLY) */  #endif /* defined(CONFIG_USER_ONLY) */
   
   #if !defined(CONFIG_USER_ONLY)
   
   static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                                ram_addr_t memory, ram_addr_t region_offset);
   static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                              ram_addr_t orig_memory, ram_addr_t region_offset);
   #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                         need_subpage)                                     \
       do {                                                                \
           if (addr > start_addr)                                          \
               start_addr2 = 0;                                            \
           else {                                                          \
               start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
               if (start_addr2 > 0)                                        \
                   need_subpage = 1;                                       \
           }                                                               \
                                                                           \
           if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
               end_addr2 = TARGET_PAGE_SIZE - 1;                           \
           else {                                                          \
               end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
               if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                   need_subpage = 1;                                       \
           }                                                               \
       } while (0)
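
    CHECK_SUBPAGE computes which bytes of the current target page are actually
    covered by a registration that need not be page aligned, and flags the page as
    needing sub-page handling when the coverage is partial. A standalone sketch of
    the same computation with concrete numbers (not QEMU code; hypothetical names):

        #include <stdio.h>

        #define PAGE_SIZE 0x1000u
        #define PAGE_MASK (~(PAGE_SIZE - 1))

        static void covered(unsigned addr, unsigned start, unsigned size,
                            unsigned *first, unsigned *last, int *partial)
        {
            *first = (addr > start) ? 0 : (start & ~PAGE_MASK);
            if (start + size - addr >= PAGE_SIZE)
                *last = PAGE_SIZE - 1;
            else
                *last = (start + size - 1) & ~PAGE_MASK;
            *partial = (*first > 0) || (*last < PAGE_SIZE - 1);
        }

        int main(void)
        {
            unsigned first, last;
            int partial;
            /* register 0x800 bytes starting halfway into page 0x3000 */
            covered(0x3000, 0x3800, 0x800, &first, &last, &partial);
            printf("first=0x%x last=0x%x partial=%d\n", first, last, partial);
            return 0;                      /* first=0x800 last=0xfff partial=1 */
        }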
   
 /* register physical memory. 'size' must be a multiple of the target  /* register physical memory. 'size' must be a multiple of the target
    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an     page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
    io memory page */     io memory page.  The address used when calling the IO function is
 void cpu_register_physical_memory(target_phys_addr_t start_addr,      the offset from the start of the region, plus region_offset.  Both
                                  unsigned long size,     start_addr and region_offset are rounded down to a page boundary
                                   unsigned long phys_offset)     before calculating this offset.  This should not be a problem unless
      the low bits of start_addr and region_offset differ.  */
   void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                            ram_addr_t size,
                                            ram_addr_t phys_offset,
                                            ram_addr_t region_offset)
 {  {
     target_phys_addr_t addr, end_addr;      target_phys_addr_t addr, end_addr;
     PhysPageDesc *p;      PhysPageDesc *p;
       CPUState *env;
       ram_addr_t orig_size = size;
       void *subpage;
   
   #ifdef USE_KQEMU
       /* XXX: should not depend on cpu context */
       env = first_cpu;
       if (env->kqemu_enabled) {
           kqemu_set_phys_mem(start_addr, size, phys_offset);
       }
   #endif
       if (kvm_enabled())
           kvm_set_phys_mem(start_addr, size, phys_offset);
   
       if (phys_offset == IO_MEM_UNASSIGNED) {
           region_offset = start_addr;
       }
       region_offset &= TARGET_PAGE_MASK;
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;      size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
     end_addr = start_addr + size;      end_addr = start_addr + (target_phys_addr_t)size;
     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {      for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
         p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);          p = phys_page_find(addr >> TARGET_PAGE_BITS);
         p->phys_offset = phys_offset;          if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
         if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)              ram_addr_t orig_memory = p->phys_offset;
             phys_offset += TARGET_PAGE_SIZE;              target_phys_addr_t start_addr2, end_addr2;
               int need_subpage = 0;
   
               CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                             need_subpage);
               if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                   if (!(orig_memory & IO_MEM_SUBPAGE)) {
                       subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                              &p->phys_offset, orig_memory,
                                              p->region_offset);
                   } else {
                       subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                               >> IO_MEM_SHIFT];
                   }
                   subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                    region_offset);
                   p->region_offset = 0;
               } else {
                   p->phys_offset = phys_offset;
                   if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                       (phys_offset & IO_MEM_ROMD))
                       phys_offset += TARGET_PAGE_SIZE;
               }
           } else {
               p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
               p->phys_offset = phys_offset;
               p->region_offset = region_offset;
               if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                   (phys_offset & IO_MEM_ROMD)) {
                   phys_offset += TARGET_PAGE_SIZE;
               } else {
                   target_phys_addr_t start_addr2, end_addr2;
                   int need_subpage = 0;
   
                   CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                                 end_addr2, need_subpage);
   
                   if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                       subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                              &p->phys_offset, IO_MEM_UNASSIGNED,
                                              addr & TARGET_PAGE_MASK);
                       subpage_register(subpage, start_addr2, end_addr2,
                                        phys_offset, region_offset);
                       p->region_offset = 0;
                   }
               }
           }
           region_offset += TARGET_PAGE_SIZE;
     }      }
   
       /* since each CPU stores ram addresses in its TLB cache, we must
          reset the modified entries */
       /* XXX: slow ! */
       for(env = first_cpu; env != NULL; env = env->next_cpu) {
           tlb_flush(env, 1);
       }
   }
   
   /* XXX: temporary until new memory mapping API */
   ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
   {
       PhysPageDesc *p;
   
       p = phys_page_find(addr >> TARGET_PAGE_BITS);
       if (!p)
           return IO_MEM_UNASSIGNED;
       return p->phys_offset;
   }
   
   void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
   {
       if (kvm_enabled())
           kvm_coalesce_mmio_region(addr, size);
   }
   
   void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
   {
       if (kvm_enabled())
           kvm_uncoalesce_mmio_region(addr, size);
   }
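
Under KVM these two hooks let a device batch high-frequency MMIO writes whose handling can be deferred, instead of exiting to userspace for each one; under plain TCG they are no-ops because kvm_enabled() is false. A minimal sketch with an invented device window:

/* Sketch: coalesce writes to an illustrative 4 KB device data window. */
#define MYDEV_DATA_BASE  0xf1000000
#define MYDEV_DATA_SIZE  0x1000

qemu_register_coalesced_mmio(MYDEV_DATA_BASE, MYDEV_DATA_SIZE);
/* ... and on unmap or device reset ... */
qemu_unregister_coalesced_mmio(MYDEV_DATA_BASE, MYDEV_DATA_SIZE);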
   
   /* XXX: better than nothing */
   ram_addr_t qemu_ram_alloc(ram_addr_t size)
   {
       ram_addr_t addr;
       if ((phys_ram_alloc_offset + size) > phys_ram_size) {
           fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                   (uint64_t)size, (uint64_t)phys_ram_size);
           abort();
       }
       addr = phys_ram_alloc_offset;
       phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
   
       if (kvm_enabled())
           kvm_setup_guest_memory(phys_ram_base + addr, size);
   
       return addr;
   }
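
The usual consumer of qemu_ram_alloc() is board initialization code: carve guest RAM out of the preallocated phys_ram_base pool, then publish it through the registration path above. As in the earlier sketch, the cpu_register_physical_memory() wrapper signature is assumed.

/* Sketch: allocate machine_ram_size bytes of RAM and map it at guest
 * physical address 0. */
static void example_init_ram(ram_addr_t machine_ram_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(machine_ram_size);

    cpu_register_physical_memory(0, machine_ram_size,
                                 ram_offset | IO_MEM_RAM);
}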
   
   void qemu_ram_free(ram_addr_t addr)
   {
 }  }
   
 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)  static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
 {  {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 0, 0, 0, 1);
   #endif
       return 0;
   }
   
   static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
   {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 0, 0, 0, 2);
   #endif
       return 0;
   }
   
   static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
   {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 0, 0, 0, 4);
   #endif
     return 0;      return 0;
 }  }
   
 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)  static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
 {  {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 1, 0, 0, 1);
   #endif
   }
   
   static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
   {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 1, 0, 0, 2);
   #endif
   }
   
   static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
   {
   #ifdef DEBUG_UNASSIGNED
       printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
   #endif
   #if defined(TARGET_SPARC)
       do_unassigned_access(addr, 1, 0, 0, 4);
   #endif
 }  }
   
 static CPUReadMemoryFunc *unassigned_mem_read[3] = {  static CPUReadMemoryFunc *unassigned_mem_read[3] = {
     unassigned_mem_readb,      unassigned_mem_readb,
     unassigned_mem_readb,      unassigned_mem_readw,
     unassigned_mem_readb,      unassigned_mem_readl,
 };  };
   
 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {  static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
     unassigned_mem_writeb,      unassigned_mem_writeb,
     unassigned_mem_writeb,      unassigned_mem_writew,
     unassigned_mem_writeb,      unassigned_mem_writel,
 };  };
   
 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)  static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                   uint32_t val)
 {  {
     unsigned long ram_addr;  
     int dirty_flags;      int dirty_flags;
     ram_addr = addr - (unsigned long)phys_ram_base;  
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];      dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {      if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
Line 2006  static void notdirty_mem_writeb(void *op Line 2483  static void notdirty_mem_writeb(void *op
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];          dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif  #endif
     }      }
     stb_p((uint8_t *)(long)addr, val);      stb_p(phys_ram_base + ram_addr, val);
   #ifdef USE_KQEMU
       if (cpu_single_env->kqemu_enabled &&
           (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
           kqemu_modify_page(cpu_single_env, ram_addr);
   #endif
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);      dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;      phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
     /* we remove the notdirty callback only if the code has been      /* we remove the notdirty callback only if the code has been
        flushed */         flushed */
     if (dirty_flags == 0xff)      if (dirty_flags == 0xff)
         tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);          tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }  }
   
 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)  static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                   uint32_t val)
 {  {
     unsigned long ram_addr;  
     int dirty_flags;      int dirty_flags;
     ram_addr = addr - (unsigned long)phys_ram_base;  
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];      dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {      if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
Line 2027  static void notdirty_mem_writew(void *op Line 2508  static void notdirty_mem_writew(void *op
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];          dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif  #endif
     }      }
     stw_p((uint8_t *)(long)addr, val);      stw_p(phys_ram_base + ram_addr, val);
   #ifdef USE_KQEMU
       if (cpu_single_env->kqemu_enabled &&
           (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
           kqemu_modify_page(cpu_single_env, ram_addr);
   #endif
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);      dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;      phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
     /* we remove the notdirty callback only if the code has been      /* we remove the notdirty callback only if the code has been
        flushed */         flushed */
     if (dirty_flags == 0xff)      if (dirty_flags == 0xff)
         tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);          tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }  }
   
 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)  static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                   uint32_t val)
 {  {
     unsigned long ram_addr;  
     int dirty_flags;      int dirty_flags;
     ram_addr = addr - (unsigned long)phys_ram_base;  
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];      dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {      if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)  #if !defined(CONFIG_USER_ONLY)
Line 2048  static void notdirty_mem_writel(void *op Line 2533  static void notdirty_mem_writel(void *op
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];          dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif  #endif
     }      }
     stl_p((uint8_t *)(long)addr, val);      stl_p(phys_ram_base + ram_addr, val);
   #ifdef USE_KQEMU
       if (cpu_single_env->kqemu_enabled &&
           (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
           kqemu_modify_page(cpu_single_env, ram_addr);
   #endif
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);      dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;      phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
     /* we remove the notdirty callback only if the code has been      /* we remove the notdirty callback only if the code has been
        flushed */         flushed */
     if (dirty_flags == 0xff)      if (dirty_flags == 0xff)
         tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);          tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }  }
   
 static CPUReadMemoryFunc *error_mem_read[3] = {  static CPUReadMemoryFunc *error_mem_read[3] = {
Line 2069  static CPUWriteMemoryFunc *notdirty_mem_ Line 2559  static CPUWriteMemoryFunc *notdirty_mem_
     notdirty_mem_writel,      notdirty_mem_writel,
 };  };
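
The notdirty handlers above are what keep the per-page byte in phys_ram_dirty honest; consumers then poll those bits. A sketch of the classic consumer, a display adapter redrawing only the pages written since its last refresh. The cpu_physical_memory_get_dirty()/cpu_physical_memory_reset_dirty() helpers and the VGA_DIRTY_FLAG bit are assumed from cpu-all.h of this generation.

/* Sketch (assumed dirty-bitmap helpers): refresh a guest framebuffer. */
static void example_fb_refresh(ram_addr_t fb_offset, ram_addr_t fb_size)
{
    ram_addr_t page;

    for (page = fb_offset; page < fb_offset + fb_size;
         page += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    /* clear only the VGA bit, leaving CODE_DIRTY_FLAG bookkeeping alone */
    cpu_physical_memory_reset_dirty(fb_offset, fb_offset + fb_size,
                                    VGA_DIRTY_FLAG);
}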
   
   /* Generate a debug exception if a watchpoint has been hit.  */
   static void check_watchpoint(int offset, int len_mask, int flags)
   {
       CPUState *env = cpu_single_env;
       target_ulong pc, cs_base;
       TranslationBlock *tb;
       target_ulong vaddr;
       CPUWatchpoint *wp;
       int cpu_flags;
   
       if (env->watchpoint_hit) {
           /* We re-entered the check after replacing the TB. Now raise
             * the debug interrupt so that it will trigger after the
            * current instruction. */
           cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
           return;
       }
       vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
       TAILQ_FOREACH(wp, &env->watchpoints, entry) {
           if ((vaddr == (wp->vaddr & len_mask) ||
                (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
               wp->flags |= BP_WATCHPOINT_HIT;
               if (!env->watchpoint_hit) {
                   env->watchpoint_hit = wp;
                   tb = tb_find_pc(env->mem_io_pc);
                   if (!tb) {
                       cpu_abort(env, "check_watchpoint: could not find TB for "
                                 "pc=%p", (void *)env->mem_io_pc);
                   }
                   cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                   tb_phys_invalidate(tb, -1);
                   if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                       env->exception_index = EXCP_DEBUG;
                   } else {
                       cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                       tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                   }
                   cpu_resume_from_signal(env, NULL);
               }
           } else {
               wp->flags &= ~BP_WATCHPOINT_HIT;
           }
       }
   }
   
   /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
      so these check for a hit then pass through to the normal out-of-line
      phys routines.  */
   static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
   {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
       return ldub_phys(addr);
   }
   
   static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
   {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
       return lduw_phys(addr);
   }
   
   static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
   {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
       return ldl_phys(addr);
   }
   
   static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
   {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
       stb_phys(addr, val);
   }
   
   static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
   {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
       stw_phys(addr, val);
   }
   
   static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
   {
       check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
       stl_phys(addr, val);
   }
   
   static CPUReadMemoryFunc *watch_mem_read[3] = {
       watch_mem_readb,
       watch_mem_readw,
       watch_mem_readl,
   };
   
   static CPUWriteMemoryFunc *watch_mem_write[3] = {
       watch_mem_writeb,
       watch_mem_writew,
       watch_mem_writel,
   };
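
These handlers only fire once something has armed a watchpoint on the page, at which point the TLB routes accesses through io_mem_watch. A hedged sketch of the arming side, assuming the cpu_watchpoint_insert() helper that fills env->watchpoints in this generation of the tree (the gdbstub is its normal caller).

/* Sketch (assumed helper): watch 4 bytes at vaddr for guest writes and
 * stop before the access completes, as check_watchpoint() above honours
 * via BP_STOP_BEFORE_ACCESS. */
static void example_arm_watchpoint(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, vaddr, 4,
                              BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp) < 0) {
        /* invalid length or out of watchpoint resources */
    }
}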
   
   static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                    unsigned int len)
   {
       uint32_t ret;
       unsigned int idx;
   
       idx = SUBPAGE_IDX(addr);
   #if defined(DEBUG_SUBPAGE)
       printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
              mmio, len, addr, idx);
   #endif
       ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                          addr + mmio->region_offset[idx][0][len]);
   
       return ret;
   }
   
   static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                 uint32_t value, unsigned int len)
   {
       unsigned int idx;
   
       idx = SUBPAGE_IDX(addr);
   #if defined(DEBUG_SUBPAGE)
       printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
              mmio, len, addr, idx, value);
   #endif
       (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                     addr + mmio->region_offset[idx][1][len],
                                     value);
   }
   
   static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
   {
   #if defined(DEBUG_SUBPAGE)
       printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
   #endif
   
       return subpage_readlen(opaque, addr, 0);
   }
   
   static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                               uint32_t value)
   {
   #if defined(DEBUG_SUBPAGE)
       printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
   #endif
       subpage_writelen(opaque, addr, value, 0);
   }
   
   static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
   {
   #if defined(DEBUG_SUBPAGE)
       printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
   #endif
   
       return subpage_readlen(opaque, addr, 1);
   }
   
   static void subpage_writew (void *opaque, target_phys_addr_t addr,
                               uint32_t value)
   {
   #if defined(DEBUG_SUBPAGE)
       printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
   #endif
       subpage_writelen(opaque, addr, value, 1);
   }
   
   static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
   {
   #if defined(DEBUG_SUBPAGE)
       printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
   #endif
   
       return subpage_readlen(opaque, addr, 2);
   }
   
   static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
   {
   #if defined(DEBUG_SUBPAGE)
       printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
   #endif
       subpage_writelen(opaque, addr, value, 2);
   }
   
   static CPUReadMemoryFunc *subpage_read[] = {
       &subpage_readb,
       &subpage_readw,
       &subpage_readl,
   };
   
   static CPUWriteMemoryFunc *subpage_write[] = {
       &subpage_writeb,
       &subpage_writew,
       &subpage_writel,
   };
   
   static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                                ram_addr_t memory, ram_addr_t region_offset)
   {
       int idx, eidx;
       unsigned int i;
   
       if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
           return -1;
       idx = SUBPAGE_IDX(start);
       eidx = SUBPAGE_IDX(end);
   #if defined(DEBUG_SUBPAGE)
       printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
              mmio, start, end, idx, eidx, memory);
   #endif
       memory >>= IO_MEM_SHIFT;
       for (; idx <= eidx; idx++) {
           for (i = 0; i < 4; i++) {
               if (io_mem_read[memory][i]) {
                   mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                   mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                   mmio->region_offset[idx][0][i] = region_offset;
               }
               if (io_mem_write[memory][i]) {
                   mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                   mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                   mmio->region_offset[idx][1][i] = region_offset;
               }
           }
       }
   
       return 0;
   }
   
   static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                              ram_addr_t orig_memory, ram_addr_t region_offset)
   {
       subpage_t *mmio;
       int subpage_memory;
   
       mmio = qemu_mallocz(sizeof(subpage_t));
   
       mmio->base = base;
       subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
   #if defined(DEBUG_SUBPAGE)
       printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
              mmio, base, TARGET_PAGE_SIZE, subpage_memory);
   #endif
       *phys = subpage_memory | IO_MEM_SUBPAGE;
       subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                            region_offset);
   
       return mmio;
   }
   
   static int get_free_io_mem_idx(void)
   {
       int i;
   
       for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
           if (!io_mem_used[i]) {
               io_mem_used[i] = 1;
               return i;
           }
   
       return -1;
   }
   
 static void io_mem_init(void)  static void io_mem_init(void)
 {  {
       int i;
   
     cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);      cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
     cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);      cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
     cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);      cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
     io_mem_nb = 5;      for (i=0; i<5; i++)
           io_mem_used[i] = 1;
   
       io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                             watch_mem_write, NULL);
     /* alloc dirty bits array */      /* alloc dirty bits array */
     phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);      phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
     memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);      memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
Line 2083  static void io_mem_init(void) Line 2842  static void io_mem_init(void)
   
 /* mem_read and mem_write are arrays of functions containing the  /* mem_read and mem_write are arrays of functions containing the
    function to access byte (index 0), word (index 1) and dword (index     function to access byte (index 0), word (index 1) and dword (index
    2). All functions must be supplied. If io_index is non zero, the     2). Functions can be omitted with a NULL function pointer. The
    corresponding io zone is modified. If it is zero, a new io zone is     registered functions may be modified dynamically later.
    allocated. The return value can be used with     If io_index is non zero, the corresponding io zone is
    cpu_register_physical_memory(). (-1) is returned if error. */     modified. If it is zero, a new io zone is allocated. The return
      value can be used with cpu_register_physical_memory(). (-1) is
      returned if error. */
 int cpu_register_io_memory(int io_index,  int cpu_register_io_memory(int io_index,
                            CPUReadMemoryFunc **mem_read,                             CPUReadMemoryFunc **mem_read,
                            CPUWriteMemoryFunc **mem_write,                             CPUWriteMemoryFunc **mem_write,
                            void *opaque)                             void *opaque)
 {  {
     int i;      int i, subwidth = 0;
   
     if (io_index <= 0) {      if (io_index <= 0) {
         if (io_index >= IO_MEM_NB_ENTRIES)          io_index = get_free_io_mem_idx();
             return -1;          if (io_index == -1)
         io_index = io_mem_nb++;              return io_index;
     } else {      } else {
         if (io_index >= IO_MEM_NB_ENTRIES)          if (io_index >= IO_MEM_NB_ENTRIES)
             return -1;              return -1;
     }      }
       
     for(i = 0;i < 3; i++) {      for(i = 0;i < 3; i++) {
           if (!mem_read[i] || !mem_write[i])
               subwidth = IO_MEM_SUBWIDTH;
         io_mem_read[io_index][i] = mem_read[i];          io_mem_read[io_index][i] = mem_read[i];
         io_mem_write[io_index][i] = mem_write[i];          io_mem_write[io_index][i] = mem_write[i];
     }      }
     io_mem_opaque[io_index] = opaque;      io_mem_opaque[io_index] = opaque;
     return io_index << IO_MEM_SHIFT;      return (io_index << IO_MEM_SHIFT) | subwidth;
   }
   
   void cpu_unregister_io_memory(int io_table_address)
   {
       int i;
       int io_index = io_table_address >> IO_MEM_SHIFT;
   
       for (i=0;i < 3; i++) {
           io_mem_read[io_index][i] = unassigned_mem_read[i];
           io_mem_write[io_index][i] = unassigned_mem_write[i];
       }
       io_mem_opaque[io_index] = NULL;
       io_mem_used[io_index] = 0;
 }  }
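
Putting the comment above into practice: a hypothetical device registers three-width callbacks, gets a fresh slot by passing io_index == 0, and hands the result to cpu_register_physical_memory(). Leaving the byte and word entries NULL marks the slot IO_MEM_SUBWIDTH, so narrow accesses are routed through a subpage rather than a missing callback. MyDevState and the register layout are invented for illustration; the physical-memory wrapper signature is assumed as before.

/* Hypothetical 32-bit-only MMIO device. */
typedef struct MyDevState {
    uint32_t regs[64];
} MyDevState;

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->regs[(addr >> 2) & 0x3f];
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[(addr >> 2) & 0x3f] = val;
}

static CPUReadMemoryFunc *mydev_read[3] = {
    NULL, NULL, mydev_readl,       /* byte/word omitted: subwidth region */
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_map(MyDevState *s, target_phys_addr_t base)
{
    int index = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(base, 0x1000, index);
}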
   
 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)  CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
Line 2121  CPUReadMemoryFunc **cpu_get_io_memory_re Line 2897  CPUReadMemoryFunc **cpu_get_io_memory_re
     return io_mem_read[io_index >> IO_MEM_SHIFT];      return io_mem_read[io_index >> IO_MEM_SHIFT];
 }  }
   
   #endif /* !defined(CONFIG_USER_ONLY) */
   
 /* physical memory access (slow version, mainly for debug) */  /* physical memory access (slow version, mainly for debug) */
 #if defined(CONFIG_USER_ONLY)  #if defined(CONFIG_USER_ONLY)
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,   void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write)                              int len, int is_write)
 {  {
     int l, flags;      int l, flags;
     target_ulong page;      target_ulong page;
       void * p;
   
     while (len > 0) {      while (len > 0) {
         page = addr & TARGET_PAGE_MASK;          page = addr & TARGET_PAGE_MASK;
Line 2140  void cpu_physical_memory_rw(target_phys_ Line 2919  void cpu_physical_memory_rw(target_phys_
         if (is_write) {          if (is_write) {
             if (!(flags & PAGE_WRITE))              if (!(flags & PAGE_WRITE))
                 return;                  return;
             memcpy((uint8_t *)addr, buf, len);              /* XXX: this code should not depend on lock_user */
               if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                   /* FIXME - should this return an error rather than just fail? */
                   return;
               memcpy(p, buf, l);
               unlock_user(p, addr, l);
         } else {          } else {
             if (!(flags & PAGE_READ))              if (!(flags & PAGE_READ))
                 return;                  return;
             memcpy(buf, (uint8_t *)addr, len);              /* XXX: this code should not depend on lock_user */
               if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                   /* FIXME - should this return an error rather than just fail? */
                   return;
               memcpy(buf, p, l);
               unlock_user(p, addr, 0);
         }          }
         len -= l;          len -= l;
         buf += l;          buf += l;
Line 2152  void cpu_physical_memory_rw(target_phys_ Line 2941  void cpu_physical_memory_rw(target_phys_
     }      }
 }  }
   
 /* never used */  
 uint32_t ldl_phys(target_phys_addr_t addr)  
 {  
     return 0;  
 }  
   
 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)  
 {  
 }  
   
 void stl_phys(target_phys_addr_t addr, uint32_t val)  
 {  
 }  
   
 #else  #else
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,   void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write)                              int len, int is_write)
 {  {
     int l, io_index;      int l, io_index;
Line 2176  void cpu_physical_memory_rw(target_phys_ Line 2951  void cpu_physical_memory_rw(target_phys_
     target_phys_addr_t page;      target_phys_addr_t page;
     unsigned long pd;      unsigned long pd;
     PhysPageDesc *p;      PhysPageDesc *p;
       
     while (len > 0) {      while (len > 0) {
         page = addr & TARGET_PAGE_MASK;          page = addr & TARGET_PAGE_MASK;
         l = (page + TARGET_PAGE_SIZE) - addr;          l = (page + TARGET_PAGE_SIZE) - addr;
Line 2188  void cpu_physical_memory_rw(target_phys_ Line 2963  void cpu_physical_memory_rw(target_phys_
         } else {          } else {
             pd = p->phys_offset;              pd = p->phys_offset;
         }          }
           
         if (is_write) {          if (is_write) {
             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {              if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                   target_phys_addr_t addr1 = addr;
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);                  io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                 if (l >= 4 && ((addr & 3) == 0)) {                  if (p)
                       addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                   /* XXX: could force cpu_single_env to NULL to avoid
                      potential bugs */
                   if (l >= 4 && ((addr1 & 3) == 0)) {
                     /* 32 bit write access */                      /* 32 bit write access */
                     val = ldl_p(buf);                      val = ldl_p(buf);
                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);                      io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                     l = 4;                      l = 4;
                 } else if (l >= 2 && ((addr & 1) == 0)) {                  } else if (l >= 2 && ((addr1 & 1) == 0)) {
                     /* 16 bit write access */                      /* 16 bit write access */
                     val = lduw_p(buf);                      val = lduw_p(buf);
                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);                      io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                     l = 2;                      l = 2;
                 } else {                  } else {
                     /* 8 bit write access */                      /* 8 bit write access */
                     val = ldub_p(buf);                      val = ldub_p(buf);
                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);                      io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                     l = 1;                      l = 1;
                 }                  }
             } else {              } else {
Line 2218  void cpu_physical_memory_rw(target_phys_ Line 2998  void cpu_physical_memory_rw(target_phys_
                     /* invalidate code */                      /* invalidate code */
                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);                      tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                     /* set dirty bit */                      /* set dirty bit */
                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=                       phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                         (0xff & ~CODE_DIRTY_FLAG);                          (0xff & ~CODE_DIRTY_FLAG);
                 }                  }
             }              }
         } else {          } else {
             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {              if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                   !(pd & IO_MEM_ROMD)) {
                   target_phys_addr_t addr1 = addr;
                 /* I/O case */                  /* I/O case */
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);                  io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                 if (l >= 4 && ((addr & 3) == 0)) {                  if (p)
                       addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                   if (l >= 4 && ((addr1 & 3) == 0)) {
                     /* 32 bit read access */                      /* 32 bit read access */
                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);                      val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                     stl_p(buf, val);                      stl_p(buf, val);
                     l = 4;                      l = 4;
                 } else if (l >= 2 && ((addr & 1) == 0)) {                  } else if (l >= 2 && ((addr1 & 1) == 0)) {
                     /* 16 bit read access */                      /* 16 bit read access */
                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);                      val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                     stw_p(buf, val);                      stw_p(buf, val);
                     l = 2;                      l = 2;
                 } else {                  } else {
                     /* 8 bit read access */                      /* 8 bit read access */
                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);                      val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                     stb_p(buf, val);                      stb_p(buf, val);
                     l = 1;                      l = 1;
                 }                  }
             } else {              } else {
                 /* RAM case */                  /* RAM case */
                 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +                   ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                     (addr & ~TARGET_PAGE_MASK);                      (addr & ~TARGET_PAGE_MASK);
                 memcpy(buf, ptr, l);                  memcpy(buf, ptr, l);
             }              }
Line 2255  void cpu_physical_memory_rw(target_phys_ Line 3039  void cpu_physical_memory_rw(target_phys_
     }      }
 }  }
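
A caller-side view of the function above, using the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers that appear later in this file: a DMA-style read-modify-write of a guest-resident descriptor. The descriptor layout and status bit are illustrative only.

/* Sketch: fetch a 16-byte descriptor from guest physical memory, flag it
 * as completed, and write it back.  desc_pa is a guest physical address. */
static void example_complete_descriptor(target_phys_addr_t desc_pa)
{
    uint8_t desc[16];

    cpu_physical_memory_read(desc_pa, desc, sizeof(desc));
    desc[0] |= 0x80;                          /* illustrative status bit */
    cpu_physical_memory_write(desc_pa, desc, sizeof(desc));
}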
   
    /* used for ROM loading: can write in RAM and ROM */
   void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                      const uint8_t *buf, int len)
   {
       int l;
       uint8_t *ptr;
       target_phys_addr_t page;
       unsigned long pd;
       PhysPageDesc *p;
   
       while (len > 0) {
           page = addr & TARGET_PAGE_MASK;
           l = (page + TARGET_PAGE_SIZE) - addr;
           if (l > len)
               l = len;
           p = phys_page_find(page >> TARGET_PAGE_BITS);
           if (!p) {
               pd = IO_MEM_UNASSIGNED;
           } else {
               pd = p->phys_offset;
           }
   
           if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
               (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
               !(pd & IO_MEM_ROMD)) {
               /* do nothing */
           } else {
               unsigned long addr1;
               addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
               /* ROM/RAM case */
               ptr = phys_ram_base + addr1;
               memcpy(ptr, buf, l);
           }
           len -= l;
           buf += l;
           addr += l;
       }
   }
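
Typical use of the helper above is firmware loading: the target region was registered with the ROM attribute, so the ordinary write path would skip it, while this helper copies straight into the backing RAM. Address and variables are illustrative.

/* Sketch: place a BIOS image just below the top of the 32-bit space. */
cpu_physical_memory_write_rom(0xfffc0000, bios_data, bios_size);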
   
   typedef struct {
       void *buffer;
       target_phys_addr_t addr;
       target_phys_addr_t len;
   } BounceBuffer;
   
   static BounceBuffer bounce;
   
   typedef struct MapClient {
       void *opaque;
       void (*callback)(void *opaque);
       LIST_ENTRY(MapClient) link;
   } MapClient;
   
   static LIST_HEAD(map_client_list, MapClient) map_client_list
       = LIST_HEAD_INITIALIZER(map_client_list);
   
   void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
   {
       MapClient *client = qemu_malloc(sizeof(*client));
   
       client->opaque = opaque;
       client->callback = callback;
       LIST_INSERT_HEAD(&map_client_list, client, link);
       return client;
   }
   
   void cpu_unregister_map_client(void *_client)
   {
       MapClient *client = (MapClient *)_client;
   
       LIST_REMOVE(client, link);
   }
   
   static void cpu_notify_map_clients(void)
   {
       MapClient *client;
   
       while (!LIST_EMPTY(&map_client_list)) {
           client = LIST_FIRST(&map_client_list);
           client->callback(client->opaque);
           LIST_REMOVE(client, link);
       }
   }
   
   /* Map a physical memory region into a host virtual address.
    * May map a subset of the requested range, given by and returned in *plen.
    * May return NULL if resources needed to perform the mapping are exhausted.
    * Use only for reads OR writes - not for read-modify-write operations.
    * Use cpu_register_map_client() to know when retrying the map operation is
    * likely to succeed.
    */
   void *cpu_physical_memory_map(target_phys_addr_t addr,
                                 target_phys_addr_t *plen,
                                 int is_write)
   {
       target_phys_addr_t len = *plen;
       target_phys_addr_t done = 0;
       int l;
       uint8_t *ret = NULL;
       uint8_t *ptr;
       target_phys_addr_t page;
       unsigned long pd;
       PhysPageDesc *p;
       unsigned long addr1;
   
       while (len > 0) {
           page = addr & TARGET_PAGE_MASK;
           l = (page + TARGET_PAGE_SIZE) - addr;
           if (l > len)
               l = len;
           p = phys_page_find(page >> TARGET_PAGE_BITS);
           if (!p) {
               pd = IO_MEM_UNASSIGNED;
           } else {
               pd = p->phys_offset;
           }
   
           if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
               if (done || bounce.buffer) {
                   break;
               }
               bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
               bounce.addr = addr;
               bounce.len = l;
               if (!is_write) {
                   cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
               }
               ptr = bounce.buffer;
           } else {
               addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
               ptr = phys_ram_base + addr1;
           }
           if (!done) {
               ret = ptr;
           } else if (ret + done != ptr) {
               break;
           }
   
           len -= l;
           addr += l;
           done += l;
       }
       *plen = done;
       return ret;
   }
   
   /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
    * Will also mark the memory as dirty if is_write == 1.  access_len gives
    * the amount of memory that was actually read or written by the caller.
    */
   void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                                  int is_write, target_phys_addr_t access_len)
   {
       if (buffer != bounce.buffer) {
           if (is_write) {
               unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
               while (access_len) {
                   unsigned l;
                   l = TARGET_PAGE_SIZE;
                   if (l > access_len)
                       l = access_len;
                   if (!cpu_physical_memory_is_dirty(addr1)) {
                       /* invalidate code */
                       tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                       /* set dirty bit */
                       phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                           (0xff & ~CODE_DIRTY_FLAG);
                   }
                   addr1 += l;
                   access_len -= l;
               }
           }
           return;
       }
       if (is_write) {
           cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
       }
       qemu_free(bounce.buffer);
       bounce.buffer = NULL;
       cpu_notify_map_clients();
   }
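
Taken together, map/unmap give devices a zero-copy DMA path, with the single bounce buffer above as a shared fallback. A hedged sketch of a read from guest memory into a host buffer; a real device would register a map client and retry from its callback instead of immediately falling back when the bounce buffer is busy.

/* Sketch: copy len bytes of guest memory at addr into dest. */
static void example_dma_read(target_phys_addr_t addr,
                             target_phys_addr_t len, uint8_t *dest)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 0 /* !is_write */);

    if (host && plen == len) {
        memcpy(dest, host, len);                   /* fast, zero-copy path */
        cpu_physical_memory_unmap(host, plen, 0, plen);
    } else {
        if (host)
            cpu_physical_memory_unmap(host, plen, 0, 0);
        cpu_physical_memory_rw(addr, dest, len, 0);    /* slow path */
    }
}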
   
 /* warning: addr must be aligned */  /* warning: addr must be aligned */
 uint32_t ldl_phys(target_phys_addr_t addr)  uint32_t ldl_phys(target_phys_addr_t addr)
 {  {
Line 2270  uint32_t ldl_phys(target_phys_addr_t add Line 3236  uint32_t ldl_phys(target_phys_addr_t add
     } else {      } else {
         pd = p->phys_offset;          pd = p->phys_offset;
     }      }
           
     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {      if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
           !(pd & IO_MEM_ROMD)) {
         /* I/O case */          /* I/O case */
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);          io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);          val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
     } else {      } else {
         /* RAM case */          /* RAM case */
         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +           ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
             (addr & ~TARGET_PAGE_MASK);              (addr & ~TARGET_PAGE_MASK);
         val = ldl_p(ptr);          val = ldl_p(ptr);
     }      }
     return val;      return val;
 }  }
   
   /* warning: addr must be aligned */
   uint64_t ldq_phys(target_phys_addr_t addr)
   {
       int io_index;
       uint8_t *ptr;
       uint64_t val;
       unsigned long pd;
       PhysPageDesc *p;
   
       p = phys_page_find(addr >> TARGET_PAGE_BITS);
       if (!p) {
           pd = IO_MEM_UNASSIGNED;
       } else {
           pd = p->phys_offset;
       }
   
       if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
           !(pd & IO_MEM_ROMD)) {
           /* I/O case */
           io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
   #ifdef TARGET_WORDS_BIGENDIAN
           val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
           val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
   #else
           val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
           val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
   #endif
       } else {
           /* RAM case */
           ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
               (addr & ~TARGET_PAGE_MASK);
           val = ldq_p(ptr);
       }
       return val;
   }
   
   /* XXX: optimize */
   uint32_t ldub_phys(target_phys_addr_t addr)
   {
       uint8_t val;
       cpu_physical_memory_read(addr, &val, 1);
       return val;
   }
   
   /* XXX: optimize */
   uint32_t lduw_phys(target_phys_addr_t addr)
   {
       uint16_t val;
       cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
       return tswap16(val);
   }
   
  /* warning: addr must be aligned. The ram page is not marked as dirty  /* warning: addr must be aligned. The ram page is not marked as dirty
    and the code inside is not invalidated. It is useful if the dirty     and the code inside is not invalidated. It is useful if the dirty
    bits are used to track modified PTEs */     bits are used to track modified PTEs */
Line 2300  void stl_phys_notdirty(target_phys_addr_ Line 3323  void stl_phys_notdirty(target_phys_addr_
     } else {      } else {
         pd = p->phys_offset;          pd = p->phys_offset;
     }      }
           
     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {      if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);          io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);          io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
     } else {      } else {
         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +           unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
             (addr & ~TARGET_PAGE_MASK);          ptr = phys_ram_base + addr1;
         stl_p(ptr, val);          stl_p(ptr, val);
   
           if (unlikely(in_migration)) {
               if (!cpu_physical_memory_is_dirty(addr1)) {
                   /* invalidate code */
                   tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                   /* set dirty bit */
                   phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                       (0xff & ~CODE_DIRTY_FLAG);
               }
           }
       }
   }
   
   void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
   {
       int io_index;
       uint8_t *ptr;
       unsigned long pd;
       PhysPageDesc *p;
   
       p = phys_page_find(addr >> TARGET_PAGE_BITS);
       if (!p) {
           pd = IO_MEM_UNASSIGNED;
       } else {
           pd = p->phys_offset;
       }
   
       if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
           io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
   #ifdef TARGET_WORDS_BIGENDIAN
           io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
           io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
   #else
           io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
           io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
   #endif
       } else {
           ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
               (addr & ~TARGET_PAGE_MASK);
           stq_p(ptr, val);
     }      }
 }  }
   
Line 2325  void stl_phys(target_phys_addr_t addr, u Line 3392  void stl_phys(target_phys_addr_t addr, u
     } else {      } else {
         pd = p->phys_offset;          pd = p->phys_offset;
     }      }
           
     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {      if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);          io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
           if (p)
               addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);          io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
     } else {      } else {
         unsigned long addr1;          unsigned long addr1;
Line 2345  void stl_phys(target_phys_addr_t addr, u Line 3414  void stl_phys(target_phys_addr_t addr, u
     }      }
 }  }
   
   /* XXX: optimize */
   void stb_phys(target_phys_addr_t addr, uint32_t val)
   {
       uint8_t v = val;
       cpu_physical_memory_write(addr, &v, 1);
   }
   
   /* XXX: optimize */
   void stw_phys(target_phys_addr_t addr, uint32_t val)
   {
       uint16_t v = tswap16(val);
       cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
   }
   
   /* XXX: optimize */
   void stq_phys(target_phys_addr_t addr, uint64_t val)
   {
       val = tswap64(val);
       cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
   }
   
 #endif  #endif
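
The fixed-width helpers above are the building blocks for walking guest in-memory structures. A sketch with an invented two-level page-table layout, also showing the PTE-tracking use case that the stl_phys_notdirty() comment describes: the accessed bit is updated without marking the RAM page dirty.

/* Sketch (illustrative table layout): set the accessed bit of the PTE
 * covering vaddr in a page table rooted at pt_base. */
static void example_set_accessed_bit(target_phys_addr_t pt_base,
                                     uint32_t vaddr)
{
    uint32_t pde = ldl_phys(pt_base + ((vaddr >> 22) & 0x3ff) * 4);

    if (pde & 1) {                                      /* present */
        target_phys_addr_t pte_addr =
            (pde & ~0xfffu) + ((vaddr >> 12) & 0x3ff) * 4;
        uint32_t pte = ldl_phys(pte_addr);

        stl_phys_notdirty(pte_addr, pte | 0x20);        /* accessed */
    }
}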
   
 /* virtual memory access for debug */  /* virtual memory access for debug */
 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,   int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                         uint8_t *buf, int len, int is_write)                          uint8_t *buf, int len, int is_write)
 {  {
     int l;      int l;
     target_ulong page, phys_addr;      target_phys_addr_t phys_addr;
       target_ulong page;
   
     while (len > 0) {      while (len > 0) {
         page = addr & TARGET_PAGE_MASK;          page = addr & TARGET_PAGE_MASK;
Line 2363  int cpu_memory_rw_debug(CPUState *env, t Line 3454  int cpu_memory_rw_debug(CPUState *env, t
         l = (page + TARGET_PAGE_SIZE) - addr;          l = (page + TARGET_PAGE_SIZE) - addr;
         if (l > len)          if (l > len)
             l = len;              l = len;
         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),           cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                                buf, l, is_write);                                 buf, l, is_write);
         len -= l;          len -= l;
         buf += l;          buf += l;
Line 2372  int cpu_memory_rw_debug(CPUState *env, t Line 3463  int cpu_memory_rw_debug(CPUState *env, t
     return 0;      return 0;
 }  }
   
   /* in deterministic execution mode, instructions doing device I/Os
      must be at the end of the TB */
   void cpu_io_recompile(CPUState *env, void *retaddr)
   {
       TranslationBlock *tb;
       uint32_t n, cflags;
       target_ulong pc, cs_base;
       uint64_t flags;
   
       tb = tb_find_pc((unsigned long)retaddr);
       if (!tb) {
           cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                     retaddr);
       }
       n = env->icount_decr.u16.low + tb->icount;
       cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
       /* Calculate how many instructions had been executed before the fault
          occurred.  */
       n = n - env->icount_decr.u16.low;
       /* Generate a new TB ending on the I/O insn.  */
       n++;
       /* On MIPS and SH, delay slot instructions can only be restarted if
          they were already the first instruction in the TB.  If this is not
          the first instruction in a TB then re-execute the preceding
          branch.  */
   #if defined(TARGET_MIPS)
       if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
           env->active_tc.PC -= 4;
           env->icount_decr.u16.low++;
           env->hflags &= ~MIPS_HFLAG_BMASK;
       }
   #elif defined(TARGET_SH4)
       if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
               && n > 1) {
           env->pc -= 2;
           env->icount_decr.u16.low++;
           env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
       }
   #endif
       /* This should never happen.  */
       if (n > CF_COUNT_MASK)
           cpu_abort(env, "TB too big during recompile");
   
       cflags = n | CF_LAST_IO;
       pc = tb->pc;
       cs_base = tb->cs_base;
       flags = tb->flags;
       tb_phys_invalidate(tb, -1);
       /* FIXME: In theory this could raise an exception.  In practice
          we have already translated the block once so it's probably ok.  */
       tb_gen_code(env, pc, cs_base, flags, cflags);
       /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
          the first in the TB) then we end up generating a whole new TB and
          repeating the fault, which is horribly inefficient.
          Better would be to execute just this insn uncached, or generate a
          second new TB.  */
       cpu_resume_from_signal(env, NULL);
   }
   
 void dump_exec_info(FILE *f,  void dump_exec_info(FILE *f,
                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))                      int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
 {  {
     int i, target_code_size, max_target_code_size;      int i, target_code_size, max_target_code_size;
     int direct_jmp_count, direct_jmp2_count, cross_page;      int direct_jmp_count, direct_jmp2_count, cross_page;
     TranslationBlock *tb;      TranslationBlock *tb;
       
     target_code_size = 0;      target_code_size = 0;
     max_target_code_size = 0;      max_target_code_size = 0;
     cross_page = 0;      cross_page = 0;
Line 2399  void dump_exec_info(FILE *f, Line 3549  void dump_exec_info(FILE *f,
         }          }
     }      }
     /* XXX: avoid using doubles ? */      /* XXX: avoid using doubles ? */
     cpu_fprintf(f, "TB count            %d\n", nb_tbs);      cpu_fprintf(f, "Translation buffer state:\n");
     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",       cpu_fprintf(f, "gen code size       %ld/%ld\n",
                   code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
       cpu_fprintf(f, "TB count            %d/%d\n", 
                   nb_tbs, code_gen_max_blocks);
       cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                 nb_tbs ? target_code_size / nb_tbs : 0,                  nb_tbs ? target_code_size / nb_tbs : 0,
                 max_target_code_size);                  max_target_code_size);
     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",       cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,                  nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);                  target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",       cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
             cross_page,               cross_page,
             nb_tbs ? (cross_page * 100) / nb_tbs : 0);              nb_tbs ? (cross_page * 100) / nb_tbs : 0);
     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",      cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                 direct_jmp_count,                   direct_jmp_count,
                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,                  nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                 direct_jmp2_count,                  direct_jmp2_count,
                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);                  nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
       cpu_fprintf(f, "\nStatistics:\n");
     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);      cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);      cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);      cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
       tcg_dump_info(f, cpu_fprintf);
 }  }
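
For reference, the expected call shape: fprintf already matches the cpu_fprintf parameter, so the statistics can be dumped to any stdio stream; in the tree this is normally reached through the monitor's "info jit" command.

/* Sketch: dump translation-buffer statistics to stderr. */
dump_exec_info(stderr, fprintf);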
   
 #if !defined(CONFIG_USER_ONLY)   #if !defined(CONFIG_USER_ONLY)
   
 #define MMUSUFFIX _cmmu  #define MMUSUFFIX _cmmu
 #define GETPC() NULL  #define GETPC() NULL
