--- version 1.1.1.1, 2018/04/24 16:37:52
+++ version 1.1.1.5, 2018/04/24 16:45:02
@@ -34 +34 @@
#include "cpu.h" |
#include "cpu.h" |
#include "exec-all.h" |
#include "exec-all.h" |
|
#if defined(CONFIG_USER_ONLY) |
|
#include <qemu.h> |
|
#endif |
|
|
//#define DEBUG_TB_INVALIDATE |
//#define DEBUG_TB_INVALIDATE |
//#define DEBUG_FLUSH |
//#define DEBUG_FLUSH |
//#define DEBUG_TLB |
//#define DEBUG_TLB |
|
//#define DEBUG_UNASSIGNED |
|
|
/* make various TB consistency checks */ |
/* make various TB consistency checks */ |
//#define DEBUG_TB_CHECK |
//#define DEBUG_TB_CHECK |
//#define DEBUG_TLB_CHECK |
//#define DEBUG_TLB_CHECK |
|
|
|
#if !defined(CONFIG_USER_ONLY) |
|
/* TB consistency checks only implemented for usermode emulation. */ |
|
#undef DEBUG_TB_CHECK |
|
#endif |
|
|
/* threshold to flush the translated code buffer */ |
/* threshold to flush the translated code buffer */ |
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE) |
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE) |
|
|
@@ -61 +70 @@
 #endif

 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
-TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 int nb_tbs;
 /* any access to the tbs or the page table must use this lock */
@@ -75 +83 @@ int phys_ram_fd;
 uint8_t *phys_ram_base;
 uint8_t *phys_ram_dirty;

+CPUState *first_cpu;
+/* current CPU in the current thread. It is only valid inside
+cpu_exec() */
+CPUState *cpu_single_env;

 typedef struct PageDesc {
 /* list of TBs intersecting this ram page */
 TranslationBlock *first_tb;
@@ -92 +105 @@ typedef struct PhysPageDesc {
 uint32_t phys_offset;
 } PhysPageDesc;
-/* Note: the VirtPage handling is absolete and will be suppressed
-ASAP */
-typedef struct VirtPageDesc {
-/* physical address of code page. It is valid only if 'valid_tag'
-matches 'virt_valid_tag' */
-target_ulong phys_addr;
-unsigned int valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-/* original page access rights. It is valid only if 'valid_tag'
-matches 'virt_valid_tag' */
-unsigned int prot;
-#endif
-} VirtPageDesc;

 #define L2_BITS 10
 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

@@ -123 +122 @@ unsigned long qemu_host_page_mask;
 static PageDesc *l1_map[L1_SIZE];
 PhysPageDesc **l1_phys_map;

-#if !defined(CONFIG_USER_ONLY)
-#if TARGET_LONG_BITS > 32
-#define VIRT_L_BITS 9
-#define VIRT_L_SIZE (1 << VIRT_L_BITS)
-static void *l1_virt_map[VIRT_L_SIZE];
-#else
-static VirtPageDesc *l1_virt_map[L1_SIZE];
-#endif
-static unsigned int virt_valid_tag;
-#endif

 /* io memory support */
 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
@@ -190 +178 @@ static void page_init(void)
 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
 qemu_host_page_bits++;
 qemu_host_page_mask = ~(qemu_host_page_size - 1);
-#if !defined(CONFIG_USER_ONLY)
-virt_valid_tag = 1;
-#endif
 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
 }
@@ -225 +210 @@ static inline PageDesc *page_find(unsign
 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
 {
 void **lp, **p;
+PhysPageDesc *pd;

 p = (void **)l1_phys_map;
 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
@@ -244 +230 @@ static PhysPageDesc *phys_page_find_allo
 }
 #endif
 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
-p = *lp;
+pd = *lp;
-if (!p) {
+if (!pd) {
+int i;
 /* allocate if not found */
 if (!alloc)
 return NULL;
-p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
+pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
-memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
+*lp = pd;
-*lp = p;
+for (i = 0; i < L2_SIZE; i++)
+pd[i].phys_offset = IO_MEM_UNASSIGNED;
 }
-return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
+return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
 }
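Note: the hunk above keeps the same allocate-on-demand two-level lookup, but the newer revision marks every freshly allocated second-level entry as unassigned instead of zero-filling it. The following standalone sketch (illustrative only, not QEMU code; names, sizes and the UNASSIGNED value are invented here) shows the same pattern in isolation:

    #include <stdio.h>
    #include <stdlib.h>

    #define L1_BITS 10
    #define L2_BITS 10
    #define L1_SIZE (1 << L1_BITS)
    #define L2_SIZE (1 << L2_BITS)
    #define UNASSIGNED 0xffffffffu   /* stand-in for IO_MEM_UNASSIGNED */

    typedef struct { unsigned int phys_offset; } Desc;

    static Desc *l1[L1_SIZE];        /* first level: pointers to L2 blocks */

    static Desc *find_alloc(unsigned long index, int alloc)
    {
        Desc **lp = &l1[(index >> L2_BITS) & (L1_SIZE - 1)];
        Desc *d = *lp;
        if (!d) {
            if (!alloc)
                return NULL;                     /* lookup only: not mapped */
            d = calloc(L2_SIZE, sizeof(*d));     /* allocate L2 block lazily */
            for (int i = 0; i < L2_SIZE; i++)
                d[i].phys_offset = UNASSIGNED;   /* start out "unassigned" */
            *lp = d;
        }
        return d + (index & (L2_SIZE - 1));
    }

    int main(void)
    {
        find_alloc(0x1234, 1)->phys_offset = 42;
        printf("%u %p\n", find_alloc(0x1234, 0)->phys_offset,
               (void *)find_alloc(0x999999, 0));  /* prints 42, then a null pointer */
        return 0;
    }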

 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
@@ -262 +250 @@ static inline PhysPageDesc *phys_page_fi
 }

 #if !defined(CONFIG_USER_ONLY)
-static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
+static void tlb_protect_code(ram_addr_t ram_addr);
-target_ulong vaddr);
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
 target_ulong vaddr);
-static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
-{
-#if TARGET_LONG_BITS > 32
-void **p, **lp;

-p = l1_virt_map;
-lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-p = *lp;
-if (!p) {
-if (!alloc)
-return NULL;
-p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-*lp = p;
-}
-lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-p = *lp;
-if (!p) {
-if (!alloc)
-return NULL;
-p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-*lp = p;
-}
-lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-p = *lp;
-if (!p) {
-if (!alloc)
-return NULL;
-p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-*lp = p;
-}
-lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-p = *lp;
-if (!p) {
-if (!alloc)
-return NULL;
-p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-*lp = p;
-}
-lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-p = *lp;
-if (!p) {
-if (!alloc)
-return NULL;
-p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
-*lp = p;
-}
-return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
-#else
-VirtPageDesc *p, **lp;

-lp = &l1_virt_map[index >> L2_BITS];
-p = *lp;
-if (!p) {
-/* allocate if not found */
-if (!alloc)
-return NULL;
-p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
-*lp = p;
-}
-return p + (index & (L2_SIZE - 1));
-#endif
-}

-static inline VirtPageDesc *virt_page_find(target_ulong index)
-{
-return virt_page_find_alloc(index, 0);
-}

-#if TARGET_LONG_BITS > 32
-static void virt_page_flush_internal(void **p, int level)
-{
-int i;
-if (level == 0) {
-VirtPageDesc *q = (VirtPageDesc *)p;
-for(i = 0; i < VIRT_L_SIZE; i++)
-q[i].valid_tag = 0;
-} else {
-level--;
-for(i = 0; i < VIRT_L_SIZE; i++) {
-if (p[i])
-virt_page_flush_internal(p[i], level);
-}
-}
-}
-#endif
 #endif
-static void virt_page_flush(void)
+void cpu_exec_init(CPUState *env)
 {
-virt_valid_tag++;
+CPUState **penv;
+int cpu_index;

-if (virt_valid_tag == 0) {
-virt_valid_tag = 1;
-#if TARGET_LONG_BITS > 32
-virt_page_flush_internal(l1_virt_map, 5);
-#else
-{
-int i, j;
-VirtPageDesc *p;
-for(i = 0; i < L1_SIZE; i++) {
-p = l1_virt_map[i];
-if (p) {
-for(j = 0; j < L2_SIZE; j++)
-p[j].valid_tag = 0;
-}
-}
-}
-#endif
-}
-}
-#else
-static void virt_page_flush(void)
-{
-}
-#endif

-void cpu_exec_init(void)
-{
 if (!code_gen_ptr) {
 code_gen_ptr = code_gen_buffer;
 page_init();
 io_mem_init();
 }
+env->next_cpu = NULL;
+penv = &first_cpu;
+cpu_index = 0;
+while (*penv != NULL) {
+penv = (CPUState **)&(*penv)->next_cpu;
+cpu_index++;
+}
+env->cpu_index = cpu_index;
+*penv = env;
 }
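Note: in the hunk above, cpu_exec_init() now takes the CPUState and appends it to a global singly linked list headed by first_cpu, numbering CPUs as it goes. A minimal standalone sketch of that registration pattern (illustrative only; the Cpu type and cpu_register name are invented here):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Cpu {
        int cpu_index;
        struct Cpu *next_cpu;
    } Cpu;

    static Cpu *first_cpu;

    static void cpu_register(Cpu *env)
    {
        Cpu **penv = &first_cpu;
        int cpu_index = 0;

        while (*penv != NULL) {          /* walk to the tail, counting entries */
            penv = &(*penv)->next_cpu;
            cpu_index++;
        }
        env->cpu_index = cpu_index;
        env->next_cpu = NULL;
        *penv = env;                     /* append at the tail */
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            cpu_register(calloc(1, sizeof(Cpu)));
        for (Cpu *c = first_cpu; c; c = c->next_cpu)
            printf("cpu %d\n", c->cpu_index);   /* cpu 0, cpu 1, cpu 2 */
        return 0;
    }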

 static inline void invalidate_page_bitmap(PageDesc *p)
@@ -420 +305 @@ static void page_flush_tb(void)

 /* flush all the translation blocks */
 /* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUState *env)
+void tb_flush(CPUState *env1)
 {
+CPUState *env;
 #if defined(DEBUG_FLUSH)
 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
 code_gen_ptr - code_gen_buffer,
@@ -429 +315 @@ void tb_flush(CPUState *env)
 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
 #endif
 nb_tbs = 0;
-memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
-virt_page_flush();
+for(env = first_cpu; env != NULL; env = env->next_cpu) {
+memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
+}

 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
 page_flush_tb();
@@ -448 +336 @@ static void tb_invalidate_check(unsigned
 TranslationBlock *tb;
 int i;
 address &= TARGET_PAGE_MASK;
-for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
+for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
-for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
+for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
 address >= tb->pc + tb->size)) {
 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
-address, tb->pc, tb->size);
+address, (long)tb->pc, tb->size);
 }
 }
 }
@@ -465 +353 @@ static void tb_page_check(void)
 TranslationBlock *tb;
 int i, flags1, flags2;

-for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
+for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
-for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
+for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
 flags1 = page_get_flags(tb->pc);
 flags2 = page_get_flags(tb->pc + tb->size - 1);
 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
-tb->pc, tb->size, flags1, flags2);
+(long)tb->pc, tb->size, flags1, flags2);
 }
 }
 }
@@ -566 +454 @@ static inline void tb_reset_jump(Transla
 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
 }

-static inline void tb_invalidate(TranslationBlock *tb)
+static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
 {
+CPUState *env;
+PageDesc *p;
 unsigned int h, n1;
-TranslationBlock *tb1, *tb2, **ptb;
+target_ulong phys_pc;
+TranslationBlock *tb1, *tb2;

+/* remove the TB from the hash list */
+phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
+h = tb_phys_hash_func(phys_pc);
+tb_remove(&tb_phys_hash[h], tb,
+offsetof(TranslationBlock, phys_hash_next));

+/* remove the TB from the page list */
+if (tb->page_addr[0] != page_addr) {
+p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
+tb_page_remove(&p->first_tb, tb);
+invalidate_page_bitmap(p);
+}
+if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
+p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+tb_page_remove(&p->first_tb, tb);
+invalidate_page_bitmap(p);
+}

 tb_invalidated_flag = 1;

 /* remove the TB from the hash list */
-h = tb_hash_func(tb->pc);
+h = tb_jmp_cache_hash_func(tb->pc);
-ptb = &tb_hash[h];
+for(env = first_cpu; env != NULL; env = env->next_cpu) {
-for(;;) {
+if (env->tb_jmp_cache[h] == tb)
-tb1 = *ptb;
+env->tb_jmp_cache[h] = NULL;
-/* NOTE: the TB is not necessarily linked in the hash. It
-indicates that it is not currently used */
-if (tb1 == NULL)
-return;
-if (tb1 == tb) {
-*ptb = tb1->hash_next;
-break;
-}
-ptb = &tb1->hash_next;
-}
 }

 /* suppress this TB from the two jump lists */
@@ -606 +506 @@ static inline void tb_invalidate(Transla
 tb1 = tb2;
 }
 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
-}

-static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
-{
-PageDesc *p;
-unsigned int h;
-target_ulong phys_pc;

-/* remove the TB from the hash list */
-phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
-h = tb_phys_hash_func(phys_pc);
-tb_remove(&tb_phys_hash[h], tb,
-offsetof(TranslationBlock, phys_hash_next));

-/* remove the TB from the page list */
-if (tb->page_addr[0] != page_addr) {
-p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
-tb_page_remove(&p->first_tb, tb);
-invalidate_page_bitmap(p);
-}
-if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
-p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
-tb_page_remove(&p->first_tb, tb);
-invalidate_page_bitmap(p);
-}

-tb_invalidate(tb);
 tb_phys_invalidate_count++;
 }
@@ -810 +684 @@ void tb_invalidate_phys_page_range(targe
 #endif
 }
 #endif /* TARGET_HAS_PRECISE_SMC */
-saved_tb = env->current_tb;
+/* we need to do that to handle the case where a signal
-env->current_tb = NULL;
+occurs while doing tb_phys_invalidate() */
+saved_tb = NULL;
+if (env) {
+saved_tb = env->current_tb;
+env->current_tb = NULL;
+}
 tb_phys_invalidate(tb, -1);
-env->current_tb = saved_tb;
+if (env) {
-if (env->interrupt_request && env->current_tb)
+env->current_tb = saved_tb;
-cpu_interrupt(env, env->interrupt_request);
+if (env->interrupt_request && env->current_tb)
+cpu_interrupt(env, env->interrupt_request);
+}
 }
 tb = tb_next;
 }
@@ -941 +822 @@ static void tb_invalidate_phys_page(targ

 /* add the tb in the target page and protect it if necessary */
 static inline void tb_alloc_page(TranslationBlock *tb,
-unsigned int n, unsigned int page_addr)
+unsigned int n, target_ulong page_addr)
 {
 PageDesc *p;
 TranslationBlock *last_first_tb;
@@ -957 +838 @@ static inline void tb_alloc_page(Transla

 #if defined(CONFIG_USER_ONLY)
 if (p->flags & PAGE_WRITE) {
-unsigned long host_start, host_end, addr;
+target_ulong addr;
+PageDesc *p2;
 int prot;

 /* force the host page as non writable (writes will have a
 page fault + mprotect overhead) */
-host_start = page_addr & qemu_host_page_mask;
+page_addr &= qemu_host_page_mask;
-host_end = host_start + qemu_host_page_size;
 prot = 0;
-for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
+for(addr = page_addr; addr < page_addr + qemu_host_page_size;
-prot |= page_get_flags(addr);
+addr += TARGET_PAGE_SIZE) {
-mprotect((void *)host_start, qemu_host_page_size,
+p2 = page_find (addr >> TARGET_PAGE_BITS);
+if (!p2)
+continue;
+prot |= p2->flags;
+p2->flags &= ~PAGE_WRITE;
+page_get_flags(addr);
+}
+mprotect(g2h(page_addr), qemu_host_page_size,
 (prot & PAGE_BITS) & ~PAGE_WRITE);
 #ifdef DEBUG_TB_INVALIDATE
 printf("protecting code page: 0x%08lx\n",
-host_start);
+page_addr);
 #endif
-p->flags &= ~PAGE_WRITE;
 }
 #else
 /* if some code is already present, then the pages are already
 protected. So we handle the case where only the first TB is
 allocated in a physical page */
 if (!last_first_tb) {
-target_ulong virt_addr;
+tlb_protect_code(page_addr);

-virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
-tlb_protect_code(cpu_single_env, page_addr, virt_addr);
 }
 #endif
@@ -1025 +910 @@ void tb_link_phys(TranslationBlock *tb,
 tb_alloc_page(tb, 1, phys_page2);
 else
 tb->page_addr[1] = -1;
-#ifdef DEBUG_TB_CHECK
-tb_page_check();
-#endif
-}

-/* link the tb with the other TBs */
-void tb_link(TranslationBlock *tb)
-{
-#if !defined(CONFIG_USER_ONLY)
-{
-VirtPageDesc *vp;
-target_ulong addr;

-/* save the code memory mappings (needed to invalidate the code) */
-addr = tb->pc & TARGET_PAGE_MASK;
-vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-#ifdef DEBUG_TLB_CHECK
-if (vp->valid_tag == virt_valid_tag &&
-vp->phys_addr != tb->page_addr[0]) {
-printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
-addr, tb->page_addr[0], vp->phys_addr);
-}
-#endif
-vp->phys_addr = tb->page_addr[0];
-if (vp->valid_tag != virt_valid_tag) {
-vp->valid_tag = virt_valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-vp->prot = 0;
-#endif
-}

-if (tb->page_addr[1] != -1) {
-addr += TARGET_PAGE_SIZE;
-vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-#ifdef DEBUG_TLB_CHECK
-if (vp->valid_tag == virt_valid_tag &&
-vp->phys_addr != tb->page_addr[1]) {
-printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
-addr, tb->page_addr[1], vp->phys_addr);
-}
-#endif
-vp->phys_addr = tb->page_addr[1];
-if (vp->valid_tag != virt_valid_tag) {
-vp->valid_tag = virt_valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-vp->prot = 0;
-#endif
-}
-}
-}
-#endif

 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
 tb->jmp_next[0] = NULL;
@@ -1091 +925 @@ void tb_link(TranslationBlock *tb)
 tb_reset_jump(tb, 0);
 if (tb->tb_next_offset[1] != 0xffff)
 tb_reset_jump(tb, 1);

+#ifdef DEBUG_TB_CHECK
+tb_page_check();
+#endif
 }

 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
@@ -1174 +1012 @@ static void tb_reset_jump_recursive(Tran
 #if defined(TARGET_HAS_ICE)
 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
 {
-target_ulong phys_addr;
+target_ulong addr, pd;
+ram_addr_t ram_addr;
+PhysPageDesc *p;

-phys_addr = cpu_get_phys_page_debug(env, pc);
+addr = cpu_get_phys_page_debug(env, pc);
-tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
+p = phys_page_find(addr >> TARGET_PAGE_BITS);
+if (!p) {
+pd = IO_MEM_UNASSIGNED;
+} else {
+pd = p->phys_offset;
+}
+ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
+tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
 }
 #endif
@@ -1390 +1237 @@ void tlb_flush(CPUState *env, int flush_
 env->current_tb = NULL;

 for(i = 0; i < CPU_TLB_SIZE; i++) {
-env->tlb_read[0][i].address = -1;
+env->tlb_table[0][i].addr_read = -1;
-env->tlb_write[0][i].address = -1;
+env->tlb_table[0][i].addr_write = -1;
-env->tlb_read[1][i].address = -1;
+env->tlb_table[0][i].addr_code = -1;
-env->tlb_write[1][i].address = -1;
+env->tlb_table[1][i].addr_read = -1;
+env->tlb_table[1][i].addr_write = -1;
+env->tlb_table[1][i].addr_code = -1;
 }

-virt_page_flush();
+memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
-memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

 #if !defined(CONFIG_SOFTMMU)
 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
@@ -1412 +1260 @@ void tlb_flush(CPUState *env, int flush_

 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
-if (addr == (tlb_entry->address &
+if (addr == (tlb_entry->addr_read &
-(TARGET_PAGE_MASK | TLB_INVALID_MASK)))
+(TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-tlb_entry->address = -1;
+addr == (tlb_entry->addr_write &
+(TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
+addr == (tlb_entry->addr_code &
+(TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+tlb_entry->addr_read = -1;
+tlb_entry->addr_write = -1;
+tlb_entry->addr_code = -1;
+}
 }
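Note: the hunk above reflects the newer revision splitting the single cached page address of a TLB slot into separate read, write and code-fetch addresses, so a per-page flush must test all three. A small illustrative sketch of that idea (not QEMU's real CPUTLBEntry; the struct, mask and values below are invented for the example):

    #include <stdio.h>

    #define PAGE_MASK (~0xfffUL)
    #define INVALID   (~0UL)

    typedef struct {
        unsigned long addr_read;    /* cached page address for data loads */
        unsigned long addr_write;   /* ... for data stores */
        unsigned long addr_code;    /* ... for instruction fetches */
    } TlbEntry;

    static void flush_entry(TlbEntry *e, unsigned long addr)
    {
        /* invalidate the slot if any of the three cached addresses matches */
        if (addr == (e->addr_read & PAGE_MASK) ||
            addr == (e->addr_write & PAGE_MASK) ||
            addr == (e->addr_code & PAGE_MASK)) {
            e->addr_read = e->addr_write = e->addr_code = INVALID;
        }
    }

    int main(void)
    {
        TlbEntry e = { 0x1000, INVALID, 0x1000 };   /* read/execute-only page */
        flush_entry(&e, 0x1000 & PAGE_MASK);
        printf("%lx %lx %lx\n", e.addr_read, e.addr_write, e.addr_code);
        return 0;
    }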

 void tlb_flush_page(CPUState *env, target_ulong addr)
 {
-int i, n;
+int i;
-VirtPageDesc *vp;
-PageDesc *p;
 TranslationBlock *tb;

 #if defined(DEBUG_TLB)
@@ -1433 +1286 @@ void tlb_flush_page(CPUState *env, targe

 addr &= TARGET_PAGE_MASK;
 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-tlb_flush_entry(&env->tlb_read[0][i], addr);
+tlb_flush_entry(&env->tlb_table[0][i], addr);
-tlb_flush_entry(&env->tlb_write[0][i], addr);
+tlb_flush_entry(&env->tlb_table[1][i], addr);
-tlb_flush_entry(&env->tlb_read[1][i], addr);
-tlb_flush_entry(&env->tlb_write[1][i], addr);

-/* remove from the virtual pc hash table all the TB at this
+/* Discard jump cache entries for any tb which might potentially
-virtual address */
+overlap the flushed page. */
+i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
-vp = virt_page_find(addr >> TARGET_PAGE_BITS);
+memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
-if (vp && vp->valid_tag == virt_valid_tag) {
-p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
+i = tb_jmp_cache_hash_page(addr);
-if (p) {
+memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
-/* we remove all the links to the TBs in this virtual page */
-tb = p->first_tb;
-while (tb != NULL) {
-n = (long)tb & 3;
-tb = (TranslationBlock *)((long)tb & ~3);
-if ((tb->pc & TARGET_PAGE_MASK) == addr ||
-((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
-tb_invalidate(tb);
-}
-tb = tb->page_next[n];
-}
-}
-vp->valid_tag = 0;
-}

 #if !defined(CONFIG_SOFTMMU)
 if (addr < MMAP_AREA_END)
@@ -1471 +1308 @@ void tlb_flush_page(CPUState *env, targe
 #endif
 }

-static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
-{
-if (addr == (tlb_entry->address &
-(TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
-(tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
-}
-}

 /* update the TLBs so that writes to code in the virtual page 'addr'
 can be detected */
-static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
+static void tlb_protect_code(ram_addr_t ram_addr)
-target_ulong vaddr)
 {
-int i;
+cpu_physical_memory_reset_dirty(ram_addr,
+ram_addr + TARGET_PAGE_SIZE,
-vaddr &= TARGET_PAGE_MASK;
+CODE_DIRTY_FLAG);
-i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-tlb_protect_code1(&env->tlb_write[0][i], vaddr);
-tlb_protect_code1(&env->tlb_write[1][i], vaddr);

-#ifdef USE_KQEMU
-if (env->kqemu_enabled) {
-kqemu_set_notdirty(env, ram_addr);
-}
-#endif
-phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;

-#if !defined(CONFIG_SOFTMMU)
-/* NOTE: as we generated the code for this page, it is already at
-least readable */
-if (vaddr < MMAP_AREA_END)
-mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
-#endif
 }

 /* update the TLB so that writes in physical page 'phys_addr' are no longer
@@ -1519 +1329 @@ static inline void tlb_reset_dirty_range
 unsigned long start, unsigned long length)
 {
 unsigned long addr;
-if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
 if ((addr - start) < length) {
-tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
 }
 }
 }
@@ -1542 +1352 @@ void cpu_physical_memory_reset_dirty(ram
 if (length == 0)
 return;
 len = length >> TARGET_PAGE_BITS;
-env = cpu_single_env;
 #ifdef USE_KQEMU
+/* XXX: should not depend on cpu context */
+env = first_cpu;
 if (env->kqemu_enabled) {
 ram_addr_t addr;
 addr = start;
@@ -1561 +1372 @@ void cpu_physical_memory_reset_dirty(ram
 /* we modify the TLB cache so that the dirty bit will be set again
 when accessing the range */
 start1 = start + (unsigned long)phys_ram_base;
-for(i = 0; i < CPU_TLB_SIZE; i++)
+for(env = first_cpu; env != NULL; env = env->next_cpu) {
-tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
+for(i = 0; i < CPU_TLB_SIZE; i++)
-for(i = 0; i < CPU_TLB_SIZE; i++)
+tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
-tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
+for(i = 0; i < CPU_TLB_SIZE; i++)
+tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
+}

 #if !defined(CONFIG_SOFTMMU)
 /* XXX: this is expensive */
@@ -1599 +1412 @@ static inline void tlb_update_dirty(CPUT
 {
 ram_addr_t ram_addr;

-if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
+ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
 tlb_entry->addend - (unsigned long)phys_ram_base;
 if (!cpu_physical_memory_is_dirty(ram_addr)) {
-tlb_entry->address |= IO_MEM_NOTDIRTY;
+tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
 }
 }
 }
@@ -1613 +1426 @@ void cpu_tlb_update_dirty(CPUState *env)
 {
 int i;
 for(i = 0; i < CPU_TLB_SIZE; i++)
-tlb_update_dirty(&env->tlb_write[0][i]);
+tlb_update_dirty(&env->tlb_table[0][i]);
 for(i = 0; i < CPU_TLB_SIZE; i++)
-tlb_update_dirty(&env->tlb_write[1][i]);
+tlb_update_dirty(&env->tlb_table[1][i]);
 }

 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
 unsigned long start)
 {
 unsigned long addr;
-if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
+if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
-addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
 if (addr == start) {
-tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
+tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
 }
 }
 }

 /* update the TLB corresponding to virtual page vaddr and phys addr
 addr so that it is no longer dirty */
-static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUState *env,
+unsigned long addr, target_ulong vaddr)
 {
-CPUState *env = cpu_single_env;
 int i;

 addr &= TARGET_PAGE_MASK;
 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-tlb_set_dirty1(&env->tlb_write[0][i], addr);
+tlb_set_dirty1(&env->tlb_table[0][i], addr);
-tlb_set_dirty1(&env->tlb_write[1][i], addr);
+tlb_set_dirty1(&env->tlb_table[1][i], addr);
 }

 /* add a new TLB entry. At most one entry for a given virtual address
 is permitted. Return 0 if OK or 2 if the page could not be mapped
 (can only happen in non SOFTMMU mode for I/O pages or pages
 conflicting with the host address space). */
-int tlb_set_page(CPUState *env, target_ulong vaddr,
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
 target_phys_addr_t paddr, int prot,
 int is_user, int is_softmmu)
 {
 PhysPageDesc *p;
 unsigned long pd;
@@ -1657 +1470 @@ int tlb_set_page(CPUState *env, target_u
 target_ulong address;
 target_phys_addr_t addend;
 int ret;
+CPUTLBEntry *te;

 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
 if (!p) {
@@ -1666 +1480 @@ int tlb_set_page(CPUState *env, target_u
 }
 #if defined(DEBUG_TLB)
 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
-vaddr, paddr, prot, is_user, is_softmmu, pd);
+vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
 #endif

 ret = 0;
@@ -1674 +1488 @@ int tlb_set_page(CPUState *env, target_u
 if (is_softmmu)
 #endif
 {
-if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
+if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
 /* IO memory case */
 address = vaddr | pd;
 addend = paddr;
@@ -1686 +1500 @@ int tlb_set_page(CPUState *env, target_u

 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 addend -= vaddr;
+te = &env->tlb_table[is_user][index];
+te->addend = addend;
 if (prot & PAGE_READ) {
-env->tlb_read[is_user][index].address = address;
+te->addr_read = address;
-env->tlb_read[is_user][index].addend = addend;
+} else {
+te->addr_read = -1;
+}
+if (prot & PAGE_EXEC) {
+te->addr_code = address;
 } else {
-env->tlb_read[is_user][index].address = -1;
+te->addr_code = -1;
-env->tlb_read[is_user][index].addend = -1;
 }
 if (prot & PAGE_WRITE) {
-if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
+if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
-/* ROM: access is ignored (same as unassigned) */
+(pd & IO_MEM_ROMD)) {
-env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
+/* write access calls the I/O callback */
-env->tlb_write[is_user][index].addend = addend;
+te->addr_write = vaddr |
+(pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
 !cpu_physical_memory_is_dirty(pd)) {
-env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
+te->addr_write = vaddr | IO_MEM_NOTDIRTY;
-env->tlb_write[is_user][index].addend = addend;
 } else {
-env->tlb_write[is_user][index].address = address;
+te->addr_write = address;
-env->tlb_write[is_user][index].addend = addend;
 }
 } else {
-env->tlb_write[is_user][index].address = -1;
+te->addr_write = -1;
-env->tlb_write[is_user][index].addend = -1;
 }
 }
 }
 #if !defined(CONFIG_SOFTMMU)
@@ -1758 +1575 @@ int tlb_set_page(CPUState *env, target_u

 /* called from signal handler: invalidate the code and unprotect the
 page. Return TRUE if the fault was succesfully handled. */
-int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
+int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
 {
 #if !defined(CONFIG_SOFTMMU)
 VirtPageDesc *vp;
@@ -1807 +1624 @@ void tlb_flush_page(CPUState *env, targe
 {
 }

-int tlb_set_page(CPUState *env, target_ulong vaddr,
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
 target_phys_addr_t paddr, int prot,
 int is_user, int is_softmmu)
 {
 return 0;
 }
@@ -1857 +1674 @@ void page_dump(FILE *f)
 }
 }

-int page_get_flags(unsigned long address)
+int page_get_flags(target_ulong address)
 {
 PageDesc *p;

@@ -1870 +1687 @@ int page_get_flags(unsigned long address
 /* modify the flags of a page and invalidate the code if
 necessary. The flag PAGE_WRITE_ORG is positionned automatically
 depending on PAGE_WRITE */
-void page_set_flags(unsigned long start, unsigned long end, int flags)
+void page_set_flags(target_ulong start, target_ulong end, int flags)
 {
 PageDesc *p;
-unsigned long addr;
+target_ulong addr;

 start = start & TARGET_PAGE_MASK;
 end = TARGET_PAGE_ALIGN(end);
@@ -1896 +1713 @@ void page_set_flags(unsigned long start,

 /* called from signal handler: invalidate the code and unprotect the
 page. Return TRUE if the fault was succesfully handled. */
-int page_unprotect(unsigned long address, unsigned long pc, void *puc)
+int page_unprotect(target_ulong address, unsigned long pc, void *puc)
 {
 unsigned int page_index, prot, pindex;
 PageDesc *p, *p1;
-unsigned long host_start, host_end, addr;
+target_ulong host_start, host_end, addr;

 host_start = address & qemu_host_page_mask;
 page_index = host_start >> TARGET_PAGE_BITS;
@@ -1919 +1736 @@ int page_unprotect(unsigned long address
 if (prot & PAGE_WRITE_ORG) {
 pindex = (address - host_start) >> TARGET_PAGE_BITS;
 if (!(p1[pindex].flags & PAGE_WRITE)) {
-mprotect((void *)host_start, qemu_host_page_size,
+mprotect((void *)g2h(host_start), qemu_host_page_size,
 (prot & PAGE_BITS) | PAGE_WRITE);
 p1[pindex].flags |= PAGE_WRITE;
 /* and since the content will be modified, we must invalidate
@@ -1935 +1752 @@ int page_unprotect(unsigned long address
 }

-/* call this function when system calls directly modify a memory area */
+/* ??? This should be redundant now we have lock_user. */
-void page_unprotect_range(uint8_t *data, unsigned long data_size)
+void page_unprotect_range(target_ulong data, target_ulong data_size)
 {
-unsigned long start, end, addr;
+target_ulong start, end, addr;

-start = (unsigned long)data;
+start = data;
 end = start + data_size;
 start &= TARGET_PAGE_MASK;
 end = TARGET_PAGE_ALIGN(end);
@@ -1948 +1766 @@ void page_unprotect_range(uint8_t *data,
 }
 }

-static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUState *env,
+unsigned long addr, target_ulong vaddr)
 {
 }
 #endif /* defined(CONFIG_USER_ONLY) */
@@ -1962 +1781 @@ void cpu_register_physical_memory(target
 {
 target_phys_addr_t addr, end_addr;
 PhysPageDesc *p;
+CPUState *env;

 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
 end_addr = start_addr + size;
 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
 p->phys_offset = phys_offset;
-if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
+if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
+(phys_offset & IO_MEM_ROMD))
 phys_offset += TARGET_PAGE_SIZE;
 }

+/* since each CPU stores ram addresses in its TLB cache, we must
+reset the modified entries */
+/* XXX: slow ! */
+for(env = first_cpu; env != NULL; env = env->next_cpu) {
+tlb_flush(env, 1);
+}
+}

+/* XXX: temporary until new memory mapping API */
+uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
+{
+PhysPageDesc *p;

+p = phys_page_find(addr >> TARGET_PAGE_BITS);
+if (!p)
+return IO_MEM_UNASSIGNED;
+return p->phys_offset;
 }

 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
 {
+#ifdef DEBUG_UNASSIGNED
+printf("Unassigned mem read 0x%08x\n", (int)addr);
+#endif
 return 0;
 }

 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
 {
+#ifdef DEBUG_UNASSIGNED
+printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
+#endif
 }

 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
@@ -2007 +1852 @@ static void notdirty_mem_writeb(void *op
 #endif
 }
 stb_p((uint8_t *)(long)addr, val);
+#ifdef USE_KQEMU
+if (cpu_single_env->kqemu_enabled &&
+(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
+kqemu_modify_page(cpu_single_env, ram_addr);
+#endif
 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
 /* we remove the notdirty callback only if the code has been
 flushed */
 if (dirty_flags == 0xff)
-tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
 }

 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
@@ -2028 +1878 @@ static void notdirty_mem_writew(void *op
 #endif
 }
 stw_p((uint8_t *)(long)addr, val);
+#ifdef USE_KQEMU
+if (cpu_single_env->kqemu_enabled &&
+(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
+kqemu_modify_page(cpu_single_env, ram_addr);
+#endif
 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
 /* we remove the notdirty callback only if the code has been
 flushed */
 if (dirty_flags == 0xff)
-tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
 }

 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
@@ -2049 +1904 @@ static void notdirty_mem_writel(void *op
 #endif
 }
 stl_p((uint8_t *)(long)addr, val);
+#ifdef USE_KQEMU
+if (cpu_single_env->kqemu_enabled &&
+(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
+kqemu_modify_page(cpu_single_env, ram_addr);
+#endif
 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
 /* we remove the notdirty callback only if the code has been
 flushed */
 if (dirty_flags == 0xff)
-tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
 }

 static CPUReadMemoryFunc *error_mem_read[3] = {
@@ -2095 +1955 @@ int cpu_register_io_memory(int io_index,
 int i;

 if (io_index <= 0) {
-if (io_index >= IO_MEM_NB_ENTRIES)
+if (io_mem_nb >= IO_MEM_NB_ENTRIES)
 return -1;
 io_index = io_mem_nb++;
 } else {
 if (io_index >= IO_MEM_NB_ENTRIES)
 return -1;
 }

 for(i = 0;i < 3; i++) {
 io_mem_read[io_index][i] = mem_read[i];
 io_mem_write[io_index][i] = mem_write[i];
@@ -2128 +1988 @@ void cpu_physical_memory_rw(target_phys_
 {
 int l, flags;
 target_ulong page;
+void * p;

 while (len > 0) {
 page = addr & TARGET_PAGE_MASK;
@@ -2140 +2001 @@ void cpu_physical_memory_rw(target_phys_
 if (is_write) {
 if (!(flags & PAGE_WRITE))
 return;
-memcpy((uint8_t *)addr, buf, len);
+p = lock_user(addr, len, 0);
+memcpy(p, buf, len);
+unlock_user(p, addr, len);
 } else {
 if (!(flags & PAGE_READ))
 return;
-memcpy(buf, (uint8_t *)addr, len);
+p = lock_user(addr, len, 1);
+memcpy(buf, p, len);
+unlock_user(p, addr, 0);
 }
 len -= l;
 buf += l;
@@ -2152 +2017 @@ void cpu_physical_memory_rw(target_phys_
 }
 }

-/* never used */
-uint32_t ldl_phys(target_phys_addr_t addr)
-{
-return 0;
-}

-void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
-{
-}

-void stl_phys(target_phys_addr_t addr, uint32_t val)
-{
-}

 #else
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
 int len, int is_write)
@@ -2192 +2043 @@ void cpu_physical_memory_rw(target_phys_
 if (is_write) {
 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+/* XXX: could force cpu_single_env to NULL to avoid
+potential bugs */
 if (l >= 4 && ((addr & 3) == 0)) {
 /* 32 bit write access */
 val = ldl_p(buf);
@@ -2223 +2076 @@ void cpu_physical_memory_rw(target_phys_
 }
 }
 } else {
-if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
+if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
+!(pd & IO_MEM_ROMD)) {
 /* I/O case */
 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
 if (l >= 4 && ((addr & 3) == 0)) {
@@ -2255 +2109 @@ void cpu_physical_memory_rw(target_phys_
 }
 }
+/* used for ROM loading : can write in RAM and ROM */
+void cpu_physical_memory_write_rom(target_phys_addr_t addr,
+const uint8_t *buf, int len)
+{
+int l;
+uint8_t *ptr;
+target_phys_addr_t page;
+unsigned long pd;
+PhysPageDesc *p;

+while (len > 0) {
+page = addr & TARGET_PAGE_MASK;
+l = (page + TARGET_PAGE_SIZE) - addr;
+if (l > len)
+l = len;
+p = phys_page_find(page >> TARGET_PAGE_BITS);
+if (!p) {
+pd = IO_MEM_UNASSIGNED;
+} else {
+pd = p->phys_offset;
+}

+if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
+(pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
+!(pd & IO_MEM_ROMD)) {
+/* do nothing */
+} else {
+unsigned long addr1;
+addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+/* ROM/RAM case */
+ptr = phys_ram_base + addr1;
+memcpy(ptr, buf, l);
+}
+len -= l;
+buf += l;
+addr += l;
+}
+}

 /* warning: addr must be aligned */
 uint32_t ldl_phys(target_phys_addr_t addr)
 {
@@ -2271 +2165 @@ uint32_t ldl_phys(target_phys_addr_t add
 pd = p->phys_offset;
 }

-if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
+if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
+!(pd & IO_MEM_ROMD)) {
 /* I/O case */
 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
@@ -2284 +2179 @@ uint32_t ldl_phys(target_phys_addr_t add
 return val;
 }

+/* warning: addr must be aligned */
+uint64_t ldq_phys(target_phys_addr_t addr)
+{
+int io_index;
+uint8_t *ptr;
+uint64_t val;
+unsigned long pd;
+PhysPageDesc *p;

+p = phys_page_find(addr >> TARGET_PAGE_BITS);
+if (!p) {
+pd = IO_MEM_UNASSIGNED;
+} else {
+pd = p->phys_offset;
+}

+if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
+!(pd & IO_MEM_ROMD)) {
+/* I/O case */
+io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+#ifdef TARGET_WORDS_BIGENDIAN
+val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
+val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
+#else
+val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
+#endif
+} else {
+/* RAM case */
+ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+(addr & ~TARGET_PAGE_MASK);
+val = ldq_p(ptr);
+}
+return val;
+}

+/* XXX: optimize */
+uint32_t ldub_phys(target_phys_addr_t addr)
+{
+uint8_t val;
+cpu_physical_memory_read(addr, &val, 1);
+return val;
+}

+/* XXX: optimize */
+uint32_t lduw_phys(target_phys_addr_t addr)
+{
+uint16_t val;
+cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
+return tswap16(val);
+}

 /* warning: addr must be aligned. The ram page is not masked as dirty
 and the code inside is not invalidated. It is useful if the dirty
 bits are used to track modified PTEs */
@@ -2345 +2292 @@ void stl_phys(target_phys_addr_t addr, u
 }
 }

+/* XXX: optimize */
+void stb_phys(target_phys_addr_t addr, uint32_t val)
+{
+uint8_t v = val;
+cpu_physical_memory_write(addr, &v, 1);
+}

+/* XXX: optimize */
+void stw_phys(target_phys_addr_t addr, uint32_t val)
+{
+uint16_t v = tswap16(val);
+cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
+}

+/* XXX: optimize */
+void stq_phys(target_phys_addr_t addr, uint64_t val)
+{
+val = tswap64(val);
+cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
+}

 #endif

 /* virtual memory access for debug */