version 1.1.1.16, 2018/04/24 19:17:27 vs. version 1.1.1.17, 2018/04/24 19:34:42
Line 57 / Line 57
#include "trace.h" |
#include "trace.h" |
#endif |
#endif |
|
|
|
#include "cputlb.h" |
|
|
|
#define WANT_EXEC_OBSOLETE |
|
#include "exec-obsolete.h" |
|
|
//#define DEBUG_TB_INVALIDATE |
//#define DEBUG_TB_INVALIDATE |
//#define DEBUG_FLUSH |
//#define DEBUG_FLUSH |
//#define DEBUG_TLB |
|
//#define DEBUG_UNASSIGNED |
//#define DEBUG_UNASSIGNED |
|
|
/* make various TB consistency checks */ |
/* make various TB consistency checks */ |
//#define DEBUG_TB_CHECK |
//#define DEBUG_TB_CHECK |
//#define DEBUG_TLB_CHECK |
|
|
|
//#define DEBUG_IOPORT |
//#define DEBUG_IOPORT |
//#define DEBUG_SUBPAGE |
//#define DEBUG_SUBPAGE |
Line 90 / Line 93: spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#define code_gen_section \ |
#define code_gen_section \ |
__attribute__((__section__(".gen_code"))) \ |
__attribute__((__section__(".gen_code"))) \ |
__attribute__((aligned (32))) |
__attribute__((aligned (32))) |
#elif defined(_WIN32) |
#elif defined(_WIN32) && !defined(_WIN64) |
/* Maximum alignment for Win32 is 16. */ |
|
#define code_gen_section \ |
#define code_gen_section \ |
__attribute__((aligned (16))) |
__attribute__((aligned (16))) |
#else |
#else |
Line 115 / Line 117: RAMList ram_list = { .blocks = QLIST_HEA
static MemoryRegion *system_memory; |
static MemoryRegion *system_memory; |
static MemoryRegion *system_io; |
static MemoryRegion *system_io; |
|
|
|
MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty; |
|
static MemoryRegion io_mem_subpage_ram; |
|
|
#endif |
#endif |
|
|
CPUState *first_cpu; |
CPUArchState *first_cpu; |
/* current CPU in the current thread. It is only valid inside |
/* current CPU in the current thread. It is only valid inside |
cpu_exec() */ |
cpu_exec() */ |
DEFINE_TLS(CPUState *,cpu_single_env); |
DEFINE_TLS(CPUArchState *,cpu_single_env); |
/* 0 = Do not count executed instructions. |
/* 0 = Do not count executed instructions. |
1 = Precise instruction counting. |
1 = Precise instruction counting. |
2 = Adaptive rate instruction counting. */ |
2 = Adaptive rate instruction counting. */ |
Line 154 / Line 159: typedef struct PageDesc {
#define L2_BITS 10 |
#define L2_BITS 10 |
#define L2_SIZE (1 << L2_BITS) |
#define L2_SIZE (1 << L2_BITS) |
|
|
|
#define P_L2_LEVELS \ |
|
(((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1) |
|
|
/* The bits remaining after N lower levels of page tables. */ |
/* The bits remaining after N lower levels of page tables. */ |
#define P_L1_BITS_REM \ |
|
((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) |
|
#define V_L1_BITS_REM \ |
#define V_L1_BITS_REM \ |
((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) |
((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) |
|
|
/* Size of the L1 page table. Avoid silly small sizes. */ |
|
#if P_L1_BITS_REM < 4 |
|
#define P_L1_BITS (P_L1_BITS_REM + L2_BITS) |
|
#else |
|
#define P_L1_BITS P_L1_BITS_REM |
|
#endif |
|
|
|
#if V_L1_BITS_REM < 4 |
#if V_L1_BITS_REM < 4 |
#define V_L1_BITS (V_L1_BITS_REM + L2_BITS) |
#define V_L1_BITS (V_L1_BITS_REM + L2_BITS) |
#else |
#else |
#define V_L1_BITS V_L1_BITS_REM |
#define V_L1_BITS V_L1_BITS_REM |
#endif |
#endif |
|
|
#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS) |
|
#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) |
#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) |
|
|
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS) |
|
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) |
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) |
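
To make the bit arithmetic above concrete, here is a self-contained worked example. The input constants (4 KiB pages, a 32-bit guest virtual address space and a 36-bit physical address space) are assumptions chosen for illustration, not values taken from this diff; only the macro definitions themselves mirror the code above.

#include <stdio.h>

#define TARGET_PAGE_BITS            12   /* assumed: 4 KiB pages */
#define L1_MAP_ADDR_SPACE_BITS      32   /* assumed */
#define TARGET_PHYS_ADDR_SPACE_BITS 36   /* assumed */
#define L2_BITS                     10
#define L2_SIZE                     (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#if V_L1_BITS_REM < 4
#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)   /* avoid a silly small L1 table */
#else
#define V_L1_BITS V_L1_BITS_REM
#endif

int main(void)
{
    /* (36 - 12 - 1) / 10 + 1 = 3 levels cover the physical address space. */
    printf("P_L2_LEVELS = %d\n", P_L2_LEVELS);
    /* (32 - 12) mod 10 = 0, which is < 4, so the remainder is folded into a
     * wider level-1 table: V_L1_BITS = 10, V_L1_SIZE = 1024 entries. */
    printf("V_L1_BITS = %d, V_L1_SIZE = %d\n", V_L1_BITS, 1 << V_L1_BITS);
    return 0;
}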
|
|
unsigned long qemu_real_host_page_size; |
uintptr_t qemu_real_host_page_size; |
unsigned long qemu_host_page_size; |
uintptr_t qemu_host_page_size; |
unsigned long qemu_host_page_mask; |
uintptr_t qemu_host_page_mask; |
|
|
/* This is a multi-level map on the virtual address space. |
/* This is a multi-level map on the virtual address space. |
The bottom level has pointers to PageDesc. */ |
The bottom level has pointers to PageDesc. */ |
static void *l1_map[V_L1_SIZE]; |
static void *l1_map[V_L1_SIZE]; |
|
|
#if !defined(CONFIG_USER_ONLY) |
#if !defined(CONFIG_USER_ONLY) |
typedef struct PhysPageDesc { |
typedef struct PhysPageEntry PhysPageEntry; |
/* offset in host memory of the page + io_index in the low bits */ |
|
ram_addr_t phys_offset; |
static MemoryRegionSection *phys_sections; |
ram_addr_t region_offset; |
static unsigned phys_sections_nb, phys_sections_nb_alloc; |
} PhysPageDesc; |
static uint16_t phys_section_unassigned; |
|
static uint16_t phys_section_notdirty; |
|
static uint16_t phys_section_rom; |
|
static uint16_t phys_section_watch; |
|
|
|
struct PhysPageEntry { |
|
uint16_t is_leaf : 1; |
|
/* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */ |
|
uint16_t ptr : 15; |
|
}; |
|
|
|
/* Simple allocator for PhysPageEntry nodes */ |
|
static PhysPageEntry (*phys_map_nodes)[L2_SIZE]; |
|
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc; |
|
|
|
#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1) |
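
A quick stand-alone check of the packing introduced above: each PhysPageEntry squeezes a leaf flag and a 15-bit index into 16 bits, and PHYS_MAP_NODE_NIL is the all-ones 15-bit value, so at most 32767 intermediate nodes can ever be addressed. The snippet below is only a sketch of that layout, not code from the tree itself.

#include <stdint.h>
#include <stdio.h>

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    uint16_t ptr     : 15;  /* section index (leaf) or node index (non-leaf) */
};

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)   /* 0x7fff */

int main(void)
{
    /* Typically 2 bytes per entry, so a 1024-entry node costs 2 KiB. */
    printf("sizeof(PhysPageEntry) = %zu\n", sizeof(struct PhysPageEntry));
    printf("node size = %zu bytes\n", 1024 * sizeof(struct PhysPageEntry));
    printf("PHYS_MAP_NODE_NIL = 0x%x\n", PHYS_MAP_NODE_NIL);
    return 0;
}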
|
|
/* This is a multi-level map on the physical address space. |
/* This is a multi-level map on the physical address space. |
The bottom level has pointers to PhysPageDesc. */ |
The bottom level has pointers to MemoryRegionSections. */ |
static void *l1_phys_map[P_L1_SIZE]; |
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 }; |
|
|
static void io_mem_init(void); |
static void io_mem_init(void); |
static void memory_map_init(void); |
static void memory_map_init(void); |
|
|
/* io memory support */ |
static MemoryRegion io_mem_watch; |
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; |
|
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; |
|
void *io_mem_opaque[IO_MEM_NB_ENTRIES]; |
|
static char io_mem_used[IO_MEM_NB_ENTRIES]; |
|
static int io_mem_watch; |
|
#endif |
#endif |
|
|
/* log support */ |
/* log support */ |
Line 220 / Line 227: int loglevel;
static int log_append = 0; |
static int log_append = 0; |
|
|
/* statistics */ |
/* statistics */ |
#if !defined(CONFIG_USER_ONLY) |
|
static int tlb_flush_count; |
|
#endif |
|
static int tb_flush_count; |
static int tb_flush_count; |
static int tb_phys_invalidate_count; |
static int tb_phys_invalidate_count; |
|
|
Line 394 / Line 398: static inline PageDesc *page_find(tb_pag
} |
} |
|
|
#if !defined(CONFIG_USER_ONLY) |
#if !defined(CONFIG_USER_ONLY) |
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) |
|
|
static void phys_map_node_reserve(unsigned nodes) |
{ |
{ |
PhysPageDesc *pd; |
if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) { |
void **lp; |
typedef PhysPageEntry Node[L2_SIZE]; |
int i; |
phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16); |
|
phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc, |
|
phys_map_nodes_nb + nodes); |
|
phys_map_nodes = g_renew(Node, phys_map_nodes, |
|
phys_map_nodes_nb_alloc); |
|
} |
|
} |
|
|
/* Level 1. Always allocated. */ |
static uint16_t phys_map_node_alloc(void) |
lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1)); |
{ |
|
unsigned i; |
|
uint16_t ret; |
|
|
/* Level 2..N-1. */ |
ret = phys_map_nodes_nb++; |
for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) { |
assert(ret != PHYS_MAP_NODE_NIL); |
void **p = *lp; |
assert(ret != phys_map_nodes_nb_alloc); |
if (p == NULL) { |
for (i = 0; i < L2_SIZE; ++i) { |
if (!alloc) { |
phys_map_nodes[ret][i].is_leaf = 0; |
return NULL; |
phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL; |
|
} |
|
return ret; |
|
} |
|
|
|
static void phys_map_nodes_reset(void) |
|
{ |
|
phys_map_nodes_nb = 0; |
|
} |
|
|
|
|
|
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index, |
|
target_phys_addr_t *nb, uint16_t leaf, |
|
int level) |
|
{ |
|
PhysPageEntry *p; |
|
int i; |
|
target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS); |
|
|
|
if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) { |
|
lp->ptr = phys_map_node_alloc(); |
|
p = phys_map_nodes[lp->ptr]; |
|
if (level == 0) { |
|
for (i = 0; i < L2_SIZE; i++) { |
|
p[i].is_leaf = 1; |
|
p[i].ptr = phys_section_unassigned; |
} |
} |
*lp = p = g_malloc0(sizeof(void *) * L2_SIZE); |
|
} |
} |
lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1)); |
} else { |
|
p = phys_map_nodes[lp->ptr]; |
} |
} |
|
lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)]; |
|
|
pd = *lp; |
while (*nb && lp < &p[L2_SIZE]) { |
if (pd == NULL) { |
if ((*index & (step - 1)) == 0 && *nb >= step) { |
int i; |
lp->is_leaf = true; |
|
lp->ptr = leaf; |
if (!alloc) { |
*index += step; |
return NULL; |
*nb -= step; |
|
} else { |
|
phys_page_set_level(lp, index, nb, leaf, level - 1); |
} |
} |
|
++lp; |
|
} |
|
} |
|
|
|
static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb, |
|
uint16_t leaf) |
|
{ |
|
/* Wildly overreserve - it doesn't matter much. */ |
|
phys_map_node_reserve(3 * P_L2_LEVELS); |
|
|
|
phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); |
|
} |
|
|
*lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE); |
MemoryRegionSection *phys_page_find(target_phys_addr_t index) |
|
{ |
|
PhysPageEntry lp = phys_map; |
|
PhysPageEntry *p; |
|
int i; |
|
uint16_t s_index = phys_section_unassigned; |
|
|
for (i = 0; i < L2_SIZE; i++) { |
for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) { |
pd[i].phys_offset = IO_MEM_UNASSIGNED; |
if (lp.ptr == PHYS_MAP_NODE_NIL) { |
pd[i].region_offset = (index + i) << TARGET_PAGE_BITS; |
goto not_found; |
} |
} |
|
p = phys_map_nodes[lp.ptr]; |
|
lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)]; |
} |
} |
|
|
return pd + (index & (L2_SIZE - 1)); |
s_index = lp.ptr; |
|
not_found: |
|
return &phys_sections[s_index]; |
} |
} |
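
For readers following the new phys_map code interleaved above, the sketch below models the same idea in miniature: a fixed-depth radix tree of 16-bit entries whose leaves carry a section index, with every unmapped slot falling through to an "unassigned" section. The node size, depth and names are deliberately shrunk and invented for illustration; only the shape of the set/find walk mirrors phys_page_set_level() and phys_page_find().

#include <stdint.h>
#include <stdio.h>

#define LEVELS  2
#define BITS    4                  /* 4 bits per level: 16 entries per node */
#define SIZE    (1 << BITS)
#define NIL     0x7fff
#define UNASSIGNED_SECTION 0

struct Entry { uint16_t is_leaf : 1; uint16_t ptr : 15; };

static struct Entry nodes[16][SIZE];   /* toy node pool */
static unsigned nodes_nb;
static struct Entry root = { 0, NIL };

static uint16_t node_alloc(int leaf_level)
{
    unsigned i, ret = nodes_nb++;
    for (i = 0; i < SIZE; i++) {
        /* Level-0 nodes start out as leaves pointing at "unassigned". */
        nodes[ret][i].is_leaf = leaf_level;
        nodes[ret][i].ptr = leaf_level ? UNASSIGNED_SECTION : NIL;
    }
    return ret;
}

/* Map one page index to a section index, allocating nodes on the way down. */
static void page_set(uint32_t index, uint16_t section)
{
    struct Entry *lp = &root;
    int level;

    for (level = LEVELS - 1; level >= 0; level--) {
        if (!lp->is_leaf && lp->ptr == NIL) {
            lp->ptr = node_alloc(level == 0);
        }
        lp = &nodes[lp->ptr][(index >> (level * BITS)) & (SIZE - 1)];
    }
    lp->is_leaf = 1;
    lp->ptr = section;
}

/* Walk down; anything unmapped resolves to the unassigned section. */
static uint16_t page_find(uint32_t index)
{
    struct Entry lp = root;
    int level;

    for (level = LEVELS - 1; level >= 0 && !lp.is_leaf; level--) {
        if (lp.ptr == NIL) {
            return UNASSIGNED_SECTION;
        }
        lp = nodes[lp.ptr][(index >> (level * BITS)) & (SIZE - 1)];
    }
    return lp.ptr;
}

int main(void)
{
    page_set(0x13, 7);
    printf("0x13 -> %d\n", page_find(0x13));   /* 7 */
    printf("0x14 -> %d\n", page_find(0x14));   /* 0: unassigned */
    printf("0xf3 -> %d\n", page_find(0xf3));   /* 0: unassigned */
    return 0;
}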
|
|
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index) |
bool memory_region_is_unassigned(MemoryRegion *mr) |
{ |
{ |
return phys_page_find_alloc(index, 0); |
return mr != &io_mem_ram && mr != &io_mem_rom |
|
&& mr != &io_mem_notdirty && !mr->rom_device |
|
&& mr != &io_mem_watch; |
} |
} |
|
|
static void tlb_protect_code(ram_addr_t ram_addr); |
|
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, |
|
target_ulong vaddr); |
|
#define mmap_lock() do { } while(0) |
#define mmap_lock() do { } while(0) |
#define mmap_unlock() do { } while(0) |
#define mmap_unlock() do { } while(0) |
#endif |
#endif |
Line 497 / Line 558: static void code_gen_alloc(unsigned long
if (code_gen_buffer_size > (512 * 1024 * 1024)) |
if (code_gen_buffer_size > (512 * 1024 * 1024)) |
code_gen_buffer_size = (512 * 1024 * 1024); |
code_gen_buffer_size = (512 * 1024 * 1024); |
#elif defined(__arm__) |
#elif defined(__arm__) |
/* Map the buffer below 32M, so we can use direct calls and branches */ |
/* Keep the buffer no bigger than 16MB to branch between blocks */ |
flags |= MAP_FIXED; |
|
start = (void *) 0x01000000UL; |
|
if (code_gen_buffer_size > 16 * 1024 * 1024) |
if (code_gen_buffer_size > 16 * 1024 * 1024) |
code_gen_buffer_size = 16 * 1024 * 1024; |
code_gen_buffer_size = 16 * 1024 * 1024; |
#elif defined(__s390x__) |
#elif defined(__s390x__) |
Line 569 / Line 628: void tcg_exec_init(unsigned long tb_size
cpu_gen_init(); |
cpu_gen_init(); |
code_gen_alloc(tb_size); |
code_gen_alloc(tb_size); |
code_gen_ptr = code_gen_buffer; |
code_gen_ptr = code_gen_buffer; |
|
tcg_register_jit(code_gen_buffer, code_gen_buffer_size); |
page_init(); |
page_init(); |
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) |
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) |
/* There's no guest base to take into account, so go ahead and |
/* There's no guest base to take into account, so go ahead and |
Line 594 / Line 654: void cpu_exec_init_all(void)
|
|
static int cpu_common_post_load(void *opaque, int version_id) |
static int cpu_common_post_load(void *opaque, int version_id) |
{ |
{ |
CPUState *env = opaque; |
CPUArchState *env = opaque; |
|
|
/* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the |
/* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the |
version_id is increased. */ |
version_id is increased. */ |
Line 611 / Line 671: static const VMStateDescription vmstate_
.minimum_version_id_old = 1, |
.minimum_version_id_old = 1, |
.post_load = cpu_common_post_load, |
.post_load = cpu_common_post_load, |
.fields = (VMStateField []) { |
.fields = (VMStateField []) { |
VMSTATE_UINT32(halted, CPUState), |
VMSTATE_UINT32(halted, CPUArchState), |
VMSTATE_UINT32(interrupt_request, CPUState), |
VMSTATE_UINT32(interrupt_request, CPUArchState), |
VMSTATE_END_OF_LIST() |
VMSTATE_END_OF_LIST() |
} |
} |
}; |
}; |
#endif |
#endif |
|
|
CPUState *qemu_get_cpu(int cpu) |
CPUArchState *qemu_get_cpu(int cpu) |
{ |
{ |
CPUState *env = first_cpu; |
CPUArchState *env = first_cpu; |
|
|
while (env) { |
while (env) { |
if (env->cpu_index == cpu) |
if (env->cpu_index == cpu) |
Line 631 / Line 691: CPUState *qemu_get_cpu(int cpu)
return env; |
return env; |
} |
} |
|
|
void cpu_exec_init(CPUState *env) |
void cpu_exec_init(CPUArchState *env) |
{ |
{ |
CPUState **penv; |
CPUArchState **penv; |
int cpu_index; |
int cpu_index; |
|
|
#if defined(CONFIG_USER_ONLY) |
#if defined(CONFIG_USER_ONLY) |
Line 732 / Line 792: static void page_flush_tb(void)
|
|
/* flush all the translation blocks */ |
/* flush all the translation blocks */ |
/* XXX: tb_flush is currently not thread safe */ |
/* XXX: tb_flush is currently not thread safe */ |
void tb_flush(CPUState *env1) |
void tb_flush(CPUArchState *env1) |
{ |
{ |
CPUState *env; |
CPUArchState *env; |
#if defined(DEBUG_FLUSH) |
#if defined(DEBUG_FLUSH) |
printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", |
printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", |
(unsigned long)(code_gen_ptr - code_gen_buffer), |
(unsigned long)(code_gen_ptr - code_gen_buffer), |
Line 820 / Line 880: static inline void tb_page_remove(Transl
|
|
for(;;) { |
for(;;) { |
tb1 = *ptb; |
tb1 = *ptb; |
n1 = (long)tb1 & 3; |
n1 = (uintptr_t)tb1 & 3; |
tb1 = (TranslationBlock *)((long)tb1 & ~3); |
tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); |
if (tb1 == tb) { |
if (tb1 == tb) { |
*ptb = tb1->page_next[n1]; |
*ptb = tb1->page_next[n1]; |
break; |
break; |
Line 841 / Line 901: static inline void tb_jmp_remove(Transla
/* find tb(n) in circular list */ |
/* find tb(n) in circular list */ |
for(;;) { |
for(;;) { |
tb1 = *ptb; |
tb1 = *ptb; |
n1 = (long)tb1 & 3; |
n1 = (uintptr_t)tb1 & 3; |
tb1 = (TranslationBlock *)((long)tb1 & ~3); |
tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); |
if (n1 == n && tb1 == tb) |
if (n1 == n && tb1 == tb) |
break; |
break; |
if (n1 == 2) { |
if (n1 == 2) { |
Line 862 / Line 922: static inline void tb_jmp_remove(Transla
another TB */ |
another TB */ |
static inline void tb_reset_jump(TranslationBlock *tb, int n) |
static inline void tb_reset_jump(TranslationBlock *tb, int n) |
{ |
{ |
tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n])); |
tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n])); |
} |
} |
|
|
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) |
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) |
{ |
{ |
CPUState *env; |
CPUArchState *env; |
PageDesc *p; |
PageDesc *p; |
unsigned int h, n1; |
unsigned int h, n1; |
tb_page_addr_t phys_pc; |
tb_page_addr_t phys_pc; |
Line 907 / Line 967: void tb_phys_invalidate(TranslationBlock
/* suppress any remaining jumps to this TB */ |
/* suppress any remaining jumps to this TB */ |
tb1 = tb->jmp_first; |
tb1 = tb->jmp_first; |
for(;;) { |
for(;;) { |
n1 = (long)tb1 & 3; |
n1 = (uintptr_t)tb1 & 3; |
if (n1 == 2) |
if (n1 == 2) |
break; |
break; |
tb1 = (TranslationBlock *)((long)tb1 & ~3); |
tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); |
tb2 = tb1->jmp_next[n1]; |
tb2 = tb1->jmp_next[n1]; |
tb_reset_jump(tb1, n1); |
tb_reset_jump(tb1, n1); |
tb1->jmp_next[n1] = NULL; |
tb1->jmp_next[n1] = NULL; |
tb1 = tb2; |
tb1 = tb2; |
} |
} |
tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */ |
tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */ |
|
|
tb_phys_invalidate_count++; |
tb_phys_invalidate_count++; |
} |
} |
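
The n1 = (uintptr_t)tb & 3 pattern that recurs in the hunks above packs a 2-bit slot index into the low bits of an aligned TranslationBlock pointer; switching the casts from long to uintptr_t keeps that packing correct on LLP64 hosts such as 64-bit Windows, where long is only 32 bits wide. Below is a minimal, hypothetical illustration of the encoding; the struct is a stand-in, not the real TranslationBlock.

#include <stdint.h>
#include <stdio.h>

struct Block {           /* stand-in for TranslationBlock */
    int dummy;
};

int main(void)
{
    static struct Block b;       /* at least 4-byte aligned, so bits 0-1 are free */
    int n = 2;                   /* slot index, must fit in 2 bits */

    /* Pack: store the index in the low bits of the (aligned) pointer. */
    struct Block *tagged = (struct Block *)((uintptr_t)&b | n);

    /* Unpack: mask the low bits off again before using the pointer. */
    int slot = (int)((uintptr_t)tagged & 3);
    struct Block *p = (struct Block *)((uintptr_t)tagged & ~(uintptr_t)3);

    printf("slot = %d, pointer ok = %d\n", slot, p == &b);
    return 0;
}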
Line 957 / Line 1017: static void build_page_bitmap(PageDesc *
|
|
tb = p->first_tb; |
tb = p->first_tb; |
while (tb != NULL) { |
while (tb != NULL) { |
n = (long)tb & 3; |
n = (uintptr_t)tb & 3; |
tb = (TranslationBlock *)((long)tb & ~3); |
tb = (TranslationBlock *)((uintptr_t)tb & ~3); |
/* NOTE: this is subtle as a TB may span two physical pages */ |
/* NOTE: this is subtle as a TB may span two physical pages */ |
if (n == 0) { |
if (n == 0) { |
/* NOTE: tb_end may be after the end of the page, but |
/* NOTE: tb_end may be after the end of the page, but |
Line 976 / Line 1036: static void build_page_bitmap(PageDesc *
} |
} |
} |
} |
|
|
TranslationBlock *tb_gen_code(CPUState *env, |
TranslationBlock *tb_gen_code(CPUArchState *env, |
target_ulong pc, target_ulong cs_base, |
target_ulong pc, target_ulong cs_base, |
int flags, int cflags) |
int flags, int cflags) |
{ |
{ |
Line 1002 / Line 1062: TranslationBlock *tb_gen_code(CPUState *
tb->flags = flags; |
tb->flags = flags; |
tb->cflags = cflags; |
tb->cflags = cflags; |
cpu_gen_code(env, tb, &code_gen_size); |
cpu_gen_code(env, tb, &code_gen_size); |
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); |
code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size + |
|
CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); |
|
|
/* check next page if needed */ |
/* check next page if needed */ |
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; |
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; |
Line 1014 / Line 1075: TranslationBlock *tb_gen_code(CPUState *
return tb; |
return tb; |
} |
} |
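
The code_gen_ptr update in the hunk above is the usual power-of-two round-up idiom, (x + align - 1) & ~(align - 1), now computed in uintptr_t so it also works where long is narrower than a pointer. A small worked example, assuming a 16-byte CODE_GEN_ALIGN purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define CODE_GEN_ALIGN 16   /* assumed for the example */

static uintptr_t round_up(uintptr_t x, uintptr_t align)
{
    /* Only valid for power-of-two alignments. */
    return (x + align - 1) & ~(align - 1);
}

int main(void)
{
    /* 0x1001 rounds up to the next 16-byte boundary; 0x1010 stays put. */
    printf("0x%lx\n", (unsigned long)round_up(0x1001, CODE_GEN_ALIGN)); /* 0x1010 */
    printf("0x%lx\n", (unsigned long)round_up(0x1010, CODE_GEN_ALIGN)); /* 0x1010 */
    return 0;
}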
|
|
|
/* |
|
* invalidate all TBs which intersect with the target physical pages |
|
* starting in range [start;end[. NOTE: start and end may refer to |
|
* different physical pages. 'is_cpu_write_access' should be true if called |
|
* from a real cpu write access: the virtual CPU will exit the current |
|
* TB if code is modified inside this TB. |
|
*/ |
|
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end, |
|
int is_cpu_write_access) |
|
{ |
|
while (start < end) { |
|
tb_invalidate_phys_page_range(start, end, is_cpu_write_access); |
|
start &= TARGET_PAGE_MASK; |
|
start += TARGET_PAGE_SIZE; |
|
} |
|
} |
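
The newly added tb_invalidate_phys_range() above just walks the range one guest page at a time and hands each step to the single-page helper; the first call may start mid-page, after which start is realigned to a page boundary. A minimal model of that loop, with a stub standing in for tb_invalidate_phys_page_range() and an assumed 4 KiB page size:

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12                    /* assumed: 4 KiB pages */
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint32_t)(TARGET_PAGE_SIZE - 1))

/* Stub: just show what the per-page helper would be handed. */
static void invalidate_one_page(uint32_t start, uint32_t end)
{
    printf("invalidate page containing [0x%x, 0x%x)\n", start, end);
}

static void invalidate_range(uint32_t start, uint32_t end)
{
    while (start < end) {
        invalidate_one_page(start, end);
        start &= TARGET_PAGE_MASK;   /* realign after a possibly unaligned start */
        start += TARGET_PAGE_SIZE;
    }
}

int main(void)
{
    /* A range that starts mid-page and touches three pages. */
    invalidate_range(0x1800, 0x3400);
    return 0;
}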
|
|
/* invalidate all TBs which intersect with the target physical page |
/* invalidate all TBs which intersect with the target physical page |
starting in range [start;end[. NOTE: start and end must refer to |
starting in range [start;end[. NOTE: start and end must refer to |
the same physical page. 'is_cpu_write_access' should be true if called |
the same physical page. 'is_cpu_write_access' should be true if called |
Line 1023 / Line 1101: void tb_invalidate_phys_page_range(tb_pa
int is_cpu_write_access) |
int is_cpu_write_access) |
{ |
{ |
TranslationBlock *tb, *tb_next, *saved_tb; |
TranslationBlock *tb, *tb_next, *saved_tb; |
CPUState *env = cpu_single_env; |
CPUArchState *env = cpu_single_env; |
tb_page_addr_t tb_start, tb_end; |
tb_page_addr_t tb_start, tb_end; |
PageDesc *p; |
PageDesc *p; |
int n; |
int n; |
Line 1050 / Line 1128: void tb_invalidate_phys_page_range(tb_pa
/* XXX: see if in some cases it could be faster to invalidate all the code */ |
/* XXX: see if in some cases it could be faster to invalidate all the code */ |
tb = p->first_tb; |
tb = p->first_tb; |
while (tb != NULL) { |
while (tb != NULL) { |
n = (long)tb & 3; |
n = (uintptr_t)tb & 3; |
tb = (TranslationBlock *)((long)tb & ~3); |
tb = (TranslationBlock *)((uintptr_t)tb & ~3); |
tb_next = tb->page_next[n]; |
tb_next = tb->page_next[n]; |
/* NOTE: this is subtle as a TB may span two physical pages */ |
/* NOTE: this is subtle as a TB may span two physical pages */ |
if (n == 0) { |
if (n == 0) { |
Line 1134 / Line 1212: static inline void tb_invalidate_phys_pa
qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", |
qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", |
cpu_single_env->mem_io_vaddr, len, |
cpu_single_env->mem_io_vaddr, len, |
cpu_single_env->eip, |
cpu_single_env->eip, |
cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base); |
cpu_single_env->eip + |
|
(intptr_t)cpu_single_env->segs[R_CS].base); |
} |
} |
#endif |
#endif |
p = page_find(start >> TARGET_PAGE_BITS); |
p = page_find(start >> TARGET_PAGE_BITS); |
Line 1153 / Line 1232: static inline void tb_invalidate_phys_pa
|
|
#if !defined(CONFIG_SOFTMMU) |
#if !defined(CONFIG_SOFTMMU) |
static void tb_invalidate_phys_page(tb_page_addr_t addr, |
static void tb_invalidate_phys_page(tb_page_addr_t addr, |
unsigned long pc, void *puc) |
uintptr_t pc, void *puc) |
{ |
{ |
TranslationBlock *tb; |
TranslationBlock *tb; |
PageDesc *p; |
PageDesc *p; |
int n; |
int n; |
#ifdef TARGET_HAS_PRECISE_SMC |
#ifdef TARGET_HAS_PRECISE_SMC |
TranslationBlock *current_tb = NULL; |
TranslationBlock *current_tb = NULL; |
CPUState *env = cpu_single_env; |
CPUArchState *env = cpu_single_env; |
int current_tb_modified = 0; |
int current_tb_modified = 0; |
target_ulong current_pc = 0; |
target_ulong current_pc = 0; |
target_ulong current_cs_base = 0; |
target_ulong current_cs_base = 0; |
Line 1178 / Line 1257: static void tb_invalidate_phys_page(tb_p
} |
} |
#endif |
#endif |
while (tb != NULL) { |
while (tb != NULL) { |
n = (long)tb & 3; |
n = (uintptr_t)tb & 3; |
tb = (TranslationBlock *)((long)tb & ~3); |
tb = (TranslationBlock *)((uintptr_t)tb & ~3); |
#ifdef TARGET_HAS_PRECISE_SMC |
#ifdef TARGET_HAS_PRECISE_SMC |
if (current_tb == tb && |
if (current_tb == tb && |
(current_tb->cflags & CF_COUNT_MASK) != 1) { |
(current_tb->cflags & CF_COUNT_MASK) != 1) { |
Line 1227 / Line 1306: static inline void tb_alloc_page(Transla
#ifndef CONFIG_USER_ONLY |
#ifndef CONFIG_USER_ONLY |
page_already_protected = p->first_tb != NULL; |
page_already_protected = p->first_tb != NULL; |
#endif |
#endif |
p->first_tb = (TranslationBlock *)((long)tb | n); |
p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); |
invalidate_page_bitmap(p); |
invalidate_page_bitmap(p); |
|
|
#if defined(TARGET_HAS_SMC) || 1 |
#if defined(TARGET_HAS_SMC) || 1 |
Line 1294 / Line 1373: void tb_link_page(TranslationBlock *tb,
else |
else |
tb->page_addr[1] = -1; |
tb->page_addr[1] = -1; |
|
|
tb->jmp_first = (TranslationBlock *)((long)tb | 2); |
tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); |
tb->jmp_next[0] = NULL; |
tb->jmp_next[0] = NULL; |
tb->jmp_next[1] = NULL; |
tb->jmp_next[1] = NULL; |
|
|
Line 1312 / Line 1391: void tb_link_page(TranslationBlock *tb,
|
|
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < |
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < |
tb[1].tc_ptr. Return NULL if not found */ |
tb[1].tc_ptr. Return NULL if not found */ |
TranslationBlock *tb_find_pc(unsigned long tc_ptr) |
TranslationBlock *tb_find_pc(uintptr_t tc_ptr) |
{ |
{ |
int m_min, m_max, m; |
int m_min, m_max, m; |
unsigned long v; |
uintptr_t v; |
TranslationBlock *tb; |
TranslationBlock *tb; |
|
|
if (nb_tbs <= 0) |
if (nb_tbs <= 0) |
return NULL; |
return NULL; |
if (tc_ptr < (unsigned long)code_gen_buffer || |
if (tc_ptr < (uintptr_t)code_gen_buffer || |
tc_ptr >= (unsigned long)code_gen_ptr) |
tc_ptr >= (uintptr_t)code_gen_ptr) { |
return NULL; |
return NULL; |
|
} |
/* binary search (cf Knuth) */ |
/* binary search (cf Knuth) */ |
m_min = 0; |
m_min = 0; |
m_max = nb_tbs - 1; |
m_max = nb_tbs - 1; |
while (m_min <= m_max) { |
while (m_min <= m_max) { |
m = (m_min + m_max) >> 1; |
m = (m_min + m_max) >> 1; |
tb = &tbs[m]; |
tb = &tbs[m]; |
v = (unsigned long)tb->tc_ptr; |
v = (uintptr_t)tb->tc_ptr; |
if (v == tc_ptr) |
if (v == tc_ptr) |
return tb; |
return tb; |
else if (tc_ptr < v) { |
else if (tc_ptr < v) { |
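
tb_find_pc() above can use a binary search because translation blocks are generated into the code buffer in order, so their tc_ptr start addresses are already sorted. The sketch below shows the same "greatest start not above pc" search over a made-up array of start offsets; it simplifies the narrowing slightly compared with the loop above, and none of the data is real.

#include <stdint.h>
#include <stdio.h>

/* Start offsets of consecutive code blocks, sorted ascending. */
static const uintptr_t starts[] = { 0x000, 0x040, 0x0a0, 0x130, 0x200 };
#define NB ((int)(sizeof(starts) / sizeof(starts[0])))

/* Return the index of the block whose start is the greatest one <= pc. */
static int find_block(uintptr_t pc)
{
    int m_min = 0, m_max = NB - 1;

    if (pc < starts[0]) {
        return -1;
    }
    while (m_min < m_max) {
        int m = (m_min + m_max + 1) >> 1;
        if (starts[m] <= pc) {
            m_min = m;        /* candidate: keep looking to the right */
        } else {
            m_max = m - 1;
        }
    }
    return m_min;
}

int main(void)
{
    printf("%d\n", find_block(0x0a0));  /* 2 */
    printf("%d\n", find_block(0x12f));  /* 2 */
    printf("%d\n", find_block(0x130));  /* 3 */
    return 0;
}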
Line 1352 / Line 1432: static inline void tb_reset_jump_recursi
if (tb1 != NULL) { |
if (tb1 != NULL) { |
/* find head of list */ |
/* find head of list */ |
for(;;) { |
for(;;) { |
n1 = (long)tb1 & 3; |
n1 = (uintptr_t)tb1 & 3; |
tb1 = (TranslationBlock *)((long)tb1 & ~3); |
tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); |
if (n1 == 2) |
if (n1 == 2) |
break; |
break; |
tb1 = tb1->jmp_next[n1]; |
tb1 = tb1->jmp_next[n1]; |
Line 1365 / Line 1445: static inline void tb_reset_jump_recursi
ptb = &tb_next->jmp_first; |
ptb = &tb_next->jmp_first; |
for(;;) { |
for(;;) { |
tb1 = *ptb; |
tb1 = *ptb; |
n1 = (long)tb1 & 3; |
n1 = (uintptr_t)tb1 & 3; |
tb1 = (TranslationBlock *)((long)tb1 & ~3); |
tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); |
if (n1 == n && tb1 == tb) |
if (n1 == n && tb1 == tb) |
break; |
break; |
ptb = &tb1->jmp_next[n1]; |
ptb = &tb1->jmp_next[n1]; |
Line 1390 / Line 1470: static void tb_reset_jump_recursive(Tran
|
|
#if defined(TARGET_HAS_ICE) |
#if defined(TARGET_HAS_ICE) |
#if defined(CONFIG_USER_ONLY) |
#if defined(CONFIG_USER_ONLY) |
static void breakpoint_invalidate(CPUState *env, target_ulong pc) |
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) |
{ |
{ |
tb_invalidate_phys_page_range(pc, pc + 1, 0); |
tb_invalidate_phys_page_range(pc, pc + 1, 0); |
} |
} |
#else |
#else |
static void breakpoint_invalidate(CPUState *env, target_ulong pc) |
void tb_invalidate_phys_addr(target_phys_addr_t addr) |
{ |
{ |
target_phys_addr_t addr; |
|
target_ulong pd; |
|
ram_addr_t ram_addr; |
ram_addr_t ram_addr; |
PhysPageDesc *p; |
MemoryRegionSection *section; |
|
|
addr = cpu_get_phys_page_debug(env, pc); |
section = phys_page_find(addr >> TARGET_PAGE_BITS); |
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!(memory_region_is_ram(section->mr) |
if (!p) { |
|| (section->mr->rom_device && section->mr->readable))) { |
pd = IO_MEM_UNASSIGNED; |
return; |
} else { |
|
pd = p->phys_offset; |
|
} |
} |
ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK); |
ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) |
|
+ memory_region_section_addr(section, addr); |
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); |
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); |
} |
} |
|
|
|
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) |
|
{ |
|
tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) | |
|
(pc & ~TARGET_PAGE_MASK)); |
|
} |
#endif |
#endif |
#endif /* TARGET_HAS_ICE */ |
#endif /* TARGET_HAS_ICE */ |
|
|
#if defined(CONFIG_USER_ONLY) |
#if defined(CONFIG_USER_ONLY) |
void cpu_watchpoint_remove_all(CPUState *env, int mask) |
void cpu_watchpoint_remove_all(CPUArchState *env, int mask) |
|
|
{ |
{ |
} |
} |
|
|
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, |
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len, |
int flags, CPUWatchpoint **watchpoint) |
int flags, CPUWatchpoint **watchpoint) |
{ |
{ |
return -ENOSYS; |
return -ENOSYS; |
} |
} |
#else |
#else |
/* Add a watchpoint. */ |
/* Add a watchpoint. */ |
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, |
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len, |
int flags, CPUWatchpoint **watchpoint) |
int flags, CPUWatchpoint **watchpoint) |
{ |
{ |
target_ulong len_mask = ~(len - 1); |
target_ulong len_mask = ~(len - 1); |
CPUWatchpoint *wp; |
CPUWatchpoint *wp; |
|
|
/* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */ |
/* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */ |
if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) { |
if ((len & (len - 1)) || (addr & ~len_mask) || |
|
len == 0 || len > TARGET_PAGE_SIZE) { |
fprintf(stderr, "qemu: tried to set invalid watchpoint at " |
fprintf(stderr, "qemu: tried to set invalid watchpoint at " |
TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len); |
TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len); |
return -EINVAL; |
return -EINVAL; |
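
The new validity check above replaces the old fixed list of 1/2/4/8-byte watchpoints with any non-zero power-of-two length up to one target page, while still requiring the address to be aligned to that length. A small stand-alone predicate with the same logic, assuming a 4096-byte TARGET_PAGE_SIZE for the example:

#include <stdio.h>

#define TARGET_PAGE_SIZE 4096   /* assumed for the example */

static int watchpoint_len_ok(unsigned long addr, unsigned long len)
{
    unsigned long len_mask = ~(len - 1);
    /* power of two, non-zero, at most a page, and addr aligned to len */
    return !((len & (len - 1)) || (addr & ~len_mask) ||
             len == 0 || len > TARGET_PAGE_SIZE);
}

int main(void)
{
    printf("%d\n", watchpoint_len_ok(0x1000, 16));   /* 1: now accepted */
    printf("%d\n", watchpoint_len_ok(0x1004, 8));    /* 0: misaligned */
    printf("%d\n", watchpoint_len_ok(0x1000, 3));    /* 0: not a power of two */
    return 0;
}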
Line 1460 / Line 1544: int cpu_watchpoint_insert(CPUState *env,
} |
} |
|
|
/* Remove a specific watchpoint. */ |
/* Remove a specific watchpoint. */ |
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len, |
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len, |
int flags) |
int flags) |
{ |
{ |
target_ulong len_mask = ~(len - 1); |
target_ulong len_mask = ~(len - 1); |
Line 1477 / Line 1561: int cpu_watchpoint_remove(CPUState *env,
} |
} |
|
|
/* Remove a specific watchpoint by reference. */ |
/* Remove a specific watchpoint by reference. */ |
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) |
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint) |
{ |
{ |
QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry); |
QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry); |
|
|
Line 1487 / Line 1571: void cpu_watchpoint_remove_by_ref(CPUSta
} |
} |
|
|
/* Remove all matching watchpoints. */ |
/* Remove all matching watchpoints. */ |
void cpu_watchpoint_remove_all(CPUState *env, int mask) |
void cpu_watchpoint_remove_all(CPUArchState *env, int mask) |
{ |
{ |
CPUWatchpoint *wp, *next; |
CPUWatchpoint *wp, *next; |
|
|
Line 1499 / Line 1583: void cpu_watchpoint_remove_all(CPUState
#endif |
#endif |
|
|
/* Add a breakpoint. */ |
/* Add a breakpoint. */ |
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, |
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags, |
CPUBreakpoint **breakpoint) |
CPUBreakpoint **breakpoint) |
{ |
{ |
#if defined(TARGET_HAS_ICE) |
#if defined(TARGET_HAS_ICE) |
Line 1527 / Line 1611: int cpu_breakpoint_insert(CPUState *env,
} |
} |
|
|
/* Remove a specific breakpoint. */ |
/* Remove a specific breakpoint. */ |
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) |
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags) |
{ |
{ |
#if defined(TARGET_HAS_ICE) |
#if defined(TARGET_HAS_ICE) |
CPUBreakpoint *bp; |
CPUBreakpoint *bp; |
Line 1545 / Line 1629: int cpu_breakpoint_remove(CPUState *env,
} |
} |
|
|
/* Remove a specific breakpoint by reference. */ |
/* Remove a specific breakpoint by reference. */ |
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) |
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint) |
{ |
{ |
#if defined(TARGET_HAS_ICE) |
#if defined(TARGET_HAS_ICE) |
QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry); |
QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry); |
Line 1557 / Line 1641: void cpu_breakpoint_remove_by_ref(CPUSta
} |
} |
|
|
/* Remove all matching breakpoints. */ |
/* Remove all matching breakpoints. */ |
void cpu_breakpoint_remove_all(CPUState *env, int mask) |
void cpu_breakpoint_remove_all(CPUArchState *env, int mask) |
{ |
{ |
#if defined(TARGET_HAS_ICE) |
#if defined(TARGET_HAS_ICE) |
CPUBreakpoint *bp, *next; |
CPUBreakpoint *bp, *next; |
Line 1571 / Line 1655: void cpu_breakpoint_remove_all(CPUState
|
|
/* enable or disable single step mode. EXCP_DEBUG is returned by the |
/* enable or disable single step mode. EXCP_DEBUG is returned by the |
CPU loop after each instruction */ |
CPU loop after each instruction */ |
void cpu_single_step(CPUState *env, int enabled) |
void cpu_single_step(CPUArchState *env, int enabled) |
{ |
{ |
#if defined(TARGET_HAS_ICE) |
#if defined(TARGET_HAS_ICE) |
if (env->singlestep_enabled != enabled) { |
if (env->singlestep_enabled != enabled) { |
Line 1603 / Line 1687: void cpu_set_log(int log_flags)
static char logfile_buf[4096]; |
static char logfile_buf[4096]; |
setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf)); |
setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf)); |
} |
} |
#elif !defined(_WIN32) |
#elif defined(_WIN32) |
/* Win32 doesn't support line-buffering and requires size >= 2 */ |
/* Win32 doesn't support line-buffering, so use unbuffered output. */ |
|
setvbuf(logfile, NULL, _IONBF, 0); |
|
#else |
setvbuf(logfile, NULL, _IOLBF, 0); |
setvbuf(logfile, NULL, _IOLBF, 0); |
#endif |
#endif |
log_append = 1; |
log_append = 1; |
Line 1625 / Line 1711: void cpu_set_log_filename(const char *fi
cpu_set_log(loglevel); |
cpu_set_log(loglevel); |
} |
} |
|
|
static void cpu_unlink_tb(CPUState *env) |
static void cpu_unlink_tb(CPUArchState *env) |
{ |
{ |
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the |
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the |
problem and hope the cpu will stop of its own accord. For userspace |
problem and hope the cpu will stop of its own accord. For userspace |
Line 1647 / Line 1733: static void cpu_unlink_tb(CPUState *env)
|
|
#ifndef CONFIG_USER_ONLY |
#ifndef CONFIG_USER_ONLY |
/* mask must never be zero, except for A20 change call */ |
/* mask must never be zero, except for A20 change call */ |
static void tcg_handle_interrupt(CPUState *env, int mask) |
static void tcg_handle_interrupt(CPUArchState *env, int mask) |
{ |
{ |
int old_mask; |
int old_mask; |
|
|
Line 1678 / Line 1764: CPUInterruptHandler cpu_interrupt_handle
|
|
#else /* CONFIG_USER_ONLY */ |
#else /* CONFIG_USER_ONLY */ |
|
|
void cpu_interrupt(CPUState *env, int mask) |
void cpu_interrupt(CPUArchState *env, int mask) |
{ |
{ |
env->interrupt_request |= mask; |
env->interrupt_request |= mask; |
cpu_unlink_tb(env); |
cpu_unlink_tb(env); |
} |
} |
#endif /* CONFIG_USER_ONLY */ |
#endif /* CONFIG_USER_ONLY */ |
|
|
void cpu_reset_interrupt(CPUState *env, int mask) |
void cpu_reset_interrupt(CPUArchState *env, int mask) |
{ |
{ |
env->interrupt_request &= ~mask; |
env->interrupt_request &= ~mask; |
} |
} |
|
|
void cpu_exit(CPUState *env) |
void cpu_exit(CPUArchState *env) |
{ |
{ |
env->exit_request = 1; |
env->exit_request = 1; |
cpu_unlink_tb(env); |
cpu_unlink_tb(env); |
Line 1728 / Line 1814: const CPULogItem cpu_log_items[] = {
{ 0, NULL, NULL }, |
{ 0, NULL, NULL }, |
}; |
}; |
|
|
#ifndef CONFIG_USER_ONLY |
|
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list |
|
= QLIST_HEAD_INITIALIZER(memory_client_list); |
|
|
|
static void cpu_notify_set_memory(target_phys_addr_t start_addr, |
|
ram_addr_t size, |
|
ram_addr_t phys_offset, |
|
bool log_dirty) |
|
{ |
|
CPUPhysMemoryClient *client; |
|
QLIST_FOREACH(client, &memory_client_list, list) { |
|
client->set_memory(client, start_addr, size, phys_offset, log_dirty); |
|
} |
|
} |
|
|
|
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start, |
|
target_phys_addr_t end) |
|
{ |
|
CPUPhysMemoryClient *client; |
|
QLIST_FOREACH(client, &memory_client_list, list) { |
|
int r = client->sync_dirty_bitmap(client, start, end); |
|
if (r < 0) |
|
return r; |
|
} |
|
return 0; |
|
} |
|
|
|
static int cpu_notify_migration_log(int enable) |
|
{ |
|
CPUPhysMemoryClient *client; |
|
QLIST_FOREACH(client, &memory_client_list, list) { |
|
int r = client->migration_log(client, enable); |
|
if (r < 0) |
|
return r; |
|
} |
|
return 0; |
|
} |
|
|
|
struct last_map { |
|
target_phys_addr_t start_addr; |
|
ram_addr_t size; |
|
ram_addr_t phys_offset; |
|
}; |
|
|
|
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical |
|
* address. Each intermediate table provides the next L2_BITs of guest |
|
* physical address space. The number of levels vary based on host and |
|
* guest configuration, making it efficient to build the final guest |
|
* physical address by seeding the L1 offset and shifting and adding in |
|
* each L2 offset as we recurse through them. */ |
|
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level, |
|
void **lp, target_phys_addr_t addr, |
|
struct last_map *map) |
|
{ |
|
int i; |
|
|
|
if (*lp == NULL) { |
|
return; |
|
} |
|
if (level == 0) { |
|
PhysPageDesc *pd = *lp; |
|
addr <<= L2_BITS + TARGET_PAGE_BITS; |
|
for (i = 0; i < L2_SIZE; ++i) { |
|
if (pd[i].phys_offset != IO_MEM_UNASSIGNED) { |
|
target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS; |
|
|
|
if (map->size && |
|
start_addr == map->start_addr + map->size && |
|
pd[i].phys_offset == map->phys_offset + map->size) { |
|
|
|
map->size += TARGET_PAGE_SIZE; |
|
continue; |
|
} else if (map->size) { |
|
client->set_memory(client, map->start_addr, |
|
map->size, map->phys_offset, false); |
|
} |
|
|
|
map->start_addr = start_addr; |
|
map->size = TARGET_PAGE_SIZE; |
|
map->phys_offset = pd[i].phys_offset; |
|
} |
|
} |
|
} else { |
|
void **pp = *lp; |
|
for (i = 0; i < L2_SIZE; ++i) { |
|
phys_page_for_each_1(client, level - 1, pp + i, |
|
(addr << L2_BITS) | i, map); |
|
} |
|
} |
|
} |
|
|
|
static void phys_page_for_each(CPUPhysMemoryClient *client) |
|
{ |
|
int i; |
|
struct last_map map = { }; |
|
|
|
for (i = 0; i < P_L1_SIZE; ++i) { |
|
phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1, |
|
l1_phys_map + i, i, &map); |
|
} |
|
if (map.size) { |
|
client->set_memory(client, map.start_addr, map.size, map.phys_offset, |
|
false); |
|
} |
|
} |
|
|
|
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client) |
|
{ |
|
QLIST_INSERT_HEAD(&memory_client_list, client, list); |
|
phys_page_for_each(client); |
|
} |
|
|
|
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client) |
|
{ |
|
QLIST_REMOVE(client, list); |
|
} |
|
#endif |
|
|
|
static int cmp1(const char *s1, int n, const char *s2) |
static int cmp1(const char *s1, int n, const char *s2) |
{ |
{ |
if (strlen(s2) != n) |
if (strlen(s2) != n) |
Line 1886 / Line 1854: int cpu_str_to_log_mask(const char *str)
return mask; |
return mask; |
} |
} |
|
|
void cpu_abort(CPUState *env, const char *fmt, ...) |
void cpu_abort(CPUArchState *env, const char *fmt, ...) |
{ |
{ |
va_list ap; |
va_list ap; |
va_list ap2; |
va_list ap2; |
Line 1926 / Line 1894: void cpu_abort(CPUState *env, const char
abort(); |
abort(); |
} |
} |
|
|
CPUState *cpu_copy(CPUState *env) |
CPUArchState *cpu_copy(CPUArchState *env) |
{ |
{ |
CPUState *new_env = cpu_init(env->cpu_model_str); |
CPUArchState *new_env = cpu_init(env->cpu_model_str); |
CPUState *next_cpu = new_env->next_cpu; |
CPUArchState *next_cpu = new_env->next_cpu; |
int cpu_index = new_env->cpu_index; |
int cpu_index = new_env->cpu_index; |
#if defined(TARGET_HAS_ICE) |
#if defined(TARGET_HAS_ICE) |
CPUBreakpoint *bp; |
CPUBreakpoint *bp; |
CPUWatchpoint *wp; |
CPUWatchpoint *wp; |
#endif |
#endif |
|
|
memcpy(new_env, env, sizeof(CPUState)); |
memcpy(new_env, env, sizeof(CPUArchState)); |
|
|
/* Preserve chaining and index. */ |
/* Preserve chaining and index. */ |
new_env->next_cpu = next_cpu; |
new_env->next_cpu = next_cpu; |
Line 1961 / Line 1929: CPUState *cpu_copy(CPUState *env)
} |
} |
|
|
#if !defined(CONFIG_USER_ONLY) |
#if !defined(CONFIG_USER_ONLY) |
|
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr) |
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr) |
|
{ |
{ |
unsigned int i; |
unsigned int i; |
|
|
Line 1977 / Line 1944: static inline void tlb_flush_jmp_cache(C
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); |
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); |
} |
} |
|
|
static CPUTLBEntry s_cputlb_empty_entry = { |
|
.addr_read = -1, |
|
.addr_write = -1, |
|
.addr_code = -1, |
|
.addend = -1, |
|
}; |
|
|
|
/* NOTE: if flush_global is true, also flush global entries (not |
|
implemented yet) */ |
|
void tlb_flush(CPUState *env, int flush_global) |
|
{ |
|
int i; |
|
|
|
#if defined(DEBUG_TLB) |
|
printf("tlb_flush:\n"); |
|
#endif |
|
/* must reset current TB so that interrupts cannot modify the |
|
links while we are modifying them */ |
|
env->current_tb = NULL; |
|
|
|
for(i = 0; i < CPU_TLB_SIZE; i++) { |
|
int mmu_idx; |
|
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { |
|
env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry; |
|
} |
|
} |
|
|
|
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); |
|
|
|
env->tlb_flush_addr = -1; |
|
env->tlb_flush_mask = 0; |
|
tlb_flush_count++; |
|
} |
|
|
|
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) |
|
{ |
|
if (addr == (tlb_entry->addr_read & |
|
(TARGET_PAGE_MASK | TLB_INVALID_MASK)) || |
|
addr == (tlb_entry->addr_write & |
|
(TARGET_PAGE_MASK | TLB_INVALID_MASK)) || |
|
addr == (tlb_entry->addr_code & |
|
(TARGET_PAGE_MASK | TLB_INVALID_MASK))) { |
|
*tlb_entry = s_cputlb_empty_entry; |
|
} |
|
} |
|
|
|
void tlb_flush_page(CPUState *env, target_ulong addr) |
|
{ |
|
int i; |
|
int mmu_idx; |
|
|
|
#if defined(DEBUG_TLB) |
|
printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr); |
|
#endif |
|
/* Check if we need to flush due to large pages. */ |
|
if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { |
|
#if defined(DEBUG_TLB) |
|
printf("tlb_flush_page: forced full flush (" |
|
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", |
|
env->tlb_flush_addr, env->tlb_flush_mask); |
|
#endif |
|
tlb_flush(env, 1); |
|
return; |
|
} |
|
/* must reset current TB so that interrupts cannot modify the |
|
links while we are modifying them */ |
|
env->current_tb = NULL; |
|
|
|
addr &= TARGET_PAGE_MASK; |
|
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
|
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) |
|
tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); |
|
|
|
tlb_flush_jmp_cache(env, addr); |
|
} |
|
|
|
/* update the TLBs so that writes to code in the virtual page 'addr' |
|
can be detected */ |
|
static void tlb_protect_code(ram_addr_t ram_addr) |
|
{ |
|
cpu_physical_memory_reset_dirty(ram_addr, |
|
ram_addr + TARGET_PAGE_SIZE, |
|
CODE_DIRTY_FLAG); |
|
} |
|
|
|
/* update the TLB so that writes in physical page 'phys_addr' are no longer |
|
tested for self modifying code */ |
|
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, |
|
target_ulong vaddr) |
|
{ |
|
cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG); |
|
} |
|
|
|
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, |
|
unsigned long start, unsigned long length) |
|
{ |
|
unsigned long addr; |
|
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { |
|
addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; |
|
if ((addr - start) < length) { |
|
tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY; |
|
} |
|
} |
|
} |
|
|
|
/* Note: start and end must be within the same ram block. */ |
/* Note: start and end must be within the same ram block. */ |
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, |
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, |
int dirty_flags) |
int dirty_flags) |
{ |
{ |
CPUState *env; |
uintptr_t length, start1; |
unsigned long length, start1; |
|
int i; |
|
|
|
start &= TARGET_PAGE_MASK; |
start &= TARGET_PAGE_MASK; |
end = TARGET_PAGE_ALIGN(end); |
end = TARGET_PAGE_ALIGN(end); |
Line 2100 / Line 1960: void cpu_physical_memory_reset_dirty(ram
|
|
/* we modify the TLB cache so that the dirty bit will be set again |
/* we modify the TLB cache so that the dirty bit will be set again |
when accessing the range */ |
when accessing the range */ |
start1 = (unsigned long)qemu_safe_ram_ptr(start); |
start1 = (uintptr_t)qemu_safe_ram_ptr(start); |
/* Check that we don't span multiple blocks - this breaks the |
/* Check that we don't span multiple blocks - this breaks the |
address comparisons below. */ |
address comparisons below. */ |
if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1 |
if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1 |
!= (end - 1) - start) { |
!= (end - 1) - start) { |
abort(); |
abort(); |
} |
} |
|
cpu_tlb_reset_dirty_all(start1, length); |
for(env = first_cpu; env != NULL; env = env->next_cpu) { |
|
int mmu_idx; |
|
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { |
|
for(i = 0; i < CPU_TLB_SIZE; i++) |
|
tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], |
|
start1, length); |
|
} |
|
} |
|
} |
} |
|
|
int cpu_physical_memory_set_dirty_tracking(int enable) |
int cpu_physical_memory_set_dirty_tracking(int enable) |
{ |
{ |
int ret = 0; |
int ret = 0; |
in_migration = enable; |
in_migration = enable; |
ret = cpu_notify_migration_log(!!enable); |
|
return ret; |
|
} |
|
|
|
int cpu_physical_memory_get_dirty_tracking(void) |
|
{ |
|
return in_migration; |
|
} |
|
|
|
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, |
|
target_phys_addr_t end_addr) |
|
{ |
|
int ret; |
|
|
|
ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr); |
|
return ret; |
return ret; |
} |
} |
|
|
int cpu_physical_log_start(target_phys_addr_t start_addr, |
target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env, |
ram_addr_t size) |
MemoryRegionSection *section, |
{ |
target_ulong vaddr, |
CPUPhysMemoryClient *client; |
target_phys_addr_t paddr, |
QLIST_FOREACH(client, &memory_client_list, list) { |
int prot, |
if (client->log_start) { |
target_ulong *address) |
int r = client->log_start(client, start_addr, size); |
|
if (r < 0) { |
|
return r; |
|
} |
|
} |
|
} |
|
return 0; |
|
} |
|
|
|
int cpu_physical_log_stop(target_phys_addr_t start_addr, |
|
ram_addr_t size) |
|
{ |
|
CPUPhysMemoryClient *client; |
|
QLIST_FOREACH(client, &memory_client_list, list) { |
|
if (client->log_stop) { |
|
int r = client->log_stop(client, start_addr, size); |
|
if (r < 0) { |
|
return r; |
|
} |
|
} |
|
} |
|
return 0; |
|
} |
|
|
|
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) |
|
{ |
|
ram_addr_t ram_addr; |
|
void *p; |
|
|
|
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { |
|
p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK) |
|
+ tlb_entry->addend); |
|
ram_addr = qemu_ram_addr_from_host_nofail(p); |
|
if (!cpu_physical_memory_is_dirty(ram_addr)) { |
|
tlb_entry->addr_write |= TLB_NOTDIRTY; |
|
} |
|
} |
|
} |
|
|
|
/* update the TLB according to the current state of the dirty bits */ |
|
void cpu_tlb_update_dirty(CPUState *env) |
|
{ |
|
int i; |
|
int mmu_idx; |
|
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { |
|
for(i = 0; i < CPU_TLB_SIZE; i++) |
|
tlb_update_dirty(&env->tlb_table[mmu_idx][i]); |
|
} |
|
} |
|
|
|
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) |
|
{ |
|
if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) |
|
tlb_entry->addr_write = vaddr; |
|
} |
|
|
|
/* update the TLB corresponding to virtual page vaddr |
|
so that it is no longer dirty */ |
|
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr) |
|
{ |
{ |
int i; |
|
int mmu_idx; |
|
|
|
vaddr &= TARGET_PAGE_MASK; |
|
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
|
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) |
|
tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); |
|
} |
|
|
|
/* Our TLB does not support large pages, so remember the area covered by |
|
large pages and trigger a full TLB flush if these are invalidated. */ |
|
static void tlb_add_large_page(CPUState *env, target_ulong vaddr, |
|
target_ulong size) |
|
{ |
|
target_ulong mask = ~(size - 1); |
|
|
|
if (env->tlb_flush_addr == (target_ulong)-1) { |
|
env->tlb_flush_addr = vaddr & mask; |
|
env->tlb_flush_mask = mask; |
|
return; |
|
} |
|
/* Extend the existing region to include the new page. |
|
This is a compromise between unnecessary flushes and the cost |
|
of maintaining a full variable size TLB. */ |
|
mask &= env->tlb_flush_mask; |
|
while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { |
|
mask <<= 1; |
|
} |
|
env->tlb_flush_addr &= mask; |
|
env->tlb_flush_mask = mask; |
|
} |
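
tlb_add_large_page(), shown in the left-hand column above (this TLB code is on its way out of this file), widens the recorded region until a single naturally aligned block covers both the previously recorded region and the new page. A worked example with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Existing large-page region: 2 MiB at 0x00400000. */
    uint32_t flush_addr = 0x00400000;
    uint32_t flush_mask = ~(uint32_t)(0x200000 - 1);   /* 0xffe00000 */

    /* New 2 MiB page at 0x00c80000 must be folded into the region. */
    uint32_t vaddr = 0x00c80000;
    uint32_t mask  = ~(uint32_t)(0x200000 - 1);

    mask &= flush_mask;
    while (((flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;                 /* widen until both fall in one block */
    }
    flush_addr &= mask;

    /* Ends up as one 16 MiB region: mask 0xff000000, addr 0x00000000. */
    printf("addr = 0x%08x, mask = 0x%08x\n", flush_addr, mask);
    return 0;
}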
|
|
|
/* Add a new TLB entry. At most one entry for a given virtual address |
|
is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the |
|
supplied size is only used by tlb_flush_page. */ |
|
void tlb_set_page(CPUState *env, target_ulong vaddr, |
|
target_phys_addr_t paddr, int prot, |
|
int mmu_idx, target_ulong size) |
|
{ |
|
PhysPageDesc *p; |
|
unsigned long pd; |
|
unsigned int index; |
|
target_ulong address; |
|
target_ulong code_address; |
|
unsigned long addend; |
|
CPUTLBEntry *te; |
|
CPUWatchpoint *wp; |
|
target_phys_addr_t iotlb; |
target_phys_addr_t iotlb; |
|
CPUWatchpoint *wp; |
|
|
assert(size >= TARGET_PAGE_SIZE); |
if (memory_region_is_ram(section->mr)) { |
if (size != TARGET_PAGE_SIZE) { |
|
tlb_add_large_page(env, vaddr, size); |
|
} |
|
p = phys_page_find(paddr >> TARGET_PAGE_BITS); |
|
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
#if defined(DEBUG_TLB) |
|
printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx |
|
" prot=%x idx=%d pd=0x%08lx\n", |
|
vaddr, paddr, prot, mmu_idx, pd); |
|
#endif |
|
|
|
address = vaddr; |
|
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { |
|
/* IO memory case (romd handled later) */ |
|
address |= TLB_MMIO; |
|
} |
|
addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); |
|
if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) { |
|
/* Normal RAM. */ |
/* Normal RAM. */ |
iotlb = pd & TARGET_PAGE_MASK; |
iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) |
if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM) |
+ memory_region_section_addr(section, paddr); |
iotlb |= IO_MEM_NOTDIRTY; |
if (!section->readonly) { |
else |
iotlb |= phys_section_notdirty; |
iotlb |= IO_MEM_ROM; |
} else { |
|
iotlb |= phys_section_rom; |
|
} |
} else { |
} else { |
/* IO handlers are currently passed a physical address. |
/* IO handlers are currently passed a physical address. |
It would be nice to pass an offset from the base address |
It would be nice to pass an offset from the base address |
Line 2291 / Line 2003: void tlb_set_page(CPUState *env, target_
and avoid full address decoding in every device. |
and avoid full address decoding in every device. |
We can't use the high bits of pd for this because |
We can't use the high bits of pd for this because |
IO_MEM_ROMD uses these as a ram address. */ |
IO_MEM_ROMD uses these as a ram address. */ |
iotlb = (pd & ~TARGET_PAGE_MASK); |
iotlb = section - phys_sections; |
if (p) { |
iotlb += memory_region_section_addr(section, paddr); |
iotlb += p->region_offset; |
|
} else { |
|
iotlb += paddr; |
|
} |
|
} |
} |
|
|
code_address = address; |
|
/* Make accesses to pages with watchpoints go via the |
/* Make accesses to pages with watchpoints go via the |
watchpoint trap routines. */ |
watchpoint trap routines. */ |
QTAILQ_FOREACH(wp, &env->watchpoints, entry) { |
QTAILQ_FOREACH(wp, &env->watchpoints, entry) { |
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { |
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { |
/* Avoid trapping reads of pages with a write breakpoint. */ |
/* Avoid trapping reads of pages with a write breakpoint. */ |
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { |
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { |
iotlb = io_mem_watch + paddr; |
iotlb = phys_section_watch + paddr; |
address |= TLB_MMIO; |
*address |= TLB_MMIO; |
break; |
break; |
} |
} |
} |
} |
} |
} |
|
|
index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
return iotlb; |
env->iotlb[mmu_idx][index] = iotlb - vaddr; |
|
te = &env->tlb_table[mmu_idx][index]; |
|
te->addend = addend - vaddr; |
|
if (prot & PAGE_READ) { |
|
te->addr_read = address; |
|
} else { |
|
te->addr_read = -1; |
|
} |
|
|
|
if (prot & PAGE_EXEC) { |
|
te->addr_code = code_address; |
|
} else { |
|
te->addr_code = -1; |
|
} |
|
if (prot & PAGE_WRITE) { |
|
if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || |
|
(pd & IO_MEM_ROMD)) { |
|
/* Write access calls the I/O callback. */ |
|
te->addr_write = address | TLB_MMIO; |
|
} else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && |
|
!cpu_physical_memory_is_dirty(pd)) { |
|
te->addr_write = address | TLB_NOTDIRTY; |
|
} else { |
|
te->addr_write = address; |
|
} |
|
} else { |
|
te->addr_write = -1; |
|
} |
|
} |
} |
|
|
#else |
#else |
|
|
void tlb_flush(CPUState *env, int flush_global) |
|
{ |
|
} |
|
|
|
void tlb_flush_page(CPUState *env, target_ulong addr) |
|
{ |
|
} |
|
|
|
/* |
/* |
* Walks guest process memory "regions" one by one |
* Walks guest process memory "regions" one by one |
* and calls callback function 'fn' for each region. |
* and calls callback function 'fn' for each region. |
Line 2363 / Line 2033: struct walk_memory_regions_data
{ |
{ |
walk_memory_regions_fn fn; |
walk_memory_regions_fn fn; |
void *priv; |
void *priv; |
unsigned long start; |
uintptr_t start; |
int prot; |
int prot; |
}; |
}; |
|
|
Line 2424 / Line 2094: static int walk_memory_regions_1(struct
int walk_memory_regions(void *priv, walk_memory_regions_fn fn) |
int walk_memory_regions(void *priv, walk_memory_regions_fn fn) |
{ |
{ |
struct walk_memory_regions_data data; |
struct walk_memory_regions_data data; |
unsigned long i; |
uintptr_t i; |
|
|
data.fn = fn; |
data.fn = fn; |
data.priv = priv; |
data.priv = priv; |
Line 2565 / Line 2235: int page_check_range(target_ulong start,
|
|
/* called from signal handler: invalidate the code and unprotect the |
/* called from signal handler: invalidate the code and unprotect the |
page. Return TRUE if the fault was successfully handled. */ |
page. Return TRUE if the fault was successfully handled. */ |
int page_unprotect(target_ulong address, unsigned long pc, void *puc) |
int page_unprotect(target_ulong address, uintptr_t pc, void *puc) |
{ |
{ |
unsigned int prot; |
unsigned int prot; |
PageDesc *p; |
PageDesc *p; |
Line 2610 / Line 2280: int page_unprotect(target_ulong address,
mmap_unlock(); |
mmap_unlock(); |
return 0; |
return 0; |
} |
} |
|
|
static inline void tlb_set_dirty(CPUState *env, |
|
unsigned long addr, target_ulong vaddr) |
|
{ |
|
} |
|
#endif /* defined(CONFIG_USER_ONLY) */ |
#endif /* defined(CONFIG_USER_ONLY) */ |
|
|
#if !defined(CONFIG_USER_ONLY) |
#if !defined(CONFIG_USER_ONLY) |
|
|
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) |
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) |
typedef struct subpage_t { |
typedef struct subpage_t { |
|
MemoryRegion iomem; |
target_phys_addr_t base; |
target_phys_addr_t base; |
ram_addr_t sub_io_index[TARGET_PAGE_SIZE]; |
uint16_t sub_section[TARGET_PAGE_SIZE]; |
ram_addr_t region_offset[TARGET_PAGE_SIZE]; |
|
} subpage_t; |
} subpage_t; |
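
The reworked subpage_t above keeps one uint16_t section index per byte offset within the page, so registering a sub-range just fills a slice of that table and a lookup is a single array index. Below is a reduced model of that idea with the page shrunk to 16 bytes and invented names; it is only a sketch, not the real subpage code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 16                       /* shrunk for the example */
#define SUBPAGE_IDX(addr) ((addr) & (PAGE_SIZE - 1))

struct subpage {
    uint32_t base;                         /* page-aligned base address */
    uint16_t sub_section[PAGE_SIZE];       /* one section index per offset */
};

static void subpage_register(struct subpage *sp, uint32_t start, uint32_t end,
                             uint16_t section)
{
    uint32_t i;
    for (i = start; i <= end; i++) {
        sp->sub_section[i] = section;
    }
}

static uint16_t subpage_lookup(struct subpage *sp, uint32_t addr)
{
    return sp->sub_section[SUBPAGE_IDX(addr)];
}

int main(void)
{
    static struct subpage sp = { .base = 0x1000 };   /* all slots start at 0 */

    subpage_register(&sp, 0x4, 0x7, 2);    /* offsets 4..7  -> section 2 */
    subpage_register(&sp, 0x8, 0xf, 3);    /* offsets 8..15 -> section 3 */

    printf("%d %d %d\n",
           subpage_lookup(&sp, 0x1002),    /* 0: default/unassigned */
           subpage_lookup(&sp, 0x1005),    /* 2 */
           subpage_lookup(&sp, 0x100c));   /* 3 */
    return 0;
}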
|
|
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
ram_addr_t memory, ram_addr_t region_offset); |
uint16_t section); |
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys, |
static subpage_t *subpage_init(target_phys_addr_t base); |
ram_addr_t orig_memory, |
static void destroy_page_desc(uint16_t section_index) |
ram_addr_t region_offset); |
{ |
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \ |
MemoryRegionSection *section = &phys_sections[section_index]; |
need_subpage) \ |
MemoryRegion *mr = section->mr; |
do { \ |
|
if (addr > start_addr) \ |
|
start_addr2 = 0; \ |
|
else { \ |
|
start_addr2 = start_addr & ~TARGET_PAGE_MASK; \ |
|
if (start_addr2 > 0) \ |
|
need_subpage = 1; \ |
|
} \ |
|
\ |
|
if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \ |
|
end_addr2 = TARGET_PAGE_SIZE - 1; \ |
|
else { \ |
|
end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \ |
|
if (end_addr2 < TARGET_PAGE_SIZE - 1) \ |
|
need_subpage = 1; \ |
|
} \ |
|
} while (0) |
|
|
|
/* register physical memory. |
if (mr->subpage) { |
For RAM, 'size' must be a multiple of the target page size. |
subpage_t *subpage = container_of(mr, subpage_t, iomem); |
If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an |
memory_region_destroy(&subpage->iomem); |
io memory page. The address used when calling the IO function is |
g_free(subpage); |
the offset from the start of the region, plus region_offset. Both |
} |
start_addr and region_offset are rounded down to a page boundary |
} |
before calculating this offset. This should not be a problem unless |
|
the low bits of start_addr and region_offset differ. */ |
|
void cpu_register_physical_memory_log(target_phys_addr_t start_addr, |
|
ram_addr_t size, |
|
ram_addr_t phys_offset, |
|
ram_addr_t region_offset, |
|
bool log_dirty) |
|
{ |
|
target_phys_addr_t addr, end_addr; |
|
PhysPageDesc *p; |
|
CPUState *env; |
|
ram_addr_t orig_size = size; |
|
subpage_t *subpage; |
|
|
|
assert(size); |
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level) |
cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty); |
{ |
|
unsigned i; |
|
PhysPageEntry *p; |
|
|
if (phys_offset == IO_MEM_UNASSIGNED) { |
if (lp->ptr == PHYS_MAP_NODE_NIL) { |
region_offset = start_addr; |
return; |
} |
} |
region_offset &= TARGET_PAGE_MASK; |
|
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; |
|
end_addr = start_addr + (target_phys_addr_t)size; |
|
|
|
addr = start_addr; |
p = phys_map_nodes[lp->ptr]; |
do { |
for (i = 0; i < L2_SIZE; ++i) { |
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!p[i].is_leaf) { |
if (p && p->phys_offset != IO_MEM_UNASSIGNED) { |
destroy_l2_mapping(&p[i], level - 1); |
ram_addr_t orig_memory = p->phys_offset; |
|
target_phys_addr_t start_addr2, end_addr2; |
|
int need_subpage = 0; |
|
|
|
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, |
|
need_subpage); |
|
if (need_subpage) { |
|
if (!(orig_memory & IO_MEM_SUBPAGE)) { |
|
subpage = subpage_init((addr & TARGET_PAGE_MASK), |
|
&p->phys_offset, orig_memory, |
|
p->region_offset); |
|
} else { |
|
subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK) |
|
>> IO_MEM_SHIFT]; |
|
} |
|
subpage_register(subpage, start_addr2, end_addr2, phys_offset, |
|
region_offset); |
|
p->region_offset = 0; |
|
} else { |
|
p->phys_offset = phys_offset; |
|
if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || |
|
(phys_offset & IO_MEM_ROMD)) |
|
phys_offset += TARGET_PAGE_SIZE; |
|
} |
|
} else { |
} else { |
p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); |
destroy_page_desc(p[i].ptr); |
p->phys_offset = phys_offset; |
} |
p->region_offset = region_offset; |
} |
if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || |
lp->is_leaf = 0; |
(phys_offset & IO_MEM_ROMD)) { |
lp->ptr = PHYS_MAP_NODE_NIL; |
phys_offset += TARGET_PAGE_SIZE; |
} |
} else { |
|
target_phys_addr_t start_addr2, end_addr2; |
static void destroy_all_mappings(void) |
int need_subpage = 0; |
{ |
|
destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1); |
|
phys_map_nodes_reset(); |
|
} |
|
|
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, |
static uint16_t phys_section_add(MemoryRegionSection *section) |
end_addr2, need_subpage); |
{ |
|
if (phys_sections_nb == phys_sections_nb_alloc) { |
|
phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16); |
|
phys_sections = g_renew(MemoryRegionSection, phys_sections, |
|
phys_sections_nb_alloc); |
|
} |
|
phys_sections[phys_sections_nb] = *section; |
|
return phys_sections_nb++; |
|
} |
|
|
if (need_subpage) { |
static void phys_sections_clear(void) |
subpage = subpage_init((addr & TARGET_PAGE_MASK), |
{ |
&p->phys_offset, IO_MEM_UNASSIGNED, |
phys_sections_nb = 0; |
addr & TARGET_PAGE_MASK); |
} |
subpage_register(subpage, start_addr2, end_addr2, |
|
phys_offset, region_offset); |
|
p->region_offset = 0; |
|
} |
|
} |
|
} |
|
region_offset += TARGET_PAGE_SIZE; |
|
addr += TARGET_PAGE_SIZE; |
|
} while (addr != end_addr); |
|
|
|
/* since each CPU stores ram addresses in its TLB cache, we must |
/* register physical memory. |
reset the modified entries */ |
For RAM, 'size' must be a multiple of the target page size. |
/* XXX: slow ! */ |
If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an |
for(env = first_cpu; env != NULL; env = env->next_cpu) { |
io memory page. The address used when calling the IO function is |
tlb_flush(env, 1); |
the offset from the start of the region, plus region_offset. Both |
|
start_addr and region_offset are rounded down to a page boundary |
|
before calculating this offset. This should not be a problem unless |
|
the low bits of start_addr and region_offset differ. */ |
|
static void register_subpage(MemoryRegionSection *section) |
|
{ |
|
subpage_t *subpage; |
|
target_phys_addr_t base = section->offset_within_address_space |
|
& TARGET_PAGE_MASK; |
|
MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS); |
|
MemoryRegionSection subsection = { |
|
.offset_within_address_space = base, |
|
.size = TARGET_PAGE_SIZE, |
|
}; |
|
target_phys_addr_t start, end; |
|
|
|
assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); |
|
|
|
if (!(existing->mr->subpage)) { |
|
subpage = subpage_init(base); |
|
subsection.mr = &subpage->iomem; |
|
phys_page_set(base >> TARGET_PAGE_BITS, 1, |
|
phys_section_add(&subsection)); |
|
} else { |
|
subpage = container_of(existing->mr, subpage_t, iomem); |
} |
} |
|
start = section->offset_within_address_space & ~TARGET_PAGE_MASK; |
|
end = start + section->size; |
|
subpage_register(subpage, start, end, phys_section_add(section)); |
} |
} |
|
|
/* XXX: temporary until new memory mapping API */ |
|
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr) |
static void register_multipage(MemoryRegionSection *section) |
{ |
{ |
PhysPageDesc *p; |
target_phys_addr_t start_addr = section->offset_within_address_space; |
|
ram_addr_t size = section->size; |
|
target_phys_addr_t addr; |
|
uint16_t section_index = phys_section_add(section); |
|
|
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
assert(size); |
if (!p) |
|
return IO_MEM_UNASSIGNED; |
addr = start_addr; |
return p->phys_offset; |
phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS, |
|
section_index); |
|
} |
|
|
|
void cpu_register_physical_memory_log(MemoryRegionSection *section, |
|
bool readonly) |
|
{ |
|
MemoryRegionSection now = *section, remain = *section; |
|
|
|
if ((now.offset_within_address_space & ~TARGET_PAGE_MASK) |
|
|| (now.size < TARGET_PAGE_SIZE)) { |
|
now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space) |
|
- now.offset_within_address_space, |
|
now.size); |
|
register_subpage(&now); |
|
remain.size -= now.size; |
|
remain.offset_within_address_space += now.size; |
|
remain.offset_within_region += now.size; |
|
} |
|
now = remain; |
|
now.size &= TARGET_PAGE_MASK; |
|
if (now.size) { |
|
register_multipage(&now); |
|
remain.size -= now.size; |
|
remain.offset_within_address_space += now.size; |
|
remain.offset_within_region += now.size; |
|
} |
|
now = remain; |
|
if (now.size) { |
|
register_subpage(&now); |
|
} |
} |
} |
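The new cpu_register_physical_memory_log() splits a section into an unaligned head, a run of whole pages handled by register_multipage(), and an unaligned tail, with the partial pages going through register_subpage(). The same head/middle/tail split, sketched standalone with an assumed 4 KiB page size:

    #include <stdio.h>

    #define PAGE_SIZE 4096ull
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long long start = 0x10000200ull, size = 0x3100ull;
        unsigned long long off = start, left = size;

        if ((off & ~PAGE_MASK) || left < PAGE_SIZE) {      /* unaligned head */
            unsigned long long head = MIN(PAGE_ALIGN(off) - off, left);
            printf("subpage head: %#llx +%#llx\n", off, head);
            off += head; left -= head;
        }
        unsigned long long middle = left & PAGE_MASK;      /* whole pages */
        if (middle) {
            printf("full pages:   %#llx +%#llx\n", off, middle);
            off += middle; left -= middle;
        }
        if (left) {                                        /* unaligned tail */
            printf("subpage tail: %#llx +%#llx\n", off, left);
        }
        return 0;
    }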
|
|
|
|
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) |
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) |
{ |
{ |
if (kvm_enabled()) |
if (kvm_enabled()) |
Line 2914 static ram_addr_t last_ram_offset(void)
|
Line 2587 static ram_addr_t last_ram_offset(void)
|
return last; |
return last; |
} |
} |
|
|
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, |
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev) |
ram_addr_t size, void *host) |
|
{ |
{ |
RAMBlock *new_block, *block; |
RAMBlock *new_block, *block; |
|
|
size = TARGET_PAGE_ALIGN(size); |
new_block = NULL; |
new_block = g_malloc0(sizeof(*new_block)); |
QLIST_FOREACH(block, &ram_list.blocks, next) { |
|
if (block->offset == addr) { |
|
new_block = block; |
|
break; |
|
} |
|
} |
|
assert(new_block); |
|
assert(!new_block->idstr[0]); |
|
|
if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { |
if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { |
char *id = dev->parent_bus->info->get_dev_path(dev); |
char *id = dev->parent_bus->info->get_dev_path(dev); |
Line 2932 ram_addr_t qemu_ram_alloc_from_ptr(Devic
|
Line 2611 ram_addr_t qemu_ram_alloc_from_ptr(Devic
|
pstrcat(new_block->idstr, sizeof(new_block->idstr), name); |
pstrcat(new_block->idstr, sizeof(new_block->idstr), name); |
|
|
QLIST_FOREACH(block, &ram_list.blocks, next) { |
QLIST_FOREACH(block, &ram_list.blocks, next) { |
if (!strcmp(block->idstr, new_block->idstr)) { |
if (block != new_block && !strcmp(block->idstr, new_block->idstr)) { |
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", |
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", |
new_block->idstr); |
new_block->idstr); |
abort(); |
abort(); |
} |
} |
} |
} |
|
} |
|
|
|
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, |
|
MemoryRegion *mr) |
|
{ |
|
RAMBlock *new_block; |
|
|
|
size = TARGET_PAGE_ALIGN(size); |
|
new_block = g_malloc0(sizeof(*new_block)); |
|
|
|
new_block->mr = mr; |
new_block->offset = find_ram_offset(size); |
new_block->offset = find_ram_offset(size); |
if (host) { |
if (host) { |
new_block->host = host; |
new_block->host = host; |
Line 2971 ram_addr_t qemu_ram_alloc_from_ptr(Devic
|
Line 2660 ram_addr_t qemu_ram_alloc_from_ptr(Devic
|
} |
} |
#else |
#else |
if (xen_enabled()) { |
if (xen_enabled()) { |
xen_ram_alloc(new_block->offset, size); |
xen_ram_alloc(new_block->offset, size, mr); |
} else { |
} else { |
new_block->host = qemu_vmalloc(size); |
new_block->host = qemu_vmalloc(size); |
} |
} |
Line 2994 ram_addr_t qemu_ram_alloc_from_ptr(Devic
|
Line 2683 ram_addr_t qemu_ram_alloc_from_ptr(Devic
|
return new_block->offset; |
return new_block->offset; |
} |
} |
|
|
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size) |
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr) |
{ |
{ |
return qemu_ram_alloc_from_ptr(dev, name, size, NULL); |
return qemu_ram_alloc_from_ptr(size, NULL, mr); |
} |
} |
|
|
void qemu_ram_free_from_ptr(ram_addr_t addr) |
void qemu_ram_free_from_ptr(ram_addr_t addr) |
Line 3250 ram_addr_t qemu_ram_addr_from_host_nofai
|
Line 2939 ram_addr_t qemu_ram_addr_from_host_nofai
|
return ram_addr; |
return ram_addr; |
} |
} |
|
|
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) |
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr, |
|
unsigned size) |
{ |
{ |
#ifdef DEBUG_UNASSIGNED |
#ifdef DEBUG_UNASSIGNED |
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); |
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); |
#endif |
#endif |
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1); |
cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size); |
#endif |
#endif |
return 0; |
return 0; |
} |
} |
|
|
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr) |
static void unassigned_mem_write(void *opaque, target_phys_addr_t addr, |
|
uint64_t val, unsigned size) |
{ |
{ |
#ifdef DEBUG_UNASSIGNED |
#ifdef DEBUG_UNASSIGNED |
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); |
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); |
#endif |
#endif |
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2); |
cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size); |
#endif |
#endif |
return 0; |
|
} |
} |
|
|
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr) |
static const MemoryRegionOps unassigned_mem_ops = { |
{ |
.read = unassigned_mem_read, |
#ifdef DEBUG_UNASSIGNED |
.write = unassigned_mem_write, |
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); |
.endianness = DEVICE_NATIVE_ENDIAN, |
#endif |
}; |
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
|
cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4); |
|
#endif |
|
return 0; |
|
} |
|
|
|
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) |
|
{ |
|
#ifdef DEBUG_UNASSIGNED |
|
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); |
|
#endif |
|
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
|
cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1); |
|
#endif |
|
} |
|
|
|
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) |
static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr, |
|
unsigned size) |
{ |
{ |
#ifdef DEBUG_UNASSIGNED |
abort(); |
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); |
|
#endif |
|
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
|
cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2); |
|
#endif |
|
} |
} |
|
|
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) |
static void error_mem_write(void *opaque, target_phys_addr_t addr, |
|
uint64_t value, unsigned size) |
{ |
{ |
#ifdef DEBUG_UNASSIGNED |
abort(); |
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); |
|
#endif |
|
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
|
cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4); |
|
#endif |
|
} |
} |
|
|
static CPUReadMemoryFunc * const unassigned_mem_read[3] = { |
static const MemoryRegionOps error_mem_ops = { |
unassigned_mem_readb, |
.read = error_mem_read, |
unassigned_mem_readw, |
.write = error_mem_write, |
unassigned_mem_readl, |
.endianness = DEVICE_NATIVE_ENDIAN, |
}; |
}; |
|
|
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = { |
static const MemoryRegionOps rom_mem_ops = { |
unassigned_mem_writeb, |
.read = error_mem_read, |
unassigned_mem_writew, |
.write = unassigned_mem_write, |
unassigned_mem_writel, |
.endianness = DEVICE_NATIVE_ENDIAN, |
}; |
}; |
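The per-width callback arrays on the left are replaced by a single read/write pair that receives the access size, as in unassigned_mem_ops and rom_mem_ops above. A minimal stand-in for that ops-table style (the DemoOps type and functions are invented, not QEMU's MemoryRegionOps):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct {
        uint64_t (*read)(void *opaque, uint64_t addr, unsigned size);
        void (*write)(void *opaque, uint64_t addr, uint64_t val, unsigned size);
    } DemoOps;

    static uint64_t demo_read(void *opaque, uint64_t addr, unsigned size)
    {
        (void)opaque;
        printf("read  %u bytes at %#llx\n", size, (unsigned long long)addr);
        return 0;
    }

    static void demo_write(void *opaque, uint64_t addr, uint64_t val, unsigned size)
    {
        (void)opaque;
        printf("write %u bytes at %#llx = %#llx\n",
               size, (unsigned long long)addr, (unsigned long long)val);
    }

    static const DemoOps demo_ops = { .read = demo_read, .write = demo_write };

    int main(void)
    {
        demo_ops.write(NULL, 0x100, 0xabcd, 2); /* one callback, size passed in */
        demo_ops.read(NULL, 0x104, 4);
        return 0;
    }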
|
|
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr, |
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr, |
uint32_t val) |
uint64_t val, unsigned size) |
{ |
|
int dirty_flags; |
|
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
|
if (!(dirty_flags & CODE_DIRTY_FLAG)) { |
|
#if !defined(CONFIG_USER_ONLY) |
|
tb_invalidate_phys_page_fast(ram_addr, 1); |
|
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
|
#endif |
|
} |
|
stb_p(qemu_get_ram_ptr(ram_addr), val); |
|
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
|
cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); |
|
/* we remove the notdirty callback only if the code has been |
|
flushed */ |
|
if (dirty_flags == 0xff) |
|
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); |
|
} |
|
|
|
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr, |
|
uint32_t val) |
|
{ |
{ |
int dirty_flags; |
int dirty_flags; |
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
if (!(dirty_flags & CODE_DIRTY_FLAG)) { |
if (!(dirty_flags & CODE_DIRTY_FLAG)) { |
#if !defined(CONFIG_USER_ONLY) |
#if !defined(CONFIG_USER_ONLY) |
tb_invalidate_phys_page_fast(ram_addr, 2); |
tb_invalidate_phys_page_fast(ram_addr, size); |
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
#endif |
#endif |
} |
} |
stw_p(qemu_get_ram_ptr(ram_addr), val); |
switch (size) { |
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
case 1: |
cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); |
stb_p(qemu_get_ram_ptr(ram_addr), val); |
/* we remove the notdirty callback only if the code has been |
break; |
flushed */ |
case 2: |
if (dirty_flags == 0xff) |
stw_p(qemu_get_ram_ptr(ram_addr), val); |
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); |
break; |
} |
case 4: |
|
stl_p(qemu_get_ram_ptr(ram_addr), val); |
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr, |
break; |
uint32_t val) |
default: |
{ |
abort(); |
int dirty_flags; |
|
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
|
if (!(dirty_flags & CODE_DIRTY_FLAG)) { |
|
#if !defined(CONFIG_USER_ONLY) |
|
tb_invalidate_phys_page_fast(ram_addr, 4); |
|
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
|
#endif |
|
} |
} |
stl_p(qemu_get_ram_ptr(ram_addr), val); |
|
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); |
cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); |
/* we remove the notdirty callback only if the code has been |
/* we remove the notdirty callback only if the code has been |
Line 3385 static void notdirty_mem_writel(void *op
|
Line 3024 static void notdirty_mem_writel(void *op
|
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); |
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); |
} |
} |
|
|
static CPUReadMemoryFunc * const error_mem_read[3] = { |
static const MemoryRegionOps notdirty_mem_ops = { |
NULL, /* never used */ |
.read = error_mem_read, |
NULL, /* never used */ |
.write = notdirty_mem_write, |
NULL, /* never used */ |
.endianness = DEVICE_NATIVE_ENDIAN, |
}; |
|
|
|
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = { |
|
notdirty_mem_writeb, |
|
notdirty_mem_writew, |
|
notdirty_mem_writel, |
|
}; |
}; |
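notdirty_mem_write() is the slow write path for pages that may contain translated code: it invalidates any TBs for the page, performs the store, then sets every dirty bit except CODE_DIRTY_FLAG, and only once all bits are set can the fast path be restored. A compact sketch of that bookkeeping; the flag values below are placeholders, not QEMU's constants:

    #include <stdio.h>
    #include <stdint.h>

    #define CODE_DIRTY_FLAG  0x02
    #define ALL_DIRTY_FLAGS  0xff

    static uint8_t dirty_flags;   /* one dirty byte per page in the real code */

    static void notdirty_write(void)
    {
        if (!(dirty_flags & CODE_DIRTY_FLAG)) {
            /* translated code may exist for this page: invalidate it first */
            printf("invalidating TBs for this page\n");
            dirty_flags |= CODE_DIRTY_FLAG;
        }
        /* ... perform the actual store here ... */
        dirty_flags |= (ALL_DIRTY_FLAGS & ~CODE_DIRTY_FLAG);
        if (dirty_flags == ALL_DIRTY_FLAGS) {
            printf("page fully dirty: fast-path writes can be re-enabled\n");
        }
    }

    int main(void)
    {
        notdirty_write();
        notdirty_write();
        return 0;
    }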
|
|
/* Generate a debug exception if a watchpoint has been hit. */ |
/* Generate a debug exception if a watchpoint has been hit. */ |
static void check_watchpoint(int offset, int len_mask, int flags) |
static void check_watchpoint(int offset, int len_mask, int flags) |
{ |
{ |
CPUState *env = cpu_single_env; |
CPUArchState *env = cpu_single_env; |
target_ulong pc, cs_base; |
target_ulong pc, cs_base; |
TranslationBlock *tb; |
TranslationBlock *tb; |
target_ulong vaddr; |
target_ulong vaddr; |
Line 3430 static void check_watchpoint(int offset,
|
Line 3063 static void check_watchpoint(int offset,
|
tb_phys_invalidate(tb, -1); |
tb_phys_invalidate(tb, -1); |
if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
env->exception_index = EXCP_DEBUG; |
env->exception_index = EXCP_DEBUG; |
|
cpu_loop_exit(env); |
} else { |
} else { |
cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); |
cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); |
tb_gen_code(env, pc, cs_base, cpu_flags, 1); |
tb_gen_code(env, pc, cs_base, cpu_flags, 1); |
|
cpu_resume_from_signal(env, NULL); |
} |
} |
cpu_resume_from_signal(env, NULL); |
|
} |
} |
} else { |
} else { |
wp->flags &= ~BP_WATCHPOINT_HIT; |
wp->flags &= ~BP_WATCHPOINT_HIT; |
Line 3445 static void check_watchpoint(int offset,
|
Line 3079 static void check_watchpoint(int offset,
|
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks, |
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks, |
so these check for a hit then pass through to the normal out-of-line |
so these check for a hit then pass through to the normal out-of-line |
phys routines. */ |
phys routines. */ |
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr) |
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr, |
|
unsigned size) |
{ |
{ |
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ); |
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ); |
return ldub_phys(addr); |
switch (size) { |
} |
case 1: return ldub_phys(addr); |
|
case 2: return lduw_phys(addr); |
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr) |
case 4: return ldl_phys(addr); |
{ |
default: abort(); |
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ); |
} |
return lduw_phys(addr); |
|
} |
|
|
|
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr) |
|
{ |
|
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ); |
|
return ldl_phys(addr); |
|
} |
|
|
|
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr, |
|
uint32_t val) |
|
{ |
|
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE); |
|
stb_phys(addr, val); |
|
} |
|
|
|
static void watch_mem_writew(void *opaque, target_phys_addr_t addr, |
|
uint32_t val) |
|
{ |
|
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE); |
|
stw_phys(addr, val); |
|
} |
} |
|
|
static void watch_mem_writel(void *opaque, target_phys_addr_t addr, |
static void watch_mem_write(void *opaque, target_phys_addr_t addr, |
uint32_t val) |
uint64_t val, unsigned size) |
{ |
{ |
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE); |
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE); |
stl_phys(addr, val); |
switch (size) { |
|
case 1: |
|
stb_phys(addr, val); |
|
break; |
|
case 2: |
|
stw_phys(addr, val); |
|
break; |
|
case 4: |
|
stl_phys(addr, val); |
|
break; |
|
default: abort(); |
|
} |
} |
} |
|
|
static CPUReadMemoryFunc * const watch_mem_read[3] = { |
static const MemoryRegionOps watch_mem_ops = { |
watch_mem_readb, |
.read = watch_mem_read, |
watch_mem_readw, |
.write = watch_mem_write, |
watch_mem_readl, |
.endianness = DEVICE_NATIVE_ENDIAN, |
}; |
|
|
|
static CPUWriteMemoryFunc * const watch_mem_write[3] = { |
|
watch_mem_writeb, |
|
watch_mem_writew, |
|
watch_mem_writel, |
|
}; |
}; |
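In watch_mem_read()/watch_mem_write() above, ~(size - 1) turns the access size into an alignment mask. The illustration below shows only that masking idea (it is not QEMU's exact watchpoint matching logic): the mask yields the naturally aligned start of the access, from which overlap with a watched byte can be decided.

    #include <stdio.h>
    #include <stdint.h>

    /* Does an access of 'size' bytes at 'addr' touch the watched byte 'watch'? */
    static int hits_watchpoint(uint64_t addr, unsigned size, uint64_t watch)
    {
        uint64_t start = addr & ~(uint64_t)(size - 1);  /* aligned start */
        return watch >= start && watch < start + size;
    }

    int main(void)
    {
        printf("%d\n", hits_watchpoint(0x1004, 4, 0x1006)); /* 1: word covers it */
        printf("%d\n", hits_watchpoint(0x1004, 2, 0x1006)); /* 0: halfword does not */
        return 0;
    }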
|
|
static inline uint32_t subpage_readlen (subpage_t *mmio, |
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr, |
target_phys_addr_t addr, |
unsigned len) |
unsigned int len) |
|
{ |
{ |
|
subpage_t *mmio = opaque; |
unsigned int idx = SUBPAGE_IDX(addr); |
unsigned int idx = SUBPAGE_IDX(addr); |
|
MemoryRegionSection *section; |
#if defined(DEBUG_SUBPAGE) |
#if defined(DEBUG_SUBPAGE) |
printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__, |
printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__, |
mmio, len, addr, idx); |
mmio, len, addr, idx); |
#endif |
#endif |
|
|
addr += mmio->region_offset[idx]; |
section = &phys_sections[mmio->sub_section[idx]]; |
idx = mmio->sub_io_index[idx]; |
addr += mmio->base; |
return io_mem_read[idx][len](io_mem_opaque[idx], addr); |
addr -= section->offset_within_address_space; |
|
addr += section->offset_within_region; |
|
return io_mem_read(section->mr, addr, len); |
} |
} |
|
|
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr, |
static void subpage_write(void *opaque, target_phys_addr_t addr, |
uint32_t value, unsigned int len) |
uint64_t value, unsigned len) |
{ |
{ |
|
subpage_t *mmio = opaque; |
unsigned int idx = SUBPAGE_IDX(addr); |
unsigned int idx = SUBPAGE_IDX(addr); |
|
MemoryRegionSection *section; |
#if defined(DEBUG_SUBPAGE) |
#if defined(DEBUG_SUBPAGE) |
printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", |
printf("%s: subpage %p len %d addr " TARGET_FMT_plx |
|
" idx %d value %"PRIx64"\n", |
__func__, mmio, len, addr, idx, value); |
__func__, mmio, len, addr, idx, value); |
#endif |
#endif |
|
|
addr += mmio->region_offset[idx]; |
section = &phys_sections[mmio->sub_section[idx]]; |
idx = mmio->sub_io_index[idx]; |
addr += mmio->base; |
io_mem_write[idx][len](io_mem_opaque[idx], addr, value); |
addr -= section->offset_within_address_space; |
} |
addr += section->offset_within_region; |
|
io_mem_write(section->mr, addr, value, len); |
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr) |
|
{ |
|
return subpage_readlen(opaque, addr, 0); |
|
} |
|
|
|
static void subpage_writeb (void *opaque, target_phys_addr_t addr, |
|
uint32_t value) |
|
{ |
|
subpage_writelen(opaque, addr, value, 0); |
|
} |
|
|
|
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr) |
|
{ |
|
return subpage_readlen(opaque, addr, 1); |
|
} |
|
|
|
static void subpage_writew (void *opaque, target_phys_addr_t addr, |
|
uint32_t value) |
|
{ |
|
subpage_writelen(opaque, addr, value, 1); |
|
} |
|
|
|
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr) |
|
{ |
|
return subpage_readlen(opaque, addr, 2); |
|
} |
} |
|
|
static void subpage_writel (void *opaque, target_phys_addr_t addr, |
static const MemoryRegionOps subpage_ops = { |
uint32_t value) |
.read = subpage_read, |
{ |
.write = subpage_write, |
subpage_writelen(opaque, addr, value, 2); |
.endianness = DEVICE_NATIVE_ENDIAN, |
} |
|
|
|
static CPUReadMemoryFunc * const subpage_read[] = { |
|
&subpage_readb, |
|
&subpage_readw, |
|
&subpage_readl, |
|
}; |
|
|
|
static CPUWriteMemoryFunc * const subpage_write[] = { |
|
&subpage_writeb, |
|
&subpage_writew, |
|
&subpage_writel, |
|
}; |
}; |
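subpage_read()/subpage_write() first look up the section for the byte offset, then rebase the address from the subpage's base into the owning memory region before forwarding the access. The address arithmetic in isolation, with made-up offsets:

    #include <stdio.h>
    #include <stdint.h>

    struct section {                   /* stand-in for MemoryRegionSection */
        uint64_t offset_within_address_space;
        uint64_t offset_within_region;
    };

    int main(void)
    {
        struct section sec = {
            .offset_within_address_space = 0x10000200,
            .offset_within_region        = 0x80,
        };
        uint64_t subpage_base = 0x10000000;  /* page that holds the subpage */
        uint64_t offset_in_page = 0x240;     /* what the dispatcher hands us */

        uint64_t addr = offset_in_page + subpage_base;   /* absolute address  */
        addr -= sec.offset_within_address_space;         /* into the section  */
        addr += sec.offset_within_region;                /* into the region   */

        printf("region-relative address: %#llx\n", (unsigned long long)addr);
        return 0;
    }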
|
|
static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr) |
static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr, |
{ |
unsigned size) |
ram_addr_t raddr = addr; |
|
void *ptr = qemu_get_ram_ptr(raddr); |
|
return ldub_p(ptr); |
|
} |
|
|
|
static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr, |
|
uint32_t value) |
|
{ |
|
ram_addr_t raddr = addr; |
|
void *ptr = qemu_get_ram_ptr(raddr); |
|
stb_p(ptr, value); |
|
} |
|
|
|
static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr) |
|
{ |
|
ram_addr_t raddr = addr; |
|
void *ptr = qemu_get_ram_ptr(raddr); |
|
return lduw_p(ptr); |
|
} |
|
|
|
static void subpage_ram_writew(void *opaque, target_phys_addr_t addr, |
|
uint32_t value) |
|
{ |
|
ram_addr_t raddr = addr; |
|
void *ptr = qemu_get_ram_ptr(raddr); |
|
stw_p(ptr, value); |
|
} |
|
|
|
static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr) |
|
{ |
{ |
ram_addr_t raddr = addr; |
ram_addr_t raddr = addr; |
void *ptr = qemu_get_ram_ptr(raddr); |
void *ptr = qemu_get_ram_ptr(raddr); |
return ldl_p(ptr); |
switch (size) { |
|
case 1: return ldub_p(ptr); |
|
case 2: return lduw_p(ptr); |
|
case 4: return ldl_p(ptr); |
|
default: abort(); |
|
} |
} |
} |
|
|
static void subpage_ram_writel(void *opaque, target_phys_addr_t addr, |
static void subpage_ram_write(void *opaque, target_phys_addr_t addr, |
uint32_t value) |
uint64_t value, unsigned size) |
{ |
{ |
ram_addr_t raddr = addr; |
ram_addr_t raddr = addr; |
void *ptr = qemu_get_ram_ptr(raddr); |
void *ptr = qemu_get_ram_ptr(raddr); |
stl_p(ptr, value); |
switch (size) { |
|
case 1: return stb_p(ptr, value); |
|
case 2: return stw_p(ptr, value); |
|
case 4: return stl_p(ptr, value); |
|
default: abort(); |
|
} |
} |
} |
|
|
static CPUReadMemoryFunc * const subpage_ram_read[] = { |
static const MemoryRegionOps subpage_ram_ops = { |
&subpage_ram_readb, |
.read = subpage_ram_read, |
&subpage_ram_readw, |
.write = subpage_ram_write, |
&subpage_ram_readl, |
.endianness = DEVICE_NATIVE_ENDIAN, |
}; |
|
|
|
static CPUWriteMemoryFunc * const subpage_ram_write[] = { |
|
&subpage_ram_writeb, |
|
&subpage_ram_writew, |
|
&subpage_ram_writel, |
|
}; |
}; |
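subpage_ram_read()/subpage_ram_write() pick the host load/store width from the size argument (ldub_p/lduw_p/ldl_p and friends). The same size dispatch can be written portably with memcpy; this is only a sketch of the idea, not QEMU's helpers:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static uint64_t host_read(const void *ptr, unsigned size)
    {
        uint8_t b; uint16_t w; uint32_t l;
        switch (size) {
        case 1: memcpy(&b, ptr, 1); return b;
        case 2: memcpy(&w, ptr, 2); return w;
        case 4: memcpy(&l, ptr, 4); return l;
        default: return 0; /* the real code aborts here */
        }
    }

    int main(void)
    {
        uint8_t ram[8] = { 0x11, 0x22, 0x33, 0x44 };
        printf("%#llx %#llx %#llx\n",
               (unsigned long long)host_read(ram, 1),
               (unsigned long long)host_read(ram, 2),
               (unsigned long long)host_read(ram, 4));
        return 0;
    }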
|
|
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
ram_addr_t memory, ram_addr_t region_offset) |
uint16_t section) |
{ |
{ |
int idx, eidx; |
int idx, eidx; |
|
|
Line 3640 static int subpage_register (subpage_t *
|
Line 3203 static int subpage_register (subpage_t *
|
printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__, |
printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__, |
mmio, start, end, idx, eidx, memory); |
mmio, start, end, idx, eidx, memory); |
#endif |
#endif |
if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { |
if (memory_region_is_ram(phys_sections[section].mr)) { |
memory = IO_MEM_SUBPAGE_RAM; |
MemoryRegionSection new_section = phys_sections[section]; |
|
new_section.mr = &io_mem_subpage_ram; |
|
section = phys_section_add(&new_section); |
} |
} |
memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
|
for (; idx <= eidx; idx++) { |
for (; idx <= eidx; idx++) { |
mmio->sub_io_index[idx] = memory; |
mmio->sub_section[idx] = section; |
mmio->region_offset[idx] = region_offset; |
|
} |
} |
|
|
return 0; |
return 0; |
} |
} |
|
|
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys, |
static subpage_t *subpage_init(target_phys_addr_t base) |
ram_addr_t orig_memory, |
|
ram_addr_t region_offset) |
|
{ |
{ |
subpage_t *mmio; |
subpage_t *mmio; |
int subpage_memory; |
|
|
|
mmio = g_malloc0(sizeof(subpage_t)); |
mmio = g_malloc0(sizeof(subpage_t)); |
|
|
mmio->base = base; |
mmio->base = base; |
subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio, |
memory_region_init_io(&mmio->iomem, &subpage_ops, mmio, |
DEVICE_NATIVE_ENDIAN); |
"subpage", TARGET_PAGE_SIZE); |
|
mmio->iomem.subpage = true; |
#if defined(DEBUG_SUBPAGE) |
#if defined(DEBUG_SUBPAGE) |
printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__, |
printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__, |
mmio, base, TARGET_PAGE_SIZE, subpage_memory); |
mmio, base, TARGET_PAGE_SIZE, subpage_memory); |
#endif |
#endif |
*phys = subpage_memory | IO_MEM_SUBPAGE; |
subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned); |
subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset); |
|
|
|
return mmio; |
return mmio; |
} |
} |
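The new code recovers the enclosing subpage_t from its embedded iomem field with container_of(), as in destroy_page_desc() and register_subpage() earlier. The macro is plain pointer arithmetic over offsetof(); a self-contained version with invented structure names:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner { int dummy; };
    struct outer { int id; struct inner member; };

    int main(void)
    {
        struct outer o = { .id = 42 };
        struct inner *ip = &o.member;            /* only the embedded field */
        struct outer *op = container_of(ip, struct outer, member);
        printf("recovered id = %d\n", op->id);   /* prints 42 */
        return 0;
    }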
|
|
static int get_free_io_mem_idx(void) |
static uint16_t dummy_section(MemoryRegion *mr) |
{ |
{ |
int i; |
MemoryRegionSection section = { |
|
.mr = mr, |
|
.offset_within_address_space = 0, |
|
.offset_within_region = 0, |
|
.size = UINT64_MAX, |
|
}; |
|
|
for (i = 0; i<IO_MEM_NB_ENTRIES; i++) |
return phys_section_add(§ion); |
if (!io_mem_used[i]) { |
|
io_mem_used[i] = 1; |
|
return i; |
|
} |
|
fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES); |
|
return -1; |
|
} |
} |
|
|
/* |
MemoryRegion *iotlb_to_region(target_phys_addr_t index) |
* Usually, devices operate in little endian mode. There are devices out |
{ |
* there that operate in big endian too. Each device gets byte swapped |
return phys_sections[index & ~TARGET_PAGE_MASK].mr; |
* mmio if plugged onto a CPU that does the other endianness. |
} |
* |
|
* CPU Device swap? |
|
* |
|
* little little no |
|
* little big yes |
|
* big little yes |
|
* big big no |
|
*/ |
|
|
|
typedef struct SwapEndianContainer { |
static void io_mem_init(void) |
CPUReadMemoryFunc *read[3]; |
{ |
CPUWriteMemoryFunc *write[3]; |
memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX); |
void *opaque; |
memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX); |
} SwapEndianContainer; |
memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL, |
|
"unassigned", UINT64_MAX); |
|
memory_region_init_io(&io_mem_notdirty, ¬dirty_mem_ops, NULL, |
|
"notdirty", UINT64_MAX); |
|
memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL, |
|
"subpage-ram", UINT64_MAX); |
|
memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL, |
|
"watch", UINT64_MAX); |
|
} |
|
|
|
static void core_begin(MemoryListener *listener) |
|
{ |
|
destroy_all_mappings(); |
|
phys_sections_clear(); |
|
phys_map.ptr = PHYS_MAP_NODE_NIL; |
|
phys_section_unassigned = dummy_section(&io_mem_unassigned); |
|
phys_section_notdirty = dummy_section(&io_mem_notdirty); |
|
phys_section_rom = dummy_section(&io_mem_rom); |
|
phys_section_watch = dummy_section(&io_mem_watch); |
|
} |
|
|
static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr) |
static void core_commit(MemoryListener *listener) |
{ |
{ |
uint32_t val; |
CPUArchState *env; |
SwapEndianContainer *c = opaque; |
|
val = c->read[0](c->opaque, addr); |
/* since each CPU stores ram addresses in its TLB cache, we must |
return val; |
reset the modified entries */ |
|
/* XXX: slow ! */ |
|
for(env = first_cpu; env != NULL; env = env->next_cpu) { |
|
tlb_flush(env, 1); |
|
} |
} |
} |
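The SwapEndianContainer code being removed on the left wrapped a device's accessors with byte swaps whenever device and target endianness disagreed (see the table in the comment above). The heart of such a wrapper is a bswap around the inner call; a sketch with hand-rolled swap helpers and an invented device_readl():

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t bswap16_(uint16_t v) { return (uint16_t)((v >> 8) | (v << 8)); }
    static uint32_t bswap32_(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
               ((v << 8) & 0x00ff0000u) | (v << 24);
    }

    /* Pretend this is the device's native 32-bit register read. */
    static uint32_t device_readl(uint32_t addr) { (void)addr; return 0x12345678u; }

    /* Wrapper used when the CPU's endianness differs from the device's. */
    static uint32_t swapendian_readl(uint32_t addr)
    {
        return bswap32_(device_readl(addr));
    }

    int main(void)
    {
        printf("raw %#x, swapped %#x\n", device_readl(0), swapendian_readl(0));
        printf("bswap16(0x1234) = %#x\n", bswap16_(0x1234));
        return 0;
    }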
|
|
static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr) |
static void core_region_add(MemoryListener *listener, |
|
MemoryRegionSection *section) |
{ |
{ |
uint32_t val; |
cpu_register_physical_memory_log(section, section->readonly); |
SwapEndianContainer *c = opaque; |
|
val = bswap16(c->read[1](c->opaque, addr)); |
|
return val; |
|
} |
} |
|
|
static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr) |
static void core_region_del(MemoryListener *listener, |
|
MemoryRegionSection *section) |
{ |
{ |
uint32_t val; |
|
SwapEndianContainer *c = opaque; |
|
val = bswap32(c->read[2](c->opaque, addr)); |
|
return val; |
|
} |
} |
|
|
static CPUReadMemoryFunc * const swapendian_readfn[3]={ |
static void core_region_nop(MemoryListener *listener, |
swapendian_mem_readb, |
MemoryRegionSection *section) |
swapendian_mem_readw, |
{ |
swapendian_mem_readl |
cpu_register_physical_memory_log(section, section->readonly); |
}; |
} |
|
|
static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr, |
static void core_log_start(MemoryListener *listener, |
uint32_t val) |
MemoryRegionSection *section) |
{ |
{ |
SwapEndianContainer *c = opaque; |
|
c->write[0](c->opaque, addr, val); |
|
} |
} |
|
|
static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr, |
static void core_log_stop(MemoryListener *listener, |
uint32_t val) |
MemoryRegionSection *section) |
{ |
{ |
SwapEndianContainer *c = opaque; |
|
c->write[1](c->opaque, addr, bswap16(val)); |
|
} |
} |
|
|
static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr, |
static void core_log_sync(MemoryListener *listener, |
uint32_t val) |
MemoryRegionSection *section) |
{ |
{ |
SwapEndianContainer *c = opaque; |
|
c->write[2](c->opaque, addr, bswap32(val)); |
|
} |
} |
|
|
static CPUWriteMemoryFunc * const swapendian_writefn[3]={ |
static void core_log_global_start(MemoryListener *listener) |
swapendian_mem_writeb, |
{ |
swapendian_mem_writew, |
cpu_physical_memory_set_dirty_tracking(1); |
swapendian_mem_writel |
} |
}; |
|
|
|
static void swapendian_init(int io_index) |
static void core_log_global_stop(MemoryListener *listener) |
{ |
{ |
SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer)); |
cpu_physical_memory_set_dirty_tracking(0); |
int i; |
} |
|
|
/* Swap mmio for big endian targets */ |
static void core_eventfd_add(MemoryListener *listener, |
c->opaque = io_mem_opaque[io_index]; |
MemoryRegionSection *section, |
for (i = 0; i < 3; i++) { |
bool match_data, uint64_t data, int fd) |
c->read[i] = io_mem_read[io_index][i]; |
{ |
c->write[i] = io_mem_write[io_index][i]; |
} |
|
|
io_mem_read[io_index][i] = swapendian_readfn[i]; |
static void core_eventfd_del(MemoryListener *listener, |
io_mem_write[io_index][i] = swapendian_writefn[i]; |
MemoryRegionSection *section, |
} |
bool match_data, uint64_t data, int fd) |
io_mem_opaque[io_index] = c; |
{ |
} |
} |
|
|
static void swapendian_del(int io_index) |
static void io_begin(MemoryListener *listener) |
{ |
{ |
if (io_mem_read[io_index][0] == swapendian_readfn[0]) { |
|
g_free(io_mem_opaque[io_index]); |
|
} |
|
} |
} |
|
|
/* mem_read and mem_write are arrays of functions containing the |
static void io_commit(MemoryListener *listener) |
function to access byte (index 0), word (index 1) and dword (index |
|
2). Functions can be omitted with a NULL function pointer. |
|
If io_index is non zero, the corresponding io zone is |
|
modified. If it is zero, a new io zone is allocated. The return |
|
value can be used with cpu_register_physical_memory(). (-1) is |
|
returned if error. */ |
|
static int cpu_register_io_memory_fixed(int io_index, |
|
CPUReadMemoryFunc * const *mem_read, |
|
CPUWriteMemoryFunc * const *mem_write, |
|
void *opaque, enum device_endian endian) |
|
{ |
{ |
int i; |
} |
|
|
if (io_index <= 0) { |
static void io_region_add(MemoryListener *listener, |
io_index = get_free_io_mem_idx(); |
MemoryRegionSection *section) |
if (io_index == -1) |
{ |
return io_index; |
MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1); |
} else { |
|
io_index >>= IO_MEM_SHIFT; |
|
if (io_index >= IO_MEM_NB_ENTRIES) |
|
return -1; |
|
} |
|
|
|
for (i = 0; i < 3; ++i) { |
mrio->mr = section->mr; |
io_mem_read[io_index][i] |
mrio->offset = section->offset_within_region; |
= (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]); |
iorange_init(&mrio->iorange, &memory_region_iorange_ops, |
} |
section->offset_within_address_space, section->size); |
for (i = 0; i < 3; ++i) { |
ioport_register(&mrio->iorange); |
io_mem_write[io_index][i] |
} |
= (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]); |
|
} |
|
io_mem_opaque[io_index] = opaque; |
|
|
|
switch (endian) { |
|
case DEVICE_BIG_ENDIAN: |
|
#ifndef TARGET_WORDS_BIGENDIAN |
|
swapendian_init(io_index); |
|
#endif |
|
break; |
|
case DEVICE_LITTLE_ENDIAN: |
|
#ifdef TARGET_WORDS_BIGENDIAN |
|
swapendian_init(io_index); |
|
#endif |
|
break; |
|
case DEVICE_NATIVE_ENDIAN: |
|
default: |
|
break; |
|
} |
|
|
|
return (io_index << IO_MEM_SHIFT); |
static void io_region_del(MemoryListener *listener, |
|
MemoryRegionSection *section) |
|
{ |
|
isa_unassign_ioport(section->offset_within_address_space, section->size); |
} |
} |
|
|
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read, |
static void io_region_nop(MemoryListener *listener, |
CPUWriteMemoryFunc * const *mem_write, |
MemoryRegionSection *section) |
void *opaque, enum device_endian endian) |
|
{ |
{ |
return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian); |
|
} |
} |
|
|
void cpu_unregister_io_memory(int io_table_address) |
static void io_log_start(MemoryListener *listener, |
|
MemoryRegionSection *section) |
{ |
{ |
int i; |
} |
int io_index = io_table_address >> IO_MEM_SHIFT; |
|
|
|
swapendian_del(io_index); |
static void io_log_stop(MemoryListener *listener, |
|
MemoryRegionSection *section) |
|
{ |
|
} |
|
|
for (i=0;i < 3; i++) { |
static void io_log_sync(MemoryListener *listener, |
io_mem_read[io_index][i] = unassigned_mem_read[i]; |
MemoryRegionSection *section) |
io_mem_write[io_index][i] = unassigned_mem_write[i]; |
{ |
} |
|
io_mem_opaque[io_index] = NULL; |
|
io_mem_used[io_index] = 0; |
|
} |
} |
|
|
static void io_mem_init(void) |
static void io_log_global_start(MemoryListener *listener) |
{ |
{ |
int i; |
} |
|
|
|
static void io_log_global_stop(MemoryListener *listener) |
|
{ |
|
} |
|
|
cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, |
static void io_eventfd_add(MemoryListener *listener, |
unassigned_mem_write, NULL, |
MemoryRegionSection *section, |
DEVICE_NATIVE_ENDIAN); |
bool match_data, uint64_t data, int fd) |
cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, |
{ |
unassigned_mem_write, NULL, |
|
DEVICE_NATIVE_ENDIAN); |
|
cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, |
|
notdirty_mem_write, NULL, |
|
DEVICE_NATIVE_ENDIAN); |
|
cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read, |
|
subpage_ram_write, NULL, |
|
DEVICE_NATIVE_ENDIAN); |
|
for (i=0; i<5; i++) |
|
io_mem_used[i] = 1; |
|
|
|
io_mem_watch = cpu_register_io_memory(watch_mem_read, |
|
watch_mem_write, NULL, |
|
DEVICE_NATIVE_ENDIAN); |
|
} |
} |
|
|
|
static void io_eventfd_del(MemoryListener *listener, |
|
MemoryRegionSection *section, |
|
bool match_data, uint64_t data, int fd) |
|
{ |
|
} |
|
|
|
static MemoryListener core_memory_listener = { |
|
.begin = core_begin, |
|
.commit = core_commit, |
|
.region_add = core_region_add, |
|
.region_del = core_region_del, |
|
.region_nop = core_region_nop, |
|
.log_start = core_log_start, |
|
.log_stop = core_log_stop, |
|
.log_sync = core_log_sync, |
|
.log_global_start = core_log_global_start, |
|
.log_global_stop = core_log_global_stop, |
|
.eventfd_add = core_eventfd_add, |
|
.eventfd_del = core_eventfd_del, |
|
.priority = 0, |
|
}; |
|
|
|
static MemoryListener io_memory_listener = { |
|
.begin = io_begin, |
|
.commit = io_commit, |
|
.region_add = io_region_add, |
|
.region_del = io_region_del, |
|
.region_nop = io_region_nop, |
|
.log_start = io_log_start, |
|
.log_stop = io_log_stop, |
|
.log_sync = io_log_sync, |
|
.log_global_start = io_log_global_start, |
|
.log_global_stop = io_log_global_stop, |
|
.eventfd_add = io_eventfd_add, |
|
.eventfd_del = io_eventfd_del, |
|
.priority = 0, |
|
}; |
|
|
static void memory_map_init(void) |
static void memory_map_init(void) |
{ |
{ |
system_memory = g_malloc(sizeof(*system_memory)); |
system_memory = g_malloc(sizeof(*system_memory)); |
Line 3895 static void memory_map_init(void)
|
Line 3449 static void memory_map_init(void)
|
system_io = g_malloc(sizeof(*system_io)); |
system_io = g_malloc(sizeof(*system_io)); |
memory_region_init(system_io, "io", 65536); |
memory_region_init(system_io, "io", 65536); |
set_system_io_map(system_io); |
set_system_io_map(system_io); |
|
|
|
memory_listener_register(&core_memory_listener, system_memory); |
|
memory_listener_register(&io_memory_listener, system_io); |
} |
} |
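memory_map_init() now finishes by registering two MemoryListener structures, each a bundle of callbacks that the memory core invokes as regions are added, removed, or logged. Stripped of QEMU specifics, the pattern is a simple observer list; everything below is an invented illustration:

    #include <stdio.h>

    typedef struct Listener {
        void (*region_add)(struct Listener *l, const char *name);
        int priority;
        struct Listener *next;
    } Listener;

    static Listener *listeners;

    static void listener_register(Listener *l)
    {
        l->next = listeners;          /* the real code keeps this priority-sorted */
        listeners = l;
    }

    static void notify_region_add(const char *name)
    {
        for (Listener *l = listeners; l; l = l->next) {
            l->region_add(l, name);
        }
    }

    static void core_add(Listener *l, const char *name)
    {
        (void)l;
        printf("core listener saw region %s\n", name);
    }

    static Listener core_listener = { .region_add = core_add, .priority = 0 };

    int main(void)
    {
        listener_register(&core_listener);
        notify_region_add("ram");
        return 0;
    }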
|
|
MemoryRegion *get_system_memory(void) |
MemoryRegion *get_system_memory(void) |
Line 3911 MemoryRegion *get_system_io(void)
|
Line 3468 MemoryRegion *get_system_io(void)
|
|
|
/* physical memory access (slow version, mainly for debug) */ |
/* physical memory access (slow version, mainly for debug) */ |
#if defined(CONFIG_USER_ONLY) |
#if defined(CONFIG_USER_ONLY) |
int cpu_memory_rw_debug(CPUState *env, target_ulong addr, |
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, |
uint8_t *buf, int len, int is_write) |
uint8_t *buf, int len, int is_write) |
{ |
{ |
int l, flags; |
int l, flags; |
Line 3954 int cpu_memory_rw_debug(CPUState *env, t
|
Line 3511 int cpu_memory_rw_debug(CPUState *env, t
|
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, |
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, |
int len, int is_write) |
int len, int is_write) |
{ |
{ |
int l, io_index; |
int l; |
uint8_t *ptr; |
uint8_t *ptr; |
uint32_t val; |
uint32_t val; |
target_phys_addr_t page; |
target_phys_addr_t page; |
ram_addr_t pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
while (len > 0) { |
while (len > 0) { |
page = addr & TARGET_PAGE_MASK; |
page = addr & TARGET_PAGE_MASK; |
l = (page + TARGET_PAGE_SIZE) - addr; |
l = (page + TARGET_PAGE_SIZE) - addr; |
if (l > len) |
if (l > len) |
l = len; |
l = len; |
p = phys_page_find(page >> TARGET_PAGE_BITS); |
section = phys_page_find(page >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if (is_write) { |
if (is_write) { |
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
if (!memory_region_is_ram(section->mr)) { |
target_phys_addr_t addr1 = addr; |
target_phys_addr_t addr1; |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr1 = memory_region_section_addr(section, addr); |
if (p) |
|
addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
|
/* XXX: could force cpu_single_env to NULL to avoid |
/* XXX: could force cpu_single_env to NULL to avoid |
potential bugs */ |
potential bugs */ |
if (l >= 4 && ((addr1 & 3) == 0)) { |
if (l >= 4 && ((addr1 & 3) == 0)) { |
/* 32 bit write access */ |
/* 32 bit write access */ |
val = ldl_p(buf); |
val = ldl_p(buf); |
io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val); |
io_mem_write(section->mr, addr1, val, 4); |
l = 4; |
l = 4; |
} else if (l >= 2 && ((addr1 & 1) == 0)) { |
} else if (l >= 2 && ((addr1 & 1) == 0)) { |
/* 16 bit write access */ |
/* 16 bit write access */ |
val = lduw_p(buf); |
val = lduw_p(buf); |
io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val); |
io_mem_write(section->mr, addr1, val, 2); |
l = 2; |
l = 2; |
} else { |
} else { |
/* 8 bit write access */ |
/* 8 bit write access */ |
val = ldub_p(buf); |
val = ldub_p(buf); |
io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val); |
io_mem_write(section->mr, addr1, val, 1); |
l = 1; |
l = 1; |
} |
} |
} else { |
} else if (!section->readonly) { |
ram_addr_t addr1; |
ram_addr_t addr1; |
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); |
addr1 = memory_region_get_ram_addr(section->mr) |
|
+ memory_region_section_addr(section, addr); |
/* RAM case */ |
/* RAM case */ |
ptr = qemu_get_ram_ptr(addr1); |
ptr = qemu_get_ram_ptr(addr1); |
memcpy(ptr, buf, l); |
memcpy(ptr, buf, l); |
Line 4013 void cpu_physical_memory_rw(target_phys_
|
Line 3563 void cpu_physical_memory_rw(target_phys_
|
qemu_put_ram_ptr(ptr); |
qemu_put_ram_ptr(ptr); |
} |
} |
} else { |
} else { |
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && |
if (!(memory_region_is_ram(section->mr) || |
!(pd & IO_MEM_ROMD)) { |
memory_region_is_romd(section->mr))) { |
target_phys_addr_t addr1 = addr; |
target_phys_addr_t addr1; |
/* I/O case */ |
/* I/O case */ |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr1 = memory_region_section_addr(section, addr); |
if (p) |
|
addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
|
if (l >= 4 && ((addr1 & 3) == 0)) { |
if (l >= 4 && ((addr1 & 3) == 0)) { |
/* 32 bit read access */ |
/* 32 bit read access */ |
val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1); |
val = io_mem_read(section->mr, addr1, 4); |
stl_p(buf, val); |
stl_p(buf, val); |
l = 4; |
l = 4; |
} else if (l >= 2 && ((addr1 & 1) == 0)) { |
} else if (l >= 2 && ((addr1 & 1) == 0)) { |
/* 16 bit read access */ |
/* 16 bit read access */ |
val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1); |
val = io_mem_read(section->mr, addr1, 2); |
stw_p(buf, val); |
stw_p(buf, val); |
l = 2; |
l = 2; |
} else { |
} else { |
/* 8 bit read access */ |
/* 8 bit read access */ |
val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1); |
val = io_mem_read(section->mr, addr1, 1); |
stb_p(buf, val); |
stb_p(buf, val); |
l = 1; |
l = 1; |
} |
} |
} else { |
} else { |
/* RAM case */ |
/* RAM case */ |
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); |
ptr = qemu_get_ram_ptr(section->mr->ram_addr |
memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l); |
+ memory_region_section_addr(section, |
|
addr)); |
|
memcpy(buf, ptr, l); |
qemu_put_ram_ptr(ptr); |
qemu_put_ram_ptr(ptr); |
} |
} |
} |
} |
Line 4056 void cpu_physical_memory_write_rom(targe
|
Line 3606 void cpu_physical_memory_write_rom(targe
|
int l; |
int l; |
uint8_t *ptr; |
uint8_t *ptr; |
target_phys_addr_t page; |
target_phys_addr_t page; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
while (len > 0) { |
while (len > 0) { |
page = addr & TARGET_PAGE_MASK; |
page = addr & TARGET_PAGE_MASK; |
l = (page + TARGET_PAGE_SIZE) - addr; |
l = (page + TARGET_PAGE_SIZE) - addr; |
if (l > len) |
if (l > len) |
l = len; |
l = len; |
p = phys_page_find(page >> TARGET_PAGE_BITS); |
section = phys_page_find(page >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM && |
if (!(memory_region_is_ram(section->mr) || |
(pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM && |
memory_region_is_romd(section->mr))) { |
!(pd & IO_MEM_ROMD)) { |
|
/* do nothing */ |
/* do nothing */ |
} else { |
} else { |
unsigned long addr1; |
unsigned long addr1; |
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); |
addr1 = memory_region_get_ram_addr(section->mr) |
|
+ memory_region_section_addr(section, addr); |
/* ROM/RAM case */ |
/* ROM/RAM case */ |
ptr = qemu_get_ram_ptr(addr1); |
ptr = qemu_get_ram_ptr(addr1); |
memcpy(ptr, buf, l); |
memcpy(ptr, buf, l); |
Line 4150 void *cpu_physical_memory_map(target_phy
|
Line 3694 void *cpu_physical_memory_map(target_phy
|
target_phys_addr_t todo = 0; |
target_phys_addr_t todo = 0; |
int l; |
int l; |
target_phys_addr_t page; |
target_phys_addr_t page; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
ram_addr_t raddr = RAM_ADDR_MAX; |
ram_addr_t raddr = RAM_ADDR_MAX; |
ram_addr_t rlen; |
ram_addr_t rlen; |
void *ret; |
void *ret; |
Line 4161 void *cpu_physical_memory_map(target_phy
|
Line 3704 void *cpu_physical_memory_map(target_phy
|
l = (page + TARGET_PAGE_SIZE) - addr; |
l = (page + TARGET_PAGE_SIZE) - addr; |
if (l > len) |
if (l > len) |
l = len; |
l = len; |
p = phys_page_find(page >> TARGET_PAGE_BITS); |
section = phys_page_find(page >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
if (!(memory_region_is_ram(section->mr) && !section->readonly)) { |
if (todo || bounce.buffer) { |
if (todo || bounce.buffer) { |
break; |
break; |
} |
} |
Line 4183 void *cpu_physical_memory_map(target_phy
|
Line 3721 void *cpu_physical_memory_map(target_phy
|
return bounce.buffer; |
return bounce.buffer; |
} |
} |
if (!todo) { |
if (!todo) { |
raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); |
raddr = memory_region_get_ram_addr(section->mr) |
|
+ memory_region_section_addr(section, addr); |
} |
} |
|
|
len -= l; |
len -= l; |
Line 4239 void cpu_physical_memory_unmap(void *buf
|
Line 3778 void cpu_physical_memory_unmap(void *buf
|
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr, |
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr, |
enum device_endian endian) |
enum device_endian endian) |
{ |
{ |
int io_index; |
|
uint8_t *ptr; |
uint8_t *ptr; |
uint32_t val; |
uint32_t val; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
section = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && |
if (!(memory_region_is_ram(section->mr) || |
!(pd & IO_MEM_ROMD)) { |
memory_region_is_romd(section->mr))) { |
/* I/O case */ |
/* I/O case */ |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr = memory_region_section_addr(section, addr); |
if (p) |
val = io_mem_read(section->mr, addr, 4); |
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
|
val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); |
|
#if defined(TARGET_WORDS_BIGENDIAN) |
#if defined(TARGET_WORDS_BIGENDIAN) |
if (endian == DEVICE_LITTLE_ENDIAN) { |
if (endian == DEVICE_LITTLE_ENDIAN) { |
val = bswap32(val); |
val = bswap32(val); |
Line 4270 static inline uint32_t ldl_phys_internal
|
Line 3800 static inline uint32_t ldl_phys_internal
|
#endif |
#endif |
} else { |
} else { |
/* RAM case */ |
/* RAM case */ |
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + |
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) |
(addr & ~TARGET_PAGE_MASK); |
& TARGET_PAGE_MASK) |
|
+ memory_region_section_addr(section, addr)); |
switch (endian) { |
switch (endian) { |
case DEVICE_LITTLE_ENDIAN: |
case DEVICE_LITTLE_ENDIAN: |
val = ldl_le_p(ptr); |
val = ldl_le_p(ptr); |
Line 4306 uint32_t ldl_be_phys(target_phys_addr_t
|
Line 3837 uint32_t ldl_be_phys(target_phys_addr_t
|
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr, |
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr, |
enum device_endian endian) |
enum device_endian endian) |
{ |
{ |
int io_index; |
|
uint8_t *ptr; |
uint8_t *ptr; |
uint64_t val; |
uint64_t val; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
section = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && |
if (!(memory_region_is_ram(section->mr) || |
!(pd & IO_MEM_ROMD)) { |
memory_region_is_romd(section->mr))) { |
/* I/O case */ |
/* I/O case */ |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr = memory_region_section_addr(section, addr); |
if (p) |
|
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
|
|
|
/* XXX This is broken when device endian != cpu endian. |
/* XXX This is broken when device endian != cpu endian. |
Fix and add "endian" variable check */ |
Fix and add "endian" variable check */ |
#ifdef TARGET_WORDS_BIGENDIAN |
#ifdef TARGET_WORDS_BIGENDIAN |
val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; |
val = io_mem_read(section->mr, addr, 4) << 32; |
val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4); |
val |= io_mem_read(section->mr, addr + 4, 4); |
#else |
#else |
val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); |
val = io_mem_read(section->mr, addr, 4); |
val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32; |
val |= io_mem_read(section->mr, addr + 4, 4) << 32; |
#endif |
#endif |
} else { |
} else { |
/* RAM case */ |
/* RAM case */ |
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + |
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) |
(addr & ~TARGET_PAGE_MASK); |
& TARGET_PAGE_MASK) |
|
+ memory_region_section_addr(section, addr)); |
switch (endian) { |
switch (endian) { |
case DEVICE_LITTLE_ENDIAN: |
case DEVICE_LITTLE_ENDIAN: |
val = ldq_le_p(ptr); |
val = ldq_le_p(ptr); |
Line 4381 uint32_t ldub_phys(target_phys_addr_t ad
|
Line 3904 uint32_t ldub_phys(target_phys_addr_t ad
|
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr, |
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr, |
enum device_endian endian) |
enum device_endian endian) |
{ |
{ |
int io_index; |
|
uint8_t *ptr; |
uint8_t *ptr; |
uint64_t val; |
uint64_t val; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
section = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && |
if (!(memory_region_is_ram(section->mr) || |
!(pd & IO_MEM_ROMD)) { |
memory_region_is_romd(section->mr))) { |
/* I/O case */ |
/* I/O case */ |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr = memory_region_section_addr(section, addr); |
if (p) |
val = io_mem_read(section->mr, addr, 2); |
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
|
val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); |
|
#if defined(TARGET_WORDS_BIGENDIAN) |
#if defined(TARGET_WORDS_BIGENDIAN) |
if (endian == DEVICE_LITTLE_ENDIAN) { |
if (endian == DEVICE_LITTLE_ENDIAN) { |
val = bswap16(val); |
val = bswap16(val); |
Line 4412 static inline uint32_t lduw_phys_interna
|
Line 3926 static inline uint32_t lduw_phys_interna
|
#endif |
#endif |
} else { |
} else { |
/* RAM case */ |
/* RAM case */ |
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + |
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) |
(addr & ~TARGET_PAGE_MASK); |
& TARGET_PAGE_MASK) |
|
+ memory_region_section_addr(section, addr)); |
switch (endian) { |
switch (endian) { |
case DEVICE_LITTLE_ENDIAN: |
case DEVICE_LITTLE_ENDIAN: |
val = lduw_le_p(ptr); |
val = lduw_le_p(ptr); |
Line 4449 uint32_t lduw_be_phys(target_phys_addr_t
|
Line 3964 uint32_t lduw_be_phys(target_phys_addr_t
|
bits are used to track modified PTEs */ |
bits are used to track modified PTEs */ |
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) |
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) |
{ |
{ |
int io_index; |
|
uint8_t *ptr; |
uint8_t *ptr; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
section = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
if (!memory_region_is_ram(section->mr) || section->readonly) { |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr = memory_region_section_addr(section, addr); |
if (p) |
if (memory_region_is_ram(section->mr)) { |
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
section = &phys_sections[phys_section_rom]; |
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); |
} |
|
io_mem_write(section->mr, addr, val, 4); |
} else { |
} else { |
unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); |
unsigned long addr1 = (memory_region_get_ram_addr(section->mr) |
|
& TARGET_PAGE_MASK) |
|
+ memory_region_section_addr(section, addr); |
ptr = qemu_get_ram_ptr(addr1); |
ptr = qemu_get_ram_ptr(addr1); |
stl_p(ptr, val); |
stl_p(ptr, val); |
|
|
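[Editor's note] stl_phys_notdirty stores into guest RAM without the dirty-page bookkeeping the regular store path performs; per the comment above the function, that is wanted when QEMU itself updates accessed/dirty bits in a guest PTE and the write should not register as a guest-side modification of the page. A toy illustration of the distinction (invented names, not QEMU's dirty-tracking code):

    #include <stdint.h>
    #include <string.h>

    #define TOY_PAGE_BITS 12
    #define TOY_PAGES     16

    static uint8_t toy_ram[TOY_PAGES << TOY_PAGE_BITS];
    static uint8_t toy_dirty[TOY_PAGES];          /* one flag byte per page */

    /* Normal store: the page is flagged so cached translations can be
       invalidated and dirty-log consumers (display, migration) notice. */
    static void toy_stl_phys(uint32_t addr, uint32_t val)
    {
        memcpy(&toy_ram[addr], &val, sizeof(val));
        toy_dirty[addr >> TOY_PAGE_BITS] = 0xff;
    }

    /* "notdirty" store: same write, bookkeeping deliberately skipped. */
    static void toy_stl_phys_notdirty(uint32_t addr, uint32_t val)
    {
        memcpy(&toy_ram[addr], &val, sizeof(val));
    }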
Line 4485 void stl_phys_notdirty(target_phys_addr_
|
Line 3996 void stl_phys_notdirty(target_phys_addr_
|
|
|
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val) |
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val) |
{ |
{ |
int io_index; |
|
uint8_t *ptr; |
uint8_t *ptr; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
section = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
if (!memory_region_is_ram(section->mr) || section->readonly) { |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr = memory_region_section_addr(section, addr); |
if (p) |
if (memory_region_is_ram(section->mr)) { |
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
section = &phys_sections[phys_section_rom]; |
|
} |
#ifdef TARGET_WORDS_BIGENDIAN |
#ifdef TARGET_WORDS_BIGENDIAN |
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32); |
io_mem_write(section->mr, addr, val >> 32, 4); |
io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val); |
io_mem_write(section->mr, addr + 4, (uint32_t)val, 4); |
#else |
#else |
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); |
io_mem_write(section->mr, addr, (uint32_t)val, 4); |
io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32); |
io_mem_write(section->mr, addr + 4, val >> 32, 4); |
#endif |
#endif |
} else { |
} else { |
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + |
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) |
(addr & ~TARGET_PAGE_MASK); |
& TARGET_PAGE_MASK) |
|
+ memory_region_section_addr(section, addr)); |
stq_p(ptr, val); |
stq_p(ptr, val); |
} |
} |
} |
} |
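[Editor's note] In the I/O branch of stq_phys_notdirty above, a 64-bit store is decomposed into two 32-bit io_mem_write calls, high word first on a big-endian target and low word first otherwise. A small standalone sketch of that split (names invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a 4-byte MMIO write handler. */
    static void toy_write4(uint64_t addr, uint32_t val)
    {
        printf("write4 @%#llx = %#x\n", (unsigned long long)addr, val);
    }

    /* Issue an 8-byte store as two 4-byte stores in target byte order. */
    static void toy_write8(uint64_t addr, uint64_t val, int target_is_big)
    {
        if (target_is_big) {
            toy_write4(addr,     (uint32_t)(val >> 32));
            toy_write4(addr + 4, (uint32_t)val);
        } else {
            toy_write4(addr,     (uint32_t)val);
            toy_write4(addr + 4, (uint32_t)(val >> 32));
        }
    }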
Line 4519 void stq_phys_notdirty(target_phys_addr_
|
Line 4025 void stq_phys_notdirty(target_phys_addr_
|
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val, |
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val, |
enum device_endian endian) |
enum device_endian endian) |
{ |
{ |
int io_index; |
|
uint8_t *ptr; |
uint8_t *ptr; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
section = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
if (!memory_region_is_ram(section->mr) || section->readonly) { |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr = memory_region_section_addr(section, addr); |
if (p) |
if (memory_region_is_ram(section->mr)) { |
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
section = &phys_sections[phys_section_rom]; |
|
} |
#if defined(TARGET_WORDS_BIGENDIAN) |
#if defined(TARGET_WORDS_BIGENDIAN) |
if (endian == DEVICE_LITTLE_ENDIAN) { |
if (endian == DEVICE_LITTLE_ENDIAN) { |
val = bswap32(val); |
val = bswap32(val); |
Line 4544 static inline void stl_phys_internal(tar
|
Line 4044 static inline void stl_phys_internal(tar
|
val = bswap32(val); |
val = bswap32(val); |
} |
} |
#endif |
#endif |
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); |
io_mem_write(section->mr, addr, val, 4); |
} else { |
} else { |
unsigned long addr1; |
unsigned long addr1; |
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); |
addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) |
|
+ memory_region_section_addr(section, addr); |
/* RAM case */ |
/* RAM case */ |
ptr = qemu_get_ram_ptr(addr1); |
ptr = qemu_get_ram_ptr(addr1); |
switch (endian) { |
switch (endian) { |
Line 4597 void stb_phys(target_phys_addr_t addr, u
|
Line 4098 void stb_phys(target_phys_addr_t addr, u
|
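[Editor's note] The public entry points elided around this hunk (stb_phys here, plus the lduw_be_phys and stq_be_phys families whose headers appear elsewhere in the diff) are thin wrappers: the width-specific *_internal helpers take a device_endian argument and the wrappers merely pin it down. A sketch of that pattern in the style of this file; treat the exact wrapper set as an assumption about the elided code:

    void stw_phys(target_phys_addr_t addr, uint32_t val)
    {
        stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
    }

    void stw_le_phys(target_phys_addr_t addr, uint32_t val)
    {
        stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
    }

    void stw_be_phys(target_phys_addr_t addr, uint32_t val)
    {
        stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
    }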
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val, |
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val, |
enum device_endian endian) |
enum device_endian endian) |
{ |
{ |
int io_index; |
|
uint8_t *ptr; |
uint8_t *ptr; |
unsigned long pd; |
MemoryRegionSection *section; |
PhysPageDesc *p; |
|
|
|
p = phys_page_find(addr >> TARGET_PAGE_BITS); |
section = phys_page_find(addr >> TARGET_PAGE_BITS); |
if (!p) { |
|
pd = IO_MEM_UNASSIGNED; |
|
} else { |
|
pd = p->phys_offset; |
|
} |
|
|
|
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
if (!memory_region_is_ram(section->mr) || section->readonly) { |
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
addr = memory_region_section_addr(section, addr); |
if (p) |
if (memory_region_is_ram(section->mr)) { |
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; |
section = &phys_sections[phys_section_rom]; |
|
} |
#if defined(TARGET_WORDS_BIGENDIAN) |
#if defined(TARGET_WORDS_BIGENDIAN) |
if (endian == DEVICE_LITTLE_ENDIAN) { |
if (endian == DEVICE_LITTLE_ENDIAN) { |
val = bswap16(val); |
val = bswap16(val); |
Line 4622 static inline void stw_phys_internal(tar
|
Line 4117 static inline void stw_phys_internal(tar
|
val = bswap16(val); |
val = bswap16(val); |
} |
} |
#endif |
#endif |
io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); |
io_mem_write(section->mr, addr, val, 2); |
} else { |
} else { |
unsigned long addr1; |
unsigned long addr1; |
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); |
addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) |
|
+ memory_region_section_addr(section, addr); |
/* RAM case */ |
/* RAM case */ |
ptr = qemu_get_ram_ptr(addr1); |
ptr = qemu_get_ram_ptr(addr1); |
switch (endian) { |
switch (endian) { |
Line 4684 void stq_be_phys(target_phys_addr_t addr
|
Line 4180 void stq_be_phys(target_phys_addr_t addr
|
} |
} |
|
|
/* virtual memory access for debug (includes writing to ROM) */ |
/* virtual memory access for debug (includes writing to ROM) */ |
int cpu_memory_rw_debug(CPUState *env, target_ulong addr, |
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, |
uint8_t *buf, int len, int is_write) |
uint8_t *buf, int len, int is_write) |
{ |
{ |
int l; |
int l; |
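[Editor's note] Only the signature change (CPUState to CPUArchState) of cpu_memory_rw_debug is shown; the body is elided. Its usual shape in exec.c of this era is a page-by-page loop that translates each guest-virtual page with the debug MMU hook and then uses the ROM-capable write path, which is what lets a debugger plant breakpoints even in ROM. A sketch under that assumption, details approximate:

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        if (phys_addr == -1) {
            return -1;                    /* page not mapped: report failure */
        }
        l = (page + TARGET_PAGE_SIZE) - addr;    /* clamp to page boundary */
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;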
Line 4715 int cpu_memory_rw_debug(CPUState *env, t
|
Line 4211 int cpu_memory_rw_debug(CPUState *env, t
|
|
|
/* in deterministic execution mode, instructions doing device I/Os |
/* in deterministic execution mode, instructions doing device I/Os |
must be at the end of the TB */ |
must be at the end of the TB */ |
void cpu_io_recompile(CPUState *env, void *retaddr) |
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr) |
{ |
{ |
TranslationBlock *tb; |
TranslationBlock *tb; |
uint32_t n, cflags; |
uint32_t n, cflags; |
target_ulong pc, cs_base; |
target_ulong pc, cs_base; |
uint64_t flags; |
uint64_t flags; |
|
|
tb = tb_find_pc((unsigned long)retaddr); |
tb = tb_find_pc(retaddr); |
if (!tb) { |
if (!tb) { |
cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", |
cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", |
retaddr); |
(void *)retaddr); |
} |
} |
n = env->icount_decr.u16.low + tb->icount; |
n = env->icount_decr.u16.low + tb->icount; |
cpu_restore_state(tb, env, (unsigned long)retaddr); |
cpu_restore_state(tb, env, retaddr); |
/* Calculate how many instructions had been executed before the fault |
/* Calculate how many instructions had been executed before the fault |
occurred. */ |
occurred. */ |
n = n - env->icount_decr.u16.low; |
n = n - env->icount_decr.u16.low; |
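[Editor's note] The rest of cpu_io_recompile is elided by the diff. Conceptually, once n (how many guest instructions ran before the I/O access) is known, the TB is regenerated so that the I/O instruction becomes its last one and execution restarts there, keeping the instruction counter exact at the moment of the device access. A rough outline under that reading; CF_LAST_IO is the cflags bit QEMU uses for this, other details approximate:

    cflags = n | CF_LAST_IO;           /* stop the new translation after insn n */
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);        /* retire the old, too-long block */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    cpu_resume_from_signal(env, NULL); /* re-enter guest code; does not return */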
Line 4826 void dump_exec_info(FILE *f, fprintf_fun
|
Line 4322 void dump_exec_info(FILE *f, fprintf_fun
|
tcg_dump_info(f, cpu_fprintf); |
tcg_dump_info(f, cpu_fprintf); |
} |
} |
|
|
#define MMUSUFFIX _cmmu |
/* |
#undef GETPC |
* A helper function for the _utterly broken_ virtio device model to find out if |
#define GETPC() NULL |
* it's running on a big endian machine. Don't do this at home kids! |
#define env cpu_single_env |
*/ |
#define SOFTMMU_CODE_ACCESS |
bool virtio_is_big_endian(void); |
|
bool virtio_is_big_endian(void) |
#define SHIFT 0 |
{ |
#include "softmmu_template.h" |
#if defined(TARGET_WORDS_BIGENDIAN) |
|
return true; |
#define SHIFT 1 |
#else |
#include "softmmu_template.h" |
return false; |
|
#endif |
#define SHIFT 2 |
} |
#include "softmmu_template.h" |
|
|
|
#define SHIFT 3 |
|
#include "softmmu_template.h" |
|
|
|
#undef env |
|
|
|
#endif |
#endif |
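[Editor's note] virtio_is_big_endian, added at the end of this revision, simply reports the target's compile-time byte order so legacy virtio code can marshal guest-visible fields. A hypothetical caller, not part of this file, might look like the following (cpu_to_be16 and cpu_to_le16 are QEMU's byte-order helpers; the wrapper name is invented):

    /* Hypothetical helper: put a 16-bit config value into guest byte order. */
    static uint16_t cfg_to_guest16(uint16_t host_val)
    {
        return virtio_is_big_endian() ? cpu_to_be16(host_val)
                                      : cpu_to_le16(host_val);
    }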