File:  [Qemu by Fabrice Bellard] / qemu / cpu-all.h
Revision 1.1.1.15 (vendor branch): download - view: text, annotated - select for diffs
Tue Apr 24 19:34:20 2018 UTC (2 years, 11 months ago) by root
Branches: qemu, MAIN
CVS tags: qemu1101, HEAD
qemu 1.1.1

    1: /*
    2:  * defines common to all virtual CPUs
    3:  *
    4:  *  Copyright (c) 2003 Fabrice Bellard
    5:  *
    6:  * This library is free software; you can redistribute it and/or
    7:  * modify it under the terms of the GNU Lesser General Public
    8:  * License as published by the Free Software Foundation; either
    9:  * version 2 of the License, or (at your option) any later version.
   10:  *
   11:  * This library is distributed in the hope that it will be useful,
   12:  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   13:  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   14:  * Lesser General Public License for more details.
   15:  *
   16:  * You should have received a copy of the GNU Lesser General Public
   17:  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
   18:  */
   19: #ifndef CPU_ALL_H
   20: #define CPU_ALL_H
   21: 
   22: #include "qemu-common.h"
   23: #include "qemu-tls.h"
   24: #include "cpu-common.h"
   25: 
   26: /* some important defines:
   27:  *
   28:  * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
   29:  * memory accesses.
   30:  *
   31:  * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
   32:  * otherwise little endian.
   33:  *
   34:  * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
   35:  *
   36:  * TARGET_WORDS_BIGENDIAN : same for target cpu
   37:  */
   38: 
   39: #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
   40: #define BSWAP_NEEDED
   41: #endif
   42: 
   43: #ifdef BSWAP_NEEDED
   44: 
   45: static inline uint16_t tswap16(uint16_t s)
   46: {
   47:     return bswap16(s);
   48: }
   49: 
   50: static inline uint32_t tswap32(uint32_t s)
   51: {
   52:     return bswap32(s);
   53: }
   54: 
   55: static inline uint64_t tswap64(uint64_t s)
   56: {
   57:     return bswap64(s);
   58: }
   59: 
   60: static inline void tswap16s(uint16_t *s)
   61: {
   62:     *s = bswap16(*s);
   63: }
   64: 
   65: static inline void tswap32s(uint32_t *s)
   66: {
   67:     *s = bswap32(*s);
   68: }
   69: 
   70: static inline void tswap64s(uint64_t *s)
   71: {
   72:     *s = bswap64(*s);
   73: }
   74: 
   75: #else
   76: 
   77: static inline uint16_t tswap16(uint16_t s)
   78: {
   79:     return s;
   80: }
   81: 
   82: static inline uint32_t tswap32(uint32_t s)
   83: {
   84:     return s;
   85: }
   86: 
   87: static inline uint64_t tswap64(uint64_t s)
   88: {
   89:     return s;
   90: }
   91: 
   92: static inline void tswap16s(uint16_t *s)
   93: {
   94: }
   95: 
   96: static inline void tswap32s(uint32_t *s)
   97: {
   98: }
   99: 
  100: static inline void tswap64s(uint64_t *s)
  101: {
  102: }
  103: 
  104: #endif
  105: 
/* target_long-sized variants: tswapl/tswapls follow the conditional
   tswap32/tswap64 helpers above, while bswaptls maps to the bswap*s
   helpers (which presumably always swap regardless of endianness
   difference -- confirm against bswap.h). */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
  115: 
  116: /* CPU memory access without any memory or io remapping */
  117: 
  118: /*
  119:  * the generic syntax for the memory accesses is:
  120:  *
  121:  * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
  122:  *
  123:  * store: st{type}{size}{endian}_{access_type}(ptr, val)
  124:  *
  125:  * type is:
  126:  * (empty): integer access
  127:  *   f    : float access
  128:  *
  129:  * sign is:
  130:  * (empty): for floats or 32 bit size
  131:  *   u    : unsigned
  132:  *   s    : signed
  133:  *
  134:  * size is:
  135:  *   b: 8 bits
  136:  *   w: 16 bits
  137:  *   l: 32 bits
  138:  *   q: 64 bits
  139:  *
  140:  * endian is:
  141:  * (empty): target cpu endianness or 8 bit access
  142:  *   r    : reversed target cpu endianness (not implemented yet)
  143:  *   be   : big endian (not implemented yet)
  144:  *   le   : little endian (not implemented yet)
  145:  *
  146:  * access_type is:
  147:  *   raw    : host memory access
  148:  *   user   : user mode access using soft MMU
  149:  *   kernel : kernel mode access using soft MMU
  150:  */
  151: 
/* target-endianness CPU memory access functions */
/* Map the generic ld*_p/st*_p names onto the explicit big-endian or
   little-endian accessor family that matches the target's byte order
   (the *_be_p/*_le_p implementations live elsewhere). */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
  178: 
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;    /* host offset added to guest addresses */
extern int have_guest_base;         /* NOTE(review): presumably set when the
                                       user pinned guest_base -- confirm */
extern unsigned long reserved_va;   /* host VA reserved for the guest; 0 if none */
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
/* g2h: translate a guest address into a host pointer by adding GUEST_BASE. */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))

/* h2g_valid: nonzero if host address x maps back into the guest address
   space.  Trivially true when the host address space is no wider than
   the target's. */
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
/* NOTE: ({ ... }) statement expressions are a GCC extension. */
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
    (!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif

/* h2g: translate a host pointer back to a guest address; asserts that
   the address actually lies inside the guest address space. */
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})

/* Store/load address translation used by the *_raw accessors below. */
#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif
  228: 
/* "_raw" accessors: direct host-memory access in target byte order,
   with the address first translated by laddr()/saddr() (a plain cast
   in system mode, a GUEST_BASE offset in user mode). */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
  243: 
  244: 
  245: #if defined(CONFIG_USER_ONLY)
  246: 
  247: /* if user mode, no other memory access functions */
  248: #define ldub(p) ldub_raw(p)
  249: #define ldsb(p) ldsb_raw(p)
  250: #define lduw(p) lduw_raw(p)
  251: #define ldsw(p) ldsw_raw(p)
  252: #define ldl(p) ldl_raw(p)
  253: #define ldq(p) ldq_raw(p)
  254: #define ldfl(p) ldfl_raw(p)
  255: #define ldfq(p) ldfq_raw(p)
  256: #define stb(p, v) stb_raw(p, v)
  257: #define stw(p, v) stw_raw(p, v)
  258: #define stl(p, v) stl_raw(p, v)
  259: #define stq(p, v) stq_raw(p, v)
  260: #define stfl(p, v) stfl_raw(p, v)
  261: #define stfq(p, v) stfq_raw(p, v)
  262: 
  263: #ifndef CONFIG_TCG_PASS_AREG0
  264: #define ldub_code(p) ldub_raw(p)
  265: #define ldsb_code(p) ldsb_raw(p)
  266: #define lduw_code(p) lduw_raw(p)
  267: #define ldsw_code(p) ldsw_raw(p)
  268: #define ldl_code(p) ldl_raw(p)
  269: #define ldq_code(p) ldq_raw(p)
  270: #else
  271: #define cpu_ldub_code(env1, p) ldub_raw(p)
  272: #define cpu_ldsb_code(env1, p) ldsb_raw(p)
  273: #define cpu_lduw_code(env1, p) lduw_raw(p)
  274: #define cpu_ldsw_code(env1, p) ldsw_raw(p)
  275: #define cpu_ldl_code(env1, p) ldl_raw(p)
  276: #define cpu_ldq_code(env1, p) ldq_raw(p)
  277: #endif
  278: 
  279: #define ldub_kernel(p) ldub_raw(p)
  280: #define ldsb_kernel(p) ldsb_raw(p)
  281: #define lduw_kernel(p) lduw_raw(p)
  282: #define ldsw_kernel(p) ldsw_raw(p)
  283: #define ldl_kernel(p) ldl_raw(p)
  284: #define ldq_kernel(p) ldq_raw(p)
  285: #define ldfl_kernel(p) ldfl_raw(p)
  286: #define ldfq_kernel(p) ldfq_raw(p)
  287: #define stb_kernel(p, v) stb_raw(p, v)
  288: #define stw_kernel(p, v) stw_raw(p, v)
  289: #define stl_kernel(p, v) stl_raw(p, v)
  290: #define stq_kernel(p, v) stq_raw(p, v)
  291: #define stfl_kernel(p, v) stfl_raw(p, v)
  292: #define stfq_kernel(p, vt) stfq_raw(p, v)
  293: 
  294: #endif /* defined(CONFIG_USER_ONLY) */
  295: 
  296: /* page related stuff */
  297: 
  298: #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
  299: #define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
  300: #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
  301: 
  302: /* ??? These should be the larger of uintptr_t and target_ulong.  */
  303: extern uintptr_t qemu_real_host_page_size;
  304: extern uintptr_t qemu_host_page_size;
  305: extern uintptr_t qemu_host_page_mask;
  306: 
  307: #define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
  308: 
  309: /* same as PROT_xxx */
  310: #define PAGE_READ      0x0001
  311: #define PAGE_WRITE     0x0002
  312: #define PAGE_EXEC      0x0004
  313: #define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
  314: #define PAGE_VALID     0x0008
  315: /* original state of the write flag (used when tracking self-modifying
  316:    code */
  317: #define PAGE_WRITE_ORG 0x0010
  318: #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
  319: /* FIXME: Code that sets/uses this is broken and needs to go away.  */
  320: #define PAGE_RESERVED  0x0020
  321: #endif
  322: 
#if defined(CONFIG_USER_ONLY)
/* Dump the guest page mappings to f (user-mode emulation only). */
void page_dump(FILE *f);

/* Callback for walk_memory_regions(); arguments are presumably
   (opaque, start, end, page flags) -- confirm against the
   implementation. */
typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif

/* Duplicate a CPU state; qemu_get_cpu returns the state for a CPU index. */
CPUArchState *cpu_copy(CPUArchState *env);
CPUArchState *qemu_get_cpu(int cpu);

/* Extra bit accepted in the flags argument of cpu_dump_state(). */
#define CPU_DUMP_CODE 0x00010000

void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags);
void cpu_dump_statistics(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

/* Report a fatal emulation error (printf-style) and terminate; never
   returns. */
void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
/* Head of the global CPU list. */
extern CPUArchState *first_cpu;
/* Thread-local pointer to the CPU state current on this thread. */
DECLARE_TLS(CPUArchState *,cpu_single_env);
#define cpu_single_env tls_var(cpu_single_env)
  350: 
/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0400
#define CPU_INTERRUPT_TGT_INT_2   0x0800
#define CPU_INTERRUPT_TGT_INT_3   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping:
   only the external (device) interrupts above -- the internal
   TGT_INT_* bits remain deliverable while stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
  400: 
  401: #ifndef CONFIG_USER_ONLY
  402: typedef void (*CPUInterruptHandler)(CPUArchState *, int);
  403: 
  404: extern CPUInterruptHandler cpu_interrupt_handler;
  405: 
  406: static inline void cpu_interrupt(CPUArchState *s, int mask)
  407: {
  408:     cpu_interrupt_handler(s, mask);
  409: }
  410: #else /* USER_ONLY */
  411: void cpu_interrupt(CPUArchState *env, int mask);
  412: #endif /* USER_ONLY */
  413: 
/* Clear the interrupt bits in mask on the given CPU. */
void cpu_reset_interrupt(CPUArchState *env, int mask);

/* Request that the CPU leave its execution loop. */
void cpu_exit(CPUArchState *s);

bool qemu_cpu_has_work(CPUArchState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

/* Breakpoint/watchpoint management.  The insert functions optionally
   return the created object through the last argument (may be NULL --
   confirm against the implementation); the *_remove_all variants take
   a BP_* mask selecting which owners' entries to drop. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUArchState *env, int mask);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUArchState *env, int enabled);
void cpu_state_reset(CPUArchState *s);
int cpu_is_stopped(CPUArchState *env);
void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data);
  449: 
/* Log mask bits; combined into the argument of cpu_set_log() and
   produced by cpu_str_to_log_mask(). */
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;          /* CPU_LOG_* bit(s) enabled by this item */
    const char *name;  /* item name */
    const char *help;  /* description text */
} CPULogItem;

/* Table of all known log items (defined elsewhere). */
extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
/* Convert a log-item string into a CPU_LOG_* mask. */
int cpu_str_to_log_mask(const char *str);
  473: 
#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);

/* memory API */

extern int phys_ram_fd;
extern ram_addr_t ram_size;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK   (1 << 0)

/* One contiguous chunk of guest RAM. */
typedef struct RAMBlock {
    struct MemoryRegion *mr;      /* owning memory region */
    uint8_t *host;                /* host pointer to the block's memory */
    ram_addr_t offset;            /* offset within the ram_addr_t space */
    ram_addr_t length;            /* size of the block in bytes */
    uint32_t flags;               /* RAM_* flags (e.g. RAM_PREALLOC_MASK) */
    char idstr[256];              /* unique identifier string */
    QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;                       /* NOTE(review): presumably the backing
                                     file descriptor for file-backed RAM
                                     -- confirm */
#endif
} RAMBlock;

/* Global list of RAM blocks plus the dirty-page bitmap. */
typedef struct RAMList {
    uint8_t *phys_dirty;          /* per-page dirty flags */
    QLIST_HEAD(, RAMBlock) blocks;
} RAMList;
extern RAMList ram_list;

extern const char *mem_path;
extern int mem_prealloc;

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO        (1 << 5)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
#endif /* !CONFIG_USER_ONLY */
  523: 
/* Read (is_write == 0) or write (is_write != 0) len bytes of guest
   memory at addr, for debugger use. */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#endif /* CPU_ALL_H */

unix.superglobalmegacorp.com