File:  [Qemu by Fabrice Bellard] / qemu / cpu-all.h
Revision 1.1.1.13 (vendor branch): download - view: text, annotated - select for diffs
Tue Apr 24 18:56:05 2018 UTC (3 years, 1 month ago) by root
Branches: qemu, MAIN
CVS tags: qemu1000, qemu0151, HEAD
qemu 0.15.1

    1: /*
    2:  * defines common to all virtual CPUs
    3:  *
    4:  *  Copyright (c) 2003 Fabrice Bellard
    5:  *
    6:  * This library is free software; you can redistribute it and/or
    7:  * modify it under the terms of the GNU Lesser General Public
    8:  * License as published by the Free Software Foundation; either
    9:  * version 2 of the License, or (at your option) any later version.
   10:  *
   11:  * This library is distributed in the hope that it will be useful,
   12:  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   13:  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   14:  * Lesser General Public License for more details.
   15:  *
   16:  * You should have received a copy of the GNU Lesser General Public
   17:  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
   18:  */
   19: #ifndef CPU_ALL_H
   20: #define CPU_ALL_H
   21: 
   22: #include "qemu-common.h"
   23: #include "cpu-common.h"
   24: 
   25: /* some important defines:
   26:  *
   27:  * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
   28:  * memory accesses.
   29:  *
   30:  * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
   31:  * otherwise little endian.
   32:  *
   33:  * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
   34:  *
   35:  * TARGET_WORDS_BIGENDIAN : same for target cpu
   36:  */
   37: 
   38: #include "softfloat.h"
   39: 
/* A byte swap is needed whenever host and target byte order differ. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

/* tswap*: convert a value between target and host byte order.  The
   conversion is its own inverse, so the same helper works in both
   directions. */

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

/* tswap*s: in-place variants of the above. */

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* Host and target share the same byte order: all swaps are no-ops. */

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
  106: 
/* tswapl/tswapls/bswaptls operate on target_ulong sized quantities;
   pick the 32 or 64 bit flavour to match TARGET_LONG_SIZE. */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
  116: 
/* Unions used to access the raw bit patterns of the softfloat types. */

/* 32 bit float <-> 32 bit integer. */
typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian ! */
/* 64 bit float <-> two 32 bit halves / one 64 bit integer.  The order
   of the 32 bit halves follows the host byte order. */
typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

/* 80 bit extended precision float <-> a 64 bit lower part and a 16 bit
   upper part. */
typedef union {
     floatx80 d;
     struct {
         uint64_t lower;
         uint16_t upper;
     } l;
} CPU_LDoubleU;

/* 128 bit float <-> four 32 bit words or two 64 bit halves, ordered
   according to the host byte order. */
typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
  174: 
  175: /* CPU memory access without any memory or io remapping */
  176: 
  177: /*
  178:  * the generic syntax for the memory accesses is:
  179:  *
  180:  * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
  181:  *
  182:  * store: st{type}{size}{endian}_{access_type}(ptr, val)
  183:  *
  184:  * type is:
  185:  * (empty): integer access
  186:  *   f    : float access
  187:  *
  188:  * sign is:
  189:  * (empty): for floats or 32 bit size
  190:  *   u    : unsigned
  191:  *   s    : signed
  192:  *
  193:  * size is:
  194:  *   b: 8 bits
  195:  *   w: 16 bits
  196:  *   l: 32 bits
  197:  *   q: 64 bits
  198:  *
  199:  * endian is:
  200:  * (empty): target cpu endianness or 8 bit access
  201:  *   r    : reversed target cpu endianness (not implemented yet)
  202:  *   be   : big endian (not implemented yet)
  203:  *   le   : little endian (not implemented yet)
  204:  *
  205:  * access_type is:
  206:  *   raw    : host memory access
  207:  *   user   : user mode access using soft MMU
  208:  *   kernel : kernel mode access using soft MMU
  209:  */
  210: static inline int ldub_p(const void *ptr)
  211: {
  212:     return *(uint8_t *)ptr;
  213: }
  214: 
  215: static inline int ldsb_p(const void *ptr)
  216: {
  217:     return *(int8_t *)ptr;
  218: }
  219: 
  220: static inline void stb_p(void *ptr, int v)
  221: {
  222:     *(uint8_t *)ptr = v;
  223: }
  224: 
/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */

/* Load an unsigned 16 bit little endian value: byte-reversed load on
   PPC, byte-by-byte assembly elsewhere. */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

/* Same as lduw_le_p but sign-extend the 16 bit value. */
static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

/* Load a 32 bit little endian value. */
static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

/* Load a 64 bit little endian value as two 32 bit halves. */
static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

/* Store a 16 bit value in little endian byte order. */
static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

/* Store a 32 bit value in little endian byte order. */
static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

/* Store a 64 bit value in little endian byte order. */
static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

/* Load a 32 bit float stored in little endian byte order (bits are
   moved through a union to avoid aliasing problems). */
static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

/* Store a 32 bit float in little endian byte order. */
static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

/* Load a 64 bit float stored in little endian byte order. */
static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

/* Store a 64 bit float in little endian byte order. */
static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

/* Little endian host that tolerates unaligned accesses: plain loads
   and stores are already in the right byte order. */

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif
  404: 
#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for big endian unaligned accesses */

/* Load an unsigned 16 bit big endian value: byte swap with inline asm
   on i386, byte by byte elsewhere. */
static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

/* Same as lduw_be_p but sign-extend the 16 bit value. */
static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

/* Load a 32 bit big endian value. */
static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

/* Load a 64 bit big endian value as two 32 bit halves. */
static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a,b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a<<32)|b);
}

/* Store a 16 bit value in big endian byte order. */
static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

/* Store a 32 bit value in big endian byte order. */
static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

/* Store a 64 bit value in big endian byte order. */
static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

/* Load a 32 bit float stored in big endian byte order (bits are moved
   through a union to avoid aliasing problems). */
static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

/* Store a 32 bit float in big endian byte order. */
static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

/* Load a 64 bit float stored in big endian byte order. */
static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

/* Store a 64 bit float in big endian byte order. */
static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

/* Big endian host that tolerates unaligned accesses: plain loads and
   stores are already in the right byte order. */

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif
  594: 
/* target CPU memory access functions */
/* The unsuffixed accessors use the target's byte order. */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
  621: 
  622: /* MMU memory access macros */
  623: 
#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
/* g2h: translate a guest virtual address to a host pointer by adding
   the guest base offset. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))

/* h2g_valid: true if the host address maps back into the guest address
   space (trivially true when the host is not wider than the target). */
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
})
#endif

/* h2g: translate a host pointer back to a guest virtual address,
   asserting that the result is representable. */
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif
  670: 
/* The _raw accessors take a target address, translate it with
   laddr()/saddr() and perform a target byte order host access. */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
  685: 
  686: 
  687: #if defined(CONFIG_USER_ONLY)
  688: 
  689: /* if user mode, no other memory access functions */
  690: #define ldub(p) ldub_raw(p)
  691: #define ldsb(p) ldsb_raw(p)
  692: #define lduw(p) lduw_raw(p)
  693: #define ldsw(p) ldsw_raw(p)
  694: #define ldl(p) ldl_raw(p)
  695: #define ldq(p) ldq_raw(p)
  696: #define ldfl(p) ldfl_raw(p)
  697: #define ldfq(p) ldfq_raw(p)
  698: #define stb(p, v) stb_raw(p, v)
  699: #define stw(p, v) stw_raw(p, v)
  700: #define stl(p, v) stl_raw(p, v)
  701: #define stq(p, v) stq_raw(p, v)
  702: #define stfl(p, v) stfl_raw(p, v)
  703: #define stfq(p, v) stfq_raw(p, v)
  704: 
  705: #define ldub_code(p) ldub_raw(p)
  706: #define ldsb_code(p) ldsb_raw(p)
  707: #define lduw_code(p) lduw_raw(p)
  708: #define ldsw_code(p) ldsw_raw(p)
  709: #define ldl_code(p) ldl_raw(p)
  710: #define ldq_code(p) ldq_raw(p)
  711: 
  712: #define ldub_kernel(p) ldub_raw(p)
  713: #define ldsb_kernel(p) ldsb_raw(p)
  714: #define lduw_kernel(p) lduw_raw(p)
  715: #define ldsw_kernel(p) ldsw_raw(p)
  716: #define ldl_kernel(p) ldl_raw(p)
  717: #define ldq_kernel(p) ldq_raw(p)
  718: #define ldfl_kernel(p) ldfl_raw(p)
  719: #define ldfq_kernel(p) ldfq_raw(p)
  720: #define stb_kernel(p, v) stb_raw(p, v)
  721: #define stw_kernel(p, v) stw_raw(p, v)
  722: #define stl_kernel(p, v) stl_raw(p, v)
  723: #define stq_kernel(p, v) stq_raw(p, v)
  724: #define stfl_kernel(p, v) stfl_raw(p, v)
  725: #define stfq_kernel(p, vt) stfq_raw(p, v)
  726: 
  727: #endif /* defined(CONFIG_USER_ONLY) */
  728: 
  729: /* page related stuff */
  730: 
  731: #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
  732: #define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
  733: #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
  734: 
  735: /* ??? These should be the larger of unsigned long and target_ulong.  */
  736: extern unsigned long qemu_real_host_page_size;
  737: extern unsigned long qemu_host_page_bits;
  738: extern unsigned long qemu_host_page_size;
  739: extern unsigned long qemu_host_page_mask;
  740: 
  741: #define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
  742: 
  743: /* same as PROT_xxx */
  744: #define PAGE_READ      0x0001
  745: #define PAGE_WRITE     0x0002
  746: #define PAGE_EXEC      0x0004
  747: #define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
  748: #define PAGE_VALID     0x0008
  749: /* original state of the write flag (used when tracking self-modifying
  750:    code */
  751: #define PAGE_WRITE_ORG 0x0010
  752: #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
  753: /* FIXME: Code that sets/uses this is broken and needs to go away.  */
  754: #define PAGE_RESERVED  0x0020
  755: #endif
  756: 
  757: #if defined(CONFIG_USER_ONLY)
  758: void page_dump(FILE *f);
  759: 
  760: typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
  761:                                       abi_ulong, unsigned long);
  762: int walk_memory_regions(void *, walk_memory_regions_fn);
  763: 
  764: int page_get_flags(target_ulong address);
  765: void page_set_flags(target_ulong start, target_ulong end, int flags);
  766: int page_check_range(target_ulong start, target_ulong len, int flags);
  767: #endif
  768: 
  769: CPUState *cpu_copy(CPUState *env);
  770: CPUState *qemu_get_cpu(int cpu);
  771: 
  772: #define CPU_DUMP_CODE 0x00010000
  773: 
  774: void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
  775:                     int flags);
  776: void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
  777:                          int flags);
  778: 
  779: void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
  780:     GCC_FMT_ATTR(2, 3);
  781: extern CPUState *first_cpu;
  782: extern CPUState *cpu_single_env;
  783: 
  784: /* Flags for use in ENV->INTERRUPT_PENDING.
  785: 
  786:    The numbers assigned here are non-sequential in order to preserve
  787:    binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
  788:    previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
  789:    the vmstate dump.  */
  790: 
  791: /* External hardware interrupt pending.  This is typically used for
  792:    interrupts from devices.  */
  793: #define CPU_INTERRUPT_HARD        0x0002
  794: 
  795: /* Exit the current TB.  This is typically used when some system-level device
  796:    makes some change to the memory mapping.  E.g. the a20 line change.  */
  797: #define CPU_INTERRUPT_EXITTB      0x0004
  798: 
  799: /* Halt the CPU.  */
  800: #define CPU_INTERRUPT_HALT        0x0020
  801: 
  802: /* Debug event pending.  */
  803: #define CPU_INTERRUPT_DEBUG       0x0080
  804: 
  805: /* Several target-specific external hardware interrupts.  Each target/cpu.h
  806:    should define proper names based on these defines.  */
  807: #define CPU_INTERRUPT_TGT_EXT_0   0x0008
  808: #define CPU_INTERRUPT_TGT_EXT_1   0x0010
  809: #define CPU_INTERRUPT_TGT_EXT_2   0x0040
  810: #define CPU_INTERRUPT_TGT_EXT_3   0x0200
  811: #define CPU_INTERRUPT_TGT_EXT_4   0x1000
  812: 
  813: /* Several target-specific internal interrupts.  These differ from the
  814:    preceding target-specific interrupts in that they are intended to
  815:    originate from within the cpu itself, typically in response to some
  816:    instruction being executed.  These, therefore, are not masked while
  817:    single-stepping within the debugger.  */
  818: #define CPU_INTERRUPT_TGT_INT_0   0x0100
  819: #define CPU_INTERRUPT_TGT_INT_1   0x0400
  820: #define CPU_INTERRUPT_TGT_INT_2   0x0800
  821: 
  822: /* First unused bit: 0x2000.  */
  823: 
  824: /* The set of all bits that should be masked when single-stepping.  */
  825: #define CPU_INTERRUPT_SSTEP_MASK \
  826:     (CPU_INTERRUPT_HARD          \
  827:      | CPU_INTERRUPT_TGT_EXT_0   \
  828:      | CPU_INTERRUPT_TGT_EXT_1   \
  829:      | CPU_INTERRUPT_TGT_EXT_2   \
  830:      | CPU_INTERRUPT_TGT_EXT_3   \
  831:      | CPU_INTERRUPT_TGT_EXT_4)
  832: 
#ifndef CONFIG_USER_ONLY
/* System emulation: interrupt delivery goes through a global handler
   function pointer so the implementation can be substituted. */
typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/* Raise the interrupt bits in 'mask' on CPU 's' by calling the
   currently installed handler. */
static inline void cpu_interrupt(CPUState *s, int mask)
{
    cpu_interrupt_handler(s, mask);
}
#else /* USER_ONLY */
void cpu_interrupt(CPUState *env, int mask);
#endif /* USER_ONLY */
  845: 
  846: void cpu_reset_interrupt(CPUState *env, int mask);
  847: 
  848: void cpu_exit(CPUState *s);
  849: 
  850: bool qemu_cpu_has_work(CPUState *env);
  851: 
  852: /* Breakpoint/watchpoint flags */
  853: #define BP_MEM_READ           0x01
  854: #define BP_MEM_WRITE          0x02
  855: #define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
  856: #define BP_STOP_BEFORE_ACCESS 0x04
  857: #define BP_WATCHPOINT_HIT     0x08
  858: #define BP_GDB                0x10
  859: #define BP_CPU                0x20
  860: 
  861: int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
  862:                           CPUBreakpoint **breakpoint);
  863: int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
  864: void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
  865: void cpu_breakpoint_remove_all(CPUState *env, int mask);
  866: int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
  867:                           int flags, CPUWatchpoint **watchpoint);
  868: int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
  869:                           target_ulong len, int flags);
  870: void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
  871: void cpu_watchpoint_remove_all(CPUState *env, int mask);
  872: 
#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
  876: 
  877: void cpu_single_step(CPUState *env, int enabled);
  878: void cpu_reset(CPUState *s);
  879: int cpu_is_stopped(CPUState *env);
  880: void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);
  881: 
  882: #define CPU_LOG_TB_OUT_ASM (1 << 0)
  883: #define CPU_LOG_TB_IN_ASM  (1 << 1)
  884: #define CPU_LOG_TB_OP      (1 << 2)
  885: #define CPU_LOG_TB_OP_OPT  (1 << 3)
  886: #define CPU_LOG_INT        (1 << 4)
  887: #define CPU_LOG_EXEC       (1 << 5)
  888: #define CPU_LOG_PCALL      (1 << 6)
  889: #define CPU_LOG_IOPORT     (1 << 7)
  890: #define CPU_LOG_TB_CPU     (1 << 8)
  891: #define CPU_LOG_RESET      (1 << 9)
  892: 
/* define log items */
typedef struct CPULogItem {
    int mask;           /* CPU_LOG_* mask bit(s) this item controls */
    const char *name;   /* item name */
    const char *help;   /* short human readable description */
} CPULogItem;
  899: 
  900: extern const CPULogItem cpu_log_items[];
  901: 
  902: void cpu_set_log(int log_flags);
  903: void cpu_set_log_filename(const char *filename);
  904: int cpu_str_to_log_mask(const char *str);
  905: 
  906: #if !defined(CONFIG_USER_ONLY)
  907: 
  908: /* Return the physical page corresponding to a virtual one. Use it
  909:    only for debugging because no protection checks are done. Return -1
  910:    if no page found. */
  911: target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
  912: 
  913: /* memory API */
  914: 
  915: extern int phys_ram_fd;
  916: extern ram_addr_t ram_size;
  917: 
  918: /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
  919: #define RAM_PREALLOC_MASK   (1 << 0)
  920: 
typedef struct RAMBlock {
    uint8_t *host;       /* host pointer to the block's memory */
    ram_addr_t offset;   /* presumably the block's base ram_addr_t -- verify against users */
    ram_addr_t length;   /* size of the block in bytes */
    uint32_t flags;      /* e.g. RAM_PREALLOC_MASK */
    char idstr[256];     /* identifier string */
    QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;              /* backing file descriptor, if any */
#endif
} RAMBlock;

typedef struct RAMList {
    /* one dirty flag byte per target page (see the dirty helpers below) */
    uint8_t *phys_dirty;
    QLIST_HEAD(ram, RAMBlock) blocks;
} RAMList;
extern RAMList ram_list;
  938: 
  939: extern const char *mem_path;
  940: extern int mem_prealloc;
  941: 
  942: /* physical memory access */
  943: 
  944: /* MMIO pages are identified by a combination of an IO device index and
  945:    3 flags.  The ROMD code stores the page ram offset in iotlb entry, 
   946:    so only a limited number of ids are available.  */
  947: 
  948: #define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))
  949: 
  950: /* Flags stored in the low bits of the TLB virtual address.  These are
  951:    defined so that fast path ram access is all zeros.  */
  952: /* Zero if TLB entry is valid.  */
  953: #define TLB_INVALID_MASK   (1 << 3)
  954: /* Set if TLB entry references a clean RAM page.  The iotlb entry will
  955:    contain the page physical address.  */
  956: #define TLB_NOTDIRTY    (1 << 4)
  957: /* Set if TLB entry is an IO callback.  */
  958: #define TLB_MMIO        (1 << 5)
  959: 
  960: #define VGA_DIRTY_FLAG       0x01
  961: #define CODE_DIRTY_FLAG      0x02
  962: #define MIGRATION_DIRTY_FLAG 0x08
  963: 
/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    /* a page counts as fully dirty only when every flag bit is set */
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

/* Return the raw dirty flag byte for the page containing 'addr'. */
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

/* Return the subset of 'dirty_flags' that is set for the page
   containing 'addr'. */
static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

/* Mark the page containing 'addr' dirty for all flag clients. */
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

/* Set 'dirty_flags' on the page containing 'addr' and return the
   resulting flag byte. */
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

/* Clear 'dirty_flags' on every page in [start, start + length).
   NOTE: 'length' is truncated down to a whole number of pages. */
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        int length,
                                                        int dirty_flags)
{
    int i, mask, len;
    uint8_t *p;

    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++) {
        p[i] &= mask;
    }
}
 1006: 
 1007: void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 1008:                                      int dirty_flags);
 1009: void cpu_tlb_update_dirty(CPUState *env);
 1010: 
 1011: int cpu_physical_memory_set_dirty_tracking(int enable);
 1012: 
 1013: int cpu_physical_memory_get_dirty_tracking(void);
 1014: 
 1015: int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
 1016:                                    target_phys_addr_t end_addr);
 1017: 
 1018: int cpu_physical_log_start(target_phys_addr_t start_addr,
 1019:                            ram_addr_t size);
 1020: 
 1021: int cpu_physical_log_stop(target_phys_addr_t start_addr,
 1022:                           ram_addr_t size);
 1023: 
 1024: void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 1025: #endif /* !CONFIG_USER_ONLY */
 1026: 
 1027: int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
 1028:                         uint8_t *buf, int len, int is_write);
 1029: 
 1030: #endif /* CPU_ALL_H */

unix.superglobalmegacorp.com