File: qemu/softmmu_template.h
Revision 1.1.1.5 (vendor branch), Tue Apr 24 16:50:58 2018 UTC, by root
Branches: qemu, MAIN
CVS tags: qemu0105, qemu0104, qemu0103, qemu0102, qemu0101, qemu0100, HEAD
qemu 0.10.0

/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

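/*
 * This header is a template: the including file defines SHIFT (and
 * MMUSUFFIX) before each inclusion, and the glue() macros paste the
 * size suffix onto the generated helper names.  As a sketch, assuming
 * an includer that sets SHIFT to 2 and defines MMUSUFFIX as, say, _mmu,
 * the fast-path load below expands (ignoring REGPARM) to:
 *
 *   uint32_t __ldl_mmu(target_ulong addr, int mmu_idx);
 *
 * The #undefs at the end of the file allow the template to be included
 * again with a different SHIFT.
 */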
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

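/*
 * An instruction fetch build checks the addr_code TLB field and reports
 * access type 2 to tlb_fill(); ordinary data loads use addr_read and
 * access type 0 (stores, below, use 1).
 */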
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
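/*
 * I/O read helper.  The low bits of physaddr (below TARGET_PAGE_MASK)
 * index the table of registered I/O handlers; the page-aligned bits are
 * added back to the virtual address to form the address handed to the
 * handler.  mem_io_pc and mem_io_vaddr record where the access came
 * from, so that cpu_io_recompile() can rework the current block when a
 * device access happens at a point where I/O is not yet allowed.
 * I/O handlers are registered only for 1, 2 and 4 byte accesses, so a
 * 64-bit access (SHIFT > 2) is split into two 32-bit reads in target
 * endian order.
 */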
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

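/*
 * TLB fast path.  A single comparison both matches the page number and
 * rejects invalid entries: tlb_addr has TLB_INVALID_MASK set when the
 * entry is stale, which makes the masked comparison fail.  Any other
 * nonzero bit below TARGET_PAGE_MASK flags the page as requiring the
 * I/O path.  GETPC() captures the return address into the translated
 * code, so tlb_fill() can restore the guest CPU state on a fault.
 */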
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more of this could be done in the memory macros, in a
       non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

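/*
 * Cross-page loads are built from the two naturally aligned words that
 * surround addr: addr1 rounds addr down to a DATA_SIZE boundary, addr2
 * is the following word, and the two results are shifted together.  A
 * worked example, assuming little-endian with DATA_SIZE == 4 and
 * (addr & 3) == 2: shift is 16, so res takes the top two bytes of res1
 * in its low half and the bottom two bytes of res2 in its high half.
 * Both recursive calls use aligned addresses, so the recursion cannot
 * take the unaligned branch again.
 */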
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

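/*
 * Stores exist only for data accesses: a SOFTMMU_CODE_ACCESS build
 * generates the load helpers used for instruction fetch, so the whole
 * write side is compiled out.  The store paths mirror the loads, but
 * use the addr_write TLB field and access type 1 when calling
 * tlb_fill().
 */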
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

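/*
 * I/O write helper, symmetric with io_read() above: 64-bit stores are
 * likewise split into two 32-bit handler calls in target endian order.
 * Note: only handlers above IO_MEM_NOTDIRTY force a cpu_io_recompile()
 * when I/O is not currently allowed, presumably because the NOTDIRTY
 * slot covers RAM dirty-bit tracking rather than a real device.
 */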
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}

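/*
 * Store fast path; the structure matches the load helper above.  As on
 * the load side, a nonzero low bit in addr_write diverts the access to
 * the I/O path, a page-crossing access falls back to the slow helper,
 * and a TLB miss calls tlb_fill() (with access type 1) and retries.
 */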
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

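/*
 * Unlike the load side, which recombines two aligned loads, unaligned
 * stores are decomposed into single-byte stores via the byte variant
 * slow_stb: each byte of val is extracted according to the target's
 * endianness and stored at addr + i.  Byte accesses are always aligned,
 * so the recursion terminates there.
 */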
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
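/*
 * A sketch of how an includer might instantiate all four sizes (this is
 * hypothetical; the actual includer in this tree may differ).  SHIFT
 * must be redefined before each inclusion since it is #undef'd above,
 * while MMUSUFFIX survives and only needs to be defined once:
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 */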
