qemu/softmmu_template.h, revision 1.1.1.9

/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

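/*
 * Example instantiation (a sketch; the actual defines come from the including
 * file): a target's op helpers define SHIFT and MMUSUFFIX before including
 * this header, e.g.
 *
 *     #define MMUSUFFIX _mmu
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *
 * which generates the 32-bit helpers __ldl_mmu(), slow_ldl_mmu(), __stl_mmu()
 * and slow_stl_mmu().  A code-access instantiation additionally defines
 * SOFTMMU_CODE_ACCESS, so the generated load helpers fetch through addr_code.
 */
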
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
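/* Access type convention used with tlb_fill() and do_unaligned_access() in
   this file: 0 = data load, 1 = data store, 2 = instruction fetch.  */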

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
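    /* Only byte/word/long MMIO callbacks are used here (second index 0..2),
       so a 64-bit access is split into two 32-bit reads; on big-endian
       targets the word at the lower address is the most significant half.  */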
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is a match for an unaligned or IO access */
    /* XXX: this could be done in the memory access macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
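    /* A single compare checks both that the entry maps this page and that it
       is valid: TLB_INVALID_MASK is folded into the comparison mask, and a
       page-aligned addr always has that bit clear.  */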
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
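            /* RAM access: addend is the per-page offset from guest virtual
               address to host address, so addr + addend is a host pointer
               into the RAM backing this page.  */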
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
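            /* Recombine the two aligned halves.  Example for a little-endian
               4-byte load with (addr & 3) == 2: shift is 16, so
               res = (res1 >> 16) | (res2 << 16), i.e. the two high-order
               bytes of the first word followed by the two low-order bytes of
               the second.  */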
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

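/* The store helpers below are generated only for the data-access
   instantiation; a SOFTMMU_CODE_ACCESS build needs just the load helpers
   above.  */
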
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
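                /* Pick the byte lane matching the target byte order: on a
                   big-endian target the most significant byte of val is
                   stored at the lowest address, on little-endian the least
                   significant byte is.  */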
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
