File: qemu/memory.c  [Qemu by Fabrice Bellard]
Revision 1.1.1.1 (vendor branch): Tue Apr 24 19:17:45 2018 UTC, by root
Branches: qemu, MAIN; CVS tags: qemu1001, HEAD
qemu 1.0.1

    1: /*
    2:  * Physical memory management
    3:  *
    4:  * Copyright 2011 Red Hat, Inc. and/or its affiliates
    5:  *
    6:  * Authors:
    7:  *  Avi Kivity <avi@redhat.com>
    8:  *
    9:  * This work is licensed under the terms of the GNU GPL, version 2.  See
   10:  * the COPYING file in the top-level directory.
   11:  *
   12:  */
   13: 
   14: #include "memory.h"
   15: #include "exec-memory.h"
   16: #include "ioport.h"
   17: #include "bitops.h"
   18: #include "kvm.h"
   19: #include <assert.h>
   20: 
   21: unsigned memory_region_transaction_depth = 0;
   22: 
   23: typedef struct AddrRange AddrRange;
   24: 
   25: /*
    26:  * Note that using signed integers limits us to physical addresses at
    27:  * most 63 bits wide.  They are needed for negative offsetting in aliases
   28:  * (large MemoryRegion::alias_offset).
   29:  */
   30: struct AddrRange {
   31:     Int128 start;
   32:     Int128 size;
   33: };
   34: 
   35: static AddrRange addrrange_make(Int128 start, Int128 size)
   36: {
   37:     return (AddrRange) { start, size };
   38: }
   39: 
   40: static bool addrrange_equal(AddrRange r1, AddrRange r2)
   41: {
   42:     return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
   43: }
   44: 
   45: static Int128 addrrange_end(AddrRange r)
   46: {
   47:     return int128_add(r.start, r.size);
   48: }
   49: 
   50: static AddrRange addrrange_shift(AddrRange range, Int128 delta)
   51: {
   52:     int128_addto(&range.start, delta);
   53:     return range;
   54: }
   55: 
   56: static bool addrrange_contains(AddrRange range, Int128 addr)
   57: {
   58:     return int128_ge(addr, range.start)
   59:         && int128_lt(addr, addrrange_end(range));
   60: }
   61: 
   62: static bool addrrange_intersects(AddrRange r1, AddrRange r2)
   63: {
   64:     return addrrange_contains(r1, r2.start)
   65:         || addrrange_contains(r2, r1.start);
   66: }
   67: 
   68: static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
   69: {
   70:     Int128 start = int128_max(r1.start, r2.start);
   71:     Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
   72:     return addrrange_make(start, int128_sub(end, start));
   73: }
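
/*
 * AddrRange describes the half-open interval [start, start + size).
 * For example, [0x1000, +0x3000) and [0x2000, +0x4000) intersect, and
 * addrrange_intersection() of the two yields [0x2000, +0x2000):
 * the maximum of the starts, the minimum of the ends.
 */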
   74: 
   75: struct CoalescedMemoryRange {
   76:     AddrRange addr;
   77:     QTAILQ_ENTRY(CoalescedMemoryRange) link;
   78: };
   79: 
   80: struct MemoryRegionIoeventfd {
   81:     AddrRange addr;
   82:     bool match_data;
   83:     uint64_t data;
   84:     int fd;
   85: };
   86: 
   87: static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
   88:                                            MemoryRegionIoeventfd b)
   89: {
   90:     if (int128_lt(a.addr.start, b.addr.start)) {
   91:         return true;
   92:     } else if (int128_gt(a.addr.start, b.addr.start)) {
   93:         return false;
   94:     } else if (int128_lt(a.addr.size, b.addr.size)) {
   95:         return true;
   96:     } else if (int128_gt(a.addr.size, b.addr.size)) {
   97:         return false;
   98:     } else if (a.match_data < b.match_data) {
   99:         return true;
   100:     } else if (a.match_data > b.match_data) {
  101:         return false;
  102:     } else if (a.match_data) {
  103:         if (a.data < b.data) {
  104:             return true;
  105:         } else if (a.data > b.data) {
  106:             return false;
  107:         }
  108:     }
  109:     if (a.fd < b.fd) {
  110:         return true;
  111:     } else if (a.fd > b.fd) {
  112:         return false;
  113:     }
  114:     return false;
  115: }
  116: 
  117: static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
  118:                                           MemoryRegionIoeventfd b)
  119: {
  120:     return !memory_region_ioeventfd_before(a, b)
  121:         && !memory_region_ioeventfd_before(b, a);
  122: }
  123: 
  124: typedef struct FlatRange FlatRange;
  125: typedef struct FlatView FlatView;
  126: 
  127: /* Range of memory in the global map.  Addresses are absolute. */
  128: struct FlatRange {
  129:     MemoryRegion *mr;
  130:     target_phys_addr_t offset_in_region;
  131:     AddrRange addr;
  132:     uint8_t dirty_log_mask;
  133:     bool readable;
  134:     bool readonly;
  135: };
  136: 
   137: /* Flattened global view of the currently active memory hierarchy.  Kept
   138:  * in sorted order.
  139:  */
  140: struct FlatView {
  141:     FlatRange *ranges;
  142:     unsigned nr;
  143:     unsigned nr_allocated;
  144: };
  145: 
  146: typedef struct AddressSpace AddressSpace;
  147: typedef struct AddressSpaceOps AddressSpaceOps;
  148: 
  149: /* A system address space - I/O, memory, etc. */
  150: struct AddressSpace {
  151:     const AddressSpaceOps *ops;
  152:     MemoryRegion *root;
  153:     FlatView current_map;
  154:     int ioeventfd_nb;
  155:     MemoryRegionIoeventfd *ioeventfds;
  156: };
  157: 
  158: struct AddressSpaceOps {
  159:     void (*range_add)(AddressSpace *as, FlatRange *fr);
  160:     void (*range_del)(AddressSpace *as, FlatRange *fr);
  161:     void (*log_start)(AddressSpace *as, FlatRange *fr);
  162:     void (*log_stop)(AddressSpace *as, FlatRange *fr);
  163:     void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
  164:     void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
  165: };
  166: 
  167: #define FOR_EACH_FLAT_RANGE(var, view)          \
  168:     for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
  169: 
  170: static bool flatrange_equal(FlatRange *a, FlatRange *b)
  171: {
  172:     return a->mr == b->mr
  173:         && addrrange_equal(a->addr, b->addr)
  174:         && a->offset_in_region == b->offset_in_region
  175:         && a->readable == b->readable
  176:         && a->readonly == b->readonly;
  177: }
  178: 
  179: static void flatview_init(FlatView *view)
  180: {
  181:     view->ranges = NULL;
  182:     view->nr = 0;
  183:     view->nr_allocated = 0;
  184: }
  185: 
  186: /* Insert a range into a given position.  Caller is responsible for maintaining
  187:  * sorting order.
  188:  */
  189: static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
  190: {
  191:     if (view->nr == view->nr_allocated) {
  192:         view->nr_allocated = MAX(2 * view->nr, 10);
  193:         view->ranges = g_realloc(view->ranges,
  194:                                     view->nr_allocated * sizeof(*view->ranges));
  195:     }
  196:     memmove(view->ranges + pos + 1, view->ranges + pos,
  197:             (view->nr - pos) * sizeof(FlatRange));
  198:     view->ranges[pos] = *range;
  199:     ++view->nr;
  200: }
  201: 
  202: static void flatview_destroy(FlatView *view)
  203: {
  204:     g_free(view->ranges);
  205: }
  206: 
  207: static bool can_merge(FlatRange *r1, FlatRange *r2)
  208: {
  209:     return int128_eq(addrrange_end(r1->addr), r2->addr.start)
  210:         && r1->mr == r2->mr
  211:         && int128_eq(int128_add(int128_make64(r1->offset_in_region),
  212:                                 r1->addr.size),
  213:                      int128_make64(r2->offset_in_region))
  214:         && r1->dirty_log_mask == r2->dirty_log_mask
  215:         && r1->readable == r2->readable
  216:         && r1->readonly == r2->readonly;
  217: }
  218: 
   219: /* Attempt to simplify a view by merging adjacent ranges */
  220: static void flatview_simplify(FlatView *view)
  221: {
  222:     unsigned i, j;
  223: 
  224:     i = 0;
  225:     while (i < view->nr) {
  226:         j = i + 1;
  227:         while (j < view->nr
  228:                && can_merge(&view->ranges[j-1], &view->ranges[j])) {
  229:             int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
  230:             ++j;
  231:         }
  232:         ++i;
  233:         memmove(&view->ranges[i], &view->ranges[j],
  234:                 (view->nr - j) * sizeof(view->ranges[j]));
  235:         view->nr -= j - i;
  236:     }
  237: }
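
/*
 * Worked example: two ranges over the same region, [0x1000, +0x1000) at
 * offset_in_region 0 and [0x2000, +0x1000) at offset_in_region 0x1000,
 * with identical flags, satisfy can_merge() (the first ends where the
 * second starts, and the region offsets are contiguous), so
 * flatview_simplify() collapses them into a single [0x1000, +0x2000).
 */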
  238: 
  239: static void memory_region_read_accessor(void *opaque,
  240:                                         target_phys_addr_t addr,
  241:                                         uint64_t *value,
  242:                                         unsigned size,
  243:                                         unsigned shift,
  244:                                         uint64_t mask)
  245: {
  246:     MemoryRegion *mr = opaque;
  247:     uint64_t tmp;
  248: 
  249:     tmp = mr->ops->read(mr->opaque, addr, size);
  250:     *value |= (tmp & mask) << shift;
  251: }
  252: 
  253: static void memory_region_write_accessor(void *opaque,
  254:                                          target_phys_addr_t addr,
  255:                                          uint64_t *value,
  256:                                          unsigned size,
  257:                                          unsigned shift,
  258:                                          uint64_t mask)
  259: {
  260:     MemoryRegion *mr = opaque;
  261:     uint64_t tmp;
  262: 
  263:     tmp = (*value >> shift) & mask;
  264:     mr->ops->write(mr->opaque, addr, tmp, size);
  265: }
  266: 
  267: static void access_with_adjusted_size(target_phys_addr_t addr,
  268:                                       uint64_t *value,
  269:                                       unsigned size,
  270:                                       unsigned access_size_min,
  271:                                       unsigned access_size_max,
  272:                                       void (*access)(void *opaque,
  273:                                                      target_phys_addr_t addr,
  274:                                                      uint64_t *value,
  275:                                                      unsigned size,
  276:                                                      unsigned shift,
  277:                                                      uint64_t mask),
  278:                                       void *opaque)
  279: {
  280:     uint64_t access_mask;
  281:     unsigned access_size;
  282:     unsigned i;
  283: 
  284:     if (!access_size_min) {
  285:         access_size_min = 1;
  286:     }
  287:     if (!access_size_max) {
  288:         access_size_max = 4;
  289:     }
  290:     access_size = MAX(MIN(size, access_size_max), access_size_min);
  291:     access_mask = -1ULL >> (64 - access_size * 8);
  292:     for (i = 0; i < size; i += access_size) {
  293:         /* FIXME: big-endian support */
  294:         access(opaque, addr + i, value, access_size, i * 8, access_mask);
  295:     }
  296: }
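
/*
 * Worked example: an 8-byte access to a device whose ops->impl.max_access_size
 * is 4 is split into two 4-byte calls, access(opaque, addr, ..., shift 0,
 * mask 0xffffffff) and access(opaque, addr + 4, ..., shift 32, same mask),
 * which the read accessor above ORs into a single 64-bit value.  The shift
 * is computed from the offset alone, hence the big-endian FIXME.
 */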
  297: 
  298: static void memory_region_prepare_ram_addr(MemoryRegion *mr);
  299: 
  300: static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
  301: {
  302:     ram_addr_t phys_offset, region_offset;
  303: 
  304:     memory_region_prepare_ram_addr(fr->mr);
  305: 
  306:     phys_offset = fr->mr->ram_addr;
  307:     region_offset = fr->offset_in_region;
  308:     /* cpu_register_physical_memory_log() wants region_offset for
   309:      * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
  310:      */
  311:     if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
  312:         phys_offset += region_offset;
  313:         region_offset = 0;
  314:     }
  315: 
  316:     if (!fr->readable) {
  317:         phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
  318:     }
  319: 
  320:     if (fr->readonly) {
  321:         phys_offset |= IO_MEM_ROM;
  322:     }
  323: 
  324:     cpu_register_physical_memory_log(int128_get64(fr->addr.start),
  325:                                      int128_get64(fr->addr.size),
  326:                                      phys_offset,
  327:                                      region_offset,
  328:                                      fr->dirty_log_mask);
  329: }
  330: 
  331: static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
  332: {
  333:     if (fr->dirty_log_mask) {
  334:         Int128 end = addrrange_end(fr->addr);
  335:         cpu_physical_sync_dirty_bitmap(int128_get64(fr->addr.start),
  336:                                        int128_get64(end));
  337:     }
  338:     cpu_register_physical_memory(int128_get64(fr->addr.start),
  339:                                  int128_get64(fr->addr.size),
  340:                                  IO_MEM_UNASSIGNED);
  341: }
  342: 
  343: static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
  344: {
  345:     cpu_physical_log_start(int128_get64(fr->addr.start),
  346:                            int128_get64(fr->addr.size));
  347: }
  348: 
  349: static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
  350: {
  351:     cpu_physical_log_stop(int128_get64(fr->addr.start),
  352:                           int128_get64(fr->addr.size));
  353: }
  354: 
  355: static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
  356: {
  357:     int r;
  358: 
  359:     assert(fd->match_data && int128_get64(fd->addr.size) == 4);
  360: 
  361:     r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
  362:                                     fd->data, true);
  363:     if (r < 0) {
  364:         abort();
  365:     }
  366: }
  367: 
  368: static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
  369: {
  370:     int r;
  371: 
  372:     r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
  373:                                     fd->data, false);
  374:     if (r < 0) {
  375:         abort();
  376:     }
  377: }
  378: 
  379: static const AddressSpaceOps address_space_ops_memory = {
  380:     .range_add = as_memory_range_add,
  381:     .range_del = as_memory_range_del,
  382:     .log_start = as_memory_log_start,
  383:     .log_stop = as_memory_log_stop,
  384:     .ioeventfd_add = as_memory_ioeventfd_add,
  385:     .ioeventfd_del = as_memory_ioeventfd_del,
  386: };
  387: 
  388: static AddressSpace address_space_memory = {
  389:     .ops = &address_space_ops_memory,
  390: };
  391: 
  392: static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
  393:                                              unsigned width, bool write)
  394: {
  395:     const MemoryRegionPortio *mrp;
  396: 
  397:     for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
  398:         if (offset >= mrp->offset && offset < mrp->offset + mrp->len
  399:             && width == mrp->size
  400:             && (write ? (bool)mrp->write : (bool)mrp->read)) {
  401:             return mrp;
  402:         }
  403:     }
  404:     return NULL;
  405: }
  406: 
  407: static void memory_region_iorange_read(IORange *iorange,
  408:                                        uint64_t offset,
  409:                                        unsigned width,
  410:                                        uint64_t *data)
  411: {
  412:     MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);
  413: 
  414:     if (mr->ops->old_portio) {
  415:         const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);
  416: 
  417:         *data = ((uint64_t)1 << (width * 8)) - 1;
  418:         if (mrp) {
  419:             *data = mrp->read(mr->opaque, offset + mr->offset);
  420:         } else if (width == 2) {
  421:             mrp = find_portio(mr, offset, 1, false);
  422:             assert(mrp);
  423:             *data = mrp->read(mr->opaque, offset + mr->offset) |
  424:                     (mrp->read(mr->opaque, offset + mr->offset + 1) << 8);
  425:         }
  426:         return;
  427:     }
  428:     *data = 0;
  429:     access_with_adjusted_size(offset + mr->offset, data, width,
  430:                               mr->ops->impl.min_access_size,
  431:                               mr->ops->impl.max_access_size,
  432:                               memory_region_read_accessor, mr);
  433: }
  434: 
  435: static void memory_region_iorange_write(IORange *iorange,
  436:                                         uint64_t offset,
  437:                                         unsigned width,
  438:                                         uint64_t data)
  439: {
  440:     MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);
  441: 
  442:     if (mr->ops->old_portio) {
  443:         const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);
  444: 
  445:         if (mrp) {
  446:             mrp->write(mr->opaque, offset + mr->offset, data);
  447:         } else if (width == 2) {
   448:             mrp = find_portio(mr, offset, 1, true); /* want a write handler */
  449:             assert(mrp);
  450:             mrp->write(mr->opaque, offset + mr->offset, data & 0xff);
  451:             mrp->write(mr->opaque, offset + mr->offset + 1, data >> 8);
  452:         }
  453:         return;
  454:     }
  455:     access_with_adjusted_size(offset + mr->offset, &data, width,
  456:                               mr->ops->impl.min_access_size,
  457:                               mr->ops->impl.max_access_size,
  458:                               memory_region_write_accessor, mr);
  459: }
  460: 
  461: static const IORangeOps memory_region_iorange_ops = {
  462:     .read = memory_region_iorange_read,
  463:     .write = memory_region_iorange_write,
  464: };
  465: 
  466: static void as_io_range_add(AddressSpace *as, FlatRange *fr)
  467: {
  468:     iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
  469:                  int128_get64(fr->addr.start), int128_get64(fr->addr.size));
  470:     ioport_register(&fr->mr->iorange);
  471: }
  472: 
  473: static void as_io_range_del(AddressSpace *as, FlatRange *fr)
  474: {
  475:     isa_unassign_ioport(int128_get64(fr->addr.start),
  476:                         int128_get64(fr->addr.size));
  477: }
  478: 
  479: static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
  480: {
  481:     int r;
  482: 
  483:     assert(fd->match_data && int128_get64(fd->addr.size) == 2);
  484: 
  485:     r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
  486:                                    fd->data, true);
  487:     if (r < 0) {
  488:         abort();
  489:     }
  490: }
  491: 
  492: static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
  493: {
  494:     int r;
  495: 
  496:     r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
  497:                                    fd->data, false);
  498:     if (r < 0) {
  499:         abort();
  500:     }
  501: }
  502: 
  503: static const AddressSpaceOps address_space_ops_io = {
  504:     .range_add = as_io_range_add,
  505:     .range_del = as_io_range_del,
  506:     .ioeventfd_add = as_io_ioeventfd_add,
  507:     .ioeventfd_del = as_io_ioeventfd_del,
  508: };
  509: 
  510: static AddressSpace address_space_io = {
  511:     .ops = &address_space_ops_io,
  512: };
  513: 
  514: /* Render a memory region into the global view.  Ranges in @view obscure
  515:  * ranges in @mr.
  516:  */
  517: static void render_memory_region(FlatView *view,
  518:                                  MemoryRegion *mr,
  519:                                  Int128 base,
  520:                                  AddrRange clip,
  521:                                  bool readonly)
  522: {
  523:     MemoryRegion *subregion;
  524:     unsigned i;
  525:     target_phys_addr_t offset_in_region;
  526:     Int128 remain;
  527:     Int128 now;
  528:     FlatRange fr;
  529:     AddrRange tmp;
  530: 
  531:     int128_addto(&base, int128_make64(mr->addr));
  532:     readonly |= mr->readonly;
  533: 
  534:     tmp = addrrange_make(base, mr->size);
  535: 
  536:     if (!addrrange_intersects(tmp, clip)) {
  537:         return;
  538:     }
  539: 
  540:     clip = addrrange_intersection(tmp, clip);
  541: 
  542:     if (mr->alias) {
  543:         int128_subfrom(&base, int128_make64(mr->alias->addr));
  544:         int128_subfrom(&base, int128_make64(mr->alias_offset));
  545:         render_memory_region(view, mr->alias, base, clip, readonly);
  546:         return;
  547:     }
  548: 
  549:     /* Render subregions in priority order. */
  550:     QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
  551:         render_memory_region(view, subregion, base, clip, readonly);
  552:     }
  553: 
  554:     if (!mr->terminates) {
  555:         return;
  556:     }
  557: 
  558:     offset_in_region = int128_get64(int128_sub(clip.start, base));
  559:     base = clip.start;
  560:     remain = clip.size;
  561: 
  562:     /* Render the region itself into any gaps left by the current view. */
  563:     for (i = 0; i < view->nr && int128_nz(remain); ++i) {
  564:         if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
  565:             continue;
  566:         }
  567:         if (int128_lt(base, view->ranges[i].addr.start)) {
  568:             now = int128_min(remain,
  569:                              int128_sub(view->ranges[i].addr.start, base));
  570:             fr.mr = mr;
  571:             fr.offset_in_region = offset_in_region;
  572:             fr.addr = addrrange_make(base, now);
  573:             fr.dirty_log_mask = mr->dirty_log_mask;
  574:             fr.readable = mr->readable;
  575:             fr.readonly = readonly;
  576:             flatview_insert(view, i, &fr);
  577:             ++i;
  578:             int128_addto(&base, now);
  579:             offset_in_region += int128_get64(now);
  580:             int128_subfrom(&remain, now);
  581:         }
  582:         if (int128_eq(base, view->ranges[i].addr.start)) {
  583:             now = int128_min(remain, view->ranges[i].addr.size);
  584:             int128_addto(&base, now);
  585:             offset_in_region += int128_get64(now);
  586:             int128_subfrom(&remain, now);
  587:         }
  588:     }
  589:     if (int128_nz(remain)) {
  590:         fr.mr = mr;
  591:         fr.offset_in_region = offset_in_region;
  592:         fr.addr = addrrange_make(base, remain);
  593:         fr.dirty_log_mask = mr->dirty_log_mask;
  594:         fr.readable = mr->readable;
  595:         fr.readonly = readonly;
  596:         flatview_insert(view, i, &fr);
  597:     }
  598: }
  599: 
  600: /* Render a memory topology into a list of disjoint absolute ranges. */
  601: static FlatView generate_memory_topology(MemoryRegion *mr)
  602: {
  603:     FlatView view;
  604: 
  605:     flatview_init(&view);
  606: 
  607:     render_memory_region(&view, mr, int128_zero(),
  608:                          addrrange_make(int128_zero(), int128_2_64()), false);
  609:     flatview_simplify(&view);
  610: 
  611:     return view;
  612: }
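
/*
 * Flattening example: a root container holding RAM at [0, +0x4000) at
 * priority 0 and an MMIO region at [0x1000, +0x1000) at priority 1 renders
 * as three disjoint FlatRanges: RAM [0, +0x1000), MMIO [0x1000, +0x1000),
 * and RAM [0x2000, +0x2000) with offset_in_region 0x2000.  Higher-priority
 * subregions are rendered first and obscure lower-priority ones; aliases
 * are followed back to the region they alias.
 */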
  613: 
  614: static void address_space_add_del_ioeventfds(AddressSpace *as,
  615:                                              MemoryRegionIoeventfd *fds_new,
  616:                                              unsigned fds_new_nb,
  617:                                              MemoryRegionIoeventfd *fds_old,
  618:                                              unsigned fds_old_nb)
  619: {
  620:     unsigned iold, inew;
  621: 
  622:     /* Generate a symmetric difference of the old and new fd sets, adding
  623:      * and deleting as necessary.
  624:      */
  625: 
  626:     iold = inew = 0;
  627:     while (iold < fds_old_nb || inew < fds_new_nb) {
  628:         if (iold < fds_old_nb
  629:             && (inew == fds_new_nb
  630:                 || memory_region_ioeventfd_before(fds_old[iold],
  631:                                                   fds_new[inew]))) {
  632:             as->ops->ioeventfd_del(as, &fds_old[iold]);
  633:             ++iold;
  634:         } else if (inew < fds_new_nb
  635:                    && (iold == fds_old_nb
  636:                        || memory_region_ioeventfd_before(fds_new[inew],
  637:                                                          fds_old[iold]))) {
  638:             as->ops->ioeventfd_add(as, &fds_new[inew]);
  639:             ++inew;
  640:         } else {
  641:             ++iold;
  642:             ++inew;
  643:         }
  644:     }
  645: }
  646: 
  647: static void address_space_update_ioeventfds(AddressSpace *as)
  648: {
  649:     FlatRange *fr;
  650:     unsigned ioeventfd_nb = 0;
  651:     MemoryRegionIoeventfd *ioeventfds = NULL;
  652:     AddrRange tmp;
  653:     unsigned i;
  654: 
  655:     FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
  656:         for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
  657:             tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
  658:                                   int128_sub(fr->addr.start,
  659:                                              int128_make64(fr->offset_in_region)));
  660:             if (addrrange_intersects(fr->addr, tmp)) {
  661:                 ++ioeventfd_nb;
  662:                 ioeventfds = g_realloc(ioeventfds,
  663:                                           ioeventfd_nb * sizeof(*ioeventfds));
  664:                 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
  665:                 ioeventfds[ioeventfd_nb-1].addr = tmp;
  666:             }
  667:         }
  668:     }
  669: 
  670:     address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
  671:                                      as->ioeventfds, as->ioeventfd_nb);
  672: 
  673:     g_free(as->ioeventfds);
  674:     as->ioeventfds = ioeventfds;
  675:     as->ioeventfd_nb = ioeventfd_nb;
  676: }
  677: 
  678: static void address_space_update_topology_pass(AddressSpace *as,
  679:                                                FlatView old_view,
  680:                                                FlatView new_view,
  681:                                                bool adding)
  682: {
  683:     unsigned iold, inew;
  684:     FlatRange *frold, *frnew;
  685: 
  686:     /* Generate a symmetric difference of the old and new memory maps.
  687:      * Kill ranges in the old map, and instantiate ranges in the new map.
  688:      */
  689:     iold = inew = 0;
  690:     while (iold < old_view.nr || inew < new_view.nr) {
  691:         if (iold < old_view.nr) {
  692:             frold = &old_view.ranges[iold];
  693:         } else {
  694:             frold = NULL;
  695:         }
  696:         if (inew < new_view.nr) {
  697:             frnew = &new_view.ranges[inew];
  698:         } else {
  699:             frnew = NULL;
  700:         }
  701: 
  702:         if (frold
  703:             && (!frnew
  704:                 || int128_lt(frold->addr.start, frnew->addr.start)
  705:                 || (int128_eq(frold->addr.start, frnew->addr.start)
  706:                     && !flatrange_equal(frold, frnew)))) {
  707:             /* In old, but (not in new, or in new but attributes changed). */
  708: 
  709:             if (!adding) {
  710:                 as->ops->range_del(as, frold);
  711:             }
  712: 
  713:             ++iold;
  714:         } else if (frold && frnew && flatrange_equal(frold, frnew)) {
  715:             /* In both (logging may have changed) */
  716: 
  717:             if (adding) {
  718:                 if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
  719:                     as->ops->log_stop(as, frnew);
  720:                 } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
  721:                     as->ops->log_start(as, frnew);
  722:                 }
  723:             }
  724: 
  725:             ++iold;
  726:             ++inew;
  727:         } else {
  728:             /* In new */
  729: 
  730:             if (adding) {
  731:                 as->ops->range_add(as, frnew);
  732:             }
  733: 
  734:             ++inew;
  735:         }
  736:     }
  737: }
  738: 
  739: 
  740: static void address_space_update_topology(AddressSpace *as)
  741: {
  742:     FlatView old_view = as->current_map;
  743:     FlatView new_view = generate_memory_topology(as->root);
  744: 
  745:     address_space_update_topology_pass(as, old_view, new_view, false);
  746:     address_space_update_topology_pass(as, old_view, new_view, true);
  747: 
  748:     as->current_map = new_view;
  749:     flatview_destroy(&old_view);
  750:     address_space_update_ioeventfds(as);
  751: }
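
/*
 * The two passes above run the same symmetric-difference walk twice: pass
 * one (adding == false) only deletes ranges that left the map or changed
 * attributes, pass two (adding == true) only adds ranges and adjusts dirty
 * logging on ranges present in both views.  Doing all deletions before any
 * insertions means a range that moved is never registered at two addresses
 * at once.
 */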
  752: 
  753: static void memory_region_update_topology(void)
  754: {
  755:     if (memory_region_transaction_depth) {
  756:         return;
  757:     }
  758: 
  759:     if (address_space_memory.root) {
  760:         address_space_update_topology(&address_space_memory);
  761:     }
  762:     if (address_space_io.root) {
  763:         address_space_update_topology(&address_space_io);
  764:     }
  765: }
  766: 
  767: void memory_region_transaction_begin(void)
  768: {
  769:     ++memory_region_transaction_depth;
  770: }
  771: 
  772: void memory_region_transaction_commit(void)
  773: {
  774:     assert(memory_region_transaction_depth);
  775:     --memory_region_transaction_depth;
  776:     memory_region_update_topology();
  777: }
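
/*
 * Batching sketch: while the transaction depth is nonzero,
 * memory_region_update_topology() returns early, so a sequence of changes
 * costs a single rebuild of the flat view at commit time (the regions
 * named here are hypothetical):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_readonly(&s->flash, true);
 *     memory_region_add_subregion(sysmem, 0x4000000, &s->bank2);
 *     memory_region_transaction_commit();    // one topology update
 */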
  778: 
  779: static void memory_region_destructor_none(MemoryRegion *mr)
  780: {
  781: }
  782: 
  783: static void memory_region_destructor_ram(MemoryRegion *mr)
  784: {
  785:     qemu_ram_free(mr->ram_addr);
  786: }
  787: 
  788: static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
  789: {
  790:     qemu_ram_free_from_ptr(mr->ram_addr);
  791: }
  792: 
  793: static void memory_region_destructor_iomem(MemoryRegion *mr)
  794: {
  795:     cpu_unregister_io_memory(mr->ram_addr);
  796: }
  797: 
  798: static void memory_region_destructor_rom_device(MemoryRegion *mr)
  799: {
  800:     qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
  801:     cpu_unregister_io_memory(mr->ram_addr & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
  802: }
  803: 
  804: void memory_region_init(MemoryRegion *mr,
  805:                         const char *name,
  806:                         uint64_t size)
  807: {
  808:     mr->ops = NULL;
  809:     mr->parent = NULL;
  810:     mr->size = int128_make64(size);
  811:     if (size == UINT64_MAX) {
  812:         mr->size = int128_2_64();
  813:     }
  814:     mr->addr = 0;
  815:     mr->offset = 0;
  816:     mr->terminates = false;
  817:     mr->readable = true;
  818:     mr->readonly = false;
  819:     mr->destructor = memory_region_destructor_none;
  820:     mr->priority = 0;
  821:     mr->may_overlap = false;
  822:     mr->alias = NULL;
  823:     QTAILQ_INIT(&mr->subregions);
  824:     memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
  825:     QTAILQ_INIT(&mr->coalesced);
  826:     mr->name = g_strdup(name);
  827:     mr->dirty_log_mask = 0;
  828:     mr->ioeventfd_nb = 0;
  829:     mr->ioeventfds = NULL;
  830: }
  831: 
  832: static bool memory_region_access_valid(MemoryRegion *mr,
  833:                                        target_phys_addr_t addr,
  834:                                        unsigned size)
  835: {
  836:     if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
  837:         return false;
  838:     }
  839: 
   840:     /* Treat a zero max_access_size as "all sizes valid", for compatibility */
  841:     if (!mr->ops->valid.max_access_size) {
  842:         return true;
  843:     }
  844: 
  845:     if (size > mr->ops->valid.max_access_size
  846:         || size < mr->ops->valid.min_access_size) {
  847:         return false;
  848:     }
  849:     return true;
  850: }
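
/*
 * Example: with ops->valid = { .min_access_size = 2, .max_access_size = 4,
 * .unaligned = false }, a 1-byte access is rejected (too small), as is a
 * 2-byte access at an odd address (unaligned).  Leaving valid zeroed
 * accepts every access, preserving compatibility with older devices.
 */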
  851: 
  852: static uint32_t memory_region_read_thunk_n(void *_mr,
  853:                                            target_phys_addr_t addr,
  854:                                            unsigned size)
  855: {
  856:     MemoryRegion *mr = _mr;
  857:     uint64_t data = 0;
  858: 
  859:     if (!memory_region_access_valid(mr, addr, size)) {
  860:         return -1U; /* FIXME: better signalling */
  861:     }
  862: 
  863:     if (!mr->ops->read) {
  864:         return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
  865:     }
  866: 
  867:     /* FIXME: support unaligned access */
  868:     access_with_adjusted_size(addr + mr->offset, &data, size,
  869:                               mr->ops->impl.min_access_size,
  870:                               mr->ops->impl.max_access_size,
  871:                               memory_region_read_accessor, mr);
  872: 
  873:     return data;
  874: }
  875: 
  876: static void memory_region_write_thunk_n(void *_mr,
  877:                                         target_phys_addr_t addr,
  878:                                         unsigned size,
  879:                                         uint64_t data)
  880: {
  881:     MemoryRegion *mr = _mr;
  882: 
  883:     if (!memory_region_access_valid(mr, addr, size)) {
  884:         return; /* FIXME: better signalling */
  885:     }
  886: 
  887:     if (!mr->ops->write) {
  888:         mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
  889:         return;
  890:     }
  891: 
  892:     /* FIXME: support unaligned access */
  893:     access_with_adjusted_size(addr + mr->offset, &data, size,
  894:                               mr->ops->impl.min_access_size,
  895:                               mr->ops->impl.max_access_size,
  896:                               memory_region_write_accessor, mr);
  897: }
  898: 
  899: static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
  900: {
  901:     return memory_region_read_thunk_n(mr, addr, 1);
  902: }
  903: 
  904: static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
  905: {
  906:     return memory_region_read_thunk_n(mr, addr, 2);
  907: }
  908: 
  909: static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
  910: {
  911:     return memory_region_read_thunk_n(mr, addr, 4);
  912: }
  913: 
  914: static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
  915:                                         uint32_t data)
  916: {
  917:     memory_region_write_thunk_n(mr, addr, 1, data);
  918: }
  919: 
  920: static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
  921:                                         uint32_t data)
  922: {
  923:     memory_region_write_thunk_n(mr, addr, 2, data);
  924: }
  925: 
  926: static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
  927:                                         uint32_t data)
  928: {
  929:     memory_region_write_thunk_n(mr, addr, 4, data);
  930: }
  931: 
  932: static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
  933:     memory_region_read_thunk_b,
  934:     memory_region_read_thunk_w,
  935:     memory_region_read_thunk_l,
  936: };
  937: 
  938: static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
  939:     memory_region_write_thunk_b,
  940:     memory_region_write_thunk_w,
  941:     memory_region_write_thunk_l,
  942: };
  943: 
  944: static void memory_region_prepare_ram_addr(MemoryRegion *mr)
  945: {
  946:     if (mr->backend_registered) {
  947:         return;
  948:     }
  949: 
  950:     mr->destructor = memory_region_destructor_iomem;
  951:     mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
  952:                                           memory_region_write_thunk,
  953:                                           mr,
  954:                                           mr->ops->endianness);
  955:     mr->backend_registered = true;
  956: }
  957: 
  958: void memory_region_init_io(MemoryRegion *mr,
  959:                            const MemoryRegionOps *ops,
  960:                            void *opaque,
  961:                            const char *name,
  962:                            uint64_t size)
  963: {
  964:     memory_region_init(mr, name, size);
  965:     mr->ops = ops;
  966:     mr->opaque = opaque;
  967:     mr->terminates = true;
  968:     mr->backend_registered = false;
  969: }
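
/*
 * Usage sketch, assuming a hypothetical device (the MyDev type, register
 * array and addresses are invented for illustration; MemoryRegionOps and
 * the endianness constants come from the memory API headers):
 *
 *     static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                                unsigned size)
 *     {
 *         MyDev *s = opaque;
 *         return s->regs[addr >> 2];            // one 32-bit reg per word
 *     }
 *
 *     static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDev *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev", 0x100);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000, &s->mmio);
 */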
  970: 
  971: void memory_region_init_ram(MemoryRegion *mr,
  972:                             DeviceState *dev,
  973:                             const char *name,
  974:                             uint64_t size)
  975: {
  976:     memory_region_init(mr, name, size);
  977:     mr->terminates = true;
  978:     mr->destructor = memory_region_destructor_ram;
  979:     mr->ram_addr = qemu_ram_alloc(dev, name, size);
  980:     mr->backend_registered = true;
  981: }
  982: 
  983: void memory_region_init_ram_ptr(MemoryRegion *mr,
  984:                                 DeviceState *dev,
  985:                                 const char *name,
  986:                                 uint64_t size,
  987:                                 void *ptr)
  988: {
  989:     memory_region_init(mr, name, size);
  990:     mr->terminates = true;
  991:     mr->destructor = memory_region_destructor_ram_from_ptr;
  992:     mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
  993:     mr->backend_registered = true;
  994: }
  995: 
  996: void memory_region_init_alias(MemoryRegion *mr,
  997:                               const char *name,
  998:                               MemoryRegion *orig,
  999:                               target_phys_addr_t offset,
 1000:                               uint64_t size)
 1001: {
 1002:     memory_region_init(mr, name, size);
 1003:     mr->alias = orig;
 1004:     mr->alias_offset = offset;
 1005: }
 1006: 
 1007: void memory_region_init_rom_device(MemoryRegion *mr,
 1008:                                    const MemoryRegionOps *ops,
 1009:                                    void *opaque,
 1010:                                    DeviceState *dev,
 1011:                                    const char *name,
 1012:                                    uint64_t size)
 1013: {
 1014:     memory_region_init(mr, name, size);
 1015:     mr->ops = ops;
 1016:     mr->opaque = opaque;
 1017:     mr->terminates = true;
 1018:     mr->destructor = memory_region_destructor_rom_device;
 1019:     mr->ram_addr = qemu_ram_alloc(dev, name, size);
 1020:     mr->ram_addr |= cpu_register_io_memory(memory_region_read_thunk,
 1021:                                            memory_region_write_thunk,
 1022:                                            mr,
 1023:                                            mr->ops->endianness);
 1024:     mr->ram_addr |= IO_MEM_ROMD;
 1025:     mr->backend_registered = true;
 1026: }
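
/*
 * For a ROM device, mr->ram_addr packs three things into one word: the
 * page-aligned RAM block address in the high bits, the I/O handler index
 * (always below TARGET_PAGE_SIZE) in the low bits, and the IO_MEM_ROMD
 * flag.  The destructor above unpacks the same fields to free both the
 * RAM block and the I/O handler slot.
 */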
 1027: 
 1028: void memory_region_destroy(MemoryRegion *mr)
 1029: {
 1030:     assert(QTAILQ_EMPTY(&mr->subregions));
 1031:     mr->destructor(mr);
 1032:     memory_region_clear_coalescing(mr);
 1033:     g_free((char *)mr->name);
 1034:     g_free(mr->ioeventfds);
 1035: }
 1036: 
 1037: uint64_t memory_region_size(MemoryRegion *mr)
 1038: {
 1039:     if (int128_eq(mr->size, int128_2_64())) {
 1040:         return UINT64_MAX;
 1041:     }
 1042:     return int128_get64(mr->size);
 1043: }
 1044: 
 1045: void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
 1046: {
 1047:     mr->offset = offset;
 1048: }
 1049: 
 1050: void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
 1051: {
 1052:     uint8_t mask = 1 << client;
 1053: 
 1054:     mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
 1055:     memory_region_update_topology();
 1056: }
 1057: 
 1058: bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
 1059:                              unsigned client)
 1060: {
 1061:     assert(mr->terminates);
 1062:     return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
 1063: }
 1064: 
 1065: void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
 1066: {
 1067:     assert(mr->terminates);
  1068:     cpu_physical_memory_set_dirty(mr->ram_addr + addr);
 1069: }
 1070: 
 1071: void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
 1072: {
 1073:     FlatRange *fr;
 1074: 
 1075:     FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
 1076:         if (fr->mr == mr) {
 1077:             cpu_physical_sync_dirty_bitmap(int128_get64(fr->addr.start),
 1078:                                            int128_get64(addrrange_end(fr->addr)));
 1079:         }
 1080:     }
 1081: }
 1082: 
 1083: void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
 1084: {
 1085:     if (mr->readonly != readonly) {
 1086:         mr->readonly = readonly;
 1087:         memory_region_update_topology();
 1088:     }
 1089: }
 1090: 
 1091: void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
 1092: {
 1093:     if (mr->readable != readable) {
 1094:         mr->readable = readable;
 1095:         memory_region_update_topology();
 1096:     }
 1097: }
 1098: 
 1099: void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
 1100:                                target_phys_addr_t size, unsigned client)
 1101: {
 1102:     assert(mr->terminates);
 1103:     cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
 1104:                                     mr->ram_addr + addr + size,
 1105:                                     1 << client);
 1106: }
 1107: 
 1108: void *memory_region_get_ram_ptr(MemoryRegion *mr)
 1109: {
 1110:     if (mr->alias) {
 1111:         return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
 1112:     }
 1113: 
 1114:     assert(mr->terminates);
 1115: 
 1116:     return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
 1117: }
 1118: 
 1119: static void memory_region_update_coalesced_range(MemoryRegion *mr)
 1120: {
 1121:     FlatRange *fr;
 1122:     CoalescedMemoryRange *cmr;
 1123:     AddrRange tmp;
 1124: 
 1125:     FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
 1126:         if (fr->mr == mr) {
 1127:             qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
 1128:                                            int128_get64(fr->addr.size));
 1129:             QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
 1130:                 tmp = addrrange_shift(cmr->addr,
 1131:                                       int128_sub(fr->addr.start,
 1132:                                                  int128_make64(fr->offset_in_region)));
 1133:                 if (!addrrange_intersects(tmp, fr->addr)) {
 1134:                     continue;
 1135:                 }
 1136:                 tmp = addrrange_intersection(tmp, fr->addr);
 1137:                 qemu_register_coalesced_mmio(int128_get64(tmp.start),
 1138:                                              int128_get64(tmp.size));
 1139:             }
 1140:         }
 1141:     }
 1142: }
 1143: 
 1144: void memory_region_set_coalescing(MemoryRegion *mr)
 1145: {
 1146:     memory_region_clear_coalescing(mr);
 1147:     memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
 1148: }
 1149: 
 1150: void memory_region_add_coalescing(MemoryRegion *mr,
 1151:                                   target_phys_addr_t offset,
 1152:                                   uint64_t size)
 1153: {
 1154:     CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
 1155: 
 1156:     cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
 1157:     QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
 1158:     memory_region_update_coalesced_range(mr);
 1159: }
 1160: 
 1161: void memory_region_clear_coalescing(MemoryRegion *mr)
 1162: {
 1163:     CoalescedMemoryRange *cmr;
 1164: 
 1165:     while (!QTAILQ_EMPTY(&mr->coalesced)) {
 1166:         cmr = QTAILQ_FIRST(&mr->coalesced);
 1167:         QTAILQ_REMOVE(&mr->coalesced, cmr, link);
 1168:         g_free(cmr);
 1169:     }
 1170:     memory_region_update_coalesced_range(mr);
 1171: }
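
/*
 * Coalesced MMIO is a KVM optimization: writes to a registered range are
 * buffered in the kernel and delivered in a batch on the next exit instead
 * of each causing a heavyweight exit of its own.  It suits write-mostly
 * register windows whose writes have no immediate side effects.
 */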
 1172: 
 1173: void memory_region_add_eventfd(MemoryRegion *mr,
 1174:                                target_phys_addr_t addr,
 1175:                                unsigned size,
 1176:                                bool match_data,
 1177:                                uint64_t data,
 1178:                                int fd)
 1179: {
 1180:     MemoryRegionIoeventfd mrfd = {
 1181:         .addr.start = int128_make64(addr),
 1182:         .addr.size = int128_make64(size),
 1183:         .match_data = match_data,
 1184:         .data = data,
 1185:         .fd = fd,
 1186:     };
 1187:     unsigned i;
 1188: 
 1189:     for (i = 0; i < mr->ioeventfd_nb; ++i) {
 1190:         if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
 1191:             break;
 1192:         }
 1193:     }
 1194:     ++mr->ioeventfd_nb;
 1195:     mr->ioeventfds = g_realloc(mr->ioeventfds,
 1196:                                   sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
 1197:     memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
 1198:             sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
 1199:     mr->ioeventfds[i] = mrfd;
 1200:     memory_region_update_topology();
 1201: }
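
/*
 * An ioeventfd lets KVM complete a guest write entirely in the kernel: a
 * write of the given size (optionally matching a specific data value) to
 * addr signals the eventfd instead of exiting to userspace.  virtio, for
 * example, uses this for its queue notification registers.
 */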
 1202: 
 1203: void memory_region_del_eventfd(MemoryRegion *mr,
 1204:                                target_phys_addr_t addr,
 1205:                                unsigned size,
 1206:                                bool match_data,
 1207:                                uint64_t data,
 1208:                                int fd)
 1209: {
 1210:     MemoryRegionIoeventfd mrfd = {
 1211:         .addr.start = int128_make64(addr),
 1212:         .addr.size = int128_make64(size),
 1213:         .match_data = match_data,
 1214:         .data = data,
 1215:         .fd = fd,
 1216:     };
 1217:     unsigned i;
 1218: 
 1219:     for (i = 0; i < mr->ioeventfd_nb; ++i) {
 1220:         if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
 1221:             break;
 1222:         }
 1223:     }
 1224:     assert(i != mr->ioeventfd_nb);
 1225:     memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
 1226:             sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
 1227:     --mr->ioeventfd_nb;
 1228:     mr->ioeventfds = g_realloc(mr->ioeventfds,
 1229:                                   sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
 1230:     memory_region_update_topology();
 1231: }
 1232: 
 1233: static void memory_region_add_subregion_common(MemoryRegion *mr,
 1234:                                                target_phys_addr_t offset,
 1235:                                                MemoryRegion *subregion)
 1236: {
 1237:     MemoryRegion *other;
 1238: 
 1239:     assert(!subregion->parent);
 1240:     subregion->parent = mr;
 1241:     subregion->addr = offset;
 1242:     QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
 1243:         if (subregion->may_overlap || other->may_overlap) {
 1244:             continue;
 1245:         }
  1246:         if (int128_ge(int128_make64(offset),
 1247:                       int128_add(int128_make64(other->addr), other->size))
 1248:             || int128_le(int128_add(int128_make64(offset), subregion->size),
 1249:                          int128_make64(other->addr))) {
 1250:             continue;
 1251:         }
 1252: #if 0
 1253:         printf("warning: subregion collision %llx/%llx (%s) "
 1254:                "vs %llx/%llx (%s)\n",
 1255:                (unsigned long long)offset,
 1256:                (unsigned long long)int128_get64(subregion->size),
 1257:                subregion->name,
 1258:                (unsigned long long)other->addr,
 1259:                (unsigned long long)int128_get64(other->size),
 1260:                other->name);
 1261: #endif
 1262:     }
 1263:     QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
 1264:         if (subregion->priority >= other->priority) {
 1265:             QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
 1266:             goto done;
 1267:         }
 1268:     }
 1269:     QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
 1270: done:
 1271:     memory_region_update_topology();
 1272: }
 1273: 
 1274: 
 1275: void memory_region_add_subregion(MemoryRegion *mr,
 1276:                                  target_phys_addr_t offset,
 1277:                                  MemoryRegion *subregion)
 1278: {
 1279:     subregion->may_overlap = false;
 1280:     subregion->priority = 0;
 1281:     memory_region_add_subregion_common(mr, offset, subregion);
 1282: }
 1283: 
 1284: void memory_region_add_subregion_overlap(MemoryRegion *mr,
 1285:                                          target_phys_addr_t offset,
 1286:                                          MemoryRegion *subregion,
 1287:                                          unsigned priority)
 1288: {
 1289:     subregion->may_overlap = true;
 1290:     subregion->priority = priority;
 1291:     memory_region_add_subregion_common(mr, offset, subregion);
 1292: }
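
/*
 * Usage sketch (hypothetical regions): layering a higher-priority window
 * over RAM; the overlapped part of the RAM is obscured and the rest shows
 * through in the flattened view:
 *
 *     memory_region_add_subregion(sysmem, 0, &ram);              // prio 0
 *     memory_region_add_subregion_overlap(sysmem, 0xa0000,
 *                                         &vga_window, 1);       // prio 1
 */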
 1293: 
 1294: void memory_region_del_subregion(MemoryRegion *mr,
 1295:                                  MemoryRegion *subregion)
 1296: {
 1297:     assert(subregion->parent == mr);
 1298:     subregion->parent = NULL;
 1299:     QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
 1300:     memory_region_update_topology();
 1301: }
 1302: 
 1303: void set_system_memory_map(MemoryRegion *mr)
 1304: {
 1305:     address_space_memory.root = mr;
 1306:     memory_region_update_topology();
 1307: }
 1308: 
 1309: void set_system_io_map(MemoryRegion *mr)
 1310: {
 1311:     address_space_io.root = mr;
 1312:     memory_region_update_topology();
 1313: }
 1314: 
 1315: typedef struct MemoryRegionList MemoryRegionList;
 1316: 
 1317: struct MemoryRegionList {
 1318:     const MemoryRegion *mr;
 1319:     bool printed;
 1320:     QTAILQ_ENTRY(MemoryRegionList) queue;
 1321: };
 1322: 
 1323: typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
 1324: 
 1325: static void mtree_print_mr(fprintf_function mon_printf, void *f,
 1326:                            const MemoryRegion *mr, unsigned int level,
 1327:                            target_phys_addr_t base,
 1328:                            MemoryRegionListHead *alias_print_queue)
 1329: {
 1330:     MemoryRegionList *new_ml, *ml, *next_ml;
 1331:     MemoryRegionListHead submr_print_queue;
 1332:     const MemoryRegion *submr;
 1333:     unsigned int i;
 1334: 
 1335:     if (!mr) {
 1336:         return;
 1337:     }
 1338: 
 1339:     for (i = 0; i < level; i++) {
 1340:         mon_printf(f, "  ");
 1341:     }
 1342: 
 1343:     if (mr->alias) {
 1344:         MemoryRegionList *ml;
 1345:         bool found = false;
 1346: 
 1347:         /* check if the alias is already in the queue */
 1348:         QTAILQ_FOREACH(ml, alias_print_queue, queue) {
 1349:             if (ml->mr == mr->alias && !ml->printed) {
 1350:                 found = true;
 1351:             }
 1352:         }
 1353: 
 1354:         if (!found) {
 1355:             ml = g_new(MemoryRegionList, 1);
 1356:             ml->mr = mr->alias;
 1357:             ml->printed = false;
 1358:             QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
 1359:         }
 1360:         mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): alias %s @%s "
 1361:                    TARGET_FMT_plx "-" TARGET_FMT_plx "\n",
 1362:                    base + mr->addr,
 1363:                    base + mr->addr
 1364:                    + (target_phys_addr_t)int128_get64(mr->size) - 1,
 1365:                    mr->priority,
 1366:                    mr->name,
 1367:                    mr->alias->name,
 1368:                    mr->alias_offset,
 1369:                    mr->alias_offset
 1370:                    + (target_phys_addr_t)int128_get64(mr->size) - 1);
 1371:     } else {
 1372:         mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): %s\n",
 1373:                    base + mr->addr,
 1374:                    base + mr->addr
 1375:                    + (target_phys_addr_t)int128_get64(mr->size) - 1,
 1376:                    mr->priority,
 1377:                    mr->name);
 1378:     }
 1379: 
 1380:     QTAILQ_INIT(&submr_print_queue);
 1381: 
 1382:     QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
 1383:         new_ml = g_new(MemoryRegionList, 1);
 1384:         new_ml->mr = submr;
 1385:         QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
 1386:             if (new_ml->mr->addr < ml->mr->addr ||
 1387:                 (new_ml->mr->addr == ml->mr->addr &&
 1388:                  new_ml->mr->priority > ml->mr->priority)) {
 1389:                 QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
 1390:                 new_ml = NULL;
 1391:                 break;
 1392:             }
 1393:         }
 1394:         if (new_ml) {
 1395:             QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
 1396:         }
 1397:     }
 1398: 
 1399:     QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
 1400:         mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
 1401:                        alias_print_queue);
 1402:     }
 1403: 
 1404:     QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
 1405:         g_free(ml);
 1406:     }
 1407: }
 1408: 
 1409: void mtree_info(fprintf_function mon_printf, void *f)
 1410: {
 1411:     MemoryRegionListHead ml_head;
 1412:     MemoryRegionList *ml, *ml2;
 1413: 
 1414:     QTAILQ_INIT(&ml_head);
 1415: 
 1416:     mon_printf(f, "memory\n");
 1417:     mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head);
 1418: 
 1419:     /* print aliased regions */
 1420:     QTAILQ_FOREACH(ml, &ml_head, queue) {
 1421:         if (!ml->printed) {
 1422:             mon_printf(f, "%s\n", ml->mr->name);
 1423:             mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
 1424:         }
 1425:     }
 1426: 
 1427:     QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
 1428:         g_free(ml);
 1429:     }
 1430: 
 1431:     if (address_space_io.root &&
 1432:         !QTAILQ_EMPTY(&address_space_io.root->subregions)) {
 1433:         QTAILQ_INIT(&ml_head);
 1434:         mon_printf(f, "I/O\n");
 1435:         mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head);
 1436:     }
 1437: }
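
/*
 * mtree_info() backs the monitor's "info mtree" command: it prints the
 * memory and I/O region trees, indented by nesting level, with aliased
 * regions queued and printed once at the end.
 */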
