File: [Qemu by Fabrice Bellard] / qemu / kvm-all.c
Revision 1.1.1.11 (vendor branch)
Tue Apr 24 19:16:52 2018 UTC by root
Branches: qemu, MAIN
CVS tags: qemu1001, HEAD
qemu 1.0.1

/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"
#include "bswap.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

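/* Each KVMSlot mirrors one kernel memory slot: start_addr is the
 * guest-physical base of the region, memory_size its length in bytes,
 * phys_offset the qemu ram_addr_t that backs it, and slot the index
 * passed to the kernel in struct kvm_userspace_memory_region. */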
typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
    int xsave, xcrs;
    int many_ioeventfds;
};

KVMState *kvm_state;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

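/* Reverse-map a qemu ram_addr_t back to a guest-physical address by
 * scanning the registered slots; returns 1 on success, 0 if no slot
 * covers the given RAM offset. */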
int kvm_physical_memory_addr_from_ram(KVMState *s, ram_addr_t ram_addr,
                                      target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram_addr >= mem->phys_offset &&
            ram_addr < mem->phys_offset + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram_addr - mem->phys_offset);
            return 1;
        }
    }

    return 0;
}

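/* Push one KVMSlot to the kernel via KVM_SET_USER_MEMORY_REGION.  A
 * memory_size of 0 deletes the slot; dirty logging is forced on while
 * migration logging is globally enabled. */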
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}

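/* Create a vcpu: KVM_CREATE_VCPU returns a per-vcpu fd whose run state
 * (struct kvm_run) is then mmap'ed.  If coalesced MMIO is available,
 * its ring page lives inside the same mapping, at the page offset
 * reported by KVM_CAP_COALESCED_MMIO. */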
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue an ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static int kvm_log_start(CPUPhysMemoryClient *client,
                         target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, true);
}

static int kvm_log_stop(CPUPhysMemoryClient *client,
                        target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, false);
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* Get KVM's dirty-pages bitmap for one slot and update qemu's bitmap */
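/* Example: with 4 KiB target pages on a 64-bit host, bitmap[1] == 0x9
 * means bits 0 and 3 of the second long are set, i.e. pages 64 and 67
 * of the slot are dirty, at guest-physical offsets 64 * 4096 and
 * 67 * 4096 from 'offset'. */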
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned long *bitmap,
                                         unsigned long offset,
                                         unsigned long mem_size)
{
    unsigned int i, j;
    unsigned long page_number, addr, addr1, c;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
        HOST_LONG_BITS;

    /*
     * Walking the bitmap is faster than walking the memory page by page,
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = offset + addr1;
                ram_addr = cpu_get_physical_page_desc(addr);
                cpu_physical_memory_set_dirty(ram_addr);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty(), which marks every dirty flag of the
 * affected pages.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For the dirty bitmap, the kernel allocates an array of size
         * aligned to bits-per-long.  But when the kernel is 64 bits and
         * userspace is 32 bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) differs between kernel and
         * user space.  Userspace would then provide a buffer that may be
         * 4 bytes smaller than the one the kernel uses, resulting in
         * userspace memory corruption (which is not detectable by
         * valgrind either, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here,
         * in the hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
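        /* Example: a 1 MiB slot with 4 KiB pages has 256 page bits;
         * ALIGN(256, 64) / 8 == 32 bytes of bitmap. */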
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) < 0) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
                                      mem->start_addr, mem->memory_size);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for I/O notification.  This requires a
     * host that supports eventfd(2) and an I/O thread; since eventfd does
     * not support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6-device limit on the KVM I/O bus.  Find out
     * so we can avoid creating too many ioeventfds.
     */
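    /* Probe by opening one more eventfd than the legacy limit (7 > 6)
     * and binding each to PIO address 0 with a distinct datamatch value;
     * if all assignments succeed, the host imposes no small limit. */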
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

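/* Register a guest-physical range with KVM.  The loop below first tears
 * down every slot overlapping the new range, then re-registers any
 * leftover prefix and suffix fragments, and finally installs the new
 * slot itself (unless the range is unassigned I/O memory, which KVM
 * need not know about). */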
static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
                             ram_addr_t phys_offset, bool log_dirty)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    /* KVM works in page-size chunks, but the function may be called
       with sub-page size and an unaligned start address. */
    size = TARGET_PAGE_ALIGN(size);
    start_addr = TARGET_PAGE_ALIGN(start_addr);

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, not even
         * by unregistering the previous ones and then registering the
         * larger slot. We have to maintain the existing fragmentation.
         * Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so
         * far) - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size, ram_addr_t phys_offset,
                                  bool log_dirty)
{
    kvm_set_phys_mem(start_addr, size, phys_offset, log_dirty);
}

static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return kvm_set_migration_log(enable);
}

static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
    .set_memory = kvm_client_set_memory,
    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
    .migration_log = kvm_client_migration_log,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
};

static void kvm_handle_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
    }
}

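/* Global KVM initialization: open /dev/kvm, require the stable API
 * version (KVM_API_VERSION, i.e. 12) exactly, create the VM fd, verify
 * the required capabilities, then probe optional features and hook into
 * qemu's memory and interrupt handling. */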
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;

    s = g_malloc0(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s) {
        if (s->vmfd >= 0) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    g_free(s);

    return ret;
}

static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

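/* Drain the coalesced MMIO ring: entries the kernel queued while the
 * vcpu was running are replayed as ordinary memory writes.  The
 * in-progress flag guards against reentrancy, since the write path can
 * itself trigger another flush. */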
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}

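/* Main vcpu run loop.  Register state is synchronized lazily: it is
 * written back to the kernel only when kvm_vcpu_dirty is set, and the
 * global iothread lock is dropped around the blocking KVM_RUN ioctl so
 * device emulation can proceed in parallel.  Exit reasons this loop
 * cannot handle itself are forwarded to kvm_arch_handle_exit(). */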
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    cpu_single_env = env;

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after I/O exits to
             * complete instruction emulation. This self-signal will ensure
             * that we leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            DPRINTF("kvm run failed %s\n", strerror(-run_ret));
            abort();
        }

        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    env->exit_request = 0;
    cpu_single_env = NULL;
    return ret;
}

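/* Thin vararg wrappers around ioctl(2) for the three KVM fd levels:
 * the /dev/kvm system fd, the per-VM fd, and a per-vcpu fd.  Failures
 * are normalized to -errno, e.g.:
 *
 *     ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
 *     if (ret < 0) { ... }
 */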
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
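/* Guest debugging support.  Software breakpoints are tracked in a
 * per-VM list with a use_count so the same address can be set by
 * multiple debugger clients; hardware breakpoints are delegated to the
 * architecture code.  Every change is pushed to each vcpu through
 * KVM_SET_GUEST_DEBUG. */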
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

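/* Set the signal mask that is installed for the thread while it sits in
 * KVM_RUN.  The hardcoded len of 8 matches the kernel's own sigset size
 * (64 bits on Linux), which KVM_SET_SIGNAL_MASK expects. */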
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = 4;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return ret;
    }

    return 0;
}

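/* Usage sketch with illustrative values: a virtio-style device that
 * already owns an eventfd 'efd' could arm a doorbell at I/O port 0xc000
 * with datamatch 0 like this:
 *
 *     if (kvm_set_ioeventfd_pio_word(efd, 0xc000, 0, true) < 0) {
 *         ... fall back to an ordinary port trap ...
 *     }
 */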
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}
