Diff for /qemu/block.c between versions 1.1.1.17 and 1.1.1.23

version 1.1.1.17, 2018/04/24 18:16:35 version 1.1.1.23, 2018/04/24 19:34:33
Line 23 Line 23
  */   */
 #include "config-host.h"  #include "config-host.h"
 #include "qemu-common.h"  #include "qemu-common.h"
   #include "trace.h"
 #include "monitor.h"  #include "monitor.h"
 #include "block_int.h"  #include "block_int.h"
 #include "module.h"  #include "module.h"
 #include "qemu-objects.h"  #include "qjson.h"
   #include "qemu-coroutine.h"
   #include "qmp-commands.h"
   #include "qemu-timer.h"
   
 #ifdef CONFIG_BSD  #ifdef CONFIG_BSD
 #include <sys/types.h>  #include <sys/types.h>
Line 42 Line 46
 #include <windows.h>  #include <windows.h>
 #endif  #endif
   
   #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
   
   typedef enum {
       BDRV_REQ_COPY_ON_READ = 0x1,
       BDRV_REQ_ZERO_WRITE   = 0x2,
   } BdrvRequestFlags;
   
   static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,  static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,          int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque);          BlockDriverCompletionFunc *cb, void *opaque);
 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,  static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,          int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque);          BlockDriverCompletionFunc *cb, void *opaque);
 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,  static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque);                                           int64_t sector_num, int nb_sectors,
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,                                           QEMUIOVector *iov);
                         uint8_t *buf, int nb_sectors);  static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,                                           int64_t sector_num, int nb_sectors,
                          const uint8_t *buf, int nb_sectors);                                           QEMUIOVector *iov);
   static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
       BdrvRequestFlags flags);
   static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
       BdrvRequestFlags flags);
   static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                                  int64_t sector_num,
                                                  QEMUIOVector *qiov,
                                                  int nb_sectors,
                                                  BlockDriverCompletionFunc *cb,
                                                  void *opaque,
                                                  bool is_write);
   static void coroutine_fn bdrv_co_do_rw(void *opaque);
   static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors);
   
   static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
           bool is_write, double elapsed_time, uint64_t *wait);
   static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
           double elapsed_time, uint64_t *wait);
   static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
           bool is_write, int64_t *wait);
   
   static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
       QTAILQ_HEAD_INITIALIZER(bdrv_states);
   
 BlockDriverState *bdrv_first;  static QLIST_HEAD(, BlockDriver) bdrv_drivers =
       QLIST_HEAD_INITIALIZER(bdrv_drivers);
   
 static BlockDriver *first_drv;  /* The device to use for VM snapshots */
   static BlockDriverState *bs_snapshots;
   
 /* If non-zero, use only whitelisted block drivers */  /* If non-zero, use only whitelisted block drivers */
 static int use_bdrv_whitelist;  static int use_bdrv_whitelist;
   
 int path_is_absolute(const char *path)  #ifdef _WIN32
   static int is_windows_drive_prefix(const char *filename)
   {
       return (((filename[0] >= 'a' && filename[0] <= 'z') ||
                (filename[0] >= 'A' && filename[0] <= 'Z')) &&
               filename[1] == ':');
   }
   
   int is_windows_drive(const char *filename)
   {
       if (is_windows_drive_prefix(filename) &&
           filename[2] == '\0')
           return 1;
       if (strstart(filename, "\\\\.\\", NULL) ||
           strstart(filename, "//./", NULL))
           return 1;
       return 0;
   }
   #endif
   
   /* throttling disk I/O limits */
   void bdrv_io_limits_disable(BlockDriverState *bs)
   {
       bs->io_limits_enabled = false;
   
       while (qemu_co_queue_next(&bs->throttled_reqs));
   
       if (bs->block_timer) {
           qemu_del_timer(bs->block_timer);
           qemu_free_timer(bs->block_timer);
           bs->block_timer = NULL;
       }
   
       bs->slice_start = 0;
       bs->slice_end   = 0;
       bs->slice_time  = 0;
       memset(&bs->io_base, 0, sizeof(bs->io_base));
   }
   
   static void bdrv_block_timer(void *opaque)
   {
       BlockDriverState *bs = opaque;
   
       qemu_co_queue_next(&bs->throttled_reqs);
   }
   
   void bdrv_io_limits_enable(BlockDriverState *bs)
   {
       qemu_co_queue_init(&bs->throttled_reqs);
       bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
       bs->slice_time  = 5 * BLOCK_IO_SLICE_TIME;
       bs->slice_start = qemu_get_clock_ns(vm_clock);
       bs->slice_end   = bs->slice_start + bs->slice_time;
       memset(&bs->io_base, 0, sizeof(bs->io_base));
       bs->io_limits_enabled = true;
   }
   
   bool bdrv_io_limits_enabled(BlockDriverState *bs)
   {
       BlockIOLimit *io_limits = &bs->io_limits;
       return io_limits->bps[BLOCK_IO_LIMIT_READ]
            || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
            || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
            || io_limits->iops[BLOCK_IO_LIMIT_READ]
            || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
            || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
   }
   
   static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                        bool is_write, int nb_sectors)
   {
       int64_t wait_time = -1;
   
       if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
           qemu_co_queue_wait(&bs->throttled_reqs);
       }
   
       /* In fact, we hope to keep each request's timing, in FIFO mode. The next
        * throttled requests will not be dequeued until the current request is
        * allowed to be serviced. So if the current request still exceeds the
        * limits, it will be inserted to the head. All requests followed it will
        * be still in throttled_reqs queue.
        */
   
       while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
           qemu_mod_timer(bs->block_timer,
                          wait_time + qemu_get_clock_ns(vm_clock));
           qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
       }
   
       qemu_co_queue_next(&bs->throttled_reqs);
   }
   
   /* check if the path starts with "<protocol>:" */
   static int path_has_protocol(const char *path)
 {  {
     const char *p;      const char *p;
   
 #ifdef _WIN32  #ifdef _WIN32
     /* specific case for names like: "\\.\d:" */      if (is_windows_drive(path) ||
     if (*path == '/' || *path == '\\')          is_windows_drive_prefix(path)) {
         return 1;          return 0;
       }
       p = path + strcspn(path, ":/\\");
   #else
       p = path + strcspn(path, ":/");
 #endif  #endif
     p = strchr(path, ':');  
     if (p)      return *p == ':';
         p++;  }
     else  
         p = path;  int path_is_absolute(const char *path)
   {
 #ifdef _WIN32  #ifdef _WIN32
     return (*p == '/' || *p == '\\');      /* specific case for names like: "\\.\d:" */
       if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
           return 1;
       }
       return (*path == '/' || *path == '\\');
 #else  #else
     return (*p == '/');      return (*path == '/');
 #endif  #endif
 }  }
   
Line 126  void path_combine(char *dest, int dest_s Line 270  void path_combine(char *dest, int dest_s
     }      }
 }  }
   
 void bdrv_register(BlockDriver *bdrv)  void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
 {  {
     if (!bdrv->bdrv_aio_readv) {      if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
         /* add AIO emulation layer */          pstrcpy(dest, sz, bs->backing_file);
         bdrv->bdrv_aio_readv = bdrv_aio_readv_em;      } else {
         bdrv->bdrv_aio_writev = bdrv_aio_writev_em;          path_combine(dest, sz, bs->filename, bs->backing_file);
     } else if (!bdrv->bdrv_read) {  
         /* add synchronous IO emulation layer */  
         bdrv->bdrv_read = bdrv_read_em;  
         bdrv->bdrv_write = bdrv_write_em;  
     }      }
   }
   
     if (!bdrv->bdrv_aio_flush)  void bdrv_register(BlockDriver *bdrv)
         bdrv->bdrv_aio_flush = bdrv_aio_flush_em;  {
       /* Block drivers without coroutine functions need emulation */
       if (!bdrv->bdrv_co_readv) {
           bdrv->bdrv_co_readv = bdrv_co_readv_em;
           bdrv->bdrv_co_writev = bdrv_co_writev_em;
   
           /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if
            * the block driver lacks aio we need to emulate that too.
            */
           if (!bdrv->bdrv_aio_readv) {
               /* add AIO emulation layer */
               bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
               bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
           }
       }
   
     bdrv->next = first_drv;      QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
     first_drv = bdrv;  
 }  }
   
 /* create a new block device (by default it is empty) */  /* create a new block device (by default it is empty) */
 BlockDriverState *bdrv_new(const char *device_name)  BlockDriverState *bdrv_new(const char *device_name)
 {  {
     BlockDriverState **pbs, *bs;      BlockDriverState *bs;
   
     bs = qemu_mallocz(sizeof(BlockDriverState));      bs = g_malloc0(sizeof(BlockDriverState));
     pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);      pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
     if (device_name[0] != '\0') {      if (device_name[0] != '\0') {
         /* insert at the end */          QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
         pbs = &bdrv_first;  
         while (*pbs != NULL)  
             pbs = &(*pbs)->next;  
         *pbs = bs;  
     }      }
       bdrv_iostatus_disable(bs);
     return bs;      return bs;
 }  }
   
 BlockDriver *bdrv_find_format(const char *format_name)  BlockDriver *bdrv_find_format(const char *format_name)
 {  {
     BlockDriver *drv1;      BlockDriver *drv1;
     for(drv1 = first_drv; drv1 != NULL; drv1 = drv1->next) {      QLIST_FOREACH(drv1, &bdrv_drivers, list) {
         if (!strcmp(drv1->format_name, format_name))          if (!strcmp(drv1->format_name, format_name)) {
             return drv1;              return drv1;
           }
     }      }
     return NULL;      return NULL;
 }  }
Line 196  BlockDriver *bdrv_find_whitelisted_forma Line 348  BlockDriver *bdrv_find_whitelisted_forma
     return drv && bdrv_is_whitelisted(drv) ? drv : NULL;      return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
 }  }
   
   typedef struct CreateCo {
       BlockDriver *drv;
       char *filename;
       QEMUOptionParameter *options;
       int ret;
   } CreateCo;
   
   static void coroutine_fn bdrv_create_co_entry(void *opaque)
   {
       CreateCo *cco = opaque;
       assert(cco->drv);
   
       cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
   }
   
 int bdrv_create(BlockDriver *drv, const char* filename,  int bdrv_create(BlockDriver *drv, const char* filename,
     QEMUOptionParameter *options)      QEMUOptionParameter *options)
 {  {
     if (!drv->bdrv_create)      int ret;
   
       Coroutine *co;
       CreateCo cco = {
           .drv = drv,
           .filename = g_strdup(filename),
           .options = options,
           .ret = NOT_DONE,
       };
   
       if (!drv->bdrv_create) {
         return -ENOTSUP;          return -ENOTSUP;
       }
   
     return drv->bdrv_create(filename, options);      if (qemu_in_coroutine()) {
           /* Fast-path if already in coroutine context */
           bdrv_create_co_entry(&cco);
       } else {
           co = qemu_coroutine_create(bdrv_create_co_entry);
           qemu_coroutine_enter(co, &cco);
           while (cco.ret == NOT_DONE) {
               qemu_aio_wait();
           }
       }
   
       ret = cco.ret;
       g_free(cco.filename);
   
       return ret;
 }  }
   
 #ifdef _WIN32  int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
 void get_tmp_filename(char *filename, int size)  
 {  {
     char temp_dir[MAX_PATH];      BlockDriver *drv;
   
       drv = bdrv_find_protocol(filename);
       if (drv == NULL) {
           return -ENOENT;
       }
   
     GetTempPath(MAX_PATH, temp_dir);      return bdrv_create(drv, filename, options);
     GetTempFileName(temp_dir, "qem", 0, filename);  
 }  }
 #else  
 void get_tmp_filename(char *filename, int size)  /*
    * Create a uniquely-named empty temporary file.
    * Return 0 upon success, otherwise a negative errno value.
    */
   int get_tmp_filename(char *filename, int size)
 {  {
   #ifdef _WIN32
       char temp_dir[MAX_PATH];
       /* GetTempFileName requires that its output buffer (4th param)
          have length MAX_PATH or greater.  */
       assert(size >= MAX_PATH);
       return (GetTempPath(MAX_PATH, temp_dir)
               && GetTempFileName(temp_dir, "qem", 0, filename)
               ? 0 : -GetLastError());
   #else
     int fd;      int fd;
     const char *tmpdir;      const char *tmpdir;
     /* XXX: race condition possible */  
     tmpdir = getenv("TMPDIR");      tmpdir = getenv("TMPDIR");
     if (!tmpdir)      if (!tmpdir)
         tmpdir = "/tmp";          tmpdir = "/tmp";
     snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);      if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
           return -EOVERFLOW;
       }
     fd = mkstemp(filename);      fd = mkstemp(filename);
     close(fd);      if (fd < 0 || close(fd)) {
 }          return -errno;
 #endif      }
   
 #ifdef _WIN32  
 static int is_windows_drive_prefix(const char *filename)  
 {  
     return (((filename[0] >= 'a' && filename[0] <= 'z') ||  
              (filename[0] >= 'A' && filename[0] <= 'Z')) &&  
             filename[1] == ':');  
 }  
   
 int is_windows_drive(const char *filename)  
 {  
     if (is_windows_drive_prefix(filename) &&  
         filename[2] == '\0')  
         return 1;  
     if (strstart(filename, "\\\\.\\", NULL) ||  
         strstart(filename, "//./", NULL))  
         return 1;  
     return 0;      return 0;
 }  
 #endif  #endif
   
 static BlockDriver *find_protocol(const char *filename)  
 {  
     BlockDriver *drv1;  
     char protocol[128];  
     int len;  
     const char *p;  
   
 #ifdef _WIN32  
     if (is_windows_drive(filename) ||  
         is_windows_drive_prefix(filename))  
         return bdrv_find_format("raw");  
 #endif  
     p = strchr(filename, ':');  
     if (!p)  
         return bdrv_find_format("raw");  
     len = p - filename;  
     if (len > sizeof(protocol) - 1)  
         len = sizeof(protocol) - 1;  
     memcpy(protocol, filename, len);  
     protocol[len] = '\0';  
     for(drv1 = first_drv; drv1 != NULL; drv1 = drv1->next) {  
         if (drv1->protocol_name &&  
             !strcmp(drv1->protocol_name, protocol))  
             return drv1;  
     }  
     return NULL;  
 }  }
   
 /*  /*
Line 285  static BlockDriver *find_hdev_driver(con Line 449  static BlockDriver *find_hdev_driver(con
     int score_max = 0, score;      int score_max = 0, score;
     BlockDriver *drv = NULL, *d;      BlockDriver *drv = NULL, *d;
   
     for (d = first_drv; d; d = d->next) {      QLIST_FOREACH(d, &bdrv_drivers, list) {
         if (d->bdrv_probe_device) {          if (d->bdrv_probe_device) {
             score = d->bdrv_probe_device(filename);              score = d->bdrv_probe_device(filename);
             if (score > score_max) {              if (score > score_max) {
Line 298  static BlockDriver *find_hdev_driver(con Line 462  static BlockDriver *find_hdev_driver(con
     return drv;      return drv;
 }  }
   
 static BlockDriver *find_image_format(const char *filename)  BlockDriver *bdrv_find_protocol(const char *filename)
   {
       BlockDriver *drv1;
       char protocol[128];
       int len;
       const char *p;
   
       /* TODO Drivers without bdrv_file_open must be specified explicitly */
   
       /*
        * XXX(hch): we really should not let host device detection
        * override an explicit protocol specification, but moving this
        * later breaks access to device names with colons in them.
        * Thanks to the brain-dead persistent naming schemes on udev-
        * based Linux systems those actually are quite common.
        */
       drv1 = find_hdev_driver(filename);
       if (drv1) {
           return drv1;
       }
   
       if (!path_has_protocol(filename)) {
           return bdrv_find_format("file");
       }
       p = strchr(filename, ':');
       assert(p != NULL);
       len = p - filename;
       if (len > sizeof(protocol) - 1)
           len = sizeof(protocol) - 1;
       memcpy(protocol, filename, len);
       protocol[len] = '\0';
       QLIST_FOREACH(drv1, &bdrv_drivers, list) {
           if (drv1->protocol_name &&
               !strcmp(drv1->protocol_name, protocol)) {
               return drv1;
           }
       }
       return NULL;
   }
   
   static int find_image_format(const char *filename, BlockDriver **pdrv)
 {  {
     int ret, score, score_max;      int ret, score, score_max;
     BlockDriver *drv1, *drv;      BlockDriver *drv1, *drv;
     uint8_t buf[2048];      uint8_t buf[2048];
     BlockDriverState *bs;      BlockDriverState *bs;
   
     drv = find_protocol(filename);      ret = bdrv_file_open(&bs, filename, 0);
     /* no need to test disk image formats for vvfat */      if (ret < 0) {
     if (drv && strcmp(drv->format_name, "vvfat") == 0)          *pdrv = NULL;
         return drv;          return ret;
       }
   
       /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
       if (bs->sg || !bdrv_is_inserted(bs)) {
           bdrv_delete(bs);
           drv = bdrv_find_format("raw");
           if (!drv) {
               ret = -ENOENT;
           }
           *pdrv = drv;
           return ret;
       }
   
     ret = bdrv_file_open(&bs, filename, BDRV_O_RDONLY);  
     if (ret < 0)  
         return NULL;  
     ret = bdrv_pread(bs, 0, buf, sizeof(buf));      ret = bdrv_pread(bs, 0, buf, sizeof(buf));
     bdrv_delete(bs);      bdrv_delete(bs);
     if (ret < 0) {      if (ret < 0) {
         return NULL;          *pdrv = NULL;
           return ret;
     }      }
   
     score_max = 0;      score_max = 0;
     for(drv1 = first_drv; drv1 != NULL; drv1 = drv1->next) {      drv = NULL;
       QLIST_FOREACH(drv1, &bdrv_drivers, list) {
         if (drv1->bdrv_probe) {          if (drv1->bdrv_probe) {
             score = drv1->bdrv_probe(buf, ret, filename);              score = drv1->bdrv_probe(buf, ret, filename);
             if (score > score_max) {              if (score > score_max) {
Line 329  static BlockDriver *find_image_format(co Line 544  static BlockDriver *find_image_format(co
             }              }
         }          }
     }      }
     return drv;      if (!drv) {
           ret = -ENOENT;
       }
       *pdrv = drv;
       return ret;
 }  }
   
 int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)  /**
    * Set the current 'total_sectors' value
    */
   static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
 {  {
     BlockDriverState *bs;      BlockDriver *drv = bs->drv;
     int ret;  
   
     bs = bdrv_new("");      /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
     ret = bdrv_open2(bs, filename, flags | BDRV_O_FILE, NULL);      if (bs->sg)
     if (ret < 0) {          return 0;
         bdrv_delete(bs);  
         return ret;      /* query actual device if possible, otherwise just trust the hint */
       if (drv->bdrv_getlength) {
           int64_t length = drv->bdrv_getlength(bs);
           if (length < 0) {
               return length;
           }
           hint = length >> BDRV_SECTOR_BITS;
     }      }
     bs->growable = 1;  
     *pbs = bs;      bs->total_sectors = hint;
     return 0;      return 0;
 }  }
   
 int bdrv_open(BlockDriverState *bs, const char *filename, int flags)  /**
    * Set open flags for a given cache mode
    *
    * Return 0 on success, -1 if the cache mode was invalid.
    */
   int bdrv_parse_cache_flags(const char *mode, int *flags)
 {  {
     return bdrv_open2(bs, filename, flags, NULL);      *flags &= ~BDRV_O_CACHE_MASK;
   
       if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
           *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
       } else if (!strcmp(mode, "directsync")) {
           *flags |= BDRV_O_NOCACHE;
       } else if (!strcmp(mode, "writeback")) {
           *flags |= BDRV_O_CACHE_WB;
       } else if (!strcmp(mode, "unsafe")) {
           *flags |= BDRV_O_CACHE_WB;
           *flags |= BDRV_O_NO_FLUSH;
       } else if (!strcmp(mode, "writethrough")) {
           /* this is the default */
       } else {
           return -1;
       }
   
       return 0;
 }  }
   
 int bdrv_open2(BlockDriverState *bs, const char *filename, int flags,  /**
                BlockDriver *drv)   * The copy-on-read flag is actually a reference count so multiple users may
    * use the feature without worrying about clobbering its previous state.
    * Copy-on-read stays enabled until all users have called to disable it.
    */
   void bdrv_enable_copy_on_read(BlockDriverState *bs)
 {  {
     int ret, open_flags, try_rw;      bs->copy_on_read++;
     char tmp_filename[PATH_MAX];  }
     char backing_filename[PATH_MAX];  
   
     bs->is_temporary = 0;  void bdrv_disable_copy_on_read(BlockDriverState *bs)
     bs->encrypted = 0;  {
     bs->valid_key = 0;      assert(bs->copy_on_read > 0);
     /* buffer_alignment defaulted to 512, drivers can change this value */      bs->copy_on_read--;
     bs->buffer_alignment = 512;  }
   
     if (flags & BDRV_O_SNAPSHOT) {  /*
         BlockDriverState *bs1;   * Common part for opening disk images and files
         int64_t total_size;   */
         int is_protocol = 0;  static int bdrv_open_common(BlockDriverState *bs, const char *filename,
         BlockDriver *bdrv_qcow2;      int flags, BlockDriver *drv)
         QEMUOptionParameter *options;  {
       int ret, open_flags;
   
         /* if snapshot, we create a temporary backing file and open it      assert(drv != NULL);
            instead of opening 'filename' directly */      assert(bs->file == NULL);
   
         /* if there is a backing file, use it */      trace_bdrv_open_common(bs, filename, flags, drv->format_name);
         bs1 = bdrv_new("");  
         ret = bdrv_open2(bs1, filename, 0, drv);  
         if (ret < 0) {  
             bdrv_delete(bs1);  
             return ret;  
         }  
         total_size = bdrv_getlength(bs1) >> BDRV_SECTOR_BITS;  
   
         if (bs1->drv && bs1->drv->protocol_name)      bs->open_flags = flags;
             is_protocol = 1;      bs->buffer_alignment = 512;
   
         bdrv_delete(bs1);      assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
       if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
           bdrv_enable_copy_on_read(bs);
       }
   
         get_tmp_filename(tmp_filename, sizeof(tmp_filename));      pstrcpy(bs->filename, sizeof(bs->filename), filename);
   
         /* Real path is meaningless for protocols */      if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
         if (is_protocol)          return -ENOTSUP;
             snprintf(backing_filename, sizeof(backing_filename),      }
                      "%s", filename);  
         else  
             realpath(filename, backing_filename);  
   
         bdrv_qcow2 = bdrv_find_format("qcow2");      bs->drv = drv;
         options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);      bs->opaque = g_malloc0(drv->instance_size);
   
         set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size * 512);      bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
   
       /*
        * Clear flags that are internal to the block layer before opening the
        * image.
        */
       open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
   
       /*
        * Snapshots should be writable.
        */
       if (bs->is_temporary) {
           open_flags |= BDRV_O_RDWR;
       }
   
       bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);
   
       /* Open the image, either directly or using a protocol */
       if (drv->bdrv_file_open) {
           ret = drv->bdrv_file_open(bs, filename, open_flags);
       } else {
           ret = bdrv_file_open(&bs->file, filename, open_flags);
           if (ret >= 0) {
               ret = drv->bdrv_open(bs, open_flags);
           }
       }
   
       if (ret < 0) {
           goto free_and_fail;
       }
   
       ret = refresh_total_sectors(bs, bs->total_sectors);
       if (ret < 0) {
           goto free_and_fail;
       }
   
   #ifndef _WIN32
       if (bs->is_temporary) {
           unlink(filename);
       }
   #endif
       return 0;
   
   free_and_fail:
       if (bs->file) {
           bdrv_delete(bs->file);
           bs->file = NULL;
       }
       g_free(bs->opaque);
       bs->opaque = NULL;
       bs->drv = NULL;
       return ret;
   }
   
   /*
    * Opens a file using a protocol (file, host_device, nbd, ...)
    */
   int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
   {
       BlockDriverState *bs;
       BlockDriver *drv;
       int ret;
   
       drv = bdrv_find_protocol(filename);
       if (!drv) {
           return -ENOENT;
       }
   
       bs = bdrv_new("");
       ret = bdrv_open_common(bs, filename, flags, drv);
       if (ret < 0) {
           bdrv_delete(bs);
           return ret;
       }
       bs->growable = 1;
       *pbs = bs;
       return 0;
   }
   
   /*
    * Opens a disk image (raw, qcow2, vmdk, ...)
    */
   int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
                 BlockDriver *drv)
   {
       int ret;
       char tmp_filename[PATH_MAX];
   
       if (flags & BDRV_O_SNAPSHOT) {
           BlockDriverState *bs1;
           int64_t total_size;
           int is_protocol = 0;
           BlockDriver *bdrv_qcow2;
           QEMUOptionParameter *options;
           char backing_filename[PATH_MAX];
   
           /* if snapshot, we create a temporary backing file and open it
              instead of opening 'filename' directly */
   
           /* if there is a backing file, use it */
           bs1 = bdrv_new("");
           ret = bdrv_open(bs1, filename, 0, drv);
           if (ret < 0) {
               bdrv_delete(bs1);
               return ret;
           }
           total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
   
           if (bs1->drv && bs1->drv->protocol_name)
               is_protocol = 1;
   
           bdrv_delete(bs1);
   
           ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
           if (ret < 0) {
               return ret;
           }
   
           /* Real path is meaningless for protocols */
           if (is_protocol)
               snprintf(backing_filename, sizeof(backing_filename),
                        "%s", filename);
           else if (!realpath(filename, backing_filename))
               return -errno;
   
           bdrv_qcow2 = bdrv_find_format("qcow2");
           options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);
   
           set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
         set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);          set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
         if (drv) {          if (drv) {
             set_option_parameter(options, BLOCK_OPT_BACKING_FMT,              set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
Line 410  int bdrv_open2(BlockDriverState *bs, con Line 784  int bdrv_open2(BlockDriverState *bs, con
         }          }
   
         ret = bdrv_create(bdrv_qcow2, tmp_filename, options);          ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
           free_option_parameters(options);
         if (ret < 0) {          if (ret < 0) {
             return ret;              return ret;
         }          }
Line 419  int bdrv_open2(BlockDriverState *bs, con Line 794  int bdrv_open2(BlockDriverState *bs, con
         bs->is_temporary = 1;          bs->is_temporary = 1;
     }      }
   
     pstrcpy(bs->filename, sizeof(bs->filename), filename);      /* Find the right image format driver */
     if (flags & BDRV_O_FILE) {      if (!drv) {
         drv = find_protocol(filename);          ret = find_image_format(filename, &drv);
     } else if (!drv) {  
         drv = find_hdev_driver(filename);  
         if (!drv) {  
             drv = find_image_format(filename);  
         }  
     }      }
   
     if (!drv) {      if (!drv) {
         ret = -ENOENT;  
         goto unlink_and_fail;          goto unlink_and_fail;
     }      }
     bs->drv = drv;  
     bs->opaque = qemu_mallocz(drv->instance_size);  
   
     /*      /* Open the image */
      * Yes, BDRV_O_NOCACHE aka O_DIRECT means we have to present a      ret = bdrv_open_common(bs, filename, flags, drv);
      * write cache to the guest.  We do need the fdatasync to flush      if (ret < 0) {
      * out transactions for block allocations, and we maybe have a          goto unlink_and_fail;
      * volatile write cache in our backing device to deal with.      }
      */  
     if (flags & (BDRV_O_CACHE_WB|BDRV_O_NOCACHE))  
         bs->enable_write_cache = 1;  
   
     /* Note: for compatibility, we open disk image files as RDWR, and      /* If there is a backing file, use it */
        RDONLY as fallback */      if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
     try_rw = !bs->read_only || bs->is_temporary;          char backing_filename[PATH_MAX];
     if (!(flags & BDRV_O_FILE))          int back_flags;
         open_flags = (try_rw ? BDRV_O_RDWR : 0) |          BlockDriver *back_drv = NULL;
             (flags & (BDRV_O_CACHE_MASK|BDRV_O_NATIVE_AIO));  
     else  
         open_flags = flags & ~(BDRV_O_FILE | BDRV_O_SNAPSHOT);  
   
     bs->open_flags = open_flags;          bs->backing_hd = bdrv_new("");
     if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv))          bdrv_get_full_backing_filename(bs, backing_filename,
         ret = -ENOTSUP;                                         sizeof(backing_filename));
     else  
         ret = drv->bdrv_open(bs, filename, open_flags);          if (bs->backing_format[0] != '\0') {
     if ((ret == -EACCES || ret == -EPERM) && !(flags & BDRV_O_FILE)) {              back_drv = bdrv_find_format(bs->backing_format);
         ret = drv->bdrv_open(bs, filename, open_flags & ~BDRV_O_RDWR);          }
         bs->read_only = 1;  
           /* backing files always opened read-only */
           back_flags =
               flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
   
           ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
           if (ret < 0) {
               bdrv_close(bs);
               return ret;
           }
           if (bs->is_temporary) {
               bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
           } else {
               /* base image inherits from "parent" */
               bs->backing_hd->keep_read_only = bs->keep_read_only;
           }
     }      }
     if (ret < 0) {  
         qemu_free(bs->opaque);      if (!bdrv_key_required(bs)) {
         bs->opaque = NULL;          bdrv_dev_change_media_cb(bs, true);
         bs->drv = NULL;  
     unlink_and_fail:  
         if (bs->is_temporary)  
             unlink(filename);  
         return ret;  
     }      }
     if (drv->bdrv_getlength) {  
         bs->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;      /* throttling disk I/O limits */
       if (bs->io_limits_enabled) {
           bdrv_io_limits_enable(bs);
     }      }
 #ifndef _WIN32  
       return 0;
   
   unlink_and_fail:
     if (bs->is_temporary) {      if (bs->is_temporary) {
         unlink(filename);          unlink(filename);
     }      }
       return ret;
   }
   
   void bdrv_close(BlockDriverState *bs)
   {
       bdrv_flush(bs);
       if (bs->drv) {
           if (bs->job) {
               block_job_cancel_sync(bs->job);
           }
           bdrv_drain_all();
   
           if (bs == bs_snapshots) {
               bs_snapshots = NULL;
           }
           if (bs->backing_hd) {
               bdrv_delete(bs->backing_hd);
               bs->backing_hd = NULL;
           }
           bs->drv->bdrv_close(bs);
           g_free(bs->opaque);
   #ifdef _WIN32
           if (bs->is_temporary) {
               unlink(bs->filename);
           }
 #endif  #endif
     if (bs->backing_file[0] != '\0') {          bs->opaque = NULL;
         /* if there is a backing file, use it */          bs->drv = NULL;
         BlockDriver *back_drv = NULL;          bs->copy_on_read = 0;
         bs->backing_hd = bdrv_new("");          bs->backing_file[0] = '\0';
         /* pass on read_only property to the backing_hd */          bs->backing_format[0] = '\0';
         bs->backing_hd->read_only = bs->read_only;          bs->total_sectors = 0;
         path_combine(backing_filename, sizeof(backing_filename),          bs->encrypted = 0;
                      filename, bs->backing_file);          bs->valid_key = 0;
         if (bs->backing_format[0] != '\0')          bs->sg = 0;
             back_drv = bdrv_find_format(bs->backing_format);          bs->growable = 0;
         ret = bdrv_open2(bs->backing_hd, backing_filename, open_flags,  
                          back_drv);          if (bs->file != NULL) {
               bdrv_delete(bs->file);
               bs->file = NULL;
           }
   
           bdrv_dev_change_media_cb(bs, false);
       }
   
       /*throttling disk I/O limits*/
       if (bs->io_limits_enabled) {
           bdrv_io_limits_disable(bs);
       }
   }
   
   void bdrv_close_all(void)
   {
       BlockDriverState *bs;
   
       QTAILQ_FOREACH(bs, &bdrv_states, list) {
           bdrv_close(bs);
       }
   }
   
    /*
     * Wait for pending requests to complete across all BlockDriverStates
     *
     * This function does not flush data to disk, use bdrv_flush_all() for that
     * after calling this function.
     *
     * Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a coroutine
     * can be arbitrarily complex and a constant flow of I/O can come until the
     * coroutine is complete.  Because of this, it is not possible to have a
     * function to drain a single device's I/O queue.
     */
    void bdrv_drain_all(void)
    {
        BlockDriverState *bs;
        bool busy;

        do {
            /* NOTE(review): busy is presumably true while AIO activity remains
             * pending -- confirm against qemu_aio_wait()'s contract */
            busy = qemu_aio_wait();

            /* FIXME: We do not have timer support here, so this is effectively
             * a busy wait.
             */
            QTAILQ_FOREACH(bs, &bdrv_states, list) {
                if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                    /* kick throttled requests so they can run and complete */
                    qemu_co_queue_restart_all(&bs->throttled_reqs);
                    busy = true;
                }
            }
        } while (busy);

        /* If requests are still pending there is a bug somewhere */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            assert(QLIST_EMPTY(&bs->tracked_requests));
            assert(qemu_co_queue_empty(&bs->throttled_reqs));
        }
    }
   
   /* make a BlockDriverState anonymous by removing from bdrv_state list.
      Also, NULL terminate the device_name to prevent double remove */
   void bdrv_make_anon(BlockDriverState *bs)
   {
       if (bs->device_name[0] != '\0') {
           QTAILQ_REMOVE(&bdrv_states, bs, list);
       }
       bs->device_name[0] = '\0';
   }
   
   static void bdrv_rebind(BlockDriverState *bs)
   {
       if (bs->drv && bs->drv->bdrv_rebind) {
           bs->drv->bdrv_rebind(bs);
       }
   }
   
    /*
     * Add new bs contents at the top of an image chain while the chain is
     * live, while keeping required fields on the top layer.
     *
     * This will modify the BlockDriverState fields, and swap contents
     * between bs_new and bs_top. Both bs_new and bs_top are modified.
     *
     * bs_new is required to be anonymous.
     *
     * This function does not create any image files.
     */
    void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
    {
        BlockDriverState tmp;

        /* bs_new must be anonymous */
        assert(bs_new->device_name[0] == '\0');

        /* Start from a full copy of the new node; the fields that must stay
         * with the top of the chain are grafted onto it below. */
        tmp = *bs_new;

        /* there are some fields that need to stay on the top layer: */
        tmp.open_flags        = bs_top->open_flags;

        /* dev info */
        tmp.dev_ops           = bs_top->dev_ops;
        tmp.dev_opaque        = bs_top->dev_opaque;
        tmp.dev               = bs_top->dev;
        tmp.buffer_alignment  = bs_top->buffer_alignment;
        tmp.copy_on_read      = bs_top->copy_on_read;

        /* i/o timing parameters */
        tmp.slice_time        = bs_top->slice_time;
        tmp.slice_start       = bs_top->slice_start;
        tmp.slice_end         = bs_top->slice_end;
        tmp.io_limits         = bs_top->io_limits;
        tmp.io_base           = bs_top->io_base;
        tmp.throttled_reqs    = bs_top->throttled_reqs;
        tmp.block_timer       = bs_top->block_timer;
        tmp.io_limits_enabled = bs_top->io_limits_enabled;

        /* geometry */
        tmp.cyls              = bs_top->cyls;
        tmp.heads             = bs_top->heads;
        tmp.secs              = bs_top->secs;
        tmp.translation       = bs_top->translation;

        /* r/w error */
        tmp.on_read_error     = bs_top->on_read_error;
        tmp.on_write_error    = bs_top->on_write_error;

        /* i/o status */
        tmp.iostatus_enabled  = bs_top->iostatus_enabled;
        tmp.iostatus          = bs_top->iostatus;

        /* keep the same entry in bdrv_states */
        pstrcpy(tmp.device_name, sizeof(tmp.device_name), bs_top->device_name);
        tmp.list = bs_top->list;

        /* The contents of 'tmp' will become bs_top, as we are
         * swapping bs_new and bs_top contents. */
        tmp.backing_hd = bs_new;
        pstrcpy(tmp.backing_file, sizeof(tmp.backing_file), bs_top->filename);
        bdrv_get_format(bs_top, tmp.backing_format, sizeof(tmp.backing_format));

        /* swap contents of the fixed new bs and the current top */
        *bs_new = *bs_top;
        *bs_top = tmp;

        /* device_name[] was carried over from the old bs_top.  bs_new
         * shouldn't be in bdrv_states, so we need to make device_name[]
         * reflect the anonymity of bs_new
         */
        bs_new->device_name[0] = '\0';

        /* clear the copied fields in the new backing file */
        bdrv_detach_dev(bs_new, bs_new->dev);

        qemu_co_queue_init(&bs_new->throttled_reqs);
        memset(&bs_new->io_base,   0, sizeof(bs_new->io_base));
        memset(&bs_new->io_limits, 0, sizeof(bs_new->io_limits));
        bdrv_iostatus_disable(bs_new);

        /* we don't use bdrv_io_limits_disable() for this, because we don't want
         * to affect or delete the block_timer, as it has been moved to bs_top */
        bs_new->io_limits_enabled = false;
        bs_new->block_timer       = NULL;
        bs_new->slice_time        = 0;
        bs_new->slice_start       = 0;
        bs_new->slice_end         = 0;

        /* let each driver react to the identity swap of its node */
        bdrv_rebind(bs_new);
        bdrv_rebind(bs_top);
    }
   
    /* Destroy a BlockDriverState: unregister it, close it and free it. */
    void bdrv_delete(BlockDriverState *bs)
    {
        /* Callers must already have detached the device model, cancelled any
         * block job and dropped any in-use reference. */
        assert(!bs->dev);
        assert(!bs->job);
        assert(!bs->in_use);

        /* remove from list, if necessary */
        bdrv_make_anon(bs);

        bdrv_close(bs);

        /* bdrv_close() clears bs_snapshots when it pointed at bs */
        assert(bs != bs_snapshots);
        g_free(bs);
    }
   
   int bdrv_attach_dev(BlockDriverState *bs, void *dev)
   /* TODO change to DeviceState *dev when all users are qdevified */
   {
       if (bs->dev) {
           return -EBUSY;
       }
       bs->dev = dev;
       bdrv_iostatus_reset(bs);
       return 0;
   }
   
   /* TODO qdevified devices don't use this, remove when devices are qdevified */
   void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
   {
       if (bdrv_attach_dev(bs, dev) < 0) {
           abort();
       }
   }
   
    /* Detach the device model from @bs, clearing its callbacks too. */
    void bdrv_detach_dev(BlockDriverState *bs, void *dev)
    /* TODO change to DeviceState *dev when all users are qdevified */
    {
        /* only the currently attached device may detach itself */
        assert(bs->dev == dev);
        bs->dev = NULL;
        bs->dev_ops = NULL;
        bs->dev_opaque = NULL;
        /* restore 512-byte alignment now no device constrains guest buffers
         * (NOTE(review): presumably the default -- confirm against attach path) */
        bs->buffer_alignment = 512;
    }
   
    /* TODO change to return DeviceState * when all users are qdevified */
    /* Return the attached device model, or NULL when none is attached. */
    void *bdrv_get_attached_dev(BlockDriverState *bs)
    {
        return bs->dev;
    }
   
    /* Register the device model's callback table and its opaque state. */
    void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                          void *opaque)
    {
        bs->dev_ops = ops;
        bs->dev_opaque = opaque;
        /* NOTE(review): presumably a removable-media device must not remain
         * the cached snapshot target -- confirm against bs_snapshots users */
        if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
            bs_snapshots = NULL;
        }
    }
   
    /* Emit a QMP BLOCK_IO_ERROR event for @bdrv, describing how the error is
     * handled (@action) and whether the failing operation was a read. */
    void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                                   BlockQMPEventAction action, int is_read)
    {
        QObject *data;
        const char *action_str;

        switch (action) {
        case BDRV_ACTION_REPORT:
            action_str = "report";
            break;
        case BDRV_ACTION_IGNORE:
            action_str = "ignore";
            break;
        case BDRV_ACTION_STOP:
            action_str = "stop";
            break;
        default:
            /* every BlockQMPEventAction value must be handled above */
            abort();
        }

        data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                                  bdrv->device_name,
                                  action_str,
                                  is_read ? "read" : "write");
        monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);

        /* drop our reference to the event payload */
        qobject_decref(data);
    }
   
    /* Emit a QMP DEVICE_TRAY_MOVED event; @ejected reports whether the tray
     * is now open (true) or closed (false). */
    static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
    {
        QObject *data;

        data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                                  bdrv_get_device_name(bs), ejected);
        monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

        /* drop our reference to the event payload */
        qobject_decref(data);
    }
   
    /* Notify the device model of a medium change (@load: medium inserted) and
     * emit the implied tray events: an open event when the tray had been
     * closed, plus a close event when a medium is being loaded. */
    static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
    {
        if (bs->dev_ops && bs->dev_ops->change_media_cb) {
            /* sample tray state before the callback may change it */
            bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
            bs->dev_ops->change_media_cb(bs->dev_opaque, load);
            if (tray_was_closed) {
                /* tray open */
                bdrv_emit_qmp_eject_event(bs, true);
            }
            if (load) {
                /* tray close */
                bdrv_emit_qmp_eject_event(bs, false);
            }
        }
    }
   
   bool bdrv_dev_has_removable_media(BlockDriverState *bs)
   {
       return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
   }
   
   void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
   {
       if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
           bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
       }
   }
   
   bool bdrv_dev_is_tray_open(BlockDriverState *bs)
   {
       if (bs->dev_ops && bs->dev_ops->is_tray_open) {
           return bs->dev_ops->is_tray_open(bs->dev_opaque);
       }
       return false;
   }
   
   static void bdrv_dev_resize_cb(BlockDriverState *bs)
   {
       if (bs->dev_ops && bs->dev_ops->resize_cb) {
           bs->dev_ops->resize_cb(bs->dev_opaque);
       }
   }
   
   bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
   {
       if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
           return bs->dev_ops->is_medium_locked(bs->dev_opaque);
       }
       return false;
   }
   
   /*
    * Run consistency checks on an image
    *
    * Returns 0 if the check could be completed (it doesn't mean that the image is
    * free of errors) or -errno when an internal error occurred. The results of the
    * check are stored in res.
    */
   int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
   {
       if (bs->drv->bdrv_check == NULL) {
           return -ENOTSUP;
       }
   
       memset(res, 0, sizeof(*res));
       return bs->drv->bdrv_check(bs, res);
   }
   
   #define COMMIT_BUF_SECTORS 2048
   
   /* commit COW file into the raw image */
   int bdrv_commit(BlockDriverState *bs)
   {
       BlockDriver *drv = bs->drv;
       BlockDriver *backing_drv;
       int64_t sector, total_sectors;
       int n, ro, open_flags;
       int ret = 0, rw_ret = 0;
       uint8_t *buf;
       char filename[1024];
       BlockDriverState *bs_rw, *bs_ro;
   
       if (!drv)
           return -ENOMEDIUM;
       
       if (!bs->backing_hd) {
           return -ENOTSUP;
       }
   
       if (bs->backing_hd->keep_read_only) {
           return -EACCES;
       }
   
       if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
           return -EBUSY;
       }
   
       backing_drv = bs->backing_hd->drv;
       ro = bs->backing_hd->read_only;
       strncpy(filename, bs->backing_hd->filename, sizeof(filename));
       open_flags =  bs->backing_hd->open_flags;
   
       if (ro) {
           /* re-open as RW */
           bdrv_delete(bs->backing_hd);
           bs->backing_hd = NULL;
           bs_rw = bdrv_new("");
           rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
               backing_drv);
           if (rw_ret < 0) {
               bdrv_delete(bs_rw);
               /* try to re-open read-only */
               bs_ro = bdrv_new("");
               ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
                   backing_drv);
               if (ret < 0) {
                   bdrv_delete(bs_ro);
                   /* drive not functional anymore */
                   bs->drv = NULL;
                   return ret;
               }
               bs->backing_hd = bs_ro;
               return rw_ret;
           }
           bs->backing_hd = bs_rw;
       }
   
       total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
       buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
   
       for (sector = 0; sector < total_sectors; sector += n) {
           if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
   
               if (bdrv_read(bs, sector, buf, n) != 0) {
                   ret = -EIO;
                   goto ro_cleanup;
               }
   
               if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                   ret = -EIO;
                   goto ro_cleanup;
               }
           }
       }
   
       if (drv->bdrv_make_empty) {
           ret = drv->bdrv_make_empty(bs);
           bdrv_flush(bs);
       }
   
       /*
        * Make sure all data we wrote to the backing device is actually
        * stable on disk.
        */
       if (bs->backing_hd)
           bdrv_flush(bs->backing_hd);
   
   ro_cleanup:
       g_free(buf);
   
       if (ro) {
           /* re-open as RO */
           bdrv_delete(bs->backing_hd);
           bs->backing_hd = NULL;
           bs_ro = bdrv_new("");
           ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
               backing_drv);
         if (ret < 0) {          if (ret < 0) {
             bdrv_close(bs);              bdrv_delete(bs_ro);
               /* drive not functional anymore */
               bs->drv = NULL;
             return ret;              return ret;
         }          }
           bs->backing_hd = bs_ro;
           bs->backing_hd->keep_read_only = 0;
     }      }
   
     if (!bdrv_key_required(bs)) {      return ret;
         /* call the change callback */  
         bs->media_changed = 1;  
         if (bs->change_cb)  
             bs->change_cb(bs->change_opaque);  
     }  
     return 0;  
 }  }
   
 void bdrv_close(BlockDriverState *bs)  int bdrv_commit_all(void)
 {  {
     if (bs->drv) {      BlockDriverState *bs;
         if (bs->backing_hd)  
             bdrv_delete(bs->backing_hd);  
         bs->drv->bdrv_close(bs);  
         qemu_free(bs->opaque);  
 #ifdef _WIN32  
         if (bs->is_temporary) {  
             unlink(bs->filename);  
         }  
 #endif  
         bs->opaque = NULL;  
         bs->drv = NULL;  
   
         /* call the change callback */      QTAILQ_FOREACH(bs, &bdrv_states, list) {
         bs->media_changed = 1;          int ret = bdrv_commit(bs);
         if (bs->change_cb)          if (ret < 0) {
             bs->change_cb(bs->change_opaque);              return ret;
           }
     }      }
       return 0;
 }  }
   
 void bdrv_delete(BlockDriverState *bs)  struct BdrvTrackedRequest {
       BlockDriverState *bs;
       int64_t sector_num;
       int nb_sectors;
       bool is_write;
       QLIST_ENTRY(BdrvTrackedRequest) list;
       Coroutine *co; /* owner, used for deadlock detection */
       CoQueue wait_queue; /* coroutines blocked on this request */
   };
   
   /**
    * Remove an active request from the tracked requests list
    *
    * This function should be called when a tracked request is completing.
    */
   static void tracked_request_end(BdrvTrackedRequest *req)
 {  {
     BlockDriverState **pbs;      QLIST_REMOVE(req, list);
       qemu_co_queue_restart_all(&req->wait_queue);
   }
   
   /**
    * Add an active request to the tracked requests list
    */
   static void tracked_request_begin(BdrvTrackedRequest *req,
                                     BlockDriverState *bs,
                                     int64_t sector_num,
                                     int nb_sectors, bool is_write)
   {
       *req = (BdrvTrackedRequest){
           .bs = bs,
           .sector_num = sector_num,
           .nb_sectors = nb_sectors,
           .is_write = is_write,
           .co = qemu_coroutine_self(),
       };
   
     pbs = &bdrv_first;      qemu_co_queue_init(&req->wait_queue);
     while (*pbs != bs && *pbs != NULL)  
         pbs = &(*pbs)->next;  
     if (*pbs == bs)  
         *pbs = bs->next;  
   
     bdrv_close(bs);      QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
     qemu_free(bs);  
 }  }
   
 /*  /**
  * Run consistency checks on an image   * Round a region to cluster boundaries
  *  
  * Returns the number of errors or -errno when an internal error occurs  
  */   */
 int bdrv_check(BlockDriverState *bs)  static void round_to_clusters(BlockDriverState *bs,
 {                                int64_t sector_num, int nb_sectors,
     if (bs->drv->bdrv_check == NULL) {                                int64_t *cluster_sector_num,
         return -ENOTSUP;                                int *cluster_nb_sectors)
   {
       BlockDriverInfo bdi;
   
       if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
           *cluster_sector_num = sector_num;
           *cluster_nb_sectors = nb_sectors;
       } else {
           int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
           *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
           *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                               nb_sectors, c);
     }      }
   }
   
     return bs->drv->bdrv_check(bs);  static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                        int64_t sector_num, int nb_sectors) {
       /*        aaaa   bbbb */
       if (sector_num >= req->sector_num + req->nb_sectors) {
           return false;
       }
       /* bbbb   aaaa        */
       if (req->sector_num >= sector_num + nb_sectors) {
           return false;
       }
       return true;
 }  }
   
 /* commit COW file into the raw image */  static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
 int bdrv_commit(BlockDriverState *bs)          int64_t sector_num, int nb_sectors)
 {  {
     BlockDriver *drv = bs->drv;      BdrvTrackedRequest *req;
     int64_t i, total_sectors;      int64_t cluster_sector_num;
     int n, j;      int cluster_nb_sectors;
     unsigned char sector[512];      bool retry;
   
     if (!drv)      /* If we touch the same cluster it counts as an overlap.  This guarantees
         return -ENOMEDIUM;       * that allocating writes will be serialized and not race with each other
        * for the same cluster.  For example, in copy-on-read it ensures that the
        * CoR read and write operations are atomic and guest writes cannot
        * interleave between them.
        */
       round_to_clusters(bs, sector_num, nb_sectors,
                         &cluster_sector_num, &cluster_nb_sectors);
   
     if (bs->read_only) {      do {
         return -EACCES;          retry = false;
     }          QLIST_FOREACH(req, &bs->tracked_requests, list) {
               if (tracked_request_overlaps(req, cluster_sector_num,
                                            cluster_nb_sectors)) {
                   /* Hitting this means there was a reentrant request, for
                    * example, a block driver issuing nested requests.  This must
                    * never happen since it means deadlock.
                    */
                   assert(qemu_coroutine_self() != req->co);
   
     if (!bs->backing_hd) {                  qemu_co_queue_wait(&req->wait_queue);
         return -ENOTSUP;                  retry = true;
     }                  break;
               }
           }
       } while (retry);
   }
   
     total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;  /*
     for (i = 0; i < total_sectors;) {   * Return values:
         if (drv->bdrv_is_allocated(bs, i, 65536, &n)) {   * 0        - success
             for(j = 0; j < n; j++) {   * -EINVAL  - backing format specified, but no file
                 if (bdrv_read(bs, i, sector, 1) != 0) {   * -ENOSPC  - can't update the backing file because no space is left in the
                     return -EIO;   *            image file header
                 }   * -ENOTSUP - format driver doesn't support changing the backing file
    */
   int bdrv_change_backing_file(BlockDriverState *bs,
       const char *backing_file, const char *backing_fmt)
   {
       BlockDriver *drv = bs->drv;
       int ret;
   
                 if (bdrv_write(bs->backing_hd, i, sector, 1) != 0) {      /* Backing file format doesn't make sense without a backing file */
                     return -EIO;      if (backing_fmt && !backing_file) {
                 }          return -EINVAL;
                 i++;  
             }  
         } else {  
             i += n;  
         }  
     }      }
   
     if (drv->bdrv_make_empty)      if (drv->bdrv_change_backing_file != NULL) {
         return drv->bdrv_make_empty(bs);          ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
       } else {
           ret = -ENOTSUP;
       }
   
     return 0;      if (ret == 0) {
           pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
           pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
       }
       return ret;
 }  }
   
 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,  static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
Line 624  static int bdrv_check_byte_request(Block Line 1526  static int bdrv_check_byte_request(Block
 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,  static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors)                                int nb_sectors)
 {  {
     return bdrv_check_byte_request(bs, sector_num * 512, nb_sectors * 512);      return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE);
   }
   
    /* Parameter/result bundle used to emulate synchronous I/O on top of the
     * coroutine request path (see bdrv_rw_co). */
    typedef struct RwCo {
        BlockDriverState *bs;
        int64_t sector_num;
        int nb_sectors;
        QEMUIOVector *qiov;
        bool is_write;
        int ret;            /* completion status; NOT_DONE while in flight */
    } RwCo;

    /* Coroutine entry point: issue the read or write described by @opaque
     * (an RwCo) and store the result in rwco->ret.  The final 0 argument
     * passes no BdrvRequestFlags. */
    static void coroutine_fn bdrv_rw_co_entry(void *opaque)
    {
        RwCo *rwco = opaque;

        if (!rwco->is_write) {
            rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                         rwco->nb_sectors, rwco->qiov, 0);
        } else {
            rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                          rwco->nb_sectors, rwco->qiov, 0);
        }
    }
   
    /*
     * Process a synchronous request using coroutines
     *
     * Wraps @buf in a single-element QEMUIOVector and runs bdrv_rw_co_entry()
     * either inline (when already in coroutine context) or in a fresh
     * coroutine, waiting until the request completes.  Returns the request's
     * result (< 0 on error).
     */
    static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                          int nb_sectors, bool is_write)
    {
        QEMUIOVector qiov;
        struct iovec iov = {
            .iov_base = (void *)buf,
            .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
        };
        Coroutine *co;
        RwCo rwco = {
            .bs = bs,
            .sector_num = sector_num,
            .nb_sectors = nb_sectors,
            .qiov = &qiov,
            .is_write = is_write,
            .ret = NOT_DONE,
        };

        qemu_iovec_init_external(&qiov, &iov, 1);

        /**
         * In sync call context, when the vcpu is blocked, this throttling timer
         * will not fire; so the I/O throttling function has to be disabled here
         * if it has been enabled.
         */
        if (bs->io_limits_enabled) {
            fprintf(stderr, "Disabling I/O throttling on '%s' due "
                            "to synchronous I/O.\n", bdrv_get_device_name(bs));
            bdrv_io_limits_disable(bs);
        }

        if (qemu_in_coroutine()) {
            /* Fast-path if already in coroutine context */
            bdrv_rw_co_entry(&rwco);
        } else {
            co = qemu_coroutine_create(bdrv_rw_co_entry);
            qemu_coroutine_enter(co, &rwco);
            /* pump AIO completions until the coroutine stores its result */
            while (rwco.ret == NOT_DONE) {
                qemu_aio_wait();
            }
        }
        return rwco.ret;
    }
   
 /* return < 0 if error. See bdrv_write() for the return codes */  /* return < 0 if error. See bdrv_write() for the return codes */
 int bdrv_read(BlockDriverState *bs, int64_t sector_num,  int bdrv_read(BlockDriverState *bs, int64_t sector_num,
               uint8_t *buf, int nb_sectors)                uint8_t *buf, int nb_sectors)
 {  {
     BlockDriver *drv = bs->drv;      return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
   
     if (!drv)  
         return -ENOMEDIUM;  
     if (bdrv_check_request(bs, sector_num, nb_sectors))  
         return -EIO;  
   
     return drv->bdrv_read(bs, sector_num, buf, nb_sectors);  
 }  }
   
   #define BITS_PER_LONG  (sizeof(unsigned long) * 8)
   
 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,  static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int dirty)                               int nb_sectors, int dirty)
 {  {
Line 651  static void set_dirty_bitmap(BlockDriver Line 1618  static void set_dirty_bitmap(BlockDriver
     end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;      end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
   
     for (; start <= end; start++) {      for (; start <= end; start++) {
         idx = start / (sizeof(unsigned long) * 8);          idx = start / BITS_PER_LONG;
         bit = start % (sizeof(unsigned long) * 8);          bit = start % BITS_PER_LONG;
         val = bs->dirty_bitmap[idx];          val = bs->dirty_bitmap[idx];
         if (dirty) {          if (dirty) {
             val |= 1 << bit;              if (!(val & (1UL << bit))) {
                   bs->dirty_count++;
                   val |= 1UL << bit;
               }
         } else {          } else {
             val &= ~(1 << bit);              if (val & (1UL << bit)) {
                   bs->dirty_count--;
                   val &= ~(1UL << bit);
               }
         }          }
         bs->dirty_bitmap[idx] = val;          bs->dirty_bitmap[idx] = val;
     }      }
Line 672  static void set_dirty_bitmap(BlockDriver Line 1645  static void set_dirty_bitmap(BlockDriver
 int bdrv_write(BlockDriverState *bs, int64_t sector_num,  int bdrv_write(BlockDriverState *bs, int64_t sector_num,
                const uint8_t *buf, int nb_sectors)                 const uint8_t *buf, int nb_sectors)
 {  {
     BlockDriver *drv = bs->drv;      return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
     if (!bs->drv)  
         return -ENOMEDIUM;  
     if (bs->read_only)  
         return -EACCES;  
     if (bdrv_check_request(bs, sector_num, nb_sectors))  
         return -EIO;  
   
     if (bs->dirty_bitmap) {  
         set_dirty_bitmap(bs, sector_num, nb_sectors, 1);  
     }  
   
     return drv->bdrv_write(bs, sector_num, buf, nb_sectors);  
 }  }
   
 int bdrv_pread(BlockDriverState *bs, int64_t offset,  int bdrv_pread(BlockDriverState *bs, int64_t offset,
Line 732  int bdrv_pread(BlockDriverState *bs, int Line 1693  int bdrv_pread(BlockDriverState *bs, int
     return count1;      return count1;
 }  }
   
 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,  int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                 const void *buf, int count1)                  const void *buf, int count1)
   {
       uint8_t tmp_buf[BDRV_SECTOR_SIZE];
       int len, nb_sectors, count;
       int64_t sector_num;
       int ret;
   
       count = count1;
       /* first write to align to sector start */
       len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
       if (len > count)
           len = count;
       sector_num = offset >> BDRV_SECTOR_BITS;
       if (len > 0) {
           if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
               return ret;
           memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
           if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
               return ret;
           count -= len;
           if (count == 0)
               return count1;
           sector_num++;
           buf += len;
       }
   
       /* write the sectors "in place" */
       nb_sectors = count >> BDRV_SECTOR_BITS;
       if (nb_sectors > 0) {
           if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
               return ret;
           sector_num += nb_sectors;
           len = nb_sectors << BDRV_SECTOR_BITS;
           buf += len;
           count -= len;
       }
   
       /* add data from the last sector */
       if (count > 0) {
           if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
               return ret;
           memcpy(tmp_buf, buf, count);
           if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
               return ret;
       }
       return count1;
   }
   
   /*
    * Writes to the file and ensures that no writes are reordered across this
    * request (acts as a barrier)
    *
    * Returns 0 on success, -errno in error cases.
    */
   int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
       const void *buf, int count)
   {
       int ret;
   
       ret = bdrv_pwrite(bs, offset, buf, count);
       if (ret < 0) {
           return ret;
       }
   
       /* No flush needed for cache modes that use O_DSYNC */
       if ((bs->open_flags & BDRV_O_CACHE_WB) != 0) {
           bdrv_flush(bs);
       }
   
       return 0;
   }
   
   static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
           int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
   {
       /* Perform I/O through a temporary buffer so that users who scribble over
        * their read buffer while the operation is in progress do not end up
        * modifying the image file.  This is critical for zero-copy guest I/O
        * where anything might happen inside guest memory.
        */
       void *bounce_buffer;
   
       BlockDriver *drv = bs->drv;
       struct iovec iov;
       QEMUIOVector bounce_qiov;
       int64_t cluster_sector_num;
       int cluster_nb_sectors;
       size_t skip_bytes;
       int ret;
   
       /* Cover entire cluster so no additional backing file I/O is required when
        * allocating cluster in the image file.
        */
       round_to_clusters(bs, sector_num, nb_sectors,
                         &cluster_sector_num, &cluster_nb_sectors);
   
       trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                      cluster_sector_num, cluster_nb_sectors);
   
       iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
       iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
       qemu_iovec_init_external(&bounce_qiov, &iov, 1);
   
       ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                                &bounce_qiov);
       if (ret < 0) {
           goto err;
       }
   
       if (drv->bdrv_co_write_zeroes &&
           buffer_is_zero(bounce_buffer, iov.iov_len)) {
           ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                         cluster_nb_sectors);
       } else {
           ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                     &bounce_qiov);
       }
   
       if (ret < 0) {
           /* It might be okay to ignore write errors for guest requests.  If this
            * is a deliberate copy-on-read then we don't want to ignore the error.
            * Simply report it in all cases.
            */
           goto err;
       }
   
       skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
       qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
                              nb_sectors * BDRV_SECTOR_SIZE);
   
   err:
       qemu_vfree(bounce_buffer);
       return ret;
   }
   
   /*
    * Handle a read request in coroutine context
    */
   static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
       BdrvRequestFlags flags)
   {
       BlockDriver *drv = bs->drv;
       BdrvTrackedRequest req;
       int ret;
   
       if (!drv) {
           return -ENOMEDIUM;
       }
       if (bdrv_check_request(bs, sector_num, nb_sectors)) {
           return -EIO;
       }
   
       /* throttling disk read I/O */
       if (bs->io_limits_enabled) {
           bdrv_io_limits_intercept(bs, false, nb_sectors);
       }
   
       if (bs->copy_on_read) {
           flags |= BDRV_REQ_COPY_ON_READ;
       }
       if (flags & BDRV_REQ_COPY_ON_READ) {
           bs->copy_on_read_in_flight++;
       }
   
       if (bs->copy_on_read_in_flight) {
           wait_for_overlapping_requests(bs, sector_num, nb_sectors);
       }
   
       tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
   
       if (flags & BDRV_REQ_COPY_ON_READ) {
           int pnum;
   
           ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
           if (ret < 0) {
               goto out;
           }
   
           if (!ret || pnum != nb_sectors) {
               ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
               goto out;
           }
       }
   
       ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
   
   out:
       tracked_request_end(&req);
   
       if (flags & BDRV_REQ_COPY_ON_READ) {
           bs->copy_on_read_in_flight--;
       }
   
       return ret;
   }
   
   int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
       int nb_sectors, QEMUIOVector *qiov)
   {
       trace_bdrv_co_readv(bs, sector_num, nb_sectors);
   
       return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
   }
   
   int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
 {  {
     uint8_t tmp_buf[BDRV_SECTOR_SIZE];      trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
     int len, nb_sectors, count;  
     int64_t sector_num;      return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                               BDRV_REQ_COPY_ON_READ);
   }
   
   static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors)
   {
       BlockDriver *drv = bs->drv;
       QEMUIOVector qiov;
       struct iovec iov;
     int ret;      int ret;
   
     count = count1;      /* TODO Emulate only part of misaligned requests instead of letting block
     /* first write to align to sector start */       * drivers return -ENOTSUP and emulate everything */
     len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);  
     if (len > count)  
         len = count;  
     sector_num = offset >> BDRV_SECTOR_BITS;  
     if (len > 0) {  
         if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)  
             return ret;  
         memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);  
         if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)  
             return ret;  
         count -= len;  
         if (count == 0)  
             return count1;  
         sector_num++;  
         buf += len;  
     }  
   
     /* write the sectors "in place" */      /* First try the efficient write zeroes operation */
     nb_sectors = count >> BDRV_SECTOR_BITS;      if (drv->bdrv_co_write_zeroes) {
     if (nb_sectors > 0) {          ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
         if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)          if (ret != -ENOTSUP) {
             return ret;              return ret;
         sector_num += nb_sectors;          }
         len = nb_sectors << BDRV_SECTOR_BITS;  
         buf += len;  
         count -= len;  
     }      }
   
     /* add data from the last sector */      /* Fall back to bounce buffer if write zeroes is unsupported */
     if (count > 0) {      iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;
         if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)      iov.iov_base = qemu_blockalign(bs, iov.iov_len);
             return ret;      memset(iov.iov_base, 0, iov.iov_len);
         memcpy(tmp_buf, buf, count);      qemu_iovec_init_external(&qiov, &iov, 1);
         if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)  
             return ret;      ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
     }  
     return count1;      qemu_vfree(iov.iov_base);
       return ret;
 }  }
   
 /*  /*
  * Writes to the file and ensures that no writes are reordered across this   * Handle a write request in coroutine context
  * request (acts as a barrier)  
  *  
  * Returns 0 on success, -errno in error cases.  
  */   */
 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,  static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
     const void *buf, int count)      int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
       BdrvRequestFlags flags)
 {  {
       BlockDriver *drv = bs->drv;
       BdrvTrackedRequest req;
     int ret;      int ret;
   
     ret = bdrv_pwrite(bs, offset, buf, count);      if (!bs->drv) {
     if (ret < 0) {          return -ENOMEDIUM;
         return ret;      }
       if (bs->read_only) {
           return -EACCES;
       }
       if (bdrv_check_request(bs, sector_num, nb_sectors)) {
           return -EIO;
     }      }
   
     /* No flush needed for cache=writethrough, it uses O_DSYNC */      /* throttling disk write I/O */
     if ((bs->open_flags & BDRV_O_CACHE_MASK) != 0) {      if (bs->io_limits_enabled) {
         bdrv_flush(bs);          bdrv_io_limits_intercept(bs, true, nb_sectors);
     }      }
   
     return 0;      if (bs->copy_on_read_in_flight) {
           wait_for_overlapping_requests(bs, sector_num, nb_sectors);
       }
   
       tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
   
       if (flags & BDRV_REQ_ZERO_WRITE) {
           ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
       } else {
           ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
       }
   
       if (bs->dirty_bitmap) {
           set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
       }
   
       if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
           bs->wr_highest_sector = sector_num + nb_sectors - 1;
       }
   
       tracked_request_end(&req);
   
       return ret;
 }  }
   
 /*  int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
  * Writes to the file and ensures that no writes are reordered across this      int nb_sectors, QEMUIOVector *qiov)
  * request (acts as a barrier)  {
  *      trace_bdrv_co_writev(bs, sector_num, nb_sectors);
  * Returns 0 on success, -errno in error cases.  
  */      return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
 int bdrv_write_sync(BlockDriverState *bs, int64_t sector_num,  }
     const uint8_t *buf, int nb_sectors)  
   int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors)
 {  {
     return bdrv_pwrite_sync(bs, BDRV_SECTOR_SIZE * sector_num,      trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
         buf, BDRV_SECTOR_SIZE * nb_sectors);  
       return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                                BDRV_REQ_ZERO_WRITE);
 }  }
   
 /**  /**
Line 824  int bdrv_write_sync(BlockDriverState *bs Line 2013  int bdrv_write_sync(BlockDriverState *bs
 int bdrv_truncate(BlockDriverState *bs, int64_t offset)  int bdrv_truncate(BlockDriverState *bs, int64_t offset)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
       int ret;
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
     if (!drv->bdrv_truncate)      if (!drv->bdrv_truncate)
         return -ENOTSUP;          return -ENOTSUP;
     if (bs->read_only)      if (bs->read_only)
         return -EACCES;          return -EACCES;
     return drv->bdrv_truncate(bs, offset);      if (bdrv_in_use(bs))
           return -EBUSY;
       ret = drv->bdrv_truncate(bs, offset);
       if (ret == 0) {
           ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
           bdrv_dev_resize_cb(bs);
       }
       return ret;
   }
   
   /**
    * Length of a allocated file in bytes. Sparse files are counted by actual
    * allocated space. Return < 0 if error or unknown.
    */
   int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
   {
       BlockDriver *drv = bs->drv;
       if (!drv) {
           return -ENOMEDIUM;
       }
       if (drv->bdrv_get_allocated_file_size) {
           return drv->bdrv_get_allocated_file_size(bs);
       }
       if (bs->file) {
           return bdrv_get_allocated_file_size(bs->file);
       }
       return -ENOTSUP;
 }  }
   
 /**  /**
Line 841  int64_t bdrv_getlength(BlockDriverState  Line 2057  int64_t bdrv_getlength(BlockDriverState 
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
     if (!drv->bdrv_getlength) {  
         /* legacy mode */      if (bs->growable || bdrv_dev_has_removable_media(bs)) {
         return bs->total_sectors * BDRV_SECTOR_SIZE;          if (drv->bdrv_getlength) {
               return drv->bdrv_getlength(bs);
           }
     }      }
     return drv->bdrv_getlength(bs);      return bs->total_sectors * BDRV_SECTOR_SIZE;
 }  }
   
 /* return 0 as number of sectors if no device present or error */  /* return 0 as number of sectors if no device present or error */
Line 871  struct partition { Line 2089  struct partition {
         uint8_t end_cyl;            /* end cylinder */          uint8_t end_cyl;            /* end cylinder */
         uint32_t start_sect;        /* starting sector counting from 0 */          uint32_t start_sect;        /* starting sector counting from 0 */
         uint32_t nr_sects;          /* nr of sectors in partition */          uint32_t nr_sects;          /* nr of sectors in partition */
 } __attribute__((packed));  } QEMU_PACKED;
   
 /* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if could not guess */  /* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if could not guess */
 static int guess_disk_lchs(BlockDriverState *bs,  static int guess_disk_lchs(BlockDriverState *bs,
                            int *pcylinders, int *pheads, int *psectors)                             int *pcylinders, int *pheads, int *psectors)
 {  {
     uint8_t buf[512];      uint8_t buf[BDRV_SECTOR_SIZE];
     int ret, i, heads, sectors, cylinders;      int ret, i, heads, sectors, cylinders;
     struct partition *p;      struct partition *p;
     uint32_t nr_sects;      uint32_t nr_sects;
     uint64_t nb_sectors;      uint64_t nb_sectors;
       bool enabled;
   
     bdrv_get_geometry(bs, &nb_sectors);      bdrv_get_geometry(bs, &nb_sectors);
   
       /**
        * The function will be invoked during startup not only in sync I/O mode,
        * but also in async I/O mode. So the I/O throttling function has to
        * be disabled temporarily here, not permanently.
        */
       enabled = bs->io_limits_enabled;
       bs->io_limits_enabled = false;
     ret = bdrv_read(bs, 0, buf, 1);      ret = bdrv_read(bs, 0, buf, 1);
       bs->io_limits_enabled = enabled;
     if (ret < 0)      if (ret < 0)
         return -1;          return -1;
     /* test msdos magic */      /* test msdos magic */
Line 984  void bdrv_set_geometry_hint(BlockDriverS Line 2211  void bdrv_set_geometry_hint(BlockDriverS
     bs->secs = secs;      bs->secs = secs;
 }  }
   
 void bdrv_set_type_hint(BlockDriverState *bs, int type)  
 {  
     bs->type = type;  
     bs->removable = ((type == BDRV_TYPE_CDROM ||  
                       type == BDRV_TYPE_FLOPPY));  
 }  
   
 void bdrv_set_translation_hint(BlockDriverState *bs, int translation)  void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
 {  {
     bs->translation = translation;      bs->translation = translation;
Line 1004  void bdrv_get_geometry_hint(BlockDriverS Line 2224  void bdrv_get_geometry_hint(BlockDriverS
     *psecs = bs->secs;      *psecs = bs->secs;
 }  }
   
 int bdrv_get_type_hint(BlockDriverState *bs)  /* throttling disk io limits */
 {  void bdrv_set_io_limits(BlockDriverState *bs,
     return bs->type;                          BlockIOLimit *io_limits)
   {
       bs->io_limits = *io_limits;
       bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
   }
   
   /* Recognize floppy formats */
   typedef struct FDFormat {
       FDriveType drive;
       uint8_t last_sect;
       uint8_t max_track;
       uint8_t max_head;
       FDriveRate rate;
   } FDFormat;
   
   static const FDFormat fd_formats[] = {
       /* First entry is default format */
       /* 1.44 MB 3"1/2 floppy disks */
       { FDRIVE_DRV_144, 18, 80, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_144, 20, 80, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_144, 21, 80, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_144, 21, 82, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_144, 21, 83, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_144, 22, 80, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_144, 23, 80, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_144, 24, 80, 1, FDRIVE_RATE_500K, },
       /* 2.88 MB 3"1/2 floppy disks */
       { FDRIVE_DRV_288, 36, 80, 1, FDRIVE_RATE_1M, },
       { FDRIVE_DRV_288, 39, 80, 1, FDRIVE_RATE_1M, },
       { FDRIVE_DRV_288, 40, 80, 1, FDRIVE_RATE_1M, },
       { FDRIVE_DRV_288, 44, 80, 1, FDRIVE_RATE_1M, },
       { FDRIVE_DRV_288, 48, 80, 1, FDRIVE_RATE_1M, },
       /* 720 kB 3"1/2 floppy disks */
       { FDRIVE_DRV_144,  9, 80, 1, FDRIVE_RATE_250K, },
       { FDRIVE_DRV_144, 10, 80, 1, FDRIVE_RATE_250K, },
       { FDRIVE_DRV_144, 10, 82, 1, FDRIVE_RATE_250K, },
       { FDRIVE_DRV_144, 10, 83, 1, FDRIVE_RATE_250K, },
       { FDRIVE_DRV_144, 13, 80, 1, FDRIVE_RATE_250K, },
       { FDRIVE_DRV_144, 14, 80, 1, FDRIVE_RATE_250K, },
       /* 1.2 MB 5"1/4 floppy disks */
       { FDRIVE_DRV_120, 15, 80, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_120, 18, 80, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_120, 18, 82, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_120, 18, 83, 1, FDRIVE_RATE_500K, },
       { FDRIVE_DRV_120, 20, 80, 1, FDRIVE_RATE_500K, },
       /* 720 kB 5"1/4 floppy disks */
       { FDRIVE_DRV_120,  9, 80, 1, FDRIVE_RATE_250K, },
       { FDRIVE_DRV_120, 11, 80, 1, FDRIVE_RATE_250K, },
       /* 360 kB 5"1/4 floppy disks */
       { FDRIVE_DRV_120,  9, 40, 1, FDRIVE_RATE_300K, },
       { FDRIVE_DRV_120,  9, 40, 0, FDRIVE_RATE_300K, },
       { FDRIVE_DRV_120, 10, 41, 1, FDRIVE_RATE_300K, },
       { FDRIVE_DRV_120, 10, 42, 1, FDRIVE_RATE_300K, },
       /* 320 kB 5"1/4 floppy disks */
       { FDRIVE_DRV_120,  8, 40, 1, FDRIVE_RATE_250K, },
       { FDRIVE_DRV_120,  8, 40, 0, FDRIVE_RATE_250K, },
       /* 360 kB must match 5"1/4 better than 3"1/2... */
       { FDRIVE_DRV_144,  9, 80, 0, FDRIVE_RATE_250K, },
       /* end */
       { FDRIVE_DRV_NONE, -1, -1, 0, 0, },
   };
   
   void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
                                      int *max_track, int *last_sect,
                                      FDriveType drive_in, FDriveType *drive,
                                      FDriveRate *rate)
   {
       const FDFormat *parse;
       uint64_t nb_sectors, size;
       int i, first_match, match;
   
       bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
       if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
           /* User defined disk */
           *rate = FDRIVE_RATE_500K;
       } else {
           bdrv_get_geometry(bs, &nb_sectors);
           match = -1;
           first_match = -1;
           for (i = 0; ; i++) {
               parse = &fd_formats[i];
               if (parse->drive == FDRIVE_DRV_NONE) {
                   break;
               }
               if (drive_in == parse->drive ||
                   drive_in == FDRIVE_DRV_NONE) {
                   size = (parse->max_head + 1) * parse->max_track *
                       parse->last_sect;
                   if (nb_sectors == size) {
                       match = i;
                       break;
                   }
                   if (first_match == -1) {
                       first_match = i;
                   }
               }
           }
           if (match == -1) {
               if (first_match == -1) {
                   match = 1;
               } else {
                   match = first_match;
               }
               parse = &fd_formats[match];
           }
           *nb_heads = parse->max_head + 1;
           *max_track = parse->max_track;
           *last_sect = parse->last_sect;
           *drive = parse->drive;
           *rate = parse->rate;
       }
 }  }
   
 int bdrv_get_translation_hint(BlockDriverState *bs)  int bdrv_get_translation_hint(BlockDriverState *bs)
Line 1014  int bdrv_get_translation_hint(BlockDrive Line 2344  int bdrv_get_translation_hint(BlockDrive
     return bs->translation;      return bs->translation;
 }  }
   
 int bdrv_is_removable(BlockDriverState *bs)  void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
                          BlockErrorAction on_write_error)
 {  {
     return bs->removable;      bs->on_read_error = on_read_error;
       bs->on_write_error = on_write_error;
 }  }
   
 int bdrv_is_read_only(BlockDriverState *bs)  BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
 {  {
     return bs->read_only;      return is_read ? bs->on_read_error : bs->on_write_error;
 }  }
   
 int bdrv_set_read_only(BlockDriverState *bs, int read_only)  int bdrv_is_read_only(BlockDriverState *bs)
 {  {
     int ret = bs->read_only;      return bs->read_only;
     bs->read_only = read_only;  
     return ret;  
 }  }
   
 int bdrv_is_sg(BlockDriverState *bs)  int bdrv_is_sg(BlockDriverState *bs)
Line 1041  int bdrv_enable_write_cache(BlockDriverS Line 2371  int bdrv_enable_write_cache(BlockDriverS
     return bs->enable_write_cache;      return bs->enable_write_cache;
 }  }
   
 /* XXX: no longer used */  
 void bdrv_set_change_cb(BlockDriverState *bs,  
                         void (*change_cb)(void *opaque), void *opaque)  
 {  
     bs->change_cb = change_cb;  
     bs->change_opaque = opaque;  
 }  
   
 int bdrv_is_encrypted(BlockDriverState *bs)  int bdrv_is_encrypted(BlockDriverState *bs)
 {  {
     if (bs->backing_hd && bs->backing_hd->encrypted)      if (bs->backing_hd && bs->backing_hd->encrypted)
Line 1075  int bdrv_set_key(BlockDriverState *bs, c Line 2397  int bdrv_set_key(BlockDriverState *bs, c
         if (!bs->encrypted)          if (!bs->encrypted)
             return 0;              return 0;
     }      }
     if (!bs->encrypted || !bs->drv || !bs->drv->bdrv_set_key)      if (!bs->encrypted) {
         return -1;          return -EINVAL;
       } else if (!bs->drv || !bs->drv->bdrv_set_key) {
           return -ENOMEDIUM;
       }
     ret = bs->drv->bdrv_set_key(bs, key);      ret = bs->drv->bdrv_set_key(bs, key);
     if (ret < 0) {      if (ret < 0) {
         bs->valid_key = 0;          bs->valid_key = 0;
     } else if (!bs->valid_key) {      } else if (!bs->valid_key) {
         bs->valid_key = 1;          bs->valid_key = 1;
         /* call the change callback now, we skipped it on open */          /* call the change callback now, we skipped it on open */
         bs->media_changed = 1;          bdrv_dev_change_media_cb(bs, true);
         if (bs->change_cb)  
             bs->change_cb(bs->change_opaque);  
     }      }
     return ret;      return ret;
 }  }
Line 1104  void bdrv_iterate_format(void (*it)(void Line 2427  void bdrv_iterate_format(void (*it)(void
 {  {
     BlockDriver *drv;      BlockDriver *drv;
   
     for (drv = first_drv; drv != NULL; drv = drv->next) {      QLIST_FOREACH(drv, &bdrv_drivers, list) {
         it(opaque, drv->format_name);          it(opaque, drv->format_name);
     }      }
 }  }
Line 1113  BlockDriverState *bdrv_find(const char * Line 2436  BlockDriverState *bdrv_find(const char *
 {  {
     BlockDriverState *bs;      BlockDriverState *bs;
   
     for (bs = bdrv_first; bs != NULL; bs = bs->next) {      QTAILQ_FOREACH(bs, &bdrv_states, list) {
         if (!strcmp(name, bs->device_name))          if (!strcmp(name, bs->device_name)) {
             return bs;              return bs;
           }
     }      }
     return NULL;      return NULL;
 }  }
   
   BlockDriverState *bdrv_next(BlockDriverState *bs)
   {
       if (!bs) {
           return QTAILQ_FIRST(&bdrv_states);
       }
       return QTAILQ_NEXT(bs, list);
   }
   
 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)  void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
 {  {
     BlockDriverState *bs;      BlockDriverState *bs;
   
     for (bs = bdrv_first; bs != NULL; bs = bs->next) {      QTAILQ_FOREACH(bs, &bdrv_states, list) {
         it(opaque, bs);          it(opaque, bs);
     }      }
 }  }
Line 1134  const char *bdrv_get_device_name(BlockDr Line 2466  const char *bdrv_get_device_name(BlockDr
     return bs->device_name;      return bs->device_name;
 }  }
   
 void bdrv_flush(BlockDriverState *bs)  void bdrv_flush_all(void)
 {  {
     if (!bs->drv)      BlockDriverState *bs;
         return;  
     if (bs->drv->bdrv_flush)      QTAILQ_FOREACH(bs, &bdrv_states, list) {
         bs->drv->bdrv_flush(bs);          bdrv_flush(bs);
     if (bs->backing_hd)      }
         bdrv_flush(bs->backing_hd);  
 }  }
   
 void bdrv_flush_all(void)  int bdrv_has_zero_init(BlockDriverState *bs)
 {  {
     BlockDriverState *bs;      assert(bs->drv);
   
       if (bs->drv->bdrv_has_zero_init) {
           return bs->drv->bdrv_has_zero_init(bs);
       }
   
     for (bs = bdrv_first; bs != NULL; bs = bs->next)      return 1;
         if (bs->drv && !bdrv_is_read_only(bs) &&   
             (!bdrv_is_removable(bs) || bdrv_is_inserted(bs)))  
             bdrv_flush(bs);  
 }  }
   
   typedef struct BdrvCoIsAllocatedData {
       BlockDriverState *bs;
       int64_t sector_num;
       int nb_sectors;
       int *pnum;
       int ret;
       bool done;
   } BdrvCoIsAllocatedData;
   
 /*  /*
  * Returns true iff the specified sector is present in the disk image. Drivers   * Returns true iff the specified sector is present in the disk image. Drivers
  * not implementing the functionality are assumed to not support backing files,   * not implementing the functionality are assumed to not support backing files,
  * hence all their sectors are reported as allocated.   * hence all their sectors are reported as allocated.
  *   *
    * If 'sector_num' is beyond the end of the disk image the return value is 0
    * and 'pnum' is set to 0.
    *
  * 'pnum' is set to the number of sectors (including and immediately following   * 'pnum' is set to the number of sectors (including and immediately following
  * the specified sector) that are known to be in the same   * the specified sector) that are known to be in the same
  * allocated/unallocated state.   * allocated/unallocated state.
  *   *
  * 'nb_sectors' is the max value 'pnum' should be set to.   * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
    * beyond the end of the disk image it will be clamped.
  */   */
 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,  int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
         int *pnum)                                        int nb_sectors, int *pnum)
 {  {
     int64_t n;      int64_t n;
     if (!bs->drv->bdrv_is_allocated) {  
         if (sector_num >= bs->total_sectors) {  
             *pnum = 0;  
             return 0;  
         }  
         n = bs->total_sectors - sector_num;  
         *pnum = (n < nb_sectors) ? (n) : (nb_sectors);  
         return 1;  
     }  
     return bs->drv->bdrv_is_allocated(bs, sector_num, nb_sectors, pnum);  
 }  
   
 static void bdrv_print_dict(QObject *obj, void *opaque)  
 {  
     QDict *bs_dict;  
     Monitor *mon = opaque;  
   
     bs_dict = qobject_to_qdict(obj);  
   
     monitor_printf(mon, "%s: type=%s removable=%d",      if (sector_num >= bs->total_sectors) {
                         qdict_get_str(bs_dict, "device"),          *pnum = 0;
                         qdict_get_str(bs_dict, "type"),          return 0;
                         qdict_get_bool(bs_dict, "removable"));  
   
     if (qdict_get_bool(bs_dict, "removable")) {  
         monitor_printf(mon, " locked=%d", qdict_get_bool(bs_dict, "locked"));  
     }      }
   
     if (qdict_haskey(bs_dict, "inserted")) {      n = bs->total_sectors - sector_num;
         QDict *qdict = qobject_to_qdict(qdict_get(bs_dict, "inserted"));      if (n < nb_sectors) {
           nb_sectors = n;
       }
   
         monitor_printf(mon, " file=");      if (!bs->drv->bdrv_co_is_allocated) {
         monitor_print_filename(mon, qdict_get_str(qdict, "file"));          *pnum = nb_sectors;
         if (qdict_haskey(qdict, "backing_file")) {          return 1;
             monitor_printf(mon, " backing_file=");  
             monitor_print_filename(mon, qdict_get_str(qdict, "backing_file"));  
         }  
         monitor_printf(mon, " ro=%d drv=%s encrypted=%d",  
                             qdict_get_bool(qdict, "ro"),  
                             qdict_get_str(qdict, "drv"),  
                             qdict_get_bool(qdict, "encrypted"));  
     } else {  
         monitor_printf(mon, " [not inserted]");  
     }      }
   
     monitor_printf(mon, "\n");      return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
 }  }
   
 void bdrv_info_print(Monitor *mon, const QObject *data)  /* Coroutine wrapper for bdrv_is_allocated() */
   static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
 {  {
     qlist_iter(qobject_to_qlist(data), bdrv_print_dict, mon);      BdrvCoIsAllocatedData *data = opaque;
       BlockDriverState *bs = data->bs;
   
       data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
                                        data->pnum);
       data->done = true;
 }  }
   
 /**  /*
  * bdrv_info(): Block devices information   * Synchronous wrapper around bdrv_co_is_allocated().
  *  
  * Each block device information is stored in a QDict and the  
  * returned QObject is a QList of all devices.  
  *  
  * The QDict contains the following:  
  *  
  * - "device": device name  
  * - "type": device type  
  * - "removable": true if the device is removable, false otherwise  
  * - "locked": true if the device is locked, false otherwise  
  * - "inserted": only present if the device is inserted, it is a QDict  
  *    containing the following:  
  *          - "file": device file name  
  *          - "ro": true if read-only, false otherwise  
  *          - "drv": driver format name  
  *          - "backing_file": backing file name if one is used  
  *          - "encrypted": true if encrypted, false otherwise  
  *  
  * Example:  
  *   *
  * [ { "device": "ide0-hd0", "type": "hd", "removable": false, "locked": false,   * See bdrv_co_is_allocated() for details.
  *     "inserted": { "file": "/tmp/foobar", "ro": false, "drv": "qcow2" } },  
  *   { "device": "floppy0", "type": "floppy", "removable": true,  
  *     "locked": false } ]  
  */   */
 void bdrv_info(Monitor *mon, QObject **ret_data)  int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                         int *pnum)
   {
       Coroutine *co;
       BdrvCoIsAllocatedData data = {
           .bs = bs,
           .sector_num = sector_num,
           .nb_sectors = nb_sectors,
           .pnum = pnum,
           .done = false,
       };
   
       co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
       qemu_coroutine_enter(co, &data);
       while (!data.done) {
           qemu_aio_wait();
       }
       return data.ret;
   }
   
   BlockInfoList *qmp_query_block(Error **errp)
 {  {
     QList *bs_list;      BlockInfoList *head = NULL, *cur_item = NULL;
     BlockDriverState *bs;      BlockDriverState *bs;
   
     bs_list = qlist_new();      QTAILQ_FOREACH(bs, &bdrv_states, list) {
           BlockInfoList *info = g_malloc0(sizeof(*info));
   
           info->value = g_malloc0(sizeof(*info->value));
           info->value->device = g_strdup(bs->device_name);
           info->value->type = g_strdup("unknown");
           info->value->locked = bdrv_dev_is_medium_locked(bs);
           info->value->removable = bdrv_dev_has_removable_media(bs);
   
           if (bdrv_dev_has_removable_media(bs)) {
               info->value->has_tray_open = true;
               info->value->tray_open = bdrv_dev_is_tray_open(bs);
           }
   
     for (bs = bdrv_first; bs != NULL; bs = bs->next) {          if (bdrv_iostatus_is_enabled(bs)) {
         QObject *bs_obj;              info->value->has_io_status = true;
         const char *type = "unknown";              info->value->io_status = bs->iostatus;
           }
         switch(bs->type) {  
         case BDRV_TYPE_HD:  
             type = "hd";  
             break;  
         case BDRV_TYPE_CDROM:  
             type = "cdrom";  
             break;  
         case BDRV_TYPE_FLOPPY:  
             type = "floppy";  
             break;  
         }  
   
         bs_obj = qobject_from_jsonf("{ 'device': %s, 'type': %s, "  
                                     "'removable': %i, 'locked': %i }",  
                                     bs->device_name, type, bs->removable,  
                                     bs->locked);  
         assert(bs_obj != NULL);  
   
         if (bs->drv) {          if (bs->drv) {
             QObject *obj;              info->value->has_inserted = true;
             QDict *bs_dict = qobject_to_qdict(bs_obj);              info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
               info->value->inserted->file = g_strdup(bs->filename);
               info->value->inserted->ro = bs->read_only;
               info->value->inserted->drv = g_strdup(bs->drv->format_name);
               info->value->inserted->encrypted = bs->encrypted;
               if (bs->backing_file[0]) {
                   info->value->inserted->has_backing_file = true;
                   info->value->inserted->backing_file = g_strdup(bs->backing_file);
               }
   
             obj = qobject_from_jsonf("{ 'file': %s, 'ro': %i, 'drv': %s, "              if (bs->io_limits_enabled) {
                                      "'encrypted': %i }",                  info->value->inserted->bps =
                                      bs->filename, bs->read_only,                                 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
                                      bs->drv->format_name,                  info->value->inserted->bps_rd =
                                      bdrv_is_encrypted(bs));                                 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
             assert(obj != NULL);                  info->value->inserted->bps_wr =
             if (bs->backing_file[0] != '\0') {                                 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
                 QDict *qdict = qobject_to_qdict(obj);                  info->value->inserted->iops =
                 qdict_put(qdict, "backing_file",                                 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
                           qstring_from_str(bs->backing_file));                  info->value->inserted->iops_rd =
                                  bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
                   info->value->inserted->iops_wr =
                                  bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
             }              }
           }
   
             qdict_put_obj(bs_dict, "inserted", obj);          /* XXX: waiting for the qapi to support GSList */
           if (!cur_item) {
               head = cur_item = info;
           } else {
               cur_item->next = info;
               cur_item = info;
         }          }
         qlist_append_obj(bs_list, bs_obj);  
     }      }
   
     *ret_data = QOBJECT(bs_list);      return head;
 }  }
   
 static void bdrv_stats_iter(QObject *data, void *opaque)  /* Consider exposing this as a full fledged QMP command */
   static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
 {  {
     QDict *qdict;      BlockStats *s;
     Monitor *mon = opaque;  
   
     qdict = qobject_to_qdict(data);      s = g_malloc0(sizeof(*s));
     monitor_printf(mon, "%s:", qdict_get_str(qdict, "device"));  
   
     qdict = qobject_to_qdict(qdict_get(qdict, "stats"));      if (bs->device_name[0]) {
     monitor_printf(mon, " rd_bytes=%" PRId64          s->has_device = true;
                         " wr_bytes=%" PRId64          s->device = g_strdup(bs->device_name);
                         " rd_operations=%" PRId64      }
                         " wr_operations=%" PRId64  
                         "\n",  
                         qdict_get_int(qdict, "rd_bytes"),  
                         qdict_get_int(qdict, "wr_bytes"),  
                         qdict_get_int(qdict, "rd_operations"),  
                         qdict_get_int(qdict, "wr_operations"));  
 }  
   
 void bdrv_stats_print(Monitor *mon, const QObject *data)      s->stats = g_malloc0(sizeof(*s->stats));
 {      s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
     qlist_iter(qobject_to_qlist(data), bdrv_stats_iter, mon);      s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
       s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
       s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
       s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
       s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
       s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
       s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
       s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
   
       if (bs->file) {
           s->has_parent = true;
           s->parent = qmp_query_blockstat(bs->file, NULL);
       }
   
       return s;
 }  }
   
 /**  BlockStatsList *qmp_query_blockstats(Error **errp)
  * bdrv_info_stats(): show block device statistics  
  *  
  * Each device statistic information is stored in a QDict and  
  * the returned QObject is a QList of all devices.  
  *  
  * The QDict contains the following:  
  *  
  * - "device": device name  
  * - "stats": A QDict with the statistics information, it contains:  
  *     - "rd_bytes": bytes read  
  *     - "wr_bytes": bytes written  
  *     - "rd_operations": read operations  
  *     - "wr_operations": write operations  
  *   
  * Example:  
  *  
  * [ { "device": "ide0-hd0",  
  *               "stats": { "rd_bytes": 512,  
  *                          "wr_bytes": 0,  
  *                          "rd_operations": 1,  
  *                          "wr_operations": 0 } },  
  *   { "device": "ide1-cd0",  
  *               "stats": { "rd_bytes": 0,  
  *                          "wr_bytes": 0,  
  *                          "rd_operations": 0,  
  *                          "wr_operations": 0 } } ]  
  */  
 void bdrv_info_stats(Monitor *mon, QObject **ret_data)  
 {  {
     QObject *obj;      BlockStatsList *head = NULL, *cur_item = NULL;
     QList *devices;  
     BlockDriverState *bs;      BlockDriverState *bs;
   
     devices = qlist_new();      QTAILQ_FOREACH(bs, &bdrv_states, list) {
           BlockStatsList *info = g_malloc0(sizeof(*info));
     for (bs = bdrv_first; bs != NULL; bs = bs->next) {          info->value = qmp_query_blockstat(bs, NULL);
         obj = qobject_from_jsonf("{ 'device': %s, 'stats': {"  
                                  "'rd_bytes': %" PRId64 ","          /* XXX: waiting for the qapi to support GSList */
                                  "'wr_bytes': %" PRId64 ","          if (!cur_item) {
                                  "'rd_operations': %" PRId64 ","              head = cur_item = info;
                                  "'wr_operations': %" PRId64          } else {
                                  "} }",              cur_item->next = info;
                                  bs->device_name,              cur_item = info;
                                  bs->rd_bytes, bs->wr_bytes,          }
                                  bs->rd_ops, bs->wr_ops);  
         assert(obj != NULL);  
         qlist_append_obj(devices, obj);  
     }      }
   
     *ret_data = QOBJECT(devices);      return head;
 }  }
   
 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)  const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
Line 1393  const char *bdrv_get_encrypted_filename( Line 2698  const char *bdrv_get_encrypted_filename(
 void bdrv_get_backing_filename(BlockDriverState *bs,  void bdrv_get_backing_filename(BlockDriverState *bs,
                                char *filename, int filename_size)                                 char *filename, int filename_size)
 {  {
     if (!bs->backing_hd) {      pstrcpy(filename, filename_size, bs->backing_file);
         pstrcpy(filename, filename_size, "");  
     } else {  
         pstrcpy(filename, filename_size, bs->backing_file);  
     }  
 }  }
   
 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,  int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
Line 1435  int bdrv_save_vmstate(BlockDriverState * Line 2736  int bdrv_save_vmstate(BlockDriverState *
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
     if (!drv->bdrv_save_vmstate)      if (drv->bdrv_save_vmstate)
         return -ENOTSUP;          return drv->bdrv_save_vmstate(bs, buf, pos, size);
     return drv->bdrv_save_vmstate(bs, buf, pos, size);      if (bs->file)
           return bdrv_save_vmstate(bs->file, buf, pos, size);
       return -ENOTSUP;
 }  }
   
 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,  int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
Line 1446  int bdrv_load_vmstate(BlockDriverState * Line 2749  int bdrv_load_vmstate(BlockDriverState *
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
     if (!drv->bdrv_load_vmstate)      if (drv->bdrv_load_vmstate)
         return -ENOTSUP;          return drv->bdrv_load_vmstate(bs, buf, pos, size);
     return drv->bdrv_load_vmstate(bs, buf, pos, size);      if (bs->file)
           return bdrv_load_vmstate(bs->file, buf, pos, size);
       return -ENOTSUP;
   }
   
   void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
   {
       BlockDriver *drv = bs->drv;
   
       if (!drv || !drv->bdrv_debug_event) {
           return;
       }
   
       return drv->bdrv_debug_event(bs, event);
   
 }  }
   
 /**************************************************************/  /**************************************************************/
 /* handling of snapshots */  /* handling of snapshots */
   
   int bdrv_can_snapshot(BlockDriverState *bs)
   {
       BlockDriver *drv = bs->drv;
       if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
           return 0;
       }
   
       if (!drv->bdrv_snapshot_create) {
           if (bs->file != NULL) {
               return bdrv_can_snapshot(bs->file);
           }
           return 0;
       }
   
       return 1;
   }
   
   int bdrv_is_snapshot(BlockDriverState *bs)
   {
       return !!(bs->open_flags & BDRV_O_SNAPSHOT);
   }
   
   BlockDriverState *bdrv_snapshots(void)
   {
       BlockDriverState *bs;
   
       if (bs_snapshots) {
           return bs_snapshots;
       }
   
       bs = NULL;
       while ((bs = bdrv_next(bs))) {
           if (bdrv_can_snapshot(bs)) {
               bs_snapshots = bs;
               return bs;
           }
       }
       return NULL;
   }
   
 int bdrv_snapshot_create(BlockDriverState *bs,  int bdrv_snapshot_create(BlockDriverState *bs,
                          QEMUSnapshotInfo *sn_info)                           QEMUSnapshotInfo *sn_info)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
     if (!drv->bdrv_snapshot_create)      if (drv->bdrv_snapshot_create)
         return -ENOTSUP;          return drv->bdrv_snapshot_create(bs, sn_info);
     return drv->bdrv_snapshot_create(bs, sn_info);      if (bs->file)
           return bdrv_snapshot_create(bs->file, sn_info);
       return -ENOTSUP;
 }  }
   
 int bdrv_snapshot_goto(BlockDriverState *bs,  int bdrv_snapshot_goto(BlockDriverState *bs,
                        const char *snapshot_id)                         const char *snapshot_id)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
       int ret, open_ret;
   
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
     if (!drv->bdrv_snapshot_goto)      if (drv->bdrv_snapshot_goto)
         return -ENOTSUP;          return drv->bdrv_snapshot_goto(bs, snapshot_id);
     return drv->bdrv_snapshot_goto(bs, snapshot_id);  
       if (bs->file) {
           drv->bdrv_close(bs);
           ret = bdrv_snapshot_goto(bs->file, snapshot_id);
           open_ret = drv->bdrv_open(bs, bs->open_flags);
           if (open_ret < 0) {
               bdrv_delete(bs->file);
               bs->drv = NULL;
               return open_ret;
           }
           return ret;
       }
   
       return -ENOTSUP;
 }  }
   
 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)  int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
Line 1481  int bdrv_snapshot_delete(BlockDriverStat Line 2855  int bdrv_snapshot_delete(BlockDriverStat
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
     if (!drv->bdrv_snapshot_delete)      if (drv->bdrv_snapshot_delete)
         return -ENOTSUP;          return drv->bdrv_snapshot_delete(bs, snapshot_id);
     return drv->bdrv_snapshot_delete(bs, snapshot_id);      if (bs->file)
           return bdrv_snapshot_delete(bs->file, snapshot_id);
       return -ENOTSUP;
 }  }
   
 int bdrv_snapshot_list(BlockDriverState *bs,  int bdrv_snapshot_list(BlockDriverState *bs,
Line 1492  int bdrv_snapshot_list(BlockDriverState  Line 2868  int bdrv_snapshot_list(BlockDriverState 
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
     if (!drv->bdrv_snapshot_list)      if (drv->bdrv_snapshot_list)
         return -ENOTSUP;          return drv->bdrv_snapshot_list(bs, psn_info);
     return drv->bdrv_snapshot_list(bs, psn_info);      if (bs->file)
           return bdrv_snapshot_list(bs->file, psn_info);
       return -ENOTSUP;
   }
   
   int bdrv_snapshot_load_tmp(BlockDriverState *bs,
           const char *snapshot_name)
   {
       BlockDriver *drv = bs->drv;
       if (!drv) {
           return -ENOMEDIUM;
       }
       if (!bs->read_only) {
           return -EINVAL;
       }
       if (drv->bdrv_snapshot_load_tmp) {
           return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
       }
       return -ENOTSUP;
   }
   
   BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
           const char *backing_file)
   {
       if (!bs->drv) {
           return NULL;
       }
   
       if (bs->backing_hd) {
           if (strcmp(bs->backing_file, backing_file) == 0) {
               return bs->backing_hd;
           } else {
               return bdrv_find_backing_image(bs->backing_hd, backing_file);
           }
       }
   
       return NULL;
 }  }
   
 #define NB_SUFFIXES 4  #define NB_SUFFIXES 4
Line 1570  char *bdrv_snapshot_dump(char *buf, int  Line 2982  char *bdrv_snapshot_dump(char *buf, int 
     return buf;      return buf;
 }  }
   
   
 /**************************************************************/  /**************************************************************/
 /* async I/Os */  /* async I/Os */
   
Line 1578  BlockDriverAIOCB *bdrv_aio_readv(BlockDr Line 2989  BlockDriverAIOCB *bdrv_aio_readv(BlockDr
                                  QEMUIOVector *qiov, int nb_sectors,                                   QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)                                   BlockDriverCompletionFunc *cb, void *opaque)
 {  {
     BlockDriver *drv = bs->drv;      trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
     BlockDriverAIOCB *ret;  
   
     if (!drv)  
         return NULL;  
     if (bdrv_check_request(bs, sector_num, nb_sectors))  
         return NULL;  
   
     ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,      return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                               cb, opaque);                                   cb, opaque, false);
   
     if (ret) {  
         /* Update stats even though technically transfer has not happened. */  
         bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;  
         bs->rd_ops ++;  
     }  
   
     return ret;  
 }  }
   
 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,  BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                   QEMUIOVector *qiov, int nb_sectors,                                    QEMUIOVector *qiov, int nb_sectors,
                                   BlockDriverCompletionFunc *cb, void *opaque)                                    BlockDriverCompletionFunc *cb, void *opaque)
 {  {
     BlockDriver *drv = bs->drv;      trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
     BlockDriverAIOCB *ret;  
   
     if (!drv)  
         return NULL;  
     if (bs->read_only)  
         return NULL;  
     if (bdrv_check_request(bs, sector_num, nb_sectors))  
         return NULL;  
   
     if (bs->dirty_bitmap) {      return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
         set_dirty_bitmap(bs, sector_num, nb_sectors, 1);                                   cb, opaque, true);
     }  
   
     ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,  
                                cb, opaque);  
   
     if (ret) {  
         /* Update stats even though technically transfer has not happened. */  
         bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;  
         bs->wr_ops ++;  
     }  
   
     return ret;  
 }  }
   
   
Line 1637  typedef struct MultiwriteCB { Line 3014  typedef struct MultiwriteCB {
         BlockDriverCompletionFunc *cb;          BlockDriverCompletionFunc *cb;
         void *opaque;          void *opaque;
         QEMUIOVector *free_qiov;          QEMUIOVector *free_qiov;
         void *free_buf;  
     } callbacks[];      } callbacks[];
 } MultiwriteCB;  } MultiwriteCB;
   
Line 1650  static void multiwrite_user_cb(Multiwrit Line 3026  static void multiwrite_user_cb(Multiwrit
         if (mcb->callbacks[i].free_qiov) {          if (mcb->callbacks[i].free_qiov) {
             qemu_iovec_destroy(mcb->callbacks[i].free_qiov);              qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
         }          }
         qemu_free(mcb->callbacks[i].free_qiov);          g_free(mcb->callbacks[i].free_qiov);
         qemu_vfree(mcb->callbacks[i].free_buf);  
     }      }
 }  }
   
Line 1659  static void multiwrite_cb(void *opaque,  Line 3034  static void multiwrite_cb(void *opaque, 
 {  {
     MultiwriteCB *mcb = opaque;      MultiwriteCB *mcb = opaque;
   
       trace_multiwrite_cb(mcb, ret);
   
     if (ret < 0 && !mcb->error) {      if (ret < 0 && !mcb->error) {
         mcb->error = ret;          mcb->error = ret;
     }      }
Line 1666  static void multiwrite_cb(void *opaque,  Line 3043  static void multiwrite_cb(void *opaque, 
     mcb->num_requests--;      mcb->num_requests--;
     if (mcb->num_requests == 0) {      if (mcb->num_requests == 0) {
         multiwrite_user_cb(mcb);          multiwrite_user_cb(mcb);
         qemu_free(mcb);          g_free(mcb);
     }      }
 }  }
   
Line 1706  static int multiwrite_merge(BlockDriverS Line 3083  static int multiwrite_merge(BlockDriverS
         int merge = 0;          int merge = 0;
         int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;          int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
   
         // This handles the cases that are valid for all block drivers, namely          // Handle exactly sequential writes and overlapping writes.
         // exactly sequential writes and overlapping writes.  
         if (reqs[i].sector <= oldreq_last) {          if (reqs[i].sector <= oldreq_last) {
             merge = 1;              merge = 1;
         }          }
   
         // The block driver may decide that it makes sense to combine requests  
         // even if there is a gap of some sectors between them. In this case,  
         // the gap is filled with zeros (therefore only applicable for yet  
         // unused space in format like qcow2).  
         if (!merge && bs->drv->bdrv_merge_requests) {  
             merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);  
         }  
   
         if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {          if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
             merge = 0;              merge = 0;
         }          }
   
         if (merge) {          if (merge) {
             size_t size;              size_t size;
             QEMUIOVector *qiov = qemu_mallocz(sizeof(*qiov));              QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
             qemu_iovec_init(qiov,              qemu_iovec_init(qiov,
                 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);                  reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
   
Line 1735  static int multiwrite_merge(BlockDriverS Line 3103  static int multiwrite_merge(BlockDriverS
             size = (reqs[i].sector - reqs[outidx].sector) << 9;              size = (reqs[i].sector - reqs[outidx].sector) << 9;
             qemu_iovec_concat(qiov, reqs[outidx].qiov, size);              qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
   
             // We might need to add some zeros between the two requests              // We should need to add any zeros between the two requests
             if (reqs[i].sector > oldreq_last) {              assert (reqs[i].sector <= oldreq_last);
                 size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;  
                 uint8_t *buf = qemu_blockalign(bs, zero_bytes);  
                 memset(buf, 0, zero_bytes);  
                 qemu_iovec_add(qiov, buf, zero_bytes);  
                 mcb->callbacks[i].free_buf = buf;  
             }  
   
             // Add the second request              // Add the second request
             qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);              qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
Line 1778  static int multiwrite_merge(BlockDriverS Line 3140  static int multiwrite_merge(BlockDriverS
  */   */
 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)  int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
 {  {
     BlockDriverAIOCB *acb;  
     MultiwriteCB *mcb;      MultiwriteCB *mcb;
     int i;      int i;
   
       /* don't submit writes if we don't have a medium */
       if (bs->drv == NULL) {
           for (i = 0; i < num_reqs; i++) {
               reqs[i].error = -ENOMEDIUM;
           }
           return -1;
       }
   
     if (num_reqs == 0) {      if (num_reqs == 0) {
         return 0;          return 0;
     }      }
   
     // Create MultiwriteCB structure      // Create MultiwriteCB structure
     mcb = qemu_mallocz(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));      mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
     mcb->num_requests = 0;      mcb->num_requests = 0;
     mcb->num_callbacks = num_reqs;      mcb->num_callbacks = num_reqs;
   
Line 1799  int bdrv_aio_multiwrite(BlockDriverState Line 3168  int bdrv_aio_multiwrite(BlockDriverState
     // Check for mergable requests      // Check for mergable requests
     num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);      num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
   
     /*      trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
      * Run the aio requests. As soon as one request can't be submitted  
      * successfully, fail all requests that are not yet submitted (we must  
      * return failure for all requests anyway)  
      *  
      * num_requests cannot be set to the right value immediately: If  
      * bdrv_aio_writev fails for some request, num_requests would be too high  
      * and therefore multiwrite_cb() would never recognize the multiwrite  
      * request as completed. We also cannot use the loop variable i to set it  
      * when the first request fails because the callback may already have been  
      * called for previously submitted requests. Thus, num_requests must be  
      * incremented for each request that is submitted.  
      *  
      * The problem that callbacks may be called early also means that we need  
      * to take care that num_requests doesn't become 0 before all requests are  
      * submitted - multiwrite_cb() would consider the multiwrite request  
      * completed. A dummy request that is "completed" by a manual call to  
      * multiwrite_cb() takes care of this.  
      */  
     mcb->num_requests = 1;  
   
       /* Run the aio requests. */
       mcb->num_requests = num_reqs;
     for (i = 0; i < num_reqs; i++) {      for (i = 0; i < num_reqs; i++) {
         mcb->num_requests++;          bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
         acb = bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,  
             reqs[i].nb_sectors, multiwrite_cb, mcb);              reqs[i].nb_sectors, multiwrite_cb, mcb);
       }
   
         if (acb == NULL) {      return 0;
             // We can only fail the whole thing if no request has been  }
             // submitted yet. Otherwise we'll wait for the submitted AIOs to  
             // complete and report the error in the callback.  void bdrv_aio_cancel(BlockDriverAIOCB *acb)
             if (i == 0) {  {
                 goto fail;      acb->pool->cancel(acb);
             } else {  }
                 multiwrite_cb(mcb, -EIO);  
                 break;  /* block I/O throttling */
             }  static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
                    bool is_write, double elapsed_time, uint64_t *wait)
   {
       uint64_t bps_limit = 0;
       double   bytes_limit, bytes_base, bytes_res;
       double   slice_time, wait_time;
   
       if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
           bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
       } else if (bs->io_limits.bps[is_write]) {
           bps_limit = bs->io_limits.bps[is_write];
       } else {
           if (wait) {
               *wait = 0;
         }          }
   
           return false;
     }      }
   
     /* Complete the dummy request */      slice_time = bs->slice_end - bs->slice_start;
     multiwrite_cb(mcb, 0);      slice_time /= (NANOSECONDS_PER_SECOND);
       bytes_limit = bps_limit * slice_time;
       bytes_base  = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
       if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
           bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
       }
   
       /* bytes_base: the bytes of data which have been read/written; and
        *             it is obtained from the history statistic info.
        * bytes_res: the remaining bytes of data which need to be read/written.
        * (bytes_base + bytes_res) / bps_limit: used to calcuate
        *             the total time for completing reading/writting all data.
        */
       bytes_res   = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
   
     return 0;      if (bytes_base + bytes_res <= bytes_limit) {
           if (wait) {
               *wait = 0;
           }
   
 fail:          return false;
     for (i = 0; i < mcb->num_callbacks; i++) {  
         reqs[i].error = -EIO;  
     }      }
     qemu_free(mcb);  
     return -1;      /* Calc approx time to dispatch */
       wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
   
       /* When the I/O rate at runtime exceeds the limits,
        * bs->slice_end need to be extended in order that the current statistic
        * info can be kept until the timer fire, so it is increased and tuned
        * based on the result of experiment.
        */
       bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
       bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
       if (wait) {
           *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
       }
   
       return true;
 }  }
   
 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,  static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
         BlockDriverCompletionFunc *cb, void *opaque)                               double elapsed_time, uint64_t *wait)
 {  {
     BlockDriver *drv = bs->drv;      uint64_t iops_limit = 0;
       double   ios_limit, ios_base;
       double   slice_time, wait_time;
   
     if (!drv)      if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
         return NULL;          iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
       } else if (bs->io_limits.iops[is_write]) {
           iops_limit = bs->io_limits.iops[is_write];
       } else {
           if (wait) {
               *wait = 0;
           }
   
     /*          return false;
      * Note that unlike bdrv_flush the driver is reponsible for flushing a      }
      * backing image if it exists.  
      */      slice_time = bs->slice_end - bs->slice_start;
     return drv->bdrv_aio_flush(bs, cb, opaque);      slice_time /= (NANOSECONDS_PER_SECOND);
       ios_limit  = iops_limit * slice_time;
       ios_base   = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
       if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
           ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
       }
   
       if (ios_base + 1 <= ios_limit) {
           if (wait) {
               *wait = 0;
           }
   
           return false;
       }
   
       /* Calc approx time to dispatch */
       wait_time = (ios_base + 1) / iops_limit;
       if (wait_time > elapsed_time) {
           wait_time = wait_time - elapsed_time;
       } else {
           wait_time = 0;
       }
   
       bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
       bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
       if (wait) {
           *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
       }
   
       return true;
 }  }
   
 void bdrv_aio_cancel(BlockDriverAIOCB *acb)  static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
                              bool is_write, int64_t *wait)
 {  {
     acb->pool->cancel(acb);      int64_t  now, max_wait;
 }      uint64_t bps_wait = 0, iops_wait = 0;
       double   elapsed_time;
       int      bps_ret, iops_ret;
   
       now = qemu_get_clock_ns(vm_clock);
       if ((bs->slice_start < now)
           && (bs->slice_end > now)) {
           bs->slice_end = now + bs->slice_time;
       } else {
           bs->slice_time  =  5 * BLOCK_IO_SLICE_TIME;
           bs->slice_start = now;
           bs->slice_end   = now + bs->slice_time;
   
           bs->io_base.bytes[is_write]  = bs->nr_bytes[is_write];
           bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
   
           bs->io_base.ios[is_write]    = bs->nr_ops[is_write];
           bs->io_base.ios[!is_write]   = bs->nr_ops[!is_write];
       }
   
       elapsed_time  = now - bs->slice_start;
       elapsed_time  /= (NANOSECONDS_PER_SECOND);
   
       bps_ret  = bdrv_exceed_bps_limits(bs, nb_sectors,
                                         is_write, elapsed_time, &bps_wait);
       iops_ret = bdrv_exceed_iops_limits(bs, is_write,
                                         elapsed_time, &iops_wait);
       if (bps_ret || iops_ret) {
           max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
           if (wait) {
               *wait = max_wait;
           }
   
           now = qemu_get_clock_ns(vm_clock);
           if (bs->slice_end < now + max_wait) {
               bs->slice_end = now + max_wait;
           }
   
           return true;
       }
   
       if (wait) {
           *wait = 0;
       }
   
       return false;
   }
   
 /**************************************************************/  /**************************************************************/
 /* async block device emulation */  /* async block device emulation */
Line 1887  typedef struct BlockDriverAIOCBSync { Line 3365  typedef struct BlockDriverAIOCBSync {
   
 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)  static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
 {  {
     BlockDriverAIOCBSync *acb = (BlockDriverAIOCBSync *)blockacb;      BlockDriverAIOCBSync *acb =
           container_of(blockacb, BlockDriverAIOCBSync, common);
     qemu_bh_delete(acb->bh);      qemu_bh_delete(acb->bh);
     acb->bh = NULL;      acb->bh = NULL;
     qemu_aio_release(acb);      qemu_aio_release(acb);
Line 1926  static BlockDriverAIOCB *bdrv_aio_rw_vec Line 3405  static BlockDriverAIOCB *bdrv_aio_rw_vec
     acb->is_write = is_write;      acb->is_write = is_write;
     acb->qiov = qiov;      acb->qiov = qiov;
     acb->bounce = qemu_blockalign(bs, qiov->size);      acb->bounce = qemu_blockalign(bs, qiov->size);
       acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
   
       if (is_write) {
           qemu_iovec_to_buffer(acb->qiov, acb->bounce);
           acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
       } else {
           acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
       }
   
       qemu_bh_schedule(acb->bh);
   
       return &acb->common;
   }
   
    /* Emulated AIO read for drivers that only provide a synchronous
     * bdrv_read: dispatch through the bounce-buffer helper (is_write = 0). */
    static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
            int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
            BlockDriverCompletionFunc *cb, void *opaque)
    {
        return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
    }
   
    /* Emulated AIO write for drivers that only provide a synchronous
     * bdrv_write: dispatch through the bounce-buffer helper (is_write = 1). */
    static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
            int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
            BlockDriverCompletionFunc *cb, void *opaque)
    {
        return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
    }
   
   
    /* AIOCB used when a request is emulated by running the coroutine-based
     * bdrv_co_do_readv/writev (or flush/discard) in a new coroutine. */
    typedef struct BlockDriverAIOCBCoroutine {
        BlockDriverAIOCB common;
        BlockRequest req;        /* sector, nb_sectors, qiov and final error */
        bool is_write;           /* selects readv vs. writev in bdrv_co_do_rw */
        QEMUBH* bh;              /* bottom half that delivers completion */
    } BlockDriverAIOCBCoroutine;
   
    /* Cancel callback for coroutine-emulated AIOCBs.  The coroutine cannot
     * be interrupted, so drain all outstanding AIO, which also completes
     * (and releases) this request. */
    static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
    {
        qemu_aio_flush();
    }
   
    /* AIOCB pool for the coroutine-based emulation paths above. */
    static AIOPool bdrv_em_co_aio_pool = {
        .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
        .cancel             = bdrv_aio_co_cancel_em,
    };
   
    /* Bottom half run after the request coroutine has finished: report the
     * stored result to the user callback, then tear down the BH and the
     * AIOCB.  The callback runs first so the AIOCB stays valid during it. */
    static void bdrv_co_em_bh(void *opaque)
    {
        BlockDriverAIOCBCoroutine *acb = opaque;

        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_bh_delete(acb->bh);
        qemu_aio_release(acb);
    }
   
   /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
   static void coroutine_fn bdrv_co_do_rw(void *opaque)
   {
       BlockDriverAIOCBCoroutine *acb = opaque;
       BlockDriverState *bs = acb->common.bs;
   
       if (!acb->is_write) {
           acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
               acb->req.nb_sectors, acb->req.qiov, 0);
       } else {
           acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
               acb->req.nb_sectors, acb->req.qiov, 0);
       }
   
       acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
       qemu_bh_schedule(acb->bh);
   }
   
   static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                                  int64_t sector_num,
                                                  QEMUIOVector *qiov,
                                                  int nb_sectors,
                                                  BlockDriverCompletionFunc *cb,
                                                  void *opaque,
                                                  bool is_write)
   {
       Coroutine *co;
       BlockDriverAIOCBCoroutine *acb;
   
       acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
       acb->req.sector = sector_num;
       acb->req.nb_sectors = nb_sectors;
       acb->req.qiov = qiov;
       acb->is_write = is_write;
   
       co = qemu_coroutine_create(bdrv_co_do_rw);
       qemu_coroutine_enter(co, acb);
   
       return &acb->common;
   }
   
    /* Coroutine entry point for bdrv_aio_flush: perform the flush and
     * schedule the completion bottom half with its result. */
    static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
    {
        BlockDriverAIOCBCoroutine *acb = opaque;
        BlockDriverState *bs = acb->common.bs;

        acb->req.error = bdrv_co_flush(bs);
        acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
   
   BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
           BlockDriverCompletionFunc *cb, void *opaque)
   {
       trace_bdrv_aio_flush(bs, opaque);
   
       Coroutine *co;
       BlockDriverAIOCBCoroutine *acb;
   
       acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
       co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
       qemu_coroutine_enter(co, acb);
   
       return &acb->common;
   }
   
    /* Coroutine entry point for bdrv_aio_discard: perform the discard and
     * schedule the completion bottom half with its result. */
    static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
    {
        BlockDriverAIOCBCoroutine *acb = opaque;
        BlockDriverState *bs = acb->common.bs;

        acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
        acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
   
   BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
           int64_t sector_num, int nb_sectors,
           BlockDriverCompletionFunc *cb, void *opaque)
   {
       Coroutine *co;
       BlockDriverAIOCBCoroutine *acb;
   
       trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
   
       acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
       acb->req.sector = sector_num;
       acb->req.nb_sectors = nb_sectors;
       co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
       qemu_coroutine_enter(co, acb);
   
       return &acb->common;
   }
   
    /* Register all built-in block drivers by running their module init
     * functions. */
    void bdrv_init(void)
    {
        module_call_init(MODULE_INIT_BLOCK);
    }
   
    /* Like bdrv_init(), but restrict driver selection to the configured
     * whitelist. */
    void bdrv_init_with_whitelist(void)
    {
        use_bdrv_whitelist = 1;
        bdrv_init();
    }
   
   void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
                      BlockDriverCompletionFunc *cb, void *opaque)
   {
       BlockDriverAIOCB *acb;
   
       if (pool->free_aiocb) {
           acb = pool->free_aiocb;
           pool->free_aiocb = acb->next;
       } else {
           acb = g_malloc0(pool->aiocb_size);
           acb->pool = pool;
       }
       acb->bs = bs;
       acb->cb = cb;
       acb->opaque = opaque;
       return acb;
   }
   
   void qemu_aio_release(void *p)
   {
       BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
       AIOPool *pool = acb->pool;
       acb->next = pool->free_aiocb;
       pool->free_aiocb = acb;
   }
   
   /**************************************************************/
   /* Coroutine block device emulation */
   
    /* Tracks completion of an AIO request issued on behalf of a coroutine:
     * the completion callback stores the result here and re-enters the
     * waiting coroutine. */
    typedef struct CoroutineIOCompletion {
        Coroutine *coroutine;   /* coroutine waiting for the result */
        int ret;                /* request result (0 or -errno) */
    } CoroutineIOCompletion;
   
   static void bdrv_co_io_em_complete(void *opaque, int ret)
   {
       CoroutineIOCompletion *co = opaque;
   
     if (!acb->bh)      co->ret = ret;
         acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);      qemu_coroutine_enter(co->coroutine, NULL);
   }
   
   static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                         int nb_sectors, QEMUIOVector *iov,
                                         bool is_write)
   {
       CoroutineIOCompletion co = {
           .coroutine = qemu_coroutine_self(),
       };
       BlockDriverAIOCB *acb;
   
     if (is_write) {      if (is_write) {
         qemu_iovec_to_buffer(acb->qiov, acb->bounce);          acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
         acb->ret = bdrv_write(bs, sector_num, acb->bounce, nb_sectors);                                         bdrv_co_io_em_complete, &co);
     } else {      } else {
         acb->ret = bdrv_read(bs, sector_num, acb->bounce, nb_sectors);          acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                         bdrv_co_io_em_complete, &co);
     }      }
   
     qemu_bh_schedule(acb->bh);      trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
       if (!acb) {
           return -EIO;
       }
       qemu_coroutine_yield();
   
     return &acb->common;      return co.ret;
 }  }
   
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,  static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,                                           int64_t sector_num, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque)                                           QEMUIOVector *iov)
 {  {
     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);      return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
 }  }
   
 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,  static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,                                           int64_t sector_num, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque)                                           QEMUIOVector *iov)
 {  {
     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);      return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
 }  }
   
 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,  static void coroutine_fn bdrv_flush_co_entry(void *opaque)
         BlockDriverCompletionFunc *cb, void *opaque)  
 {  {
     BlockDriverAIOCBSync *acb;      RwCo *rwco = opaque;
   
     acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);  
     acb->is_write = 1; /* don't bounce in the completion hadler */  
     acb->qiov = NULL;  
     acb->bounce = NULL;  
     acb->ret = 0;  
   
     if (!acb->bh)  
         acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);  
   
     bdrv_flush(bs);  
     qemu_bh_schedule(acb->bh);  
     return &acb->common;  
 }  
   
 /**************************************************************/      rwco->ret = bdrv_co_flush(rwco->bs);
 /* sync block device emulation */  
   
 static void bdrv_rw_em_cb(void *opaque, int ret)  
 {  
     *(int *)opaque = ret;  
 }  }
   
 #define NOT_DONE 0x7fffffff  int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
   
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,  
                         uint8_t *buf, int nb_sectors)  
 {  {
     int async_ret;      int ret;
     BlockDriverAIOCB *acb;  
     struct iovec iov;  
     QEMUIOVector qiov;  
   
     async_context_push();      if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
           return 0;
       }
   
     async_ret = NOT_DONE;      /* Write back cached data to the OS even with cache=unsafe */
     iov.iov_base = (void *)buf;      if (bs->drv->bdrv_co_flush_to_os) {
     iov.iov_len = nb_sectors * 512;          ret = bs->drv->bdrv_co_flush_to_os(bs);
     qemu_iovec_init_external(&qiov, &iov, 1);          if (ret < 0) {
     acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,              return ret;
         bdrv_rw_em_cb, &async_ret);          }
     if (acb == NULL) {  
         async_ret = -1;  
         goto fail;  
     }      }
   
     while (async_ret == NOT_DONE) {      /* But don't actually force it to the disk with cache=unsafe */
         qemu_aio_wait();      if (bs->open_flags & BDRV_O_NO_FLUSH) {
           return 0;
     }      }
   
       if (bs->drv->bdrv_co_flush_to_disk) {
           ret = bs->drv->bdrv_co_flush_to_disk(bs);
       } else if (bs->drv->bdrv_aio_flush) {
           BlockDriverAIOCB *acb;
           CoroutineIOCompletion co = {
               .coroutine = qemu_coroutine_self(),
           };
   
           acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
           if (acb == NULL) {
               ret = -EIO;
           } else {
               qemu_coroutine_yield();
               ret = co.ret;
           }
       } else {
           /*
            * Some block drivers always operate in either writethrough or unsafe
            * mode and don't support bdrv_flush therefore. Usually qemu doesn't
            * know how the server works (because the behaviour is hardcoded or
            * depends on server-side configuration), so we can't ensure that
            * everything is safe on disk. Returning an error doesn't work because
            * that would break guests even if the server operates in writethrough
            * mode.
            *
            * Let's hope the user knows what he's doing.
            */
           ret = 0;
       }
       if (ret < 0) {
           return ret;
       }
   
 fail:      /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     async_context_pop();       * in the case of cache=unsafe, so there are no useless flushes.
     return async_ret;       */
       return bdrv_co_flush(bs->file);
 }  }
   
 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,  void bdrv_invalidate_cache(BlockDriverState *bs)
                          const uint8_t *buf, int nb_sectors)  
 {  {
     int async_ret;      if (bs->drv && bs->drv->bdrv_invalidate_cache) {
     BlockDriverAIOCB *acb;          bs->drv->bdrv_invalidate_cache(bs);
     struct iovec iov;      }
     QEMUIOVector qiov;  }
   
     async_context_push();  void bdrv_invalidate_cache_all(void)
   {
       BlockDriverState *bs;
   
     async_ret = NOT_DONE;      QTAILQ_FOREACH(bs, &bdrv_states, list) {
     iov.iov_base = (void *)buf;          bdrv_invalidate_cache(bs);
     iov.iov_len = nb_sectors * 512;  
     qemu_iovec_init_external(&qiov, &iov, 1);  
     acb = bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors,  
         bdrv_rw_em_cb, &async_ret);  
     if (acb == NULL) {  
         async_ret = -1;  
         goto fail;  
     }  
     while (async_ret == NOT_DONE) {  
         qemu_aio_wait();  
     }      }
   }
   
   void bdrv_clear_incoming_migration_all(void)
   {
       BlockDriverState *bs;
   
 fail:      QTAILQ_FOREACH(bs, &bdrv_states, list) {
     async_context_pop();          bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
     return async_ret;      }
 }  }
   
 void bdrv_init(void)  int bdrv_flush(BlockDriverState *bs)
 {  {
     module_call_init(MODULE_INIT_BLOCK);      Coroutine *co;
       RwCo rwco = {
           .bs = bs,
           .ret = NOT_DONE,
       };
   
       if (qemu_in_coroutine()) {
           /* Fast-path if already in coroutine context */
           bdrv_flush_co_entry(&rwco);
       } else {
           co = qemu_coroutine_create(bdrv_flush_co_entry);
           qemu_coroutine_enter(co, &rwco);
           while (rwco.ret == NOT_DONE) {
               qemu_aio_wait();
           }
       }
   
       return rwco.ret;
 }  }
   
 void bdrv_init_with_whitelist(void)  static void coroutine_fn bdrv_discard_co_entry(void *opaque)
 {  {
     use_bdrv_whitelist = 1;      RwCo *rwco = opaque;
     bdrv_init();  
       rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
 }  }
   
 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,  int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                    BlockDriverCompletionFunc *cb, void *opaque)                                   int nb_sectors)
 {  {
     BlockDriverAIOCB *acb;      if (!bs->drv) {
           return -ENOMEDIUM;
       } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
           return -EIO;
       } else if (bs->read_only) {
           return -EROFS;
       } else if (bs->drv->bdrv_co_discard) {
           return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
       } else if (bs->drv->bdrv_aio_discard) {
           BlockDriverAIOCB *acb;
           CoroutineIOCompletion co = {
               .coroutine = qemu_coroutine_self(),
           };
   
     if (pool->free_aiocb) {          acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
         acb = pool->free_aiocb;                                          bdrv_co_io_em_complete, &co);
         pool->free_aiocb = acb->next;          if (acb == NULL) {
               return -EIO;
           } else {
               qemu_coroutine_yield();
               return co.ret;
           }
     } else {      } else {
         acb = qemu_mallocz(pool->aiocb_size);          return 0;
         acb->pool = pool;  
     }      }
     acb->bs = bs;  
     acb->cb = cb;  
     acb->opaque = opaque;  
     return acb;  
 }  }
   
 void qemu_aio_release(void *p)  int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
 {  {
     BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;      Coroutine *co;
     AIOPool *pool = acb->pool;      RwCo rwco = {
     acb->next = pool->free_aiocb;          .bs = bs,
     pool->free_aiocb = acb;          .sector_num = sector_num,
           .nb_sectors = nb_sectors,
           .ret = NOT_DONE,
       };
   
       if (qemu_in_coroutine()) {
           /* Fast-path if already in coroutine context */
           bdrv_discard_co_entry(&rwco);
       } else {
           co = qemu_coroutine_create(bdrv_discard_co_entry);
           qemu_coroutine_enter(co, &rwco);
           while (rwco.ret == NOT_DONE) {
               qemu_aio_wait();
           }
       }
   
       return rwco.ret;
 }  }
   
 /**************************************************************/  /**************************************************************/
Line 2091  void qemu_aio_release(void *p) Line 3831  void qemu_aio_release(void *p)
 int bdrv_is_inserted(BlockDriverState *bs)  int bdrv_is_inserted(BlockDriverState *bs)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     int ret;  
     if (!drv)      if (!drv)
         return 0;          return 0;
     if (!drv->bdrv_is_inserted)      if (!drv->bdrv_is_inserted)
         return 1;          return 1;
     ret = drv->bdrv_is_inserted(bs);      return drv->bdrv_is_inserted(bs);
     return ret;  
 }  }
   
 /**  /**
  * Return TRUE if the media changed since the last call to this   * Return whether the media changed since the last call to this
  * function. It is currently only used for floppy disks   * function, or -ENOTSUP if we don't know.  Most drivers don't know.
  */   */
 int bdrv_media_changed(BlockDriverState *bs)  int bdrv_media_changed(BlockDriverState *bs)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     int ret;  
   
     if (!drv || !drv->bdrv_media_changed)      if (drv && drv->bdrv_media_changed) {
         ret = -ENOTSUP;          return drv->bdrv_media_changed(bs);
     else      }
         ret = drv->bdrv_media_changed(bs);      return -ENOTSUP;
     if (ret == -ENOTSUP)  
         ret = bs->media_changed;  
     bs->media_changed = 0;  
     return ret;  
 }  }
   
 /**  /**
  * If eject_flag is TRUE, eject the media. Otherwise, close the tray   * If eject_flag is TRUE, eject the media. Otherwise, close the tray
  */   */
 int bdrv_eject(BlockDriverState *bs, int eject_flag)  void bdrv_eject(BlockDriverState *bs, bool eject_flag)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     int ret;  
   
     if (bs->locked) {      if (drv && drv->bdrv_eject) {
         return -EBUSY;          drv->bdrv_eject(bs, eject_flag);
     }      }
   
     if (!drv || !drv->bdrv_eject) {      if (bs->device_name[0] != '\0') {
         ret = -ENOTSUP;          bdrv_emit_qmp_eject_event(bs, eject_flag);
     } else {  
         ret = drv->bdrv_eject(bs, eject_flag);  
     }      }
     if (ret == -ENOTSUP) {  
         if (eject_flag)  
             bdrv_close(bs);  
         ret = 0;  
     }  
   
     return ret;  
 }  
   
 int bdrv_is_locked(BlockDriverState *bs)  
 {  
     return bs->locked;  
 }  }
   
 /**  /**
  * Lock or unlock the media (if it is locked, the user won't be able   * Lock or unlock the media (if it is locked, the user won't be able
  * to eject it manually).   * to eject it manually).
  */   */
 void bdrv_set_locked(BlockDriverState *bs, int locked)  void bdrv_lock_medium(BlockDriverState *bs, bool locked)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
   
     bs->locked = locked;      trace_bdrv_lock_medium(bs, locked);
     if (drv && drv->bdrv_set_locked) {  
         drv->bdrv_set_locked(bs, locked);      if (drv && drv->bdrv_lock_medium) {
           drv->bdrv_lock_medium(bs, locked);
     }      }
 }  }
   
Line 2186  BlockDriverAIOCB *bdrv_aio_ioctl(BlockDr Line 3906  BlockDriverAIOCB *bdrv_aio_ioctl(BlockDr
     return NULL;      return NULL;
 }  }
   
    /* Record the memory alignment required for I/O buffers on this device
     * (consumed by qemu_blockalign). */
    void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
    {
        bs->buffer_alignment = align;
    }
   
 void *qemu_blockalign(BlockDriverState *bs, size_t size)  void *qemu_blockalign(BlockDriverState *bs, size_t size)
 {  {
Line 2197  void bdrv_set_dirty_tracking(BlockDriver Line 3920  void bdrv_set_dirty_tracking(BlockDriver
 {  {
     int64_t bitmap_size;      int64_t bitmap_size;
   
       bs->dirty_count = 0;
     if (enable) {      if (enable) {
         if (!bs->dirty_bitmap) {          if (!bs->dirty_bitmap) {
             bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +              bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
                     BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;                      BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG - 1;
             bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;              bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG;
   
             bs->dirty_bitmap = qemu_mallocz(bitmap_size);              bs->dirty_bitmap = g_new0(unsigned long, bitmap_size);
         }          }
     } else {      } else {
         if (bs->dirty_bitmap) {          if (bs->dirty_bitmap) {
             qemu_free(bs->dirty_bitmap);              g_free(bs->dirty_bitmap);
             bs->dirty_bitmap = NULL;              bs->dirty_bitmap = NULL;
         }          }
     }      }
Line 2219  int bdrv_get_dirty(BlockDriverState *bs, Line 3943  int bdrv_get_dirty(BlockDriverState *bs,
   
     if (bs->dirty_bitmap &&      if (bs->dirty_bitmap &&
         (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {          (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
         return bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &          return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
             (1 << (chunk % (sizeof(unsigned long) * 8)));              (1UL << (chunk % (sizeof(unsigned long) * 8))));
     } else {      } else {
         return 0;          return 0;
     }      }
Line 2231  void bdrv_reset_dirty(BlockDriverState * Line 3955  void bdrv_reset_dirty(BlockDriverState *
 {  {
     set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);      set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
 }  }
   
    /* Return the current number of dirty chunks tracked for this device. */
    int64_t bdrv_get_dirty_count(BlockDriverState *bs)
    {
        return bs->dirty_count;
    }
   
    /* Mark the device as in use (or no longer in use) by a job or
     * migration.  The assertion catches unbalanced set/clear pairs. */
    void bdrv_set_in_use(BlockDriverState *bs, int in_use)
    {
        assert(bs->in_use != in_use);
        bs->in_use = in_use;
    }
   
    /* Return non-zero if the device is marked in use (see bdrv_set_in_use). */
    int bdrv_in_use(BlockDriverState *bs)
    {
        return bs->in_use;
    }
   
    /* Enable I/O status tracking for this device and reset the status
     * to OK. */
    void bdrv_iostatus_enable(BlockDriverState *bs)
    {
        bs->iostatus_enabled = true;
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    }
   
   /* The I/O status is only enabled if the drive explicitly
    * enables it _and_ the VM is configured to stop on errors */
   bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
   {
       return (bs->iostatus_enabled &&
              (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
               bs->on_write_error == BLOCK_ERR_STOP_ANY    ||
               bs->on_read_error == BLOCK_ERR_STOP_ANY));
   }
   
    /* Disable I/O status tracking for this device. */
    void bdrv_iostatus_disable(BlockDriverState *bs)
    {
        bs->iostatus_enabled = false;
    }
   
    /* Reset the I/O status to OK, but only when status tracking is enabled. */
    void bdrv_iostatus_reset(BlockDriverState *bs)
    {
        if (bdrv_iostatus_is_enabled(bs)) {
            bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        }
    }
   
   /* XXX: Today this is set by device models because it makes the implementation
      quite simple. However, the block layer knows about the error, so it's
      possible to implement this without device models being involved */
   void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
   {
       if (bdrv_iostatus_is_enabled(bs) &&
           bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
           assert(error >= 0);
           bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                            BLOCK_DEVICE_IO_STATUS_FAILED;
       }
   }
   
    /* Begin accounting for one I/O request: record its size, start time
     * and type in @cookie, to be consumed later by bdrv_acct_done(). */
    void
    bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
            enum BlockAcctType type)
    {
        assert(type < BDRV_MAX_IOTYPE);

        cookie->bytes = bytes;
        cookie->start_time_ns = get_clock();
        cookie->type = type;
    }
   
    /* Finish accounting for a request started with bdrv_acct_start(): add
     * its bytes, operation count and elapsed time to the device's per-type
     * statistics. */
    void
    bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
    {
        assert(cookie->type < BDRV_MAX_IOTYPE);

        bs->nr_bytes[cookie->type] += cookie->bytes;
        bs->nr_ops[cookie->type]++;
        bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
    }
   
   int bdrv_img_create(const char *filename, const char *fmt,
                       const char *base_filename, const char *base_fmt,
                       char *options, uint64_t img_size, int flags)
   {
       QEMUOptionParameter *param = NULL, *create_options = NULL;
       QEMUOptionParameter *backing_fmt, *backing_file, *size;
       BlockDriverState *bs = NULL;
       BlockDriver *drv, *proto_drv;
       BlockDriver *backing_drv = NULL;
       int ret = 0;
   
       /* Find driver and parse its options */
       drv = bdrv_find_format(fmt);
       if (!drv) {
           error_report("Unknown file format '%s'", fmt);
           ret = -EINVAL;
           goto out;
       }
   
       proto_drv = bdrv_find_protocol(filename);
       if (!proto_drv) {
           error_report("Unknown protocol '%s'", filename);
           ret = -EINVAL;
           goto out;
       }
   
       create_options = append_option_parameters(create_options,
                                                 drv->create_options);
       create_options = append_option_parameters(create_options,
                                                 proto_drv->create_options);
   
       /* Create parameter list with default values */
       param = parse_option_parameters("", create_options, param);
   
       set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
   
       /* Parse -o options */
       if (options) {
           param = parse_option_parameters(options, create_options, param);
           if (param == NULL) {
               error_report("Invalid options for file format '%s'.", fmt);
               ret = -EINVAL;
               goto out;
           }
       }
   
       if (base_filename) {
           if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                    base_filename)) {
               error_report("Backing file not supported for file format '%s'",
                            fmt);
               ret = -EINVAL;
               goto out;
           }
       }
   
       if (base_fmt) {
           if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
               error_report("Backing file format not supported for file "
                            "format '%s'", fmt);
               ret = -EINVAL;
               goto out;
           }
       }
   
       backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
       if (backing_file && backing_file->value.s) {
           if (!strcmp(filename, backing_file->value.s)) {
               error_report("Error: Trying to create an image with the "
                            "same filename as the backing file");
               ret = -EINVAL;
               goto out;
           }
       }
   
       backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
       if (backing_fmt && backing_fmt->value.s) {
           backing_drv = bdrv_find_format(backing_fmt->value.s);
           if (!backing_drv) {
               error_report("Unknown backing file format '%s'",
                            backing_fmt->value.s);
               ret = -EINVAL;
               goto out;
           }
       }
   
       // The size for the image must always be specified, with one exception:
       // If we are using a backing file, we can obtain the size from there
       size = get_option_parameter(param, BLOCK_OPT_SIZE);
       if (size && size->value.n == -1) {
           if (backing_file && backing_file->value.s) {
               uint64_t size;
               char buf[32];
               int back_flags;
   
               /* backing files always opened read-only */
               back_flags =
                   flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
   
               bs = bdrv_new("");
   
               ret = bdrv_open(bs, backing_file->value.s, back_flags, backing_drv);
               if (ret < 0) {
                   error_report("Could not open '%s'", backing_file->value.s);
                   goto out;
               }
               bdrv_get_geometry(bs, &size);
               size *= 512;
   
               snprintf(buf, sizeof(buf), "%" PRId64, size);
               set_option_parameter(param, BLOCK_OPT_SIZE, buf);
           } else {
               error_report("Image creation needs a size parameter");
               ret = -EINVAL;
               goto out;
           }
       }
   
       printf("Formatting '%s', fmt=%s ", filename, fmt);
       print_option_parameters(param);
       puts("");
   
       ret = bdrv_create(drv, filename, param);
   
       if (ret < 0) {
           if (ret == -ENOTSUP) {
               error_report("Formatting or formatting option not supported for "
                            "file format '%s'", fmt);
           } else if (ret == -EFBIG) {
               error_report("The image size is too large for file format '%s'",
                            fmt);
           } else {
               error_report("%s: error while creating %s: %s", filename, fmt,
                            strerror(-ret));
           }
       }
   
   out:
       free_option_parameters(create_options);
       free_option_parameters(param);
   
       if (bs) {
           bdrv_delete(bs);
       }
   
       return ret;
   }
   
/**
 * Create a new long-running block job on @bs.
 *
 * Fails with QERR_DEVICE_IN_USE if the device already has a job or is
 * otherwise marked in-use.  The job struct is zero-allocated at
 * @job_type->instance_size so callers may embed BlockJob as the first
 * member of a larger job-specific struct.
 *
 * @speed: initial rate limit; 0 means "leave unset" so that job types
 *         without a set_speed hook are not rejected spuriously.
 * Returns the new job, or NULL (with @errp set) on failure.
 */
void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
                       int64_t speed, BlockDriverCompletionFunc *cb,
                       void *opaque, Error **errp)
{
    BlockJob *job;
    Error *local_err = NULL;

    /* Only one job per device, and the device must not be claimed
     * by anyone else.  */
    if (bs->job || bdrv_in_use(bs)) {
        error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }
    bdrv_set_in_use(bs, 1);

    job = g_malloc0(job_type->instance_size);
    job->job_type = job_type;
    job->bs      = bs;
    job->cb      = cb;
    job->opaque  = opaque;
    job->busy    = true;
    bs->job = job;

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed == 0) {
        return job;
    }

    block_job_set_speed(job, speed, &local_err);
    if (!error_is_set(&local_err)) {
        return job;
    }

    /* The requested speed was rejected: roll back everything above.  */
    bs->job = NULL;
    g_free(job);
    bdrv_set_in_use(bs, 0);
    error_propagate(errp, local_err);
    return NULL;
}
   
/**
 * Finish @job: report @ret to the user's completion callback, then
 * free the job and release the in-use claim on its device.
 *
 * The callback runs first, while bs->job still points at @job, so it
 * may observe the job; @job is invalid once this function returns.
 */
void block_job_complete(BlockJob *job, int ret)
{
    BlockDriverState *bs = job->bs;
    BlockDriverCompletionFunc *cb = job->cb;
    void *cb_opaque = job->opaque;

    assert(bs->job == job);
    cb(cb_opaque, ret);
    bs->job = NULL;
    g_free(job);
    bdrv_set_in_use(bs, 0);
}
   
/**
 * Ask @job to throttle itself to @speed.
 *
 * Delegates to the job type's set_speed hook; sets QERR_NOT_SUPPORTED
 * if the job type has none.  job->speed is only updated after the hook
 * accepts the new value.
 */
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->job_type->set_speed) {
        error_set(errp, QERR_NOT_SUPPORTED);
        return;
    }

    job->job_type->set_speed(job, speed, &local_err);
    if (local_err != NULL) {
        /* Hook rejected the value; leave job->speed untouched.  */
        error_propagate(errp, local_err);
        return;
    }
    job->speed = speed;
}
   
/**
 * Request asynchronous cancellation of @job.
 *
 * Sets the cancelled flag and, if the job coroutine is currently
 * parked (not busy), re-enters it so it can notice the flag promptly.
 */
void block_job_cancel(BlockJob *job)
{
    job->cancelled = true;

    /* A busy job will see the flag on its own; only kick a sleeper.  */
    if (!job->co || job->busy) {
        return;
    }
    qemu_coroutine_enter(job->co, NULL);
}
   
/* Return true if cancellation of @job has been requested.  The job may
 * still be running; it is expected to poll this and wind down. */
bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}
   
/* Bookkeeping for block_job_cancel_sync(): saves the job's original
 * completion callback so block_job_cancel_cb() can record the outcome
 * and then chain to it. */
struct BlockCancelData {
    BlockJob *job;                  /* job being cancelled */
    BlockDriverCompletionFunc *cb;  /* original completion callback */
    void *opaque;                   /* original callback argument */
    bool cancelled;                 /* was the job cancelled (vs. finished)? */
    int ret;                        /* final result; -EINPROGRESS until done */
};
   
/* Completion shim installed by block_job_cancel_sync(): record the
 * outcome in the BlockCancelData, then invoke the job's original
 * callback with its original argument. */
static void block_job_cancel_cb(void *opaque, int ret)
{
    struct BlockCancelData *cd = opaque;

    cd->ret = ret;
    cd->cancelled = block_job_is_cancelled(cd->job);
    cd->cb(cd->opaque, ret);
}
   
/**
 * Cancel @job and busy-wait (servicing AIO) until it completes.
 *
 * Returns 0 if the job completed successfully before the cancel took
 * effect, -ECANCELED if it was actually cancelled, or the job's own
 * negative error code.  @job is freed by the time this returns.
 */
int block_job_cancel_sync(BlockJob *job)
{
    struct BlockCancelData data;
    BlockDriverState *bs = job->bs;

    assert(bs->job == job);

    /* Set up our own callback to store the result and chain to
     * the original callback.
     */
    data.job = job;
    data.cb = job->cb;
    data.opaque = job->opaque;
    data.ret = -EINPROGRESS;       /* sentinel: not finished yet */
    job->cb = block_job_cancel_cb;
    job->opaque = &data;
    block_job_cancel(job);
    /* Pump the AIO loop until block_job_cancel_cb overwrites data.ret;
     * data must stay on this stack frame until then. */
    while (data.ret == -EINPROGRESS) {
        qemu_aio_wait();
    }
    /* A job that finished cleanly but was flagged cancelled still
     * reports -ECANCELED to the caller. */
    return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
}
   
/**
 * Yield the job coroutine for @ns nanoseconds on @clock, unless
 * cancellation is pending, in which case return immediately.
 *
 * busy is cleared only for the duration of the sleep so that
 * block_job_cancel() knows it may safely re-enter the coroutine.
 */
void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
{
    /* Check cancellation *before* setting busy = false, too!  */
    if (!block_job_is_cancelled(job)) {
        job->busy = false;
        co_sleep_ns(clock, ns);
        job->busy = true;
    }
}

Removed from v.1.1.1.17  
changed lines
  Added in v.1.1.1.23


unix.superglobalmegacorp.com