Diff for /qemu/block.c between versions 1.1.1.21 and 1.1.1.23

version 1.1.1.21, 2018/04/24 18:56:16 version 1.1.1.23, 2018/04/24 19:34:33
Line 27 Line 27
 #include "monitor.h"  #include "monitor.h"
 #include "block_int.h"  #include "block_int.h"
 #include "module.h"  #include "module.h"
 #include "qemu-objects.h"  #include "qjson.h"
   #include "qemu-coroutine.h"
   #include "qmp-commands.h"
   #include "qemu-timer.h"
   
 #ifdef CONFIG_BSD  #ifdef CONFIG_BSD
 #include <sys/types.h>  #include <sys/types.h>
Line 43 Line 46
 #include <windows.h>  #include <windows.h>
 #endif  #endif
   
   #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
   
   typedef enum {
       BDRV_REQ_COPY_ON_READ = 0x1,
       BDRV_REQ_ZERO_WRITE   = 0x2,
   } BdrvRequestFlags;
   
   static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,  static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,          int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque);          BlockDriverCompletionFunc *cb, void *opaque);
 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,  static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,          int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque);          BlockDriverCompletionFunc *cb, void *opaque);
 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,  static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque);                                           int64_t sector_num, int nb_sectors,
 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,                                           QEMUIOVector *iov);
         BlockDriverCompletionFunc *cb, void *opaque);  static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,                                           int64_t sector_num, int nb_sectors,
                         uint8_t *buf, int nb_sectors);                                           QEMUIOVector *iov);
 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,  static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
                          const uint8_t *buf, int nb_sectors);      int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
       BdrvRequestFlags flags);
   static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
       BdrvRequestFlags flags);
   static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                                  int64_t sector_num,
                                                  QEMUIOVector *qiov,
                                                  int nb_sectors,
                                                  BlockDriverCompletionFunc *cb,
                                                  void *opaque,
                                                  bool is_write);
   static void coroutine_fn bdrv_co_do_rw(void *opaque);
   static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors);
   
   static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
           bool is_write, double elapsed_time, uint64_t *wait);
   static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
           double elapsed_time, uint64_t *wait);
   static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
           bool is_write, int64_t *wait);
   
 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =  static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
     QTAILQ_HEAD_INITIALIZER(bdrv_states);      QTAILQ_HEAD_INITIALIZER(bdrv_states);
Line 90  int is_windows_drive(const char *filenam Line 122  int is_windows_drive(const char *filenam
 }  }
 #endif  #endif
   
   /* throttling disk I/O limits */
   void bdrv_io_limits_disable(BlockDriverState *bs)
   {
       bs->io_limits_enabled = false;
   
       while (qemu_co_queue_next(&bs->throttled_reqs));
   
       if (bs->block_timer) {
           qemu_del_timer(bs->block_timer);
           qemu_free_timer(bs->block_timer);
           bs->block_timer = NULL;
       }
   
       bs->slice_start = 0;
       bs->slice_end   = 0;
       bs->slice_time  = 0;
       memset(&bs->io_base, 0, sizeof(bs->io_base));
   }
   
   static void bdrv_block_timer(void *opaque)
   {
       BlockDriverState *bs = opaque;
   
       qemu_co_queue_next(&bs->throttled_reqs);
   }
   
   void bdrv_io_limits_enable(BlockDriverState *bs)
   {
       qemu_co_queue_init(&bs->throttled_reqs);
       bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
       bs->slice_time  = 5 * BLOCK_IO_SLICE_TIME;
       bs->slice_start = qemu_get_clock_ns(vm_clock);
       bs->slice_end   = bs->slice_start + bs->slice_time;
       memset(&bs->io_base, 0, sizeof(bs->io_base));
       bs->io_limits_enabled = true;
   }
   
   bool bdrv_io_limits_enabled(BlockDriverState *bs)
   {
       BlockIOLimit *io_limits = &bs->io_limits;
       return io_limits->bps[BLOCK_IO_LIMIT_READ]
            || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
            || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
            || io_limits->iops[BLOCK_IO_LIMIT_READ]
            || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
            || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
   }
   
   static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                        bool is_write, int nb_sectors)
   {
       int64_t wait_time = -1;
   
       if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
           qemu_co_queue_wait(&bs->throttled_reqs);
       }
   
    /* Requests should keep their FIFO ordering: the next throttled
     * request is not dequeued until the current request is allowed to
     * be serviced.  So if the current request still exceeds the limits,
     * it is re-inserted at the head of the queue, and every request
     * behind it stays in throttled_reqs.
     */
   
       while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
           qemu_mod_timer(bs->block_timer,
                          wait_time + qemu_get_clock_ns(vm_clock));
           qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
       }
   
       qemu_co_queue_next(&bs->throttled_reqs);
   }
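
The bodies of the bdrv_exceed_* helpers declared above fall outside this hunk. As a rough sketch of the slice-based accounting they perform (the nr_bytes counter and the io_base.bytes member are assumptions about BlockDriverState fields, and only the total-bps limit is checked here for brevity):

    static bool example_exceed_bps(BlockDriverState *bs, int nb_sectors,
                                   bool is_write, uint64_t *wait)
    {
        double bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
        double bytes_limit, bytes_done;

        if (bps_limit <= 0) {
            *wait = 0;
            return false;                      /* no limit configured */
        }

        /* bytes permitted in the current slice (slice_time is in ns) */
        bytes_limit = bps_limit * bs->slice_time / 1e9;

        /* bytes already issued in this slice, plus this request
         * (assumed counters) */
        bytes_done = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write]
                   + (double)nb_sectors * BDRV_SECTOR_SIZE;

        if (bytes_done <= bytes_limit) {
            *wait = 0;
            return false;                      /* within budget */
        }

        /* wait (in ns) until the slice has room for the excess bytes */
        *wait = (uint64_t)((bytes_done - bytes_limit) / bps_limit * 1e9);
        return true;
    }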
   
 /* check if the path starts with "<protocol>:" */  /* check if the path starts with "<protocol>:" */
 static int path_has_protocol(const char *path)  static int path_has_protocol(const char *path)
 {  {
       const char *p;
   
 #ifdef _WIN32  #ifdef _WIN32
     if (is_windows_drive(path) ||      if (is_windows_drive(path) ||
         is_windows_drive_prefix(path)) {          is_windows_drive_prefix(path)) {
         return 0;          return 0;
     }      }
       p = path + strcspn(path, ":/\\");
   #else
       p = path + strcspn(path, ":/");
 #endif  #endif
   
     return strchr(path, ':') != NULL;      return *p == ':';
 }  }
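
With the strcspn() rewrite, a colon only marks a protocol prefix if it appears before the first directory separator, so colons inside path components no longer confuse the check. Illustrative behaviour (hypothetical test code, not part of this diff):

    assert(path_has_protocol("nbd:exportname") == 1);  /* ':' comes first   */
    assert(path_has_protocol("/dev/fd0:0") == 0);      /* '/' before ':'    */
    assert(path_has_protocol("image.qcow2") == 0);     /* no ':' at all     */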
   
 int path_is_absolute(const char *path)  int path_is_absolute(const char *path)
 {  {
     const char *p;  
 #ifdef _WIN32  #ifdef _WIN32
     /* specific case for names like: "\\.\d:" */      /* specific case for names like: "\\.\d:" */
     if (*path == '/' || *path == '\\')      if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
         return 1;          return 1;
 #endif      }
     p = strchr(path, ':');      return (*path == '/' || *path == '\\');
     if (p)  
         p++;  
     else  
         p = path;  
 #ifdef _WIN32  
     return (*p == '/' || *p == '\\');  
 #else  #else
     return (*p == '/');      return (*path == '/');
 #endif  #endif
 }  }
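
The rewritten path_is_absolute() no longer skips over a "protocol:" prefix before testing the first character, and the Windows branch now recognizes drive letters explicitly. Illustrative cases for a POSIX build (hypothetical test code):

    assert(path_is_absolute("/var/run/img.qcow2") == 1);
    assert(path_is_absolute("img.qcow2") == 0);
    assert(path_is_absolute("fat:/dir") == 0);  /* prefix no longer skipped */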
   
Line 167  void path_combine(char *dest, int dest_s Line 270  void path_combine(char *dest, int dest_s
     }      }
 }  }
   
 void bdrv_register(BlockDriver *bdrv)  void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
 {  {
     if (!bdrv->bdrv_aio_readv) {      if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
         /* add AIO emulation layer */          pstrcpy(dest, sz, bs->backing_file);
         bdrv->bdrv_aio_readv = bdrv_aio_readv_em;      } else {
         bdrv->bdrv_aio_writev = bdrv_aio_writev_em;          path_combine(dest, sz, bs->filename, bs->backing_file);
     } else if (!bdrv->bdrv_read) {  
         /* add synchronous IO emulation layer */  
         bdrv->bdrv_read = bdrv_read_em;  
         bdrv->bdrv_write = bdrv_write_em;  
     }      }
   }
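
A minimal usage sketch of the new helper (the buffer and its size are illustrative):

    char backing_filename[PATH_MAX];

    /* Copies bs->backing_file verbatim if it is empty or already has a
     * protocol prefix; otherwise resolves it relative to bs->filename. */
    bdrv_get_full_backing_filename(bs, backing_filename,
                                   sizeof(backing_filename));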
   
     if (!bdrv->bdrv_aio_flush)  void bdrv_register(BlockDriver *bdrv)
         bdrv->bdrv_aio_flush = bdrv_aio_flush_em;  {
       /* Block drivers without coroutine functions need emulation */
       if (!bdrv->bdrv_co_readv) {
           bdrv->bdrv_co_readv = bdrv_co_readv_em;
           bdrv->bdrv_co_writev = bdrv_co_writev_em;
   
        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
            * the block driver lacks aio we need to emulate that too.
            */
           if (!bdrv->bdrv_aio_readv) {
               /* add AIO emulation layer */
               bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
               bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
           }
       }
   
     QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);      QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
 }  }
Line 190  BlockDriverState *bdrv_new(const char *d Line 304  BlockDriverState *bdrv_new(const char *d
 {  {
     BlockDriverState *bs;      BlockDriverState *bs;
   
     bs = qemu_mallocz(sizeof(BlockDriverState));      bs = g_malloc0(sizeof(BlockDriverState));
     pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);      pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
     if (device_name[0] != '\0') {      if (device_name[0] != '\0') {
         QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);          QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
     }      }
       bdrv_iostatus_disable(bs);
     return bs;      return bs;
 }  }
   
Line 233  BlockDriver *bdrv_find_whitelisted_forma Line 348  BlockDriver *bdrv_find_whitelisted_forma
     return drv && bdrv_is_whitelisted(drv) ? drv : NULL;      return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
 }  }
   
   typedef struct CreateCo {
       BlockDriver *drv;
       char *filename;
       QEMUOptionParameter *options;
       int ret;
   } CreateCo;
   
   static void coroutine_fn bdrv_create_co_entry(void *opaque)
   {
       CreateCo *cco = opaque;
       assert(cco->drv);
   
       cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
   }
   
 int bdrv_create(BlockDriver *drv, const char* filename,  int bdrv_create(BlockDriver *drv, const char* filename,
     QEMUOptionParameter *options)      QEMUOptionParameter *options)
 {  {
     if (!drv->bdrv_create)      int ret;
   
       Coroutine *co;
       CreateCo cco = {
           .drv = drv,
           .filename = g_strdup(filename),
           .options = options,
           .ret = NOT_DONE,
       };
   
       if (!drv->bdrv_create) {
         return -ENOTSUP;          return -ENOTSUP;
       }
   
       if (qemu_in_coroutine()) {
           /* Fast-path if already in coroutine context */
           bdrv_create_co_entry(&cco);
       } else {
           co = qemu_coroutine_create(bdrv_create_co_entry);
           qemu_coroutine_enter(co, &cco);
           while (cco.ret == NOT_DONE) {
               qemu_aio_wait();
           }
       }
   
     return drv->bdrv_create(filename, options);      ret = cco.ret;
       g_free(cco.filename);
   
       return ret;
 }  }
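
bdrv_create() introduces the synchronous-over-coroutine pattern used throughout this change set: pack the arguments into a context struct whose ret starts as NOT_DONE, run the entry function directly when already in coroutine context, and otherwise create a coroutine and pump qemu_aio_wait() until ret changes. A condensed sketch of the pattern (struct and function names are illustrative):

    typedef struct {
        int ret;                    /* NOT_DONE until the coroutine finishes */
        /* ... operation-specific arguments ... */
    } ExampleCo;

    static void coroutine_fn example_entry(void *opaque)
    {
        ExampleCo *ec = opaque;
        ec->ret = 0;                /* the real work would happen here */
    }

    static int example_sync(void)
    {
        ExampleCo ec = { .ret = NOT_DONE };

        if (qemu_in_coroutine()) {
            example_entry(&ec);     /* fast path: already in a coroutine */
        } else {
            Coroutine *co = qemu_coroutine_create(example_entry);
            qemu_coroutine_enter(co, &ec);
            while (ec.ret == NOT_DONE) {
                qemu_aio_wait();    /* let pending AIO drive the coroutine */
            }
        }
        return ec.ret;
    }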
   
 int bdrv_create_file(const char* filename, QEMUOptionParameter *options)  int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
Line 254  int bdrv_create_file(const char* filenam Line 409  int bdrv_create_file(const char* filenam
     return bdrv_create(drv, filename, options);      return bdrv_create(drv, filename, options);
 }  }
   
 #ifdef _WIN32  /*
 void get_tmp_filename(char *filename, int size)   * Create a uniquely-named empty temporary file.
    * Return 0 upon success, otherwise a negative errno value.
    */
   int get_tmp_filename(char *filename, int size)
 {  {
   #ifdef _WIN32
     char temp_dir[MAX_PATH];      char temp_dir[MAX_PATH];
       /* GetTempFileName requires that its output buffer (4th param)
     GetTempPath(MAX_PATH, temp_dir);         have length MAX_PATH or greater.  */
     GetTempFileName(temp_dir, "qem", 0, filename);      assert(size >= MAX_PATH);
 }      return (GetTempPath(MAX_PATH, temp_dir)
               && GetTempFileName(temp_dir, "qem", 0, filename)
               ? 0 : -GetLastError());
 #else  #else
 void get_tmp_filename(char *filename, int size)  
 {  
     int fd;      int fd;
     const char *tmpdir;      const char *tmpdir;
     /* XXX: race condition possible */  
     tmpdir = getenv("TMPDIR");      tmpdir = getenv("TMPDIR");
     if (!tmpdir)      if (!tmpdir)
         tmpdir = "/tmp";          tmpdir = "/tmp";
     snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);      if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
           return -EOVERFLOW;
       }
     fd = mkstemp(filename);      fd = mkstemp(filename);
     close(fd);      if (fd < 0 || close(fd)) {
 }          return -errno;
       }
       return 0;
 #endif  #endif
   }
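
Callers are now expected to check the result, as the snapshot path in bdrv_open() does below; a minimal sketch:

    char tmp_filename[PATH_MAX];
    int ret;

    ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
    if (ret < 0) {      /* -errno on POSIX, -GetLastError() on win32 */
        return ret;
    }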
   
 /*  /*
  * Detect host devices. By convention, /dev/cdrom[N] is always   * Detect host devices. By convention, /dev/cdrom[N] is always
Line 412  static int refresh_total_sectors(BlockDr Line 575  static int refresh_total_sectors(BlockDr
     return 0;      return 0;
 }  }
   
   /**
    * Set open flags for a given cache mode
    *
    * Return 0 on success, -1 if the cache mode was invalid.
    */
   int bdrv_parse_cache_flags(const char *mode, int *flags)
   {
       *flags &= ~BDRV_O_CACHE_MASK;
   
       if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
           *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
       } else if (!strcmp(mode, "directsync")) {
           *flags |= BDRV_O_NOCACHE;
       } else if (!strcmp(mode, "writeback")) {
           *flags |= BDRV_O_CACHE_WB;
       } else if (!strcmp(mode, "unsafe")) {
           *flags |= BDRV_O_CACHE_WB;
           *flags |= BDRV_O_NO_FLUSH;
       } else if (!strcmp(mode, "writethrough")) {
           /* this is the default */
       } else {
           return -1;
       }
   
       return 0;
   }
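
A usage sketch for the new parser; the mode string would typically come from a -drive cache=... option (the caller shown here is illustrative):

    int flags = 0;

    if (bdrv_parse_cache_flags("none", &flags) != 0) {
        return -EINVAL;                     /* unknown cache mode */
    }
    /* "none" selects host-cache bypass with write-back completion:
     * flags == (BDRV_O_NOCACHE | BDRV_O_CACHE_WB) */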
   
   /**
    * The copy-on-read flag is actually a reference count so multiple users may
    * use the feature without worrying about clobbering its previous state.
    * Copy-on-read stays enabled until all users have called to disable it.
    */
   void bdrv_enable_copy_on_read(BlockDriverState *bs)
   {
       bs->copy_on_read++;
   }
   
   void bdrv_disable_copy_on_read(BlockDriverState *bs)
   {
       assert(bs->copy_on_read > 0);
       bs->copy_on_read--;
   }
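
Because the flag is a reference count, every enable must eventually be paired with a disable; a sketch:

    bdrv_enable_copy_on_read(bs);    /* e.g. for the lifetime of a block job */

    /* ... reads now populate the local image as a side effect ... */

    bdrv_disable_copy_on_read(bs);   /* balanced with the enable above */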
   
 /*  /*
  * Common part for opening disk images and files   * Common part for opening disk images and files
  */   */
Line 421  static int bdrv_open_common(BlockDriverS Line 627  static int bdrv_open_common(BlockDriverS
     int ret, open_flags;      int ret, open_flags;
   
     assert(drv != NULL);      assert(drv != NULL);
       assert(bs->file == NULL);
   
       trace_bdrv_open_common(bs, filename, flags, drv->format_name);
   
     bs->file = NULL;  
     bs->total_sectors = 0;  
     bs->encrypted = 0;  
     bs->valid_key = 0;  
     bs->open_flags = flags;      bs->open_flags = flags;
     /* buffer_alignment defaulted to 512, drivers can change this value */  
     bs->buffer_alignment = 512;      bs->buffer_alignment = 512;
   
       assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
       if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
           bdrv_enable_copy_on_read(bs);
       }
   
     pstrcpy(bs->filename, sizeof(bs->filename), filename);      pstrcpy(bs->filename, sizeof(bs->filename), filename);
   
     if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {      if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
Line 437  static int bdrv_open_common(BlockDriverS Line 646  static int bdrv_open_common(BlockDriverS
     }      }
   
     bs->drv = drv;      bs->drv = drv;
     bs->opaque = qemu_mallocz(drv->instance_size);      bs->opaque = g_malloc0(drv->instance_size);
   
     if (flags & BDRV_O_CACHE_WB)      bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
         bs->enable_write_cache = 1;  
   
     /*      /*
      * Clear flags that are internal to the block layer before opening the       * Clear flags that are internal to the block layer before opening the
Line 455  static int bdrv_open_common(BlockDriverS Line 663  static int bdrv_open_common(BlockDriverS
         open_flags |= BDRV_O_RDWR;          open_flags |= BDRV_O_RDWR;
     }      }
   
       bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);
   
     /* Open the image, either directly or using a protocol */      /* Open the image, either directly or using a protocol */
     if (drv->bdrv_file_open) {      if (drv->bdrv_file_open) {
         ret = drv->bdrv_file_open(bs, filename, open_flags);          ret = drv->bdrv_file_open(bs, filename, open_flags);
Line 469  static int bdrv_open_common(BlockDriverS Line 679  static int bdrv_open_common(BlockDriverS
         goto free_and_fail;          goto free_and_fail;
     }      }
   
     bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);  
   
     ret = refresh_total_sectors(bs, bs->total_sectors);      ret = refresh_total_sectors(bs, bs->total_sectors);
     if (ret < 0) {      if (ret < 0) {
         goto free_and_fail;          goto free_and_fail;
Line 488  free_and_fail: Line 696  free_and_fail:
         bdrv_delete(bs->file);          bdrv_delete(bs->file);
         bs->file = NULL;          bs->file = NULL;
     }      }
     qemu_free(bs->opaque);      g_free(bs->opaque);
     bs->opaque = NULL;      bs->opaque = NULL;
     bs->drv = NULL;      bs->drv = NULL;
     return ret;      return ret;
Line 526  int bdrv_open(BlockDriverState *bs, cons Line 734  int bdrv_open(BlockDriverState *bs, cons
               BlockDriver *drv)                BlockDriver *drv)
 {  {
     int ret;      int ret;
       char tmp_filename[PATH_MAX];
   
     if (flags & BDRV_O_SNAPSHOT) {      if (flags & BDRV_O_SNAPSHOT) {
         BlockDriverState *bs1;          BlockDriverState *bs1;
Line 533  int bdrv_open(BlockDriverState *bs, cons Line 742  int bdrv_open(BlockDriverState *bs, cons
         int is_protocol = 0;          int is_protocol = 0;
         BlockDriver *bdrv_qcow2;          BlockDriver *bdrv_qcow2;
         QEMUOptionParameter *options;          QEMUOptionParameter *options;
         char tmp_filename[PATH_MAX];  
         char backing_filename[PATH_MAX];          char backing_filename[PATH_MAX];
   
         /* if snapshot, we create a temporary backing file and open it          /* if snapshot, we create a temporary backing file and open it
Line 553  int bdrv_open(BlockDriverState *bs, cons Line 761  int bdrv_open(BlockDriverState *bs, cons
   
         bdrv_delete(bs1);          bdrv_delete(bs1);
   
         get_tmp_filename(tmp_filename, sizeof(tmp_filename));          ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
           if (ret < 0) {
               return ret;
           }
   
         /* Real path is meaningless for protocols */          /* Real path is meaningless for protocols */
         if (is_protocol)          if (is_protocol)
Line 605  int bdrv_open(BlockDriverState *bs, cons Line 816  int bdrv_open(BlockDriverState *bs, cons
         BlockDriver *back_drv = NULL;          BlockDriver *back_drv = NULL;
   
         bs->backing_hd = bdrv_new("");          bs->backing_hd = bdrv_new("");
           bdrv_get_full_backing_filename(bs, backing_filename,
         if (path_has_protocol(bs->backing_file)) {                                         sizeof(backing_filename));
             pstrcpy(backing_filename, sizeof(backing_filename),  
                     bs->backing_file);  
         } else {  
             path_combine(backing_filename, sizeof(backing_filename),  
                          filename, bs->backing_file);  
         }  
   
         if (bs->backing_format[0] != '\0') {          if (bs->backing_format[0] != '\0') {
             back_drv = bdrv_find_format(bs->backing_format);              back_drv = bdrv_find_format(bs->backing_format);
Line 636  int bdrv_open(BlockDriverState *bs, cons Line 841  int bdrv_open(BlockDriverState *bs, cons
     }      }
   
     if (!bdrv_key_required(bs)) {      if (!bdrv_key_required(bs)) {
         /* call the change callback */          bdrv_dev_change_media_cb(bs, true);
         bs->media_changed = 1;      }
         if (bs->change_cb)  
             bs->change_cb(bs->change_opaque, CHANGE_MEDIA);      /* throttling disk I/O limits */
       if (bs->io_limits_enabled) {
           bdrv_io_limits_enable(bs);
     }      }
   
     return 0;      return 0;
Line 653  unlink_and_fail: Line 860  unlink_and_fail:
   
 void bdrv_close(BlockDriverState *bs)  void bdrv_close(BlockDriverState *bs)
 {  {
       bdrv_flush(bs);
     if (bs->drv) {      if (bs->drv) {
           if (bs->job) {
               block_job_cancel_sync(bs->job);
           }
           bdrv_drain_all();
   
         if (bs == bs_snapshots) {          if (bs == bs_snapshots) {
             bs_snapshots = NULL;              bs_snapshots = NULL;
         }          }
Line 662  void bdrv_close(BlockDriverState *bs) Line 875  void bdrv_close(BlockDriverState *bs)
             bs->backing_hd = NULL;              bs->backing_hd = NULL;
         }          }
         bs->drv->bdrv_close(bs);          bs->drv->bdrv_close(bs);
         qemu_free(bs->opaque);          g_free(bs->opaque);
 #ifdef _WIN32  #ifdef _WIN32
         if (bs->is_temporary) {          if (bs->is_temporary) {
             unlink(bs->filename);              unlink(bs->filename);
Line 670  void bdrv_close(BlockDriverState *bs) Line 883  void bdrv_close(BlockDriverState *bs)
 #endif  #endif
         bs->opaque = NULL;          bs->opaque = NULL;
         bs->drv = NULL;          bs->drv = NULL;
           bs->copy_on_read = 0;
           bs->backing_file[0] = '\0';
           bs->backing_format[0] = '\0';
           bs->total_sectors = 0;
           bs->encrypted = 0;
           bs->valid_key = 0;
           bs->sg = 0;
           bs->growable = 0;
   
         if (bs->file != NULL) {          if (bs->file != NULL) {
             bdrv_close(bs->file);              bdrv_delete(bs->file);
               bs->file = NULL;
         }          }
   
         /* call the change callback */          bdrv_dev_change_media_cb(bs, false);
         bs->media_changed = 1;      }
         if (bs->change_cb)  
          bs->change_cb(bs->change_opaque, CHANGE_MEDIA);      /* throttling disk I/O limits */
       if (bs->io_limits_enabled) {
           bdrv_io_limits_disable(bs);
     }      }
 }  }
   
Line 691  void bdrv_close_all(void) Line 915  void bdrv_close_all(void)
     }      }
 }  }
   
   /*
    * Wait for pending requests to complete across all BlockDriverStates
    *
    * This function does not flush data to disk, use bdrv_flush_all() for that
    * after calling this function.
    *
     * Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices; for example, a coroutine
     * can be arbitrarily complex and can keep generating a constant flow of
     * I/O until it completes.  Because of this, it is not possible to have a
     * function that drains a single device's I/O queue.
    */
   void bdrv_drain_all(void)
   {
       BlockDriverState *bs;
       bool busy;
   
       do {
           busy = qemu_aio_wait();
   
           /* FIXME: We do not have timer support here, so this is effectively
            * a busy wait.
            */
           QTAILQ_FOREACH(bs, &bdrv_states, list) {
               if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                   qemu_co_queue_restart_all(&bs->throttled_reqs);
                   busy = true;
               }
           }
       } while (busy);
   
       /* If requests are still pending there is a bug somewhere */
       QTAILQ_FOREACH(bs, &bdrv_states, list) {
           assert(QLIST_EMPTY(&bs->tracked_requests));
           assert(qemu_co_queue_empty(&bs->throttled_reqs));
       }
   }
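
Per the comment above, callers that also need the data on disk follow the drain with a flush; e.g.:

    bdrv_drain_all();    /* wait for all in-flight requests to complete */
    bdrv_flush_all();    /* then flush completed writes out to disk */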
   
 /* make a BlockDriverState anonymous by removing from bdrv_state list.  /* make a BlockDriverState anonymous by removing from bdrv_state list.
    Also, NULL terminate the device_name to prevent double remove */     Also, NULL terminate the device_name to prevent double remove */
 void bdrv_make_anon(BlockDriverState *bs)  void bdrv_make_anon(BlockDriverState *bs)
Line 701  void bdrv_make_anon(BlockDriverState *bs Line 963  void bdrv_make_anon(BlockDriverState *bs
     bs->device_name[0] = '\0';      bs->device_name[0] = '\0';
 }  }
   
   static void bdrv_rebind(BlockDriverState *bs)
   {
       if (bs->drv && bs->drv->bdrv_rebind) {
           bs->drv->bdrv_rebind(bs);
       }
   }
   
   /*
     * Add new bs contents at the top of an image chain while the chain is
     * live, keeping required fields on the top layer.
    *
    * This will modify the BlockDriverState fields, and swap contents
    * between bs_new and bs_top. Both bs_new and bs_top are modified.
    *
    * bs_new is required to be anonymous.
    *
    * This function does not create any image files.
    */
   void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
   {
       BlockDriverState tmp;
   
       /* bs_new must be anonymous */
       assert(bs_new->device_name[0] == '\0');
   
       tmp = *bs_new;
   
       /* there are some fields that need to stay on the top layer: */
       tmp.open_flags        = bs_top->open_flags;
   
       /* dev info */
       tmp.dev_ops           = bs_top->dev_ops;
       tmp.dev_opaque        = bs_top->dev_opaque;
       tmp.dev               = bs_top->dev;
       tmp.buffer_alignment  = bs_top->buffer_alignment;
       tmp.copy_on_read      = bs_top->copy_on_read;
   
       /* i/o timing parameters */
       tmp.slice_time        = bs_top->slice_time;
       tmp.slice_start       = bs_top->slice_start;
       tmp.slice_end         = bs_top->slice_end;
       tmp.io_limits         = bs_top->io_limits;
       tmp.io_base           = bs_top->io_base;
       tmp.throttled_reqs    = bs_top->throttled_reqs;
       tmp.block_timer       = bs_top->block_timer;
       tmp.io_limits_enabled = bs_top->io_limits_enabled;
   
       /* geometry */
       tmp.cyls              = bs_top->cyls;
       tmp.heads             = bs_top->heads;
       tmp.secs              = bs_top->secs;
       tmp.translation       = bs_top->translation;
   
       /* r/w error */
       tmp.on_read_error     = bs_top->on_read_error;
       tmp.on_write_error    = bs_top->on_write_error;
   
       /* i/o status */
       tmp.iostatus_enabled  = bs_top->iostatus_enabled;
       tmp.iostatus          = bs_top->iostatus;
   
       /* keep the same entry in bdrv_states */
       pstrcpy(tmp.device_name, sizeof(tmp.device_name), bs_top->device_name);
       tmp.list = bs_top->list;
   
       /* The contents of 'tmp' will become bs_top, as we are
        * swapping bs_new and bs_top contents. */
       tmp.backing_hd = bs_new;
       pstrcpy(tmp.backing_file, sizeof(tmp.backing_file), bs_top->filename);
       bdrv_get_format(bs_top, tmp.backing_format, sizeof(tmp.backing_format));
   
       /* swap contents of the fixed new bs and the current top */
       *bs_new = *bs_top;
       *bs_top = tmp;
   
       /* device_name[] was carried over from the old bs_top.  bs_new
        * shouldn't be in bdrv_states, so we need to make device_name[]
        * reflect the anonymity of bs_new
        */
       bs_new->device_name[0] = '\0';
   
       /* clear the copied fields in the new backing file */
       bdrv_detach_dev(bs_new, bs_new->dev);
   
       qemu_co_queue_init(&bs_new->throttled_reqs);
       memset(&bs_new->io_base,   0, sizeof(bs_new->io_base));
       memset(&bs_new->io_limits, 0, sizeof(bs_new->io_limits));
       bdrv_iostatus_disable(bs_new);
   
       /* we don't use bdrv_io_limits_disable() for this, because we don't want
        * to affect or delete the block_timer, as it has been moved to bs_top */
       bs_new->io_limits_enabled = false;
       bs_new->block_timer       = NULL;
       bs_new->slice_time        = 0;
       bs_new->slice_start       = 0;
       bs_new->slice_end         = 0;
   
       bdrv_rebind(bs_new);
       bdrv_rebind(bs_top);
   }
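
bdrv_append() is the backbone of live snapshots: the overlay is opened on an anonymous BlockDriverState and then spliced in, so the guest device keeps using its existing bs_top pointer. A condensed sketch (error handling omitted; file name and flags are illustrative):

    BlockDriverState *bs_new = bdrv_new("");   /* must be anonymous */

    /* open the freshly created overlay image */
    bdrv_open(bs_new, overlay_filename, flags, drv);

    /* after this call the bs_top pointer refers to the overlay, and the
     * old top-level contents live on in bs_new as its backing file */
    bdrv_append(bs_new, bs_top);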
   
 void bdrv_delete(BlockDriverState *bs)  void bdrv_delete(BlockDriverState *bs)
 {  {
     assert(!bs->peer);      assert(!bs->dev);
       assert(!bs->job);
       assert(!bs->in_use);
   
     /* remove from list, if necessary */      /* remove from list, if necessary */
     bdrv_make_anon(bs);      bdrv_make_anon(bs);
   
     bdrv_close(bs);      bdrv_close(bs);
     if (bs->file != NULL) {  
         bdrv_delete(bs->file);  
     }  
   
     assert(bs != bs_snapshots);      assert(bs != bs_snapshots);
     qemu_free(bs);      g_free(bs);
 }  }
   
 int bdrv_attach(BlockDriverState *bs, DeviceState *qdev)  int bdrv_attach_dev(BlockDriverState *bs, void *dev)
   /* TODO change to DeviceState *dev when all users are qdevified */
 {  {
     if (bs->peer) {      if (bs->dev) {
         return -EBUSY;          return -EBUSY;
     }      }
     bs->peer = qdev;      bs->dev = dev;
       bdrv_iostatus_reset(bs);
     return 0;      return 0;
 }  }
   
 void bdrv_detach(BlockDriverState *bs, DeviceState *qdev)  /* TODO qdevified devices don't use this, remove when devices are qdevified */
   void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
   {
       if (bdrv_attach_dev(bs, dev) < 0) {
           abort();
       }
   }
   
   void bdrv_detach_dev(BlockDriverState *bs, void *dev)
   /* TODO change to DeviceState *dev when all users are qdevified */
   {
       assert(bs->dev == dev);
       bs->dev = NULL;
       bs->dev_ops = NULL;
       bs->dev_opaque = NULL;
       bs->buffer_alignment = 512;
   }
   
   /* TODO change to return DeviceState * when all users are qdevified */
   void *bdrv_get_attached_dev(BlockDriverState *bs)
   {
       return bs->dev;
   }
   
   void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                         void *opaque)
   {
       bs->dev_ops = ops;
       bs->dev_opaque = opaque;
       if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
           bs_snapshots = NULL;
       }
   }
   
   void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                                  BlockQMPEventAction action, int is_read)
   {
       QObject *data;
       const char *action_str;
   
       switch (action) {
       case BDRV_ACTION_REPORT:
           action_str = "report";
           break;
       case BDRV_ACTION_IGNORE:
           action_str = "ignore";
           break;
       case BDRV_ACTION_STOP:
           action_str = "stop";
           break;
       default:
           abort();
       }
   
       data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                                 bdrv->device_name,
                                 action_str,
                                 is_read ? "read" : "write");
       monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
   
       qobject_decref(data);
   }
   
   static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
   {
       QObject *data;
   
       data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                                 bdrv_get_device_name(bs), ejected);
       monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
   
       qobject_decref(data);
   }
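
Both emitters build their payload with qobject_from_jsonf() and publish it through monitor_protocol_event(). A sketch of a call site for the error event (the policy choice shown is illustrative):

    /* a guest write failed and the drive's error policy says report it */
    bdrv_emit_qmp_error_event(bs, BDRV_ACTION_REPORT, 0 /* is_read */);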
   
   static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
   {
       if (bs->dev_ops && bs->dev_ops->change_media_cb) {
           bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
           bs->dev_ops->change_media_cb(bs->dev_opaque, load);
           if (tray_was_closed) {
               /* tray open */
               bdrv_emit_qmp_eject_event(bs, true);
           }
           if (load) {
               /* tray close */
               bdrv_emit_qmp_eject_event(bs, false);
           }
       }
   }
   
   bool bdrv_dev_has_removable_media(BlockDriverState *bs)
   {
       return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
   }
   
   void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
   {
       if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
           bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
       }
   }
   
   bool bdrv_dev_is_tray_open(BlockDriverState *bs)
   {
       if (bs->dev_ops && bs->dev_ops->is_tray_open) {
           return bs->dev_ops->is_tray_open(bs->dev_opaque);
       }
       return false;
   }
   
   static void bdrv_dev_resize_cb(BlockDriverState *bs)
 {  {
     assert(bs->peer == qdev);      if (bs->dev_ops && bs->dev_ops->resize_cb) {
     bs->peer = NULL;          bs->dev_ops->resize_cb(bs->dev_opaque);
       }
 }  }
   
 DeviceState *bdrv_get_attached(BlockDriverState *bs)  bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
 {  {
     return bs->peer;      if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
           return bs->dev_ops->is_medium_locked(bs->dev_opaque);
       }
       return false;
 }  }
   
 /*  /*
Line 779  int bdrv_commit(BlockDriverState *bs) Line 1257  int bdrv_commit(BlockDriverState *bs)
         return -EACCES;          return -EACCES;
     }      }
   
       if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
           return -EBUSY;
       }
   
     backing_drv = bs->backing_hd->drv;      backing_drv = bs->backing_hd->drv;
     ro = bs->backing_hd->read_only;      ro = bs->backing_hd->read_only;
     strncpy(filename, bs->backing_hd->filename, sizeof(filename));      strncpy(filename, bs->backing_hd->filename, sizeof(filename));
Line 810  int bdrv_commit(BlockDriverState *bs) Line 1292  int bdrv_commit(BlockDriverState *bs)
     }      }
   
     total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;      total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
     buf = qemu_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);      buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
   
     for (sector = 0; sector < total_sectors; sector += n) {      for (sector = 0; sector < total_sectors; sector += n) {
         if (drv->bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {          if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
   
             if (bdrv_read(bs, sector, buf, n) != 0) {              if (bdrv_read(bs, sector, buf, n) != 0) {
                 ret = -EIO;                  ret = -EIO;
Line 840  int bdrv_commit(BlockDriverState *bs) Line 1322  int bdrv_commit(BlockDriverState *bs)
         bdrv_flush(bs->backing_hd);          bdrv_flush(bs->backing_hd);
   
 ro_cleanup:  ro_cleanup:
     qemu_free(buf);      g_free(buf);
   
     if (ro) {      if (ro) {
         /* re-open as RO */          /* re-open as RO */
Line 862  ro_cleanup: Line 1344  ro_cleanup:
     return ret;      return ret;
 }  }
   
 void bdrv_commit_all(void)  int bdrv_commit_all(void)
 {  {
     BlockDriverState *bs;      BlockDriverState *bs;
   
     QTAILQ_FOREACH(bs, &bdrv_states, list) {      QTAILQ_FOREACH(bs, &bdrv_states, list) {
         bdrv_commit(bs);          int ret = bdrv_commit(bs);
           if (ret < 0) {
               return ret;
           }
     }      }
       return 0;
 }  }
   
 /*  struct BdrvTrackedRequest {
  * Return values:      BlockDriverState *bs;
  * 0        - success      int64_t sector_num;
  * -EINVAL  - backing format specified, but no file      int nb_sectors;
  * -ENOSPC  - can't update the backing file because no space is left in the      bool is_write;
  *            image file header      QLIST_ENTRY(BdrvTrackedRequest) list;
  * -ENOTSUP - format driver doesn't support changing the backing file      Coroutine *co; /* owner, used for deadlock detection */
       CoQueue wait_queue; /* coroutines blocked on this request */
   };
   
   /**
    * Remove an active request from the tracked requests list
    *
    * This function should be called when a tracked request is completing.
  */   */
 int bdrv_change_backing_file(BlockDriverState *bs,  static void tracked_request_end(BdrvTrackedRequest *req)
     const char *backing_file, const char *backing_fmt)  
 {  {
     BlockDriver *drv = bs->drv;      QLIST_REMOVE(req, list);
       qemu_co_queue_restart_all(&req->wait_queue);
     if (drv->bdrv_change_backing_file != NULL) {  
         return drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);  
     } else {  
         return -ENOTSUP;  
     }  
 }  }
   
 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,  /**
                                    size_t size)   * Add an active request to the tracked requests list
 {   */
   static void tracked_request_begin(BdrvTrackedRequest *req,
                                     BlockDriverState *bs,
                                     int64_t sector_num,
                                     int nb_sectors, bool is_write)
   {
       *req = (BdrvTrackedRequest){
           .bs = bs,
           .sector_num = sector_num,
           .nb_sectors = nb_sectors,
           .is_write = is_write,
           .co = qemu_coroutine_self(),
       };
   
       qemu_co_queue_init(&req->wait_queue);
   
       QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
   }
   
   /**
    * Round a region to cluster boundaries
    */
   static void round_to_clusters(BlockDriverState *bs,
                                 int64_t sector_num, int nb_sectors,
                                 int64_t *cluster_sector_num,
                                 int *cluster_nb_sectors)
   {
       BlockDriverInfo bdi;
   
       if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
           *cluster_sector_num = sector_num;
           *cluster_nb_sectors = nb_sectors;
       } else {
           int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
           *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
           *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                               nb_sectors, c);
       }
   }
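
A worked example: with a 64 KiB cluster size, c = 65536 / 512 = 128 sectors, so a request for sectors [100, 110) expands to the whole containing cluster:

    /* sector_num = 100, nb_sectors = 10, cluster_size = 64 KiB:
     *   c                   = 65536 / 512 = 128
     *   *cluster_sector_num = QEMU_ALIGN_DOWN(100, 128)        = 0
     *   *cluster_nb_sectors = QEMU_ALIGN_UP(100 - 0 + 10, 128) = 128
     * i.e. the request is widened to sectors [0, 128). */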
   
   static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                        int64_t sector_num, int nb_sectors) {
       /*        aaaa   bbbb */
       if (sector_num >= req->sector_num + req->nb_sectors) {
           return false;
       }
       /* bbbb   aaaa        */
       if (req->sector_num >= sector_num + nb_sectors) {
           return false;
       }
       return true;
   }
   
   static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
           int64_t sector_num, int nb_sectors)
   {
       BdrvTrackedRequest *req;
       int64_t cluster_sector_num;
       int cluster_nb_sectors;
       bool retry;
   
       /* If we touch the same cluster it counts as an overlap.  This guarantees
        * that allocating writes will be serialized and not race with each other
        * for the same cluster.  For example, in copy-on-read it ensures that the
        * CoR read and write operations are atomic and guest writes cannot
        * interleave between them.
        */
       round_to_clusters(bs, sector_num, nb_sectors,
                         &cluster_sector_num, &cluster_nb_sectors);
   
       do {
           retry = false;
           QLIST_FOREACH(req, &bs->tracked_requests, list) {
               if (tracked_request_overlaps(req, cluster_sector_num,
                                            cluster_nb_sectors)) {
                   /* Hitting this means there was a reentrant request, for
                    * example, a block driver issuing nested requests.  This must
                    * never happen since it means deadlock.
                    */
                   assert(qemu_coroutine_self() != req->co);
   
                   qemu_co_queue_wait(&req->wait_queue);
                   retry = true;
                   break;
               }
           }
       } while (retry);
   }
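
Continuing the example above, the cluster rounding is what serializes racing requests:

    /* Example: a copy-on-read request widened to cluster [0, 128) is in
     * flight.  A guest write to sectors [100, 110) overlaps that cluster,
     * so the writer blocks in qemu_co_queue_wait(&req->wait_queue) and is
     * woken by tracked_request_end() once the CoR read+write completes. */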
   
   /*
    * Return values:
    * 0        - success
    * -EINVAL  - backing format specified, but no file
    * -ENOSPC  - can't update the backing file because no space is left in the
    *            image file header
    * -ENOTSUP - format driver doesn't support changing the backing file
    */
   int bdrv_change_backing_file(BlockDriverState *bs,
       const char *backing_file, const char *backing_fmt)
   {
       BlockDriver *drv = bs->drv;
       int ret;
   
       /* Backing file format doesn't make sense without a backing file */
       if (backing_fmt && !backing_file) {
           return -EINVAL;
       }
   
       if (drv->bdrv_change_backing_file != NULL) {
           ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
       } else {
           ret = -ENOTSUP;
       }
   
       if (ret == 0) {
           pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
           pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
       }
       return ret;
   }
   
   static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                      size_t size)
   {
     int64_t len;      int64_t len;
   
     if (!bdrv_is_inserted(bs))      if (!bdrv_is_inserted(bs))
Line 920  static int bdrv_check_request(BlockDrive Line 1530  static int bdrv_check_request(BlockDrive
                                    nb_sectors * BDRV_SECTOR_SIZE);                                     nb_sectors * BDRV_SECTOR_SIZE);
 }  }
   
   typedef struct RwCo {
       BlockDriverState *bs;
       int64_t sector_num;
       int nb_sectors;
       QEMUIOVector *qiov;
       bool is_write;
       int ret;
   } RwCo;
   
   static void coroutine_fn bdrv_rw_co_entry(void *opaque)
   {
       RwCo *rwco = opaque;
   
       if (!rwco->is_write) {
           rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                        rwco->nb_sectors, rwco->qiov, 0);
       } else {
           rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                         rwco->nb_sectors, rwco->qiov, 0);
       }
   }
   
   /*
    * Process a synchronous request using coroutines
    */
   static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                         int nb_sectors, bool is_write)
   {
       QEMUIOVector qiov;
       struct iovec iov = {
           .iov_base = (void *)buf,
           .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
       };
       Coroutine *co;
       RwCo rwco = {
           .bs = bs,
           .sector_num = sector_num,
           .nb_sectors = nb_sectors,
           .qiov = &qiov,
           .is_write = is_write,
           .ret = NOT_DONE,
       };
   
       qemu_iovec_init_external(&qiov, &iov, 1);
   
    /**
     * In a synchronous call context the vCPU is blocked, so the block
     * timer used for throttling can never fire; the I/O throttling
     * function therefore has to be disabled here if it has been enabled.
     */
       if (bs->io_limits_enabled) {
           fprintf(stderr, "Disabling I/O throttling on '%s' due "
                           "to synchronous I/O.\n", bdrv_get_device_name(bs));
           bdrv_io_limits_disable(bs);
       }
   
       if (qemu_in_coroutine()) {
           /* Fast-path if already in coroutine context */
           bdrv_rw_co_entry(&rwco);
       } else {
           co = qemu_coroutine_create(bdrv_rw_co_entry);
           qemu_coroutine_enter(co, &rwco);
           while (rwco.ret == NOT_DONE) {
               qemu_aio_wait();
           }
       }
       return rwco.ret;
   }
   
 /* return < 0 if error. See bdrv_write() for the return codes */  /* return < 0 if error. See bdrv_write() for the return codes */
 int bdrv_read(BlockDriverState *bs, int64_t sector_num,  int bdrv_read(BlockDriverState *bs, int64_t sector_num,
               uint8_t *buf, int nb_sectors)                uint8_t *buf, int nb_sectors)
 {  {
     BlockDriver *drv = bs->drv;      return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
   
     if (!drv)  
         return -ENOMEDIUM;  
     if (bdrv_check_request(bs, sector_num, nb_sectors))  
         return -EIO;  
   
     return drv->bdrv_read(bs, sector_num, buf, nb_sectors);  
 }  }
   
   #define BITS_PER_LONG  (sizeof(unsigned long) * 8)
   
 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,  static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int dirty)                               int nb_sectors, int dirty)
 {  {
Line 944  static void set_dirty_bitmap(BlockDriver Line 1618  static void set_dirty_bitmap(BlockDriver
     end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;      end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
   
     for (; start <= end; start++) {      for (; start <= end; start++) {
         idx = start / (sizeof(unsigned long) * 8);          idx = start / BITS_PER_LONG;
         bit = start % (sizeof(unsigned long) * 8);          bit = start % BITS_PER_LONG;
         val = bs->dirty_bitmap[idx];          val = bs->dirty_bitmap[idx];
         if (dirty) {          if (dirty) {
             if (!(val & (1UL << bit))) {              if (!(val & (1UL << bit))) {
Line 971  static void set_dirty_bitmap(BlockDriver Line 1645  static void set_dirty_bitmap(BlockDriver
 int bdrv_write(BlockDriverState *bs, int64_t sector_num,  int bdrv_write(BlockDriverState *bs, int64_t sector_num,
                const uint8_t *buf, int nb_sectors)                 const uint8_t *buf, int nb_sectors)
 {  {
     BlockDriver *drv = bs->drv;      return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
     if (!bs->drv)  
         return -ENOMEDIUM;  
     if (bs->read_only)  
         return -EACCES;  
     if (bdrv_check_request(bs, sector_num, nb_sectors))  
         return -EIO;  
   
     if (bs->dirty_bitmap) {  
         set_dirty_bitmap(bs, sector_num, nb_sectors, 1);  
     }  
   
     if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {  
         bs->wr_highest_sector = sector_num + nb_sectors - 1;  
     }  
   
     return drv->bdrv_write(bs, sector_num, buf, nb_sectors);  
 }  }
   
 int bdrv_pread(BlockDriverState *bs, int64_t offset,  int bdrv_pread(BlockDriverState *bs, int64_t offset,
Line 1100  int bdrv_pwrite_sync(BlockDriverState *b Line 1758  int bdrv_pwrite_sync(BlockDriverState *b
         return ret;          return ret;
     }      }
   
     /* No flush needed for cache=writethrough, it uses O_DSYNC */      /* No flush needed for cache modes that use O_DSYNC */
     if ((bs->open_flags & BDRV_O_CACHE_MASK) != 0) {      if ((bs->open_flags & BDRV_O_CACHE_WB) != 0) {
         bdrv_flush(bs);          bdrv_flush(bs);
     }      }
   
     return 0;      return 0;
 }  }
   
   static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
           int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
   {
       /* Perform I/O through a temporary buffer so that users who scribble over
        * their read buffer while the operation is in progress do not end up
        * modifying the image file.  This is critical for zero-copy guest I/O
        * where anything might happen inside guest memory.
        */
       void *bounce_buffer;
   
       BlockDriver *drv = bs->drv;
       struct iovec iov;
       QEMUIOVector bounce_qiov;
       int64_t cluster_sector_num;
       int cluster_nb_sectors;
       size_t skip_bytes;
       int ret;
   
    /* Cover the entire cluster so no additional backing file I/O is
     * required when allocating a cluster in the image file.
     */
       round_to_clusters(bs, sector_num, nb_sectors,
                         &cluster_sector_num, &cluster_nb_sectors);
   
       trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                      cluster_sector_num, cluster_nb_sectors);
   
       iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
       iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
       qemu_iovec_init_external(&bounce_qiov, &iov, 1);
   
       ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                                &bounce_qiov);
       if (ret < 0) {
           goto err;
       }
   
       if (drv->bdrv_co_write_zeroes &&
           buffer_is_zero(bounce_buffer, iov.iov_len)) {
           ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                         cluster_nb_sectors);
       } else {
           ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                     &bounce_qiov);
       }
   
       if (ret < 0) {
           /* It might be okay to ignore write errors for guest requests.  If this
            * is a deliberate copy-on-read then we don't want to ignore the error.
            * Simply report it in all cases.
            */
           goto err;
       }
   
       skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
       qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
                              nb_sectors * BDRV_SECTOR_SIZE);
   
   err:
       qemu_vfree(bounce_buffer);
       return ret;
   }
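
Tying this back to the rounding example: for a guest read of sectors [100, 110) the bounce buffer covers cluster [0, 128), and only the requested slice is copied out:

    /* skip_bytes = (100 - 0) * BDRV_SECTOR_SIZE = 51200; the final
     * qemu_iovec_from_buffer() copies 10 * 512 bytes starting at that
     * offset of the bounce buffer into the guest's qiov. */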
   
 /*  /*
  * Writes to the file and ensures that no writes are reordered across this   * Handle a read request in coroutine context
  * request (acts as a barrier)   */
  *  static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
  * Returns 0 on success, -errno in error cases.      int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
       BdrvRequestFlags flags)
   {
       BlockDriver *drv = bs->drv;
       BdrvTrackedRequest req;
       int ret;
   
       if (!drv) {
           return -ENOMEDIUM;
       }
       if (bdrv_check_request(bs, sector_num, nb_sectors)) {
           return -EIO;
       }
   
       /* throttling disk read I/O */
       if (bs->io_limits_enabled) {
           bdrv_io_limits_intercept(bs, false, nb_sectors);
       }
   
       if (bs->copy_on_read) {
           flags |= BDRV_REQ_COPY_ON_READ;
       }
       if (flags & BDRV_REQ_COPY_ON_READ) {
           bs->copy_on_read_in_flight++;
       }
   
       if (bs->copy_on_read_in_flight) {
           wait_for_overlapping_requests(bs, sector_num, nb_sectors);
       }
   
       tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
   
       if (flags & BDRV_REQ_COPY_ON_READ) {
           int pnum;
   
           ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
           if (ret < 0) {
               goto out;
           }
   
           if (!ret || pnum != nb_sectors) {
               ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
               goto out;
           }
       }
   
       ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
   
   out:
       tracked_request_end(&req);
   
       if (flags & BDRV_REQ_COPY_ON_READ) {
           bs->copy_on_read_in_flight--;
       }
   
       return ret;
   }
   
   int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
       int nb_sectors, QEMUIOVector *qiov)
   {
       trace_bdrv_co_readv(bs, sector_num, nb_sectors);
   
       return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
   }
   
   int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
   {
       trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
   
       return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                               BDRV_REQ_COPY_ON_READ);
   }
   
   static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
       int64_t sector_num, int nb_sectors)
   {
       BlockDriver *drv = bs->drv;
       QEMUIOVector qiov;
       struct iovec iov;
       int ret;
   
       /* TODO Emulate only part of misaligned requests instead of letting block
        * drivers return -ENOTSUP and emulate everything */
   
       /* First try the efficient write zeroes operation */
       if (drv->bdrv_co_write_zeroes) {
           ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
           if (ret != -ENOTSUP) {
               return ret;
           }
       }
   
       /* Fall back to bounce buffer if write zeroes is unsupported */
       iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;
       iov.iov_base = qemu_blockalign(bs, iov.iov_len);
       memset(iov.iov_base, 0, iov.iov_len);
       qemu_iovec_init_external(&qiov, &iov, 1);
   
       ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
   
       qemu_vfree(iov.iov_base);
       return ret;
   }
   
   /*
    * Handle a write request in coroutine context
  */   */
 int bdrv_write_sync(BlockDriverState *bs, int64_t sector_num,  static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
     const uint8_t *buf, int nb_sectors)      int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
       BdrvRequestFlags flags)
   {
       BlockDriver *drv = bs->drv;
       BdrvTrackedRequest req;
       int ret;
   
       if (!bs->drv) {
           return -ENOMEDIUM;
       }
       if (bs->read_only) {
           return -EACCES;
       }
       if (bdrv_check_request(bs, sector_num, nb_sectors)) {
           return -EIO;
       }
   
       /* throttling disk write I/O */
       if (bs->io_limits_enabled) {
           bdrv_io_limits_intercept(bs, true, nb_sectors);
       }
   
       if (bs->copy_on_read_in_flight) {
           wait_for_overlapping_requests(bs, sector_num, nb_sectors);
       }
   
       tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
   
       if (flags & BDRV_REQ_ZERO_WRITE) {
           ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
       } else {
           ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
       }
   
       if (bs->dirty_bitmap) {
           set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
       }
   
       if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
           bs->wr_highest_sector = sector_num + nb_sectors - 1;
       }
   
       tracked_request_end(&req);
   
       return ret;
   }
   
   int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
       int nb_sectors, QEMUIOVector *qiov)
   {
       trace_bdrv_co_writev(bs, sector_num, nb_sectors);
   
       return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
   }
   
   int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors)
 {  {
     return bdrv_pwrite_sync(bs, BDRV_SECTOR_SIZE * sector_num,      trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
         buf, BDRV_SECTOR_SIZE * nb_sectors);  
       return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                                BDRV_REQ_ZERO_WRITE);
 }  }
   
 /**  /**
Line 1139  int bdrv_truncate(BlockDriverState *bs,  Line 2025  int bdrv_truncate(BlockDriverState *bs, 
     ret = drv->bdrv_truncate(bs, offset);      ret = drv->bdrv_truncate(bs, offset);
     if (ret == 0) {      if (ret == 0) {
         ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);          ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
         if (bs->change_cb) {          bdrv_dev_resize_cb(bs);
             bs->change_cb(bs->change_opaque, CHANGE_SIZE);  
         }  
     }      }
     return ret;      return ret;
 }  }
Line 1174  int64_t bdrv_getlength(BlockDriverState  Line 2058  int64_t bdrv_getlength(BlockDriverState 
     if (!drv)      if (!drv)
         return -ENOMEDIUM;          return -ENOMEDIUM;
   
     if (bs->growable || bs->removable) {      if (bs->growable || bdrv_dev_has_removable_media(bs)) {
         if (drv->bdrv_getlength) {          if (drv->bdrv_getlength) {
             return drv->bdrv_getlength(bs);              return drv->bdrv_getlength(bs);
         }          }
Line 1205  struct partition { Line 2089  struct partition {
         uint8_t end_cyl;            /* end cylinder */          uint8_t end_cyl;            /* end cylinder */
         uint32_t start_sect;        /* starting sector counting from 0 */          uint32_t start_sect;        /* starting sector counting from 0 */
         uint32_t nr_sects;          /* nr of sectors in partition */          uint32_t nr_sects;          /* nr of sectors in partition */
 } __attribute__((packed));  } QEMU_PACKED;
   
 /* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if could not guess */  /* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if could not guess */
 static int guess_disk_lchs(BlockDriverState *bs,  static int guess_disk_lchs(BlockDriverState *bs,
Line 1216  static int guess_disk_lchs(BlockDriverSt Line 2100  static int guess_disk_lchs(BlockDriverSt
     struct partition *p;      struct partition *p;
     uint32_t nr_sects;      uint32_t nr_sects;
     uint64_t nb_sectors;      uint64_t nb_sectors;
       bool enabled;
   
     bdrv_get_geometry(bs, &nb_sectors);      bdrv_get_geometry(bs, &nb_sectors);
   
    /**
     * This function is invoked during startup not only in sync I/O mode,
     * but also in async I/O mode, so I/O throttling has to be disabled
     * temporarily here, not permanently.
     */
       enabled = bs->io_limits_enabled;
       bs->io_limits_enabled = false;
     ret = bdrv_read(bs, 0, buf, 1);      ret = bdrv_read(bs, 0, buf, 1);
       bs->io_limits_enabled = enabled;
     if (ret < 0)      if (ret < 0)
         return -1;          return -1;
     /* test msdos magic */      /* test msdos magic */
Line 1331  void bdrv_get_geometry_hint(BlockDriverS Line 2224  void bdrv_get_geometry_hint(BlockDriverS
     *psecs = bs->secs;      *psecs = bs->secs;
 }  }
   
   /* throttling disk io limits */
   void bdrv_set_io_limits(BlockDriverState *bs,
                           BlockIOLimit *io_limits)
   {
       bs->io_limits = *io_limits;
       bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
   }
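
As a usage sketch (hypothetical caller, not part of this diff): enabling throttling amounts to filling a BlockIOLimit and handing it to bdrv_set_io_limits(); a zero field means "unlimited":

    static void example_enable_throttling(BlockDriverState *bs)
    {
        BlockIOLimit limits = {
            .bps  = { [BLOCK_IO_LIMIT_TOTAL] = 10 * 1024 * 1024 }, /* 10 MB/s  */
            .iops = { [BLOCK_IO_LIMIT_WRITE] = 100 },              /* 100 op/s */
        };

        bdrv_set_io_limits(bs, &limits);
        /* bs->io_limits_enabled is now set iff at least one limit is
         * non-zero, as decided by bdrv_io_limits_enabled(). */
    }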
   
 /* Recognize floppy formats */  /* Recognize floppy formats */
 typedef struct FDFormat {  typedef struct FDFormat {
     FDriveType drive;      FDriveType drive;
     uint8_t last_sect;      uint8_t last_sect;
     uint8_t max_track;      uint8_t max_track;
     uint8_t max_head;      uint8_t max_head;
       FDriveRate rate;
 } FDFormat;  } FDFormat;
   
 static const FDFormat fd_formats[] = {  static const FDFormat fd_formats[] = {
     /* First entry is default format */      /* First entry is default format */
     /* 1.44 MB 3"1/2 floppy disks */      /* 1.44 MB 3"1/2 floppy disks */
     { FDRIVE_DRV_144, 18, 80, 1, },      { FDRIVE_DRV_144, 18, 80, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_144, 20, 80, 1, },      { FDRIVE_DRV_144, 20, 80, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_144, 21, 80, 1, },      { FDRIVE_DRV_144, 21, 80, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_144, 21, 82, 1, },      { FDRIVE_DRV_144, 21, 82, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_144, 21, 83, 1, },      { FDRIVE_DRV_144, 21, 83, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_144, 22, 80, 1, },      { FDRIVE_DRV_144, 22, 80, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_144, 23, 80, 1, },      { FDRIVE_DRV_144, 23, 80, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_144, 24, 80, 1, },      { FDRIVE_DRV_144, 24, 80, 1, FDRIVE_RATE_500K, },
     /* 2.88 MB 3"1/2 floppy disks */      /* 2.88 MB 3"1/2 floppy disks */
     { FDRIVE_DRV_288, 36, 80, 1, },      { FDRIVE_DRV_288, 36, 80, 1, FDRIVE_RATE_1M, },
     { FDRIVE_DRV_288, 39, 80, 1, },      { FDRIVE_DRV_288, 39, 80, 1, FDRIVE_RATE_1M, },
     { FDRIVE_DRV_288, 40, 80, 1, },      { FDRIVE_DRV_288, 40, 80, 1, FDRIVE_RATE_1M, },
     { FDRIVE_DRV_288, 44, 80, 1, },      { FDRIVE_DRV_288, 44, 80, 1, FDRIVE_RATE_1M, },
     { FDRIVE_DRV_288, 48, 80, 1, },      { FDRIVE_DRV_288, 48, 80, 1, FDRIVE_RATE_1M, },
     /* 720 kB 3"1/2 floppy disks */      /* 720 kB 3"1/2 floppy disks */
     { FDRIVE_DRV_144,  9, 80, 1, },      { FDRIVE_DRV_144,  9, 80, 1, FDRIVE_RATE_250K, },
     { FDRIVE_DRV_144, 10, 80, 1, },      { FDRIVE_DRV_144, 10, 80, 1, FDRIVE_RATE_250K, },
     { FDRIVE_DRV_144, 10, 82, 1, },      { FDRIVE_DRV_144, 10, 82, 1, FDRIVE_RATE_250K, },
     { FDRIVE_DRV_144, 10, 83, 1, },      { FDRIVE_DRV_144, 10, 83, 1, FDRIVE_RATE_250K, },
     { FDRIVE_DRV_144, 13, 80, 1, },      { FDRIVE_DRV_144, 13, 80, 1, FDRIVE_RATE_250K, },
     { FDRIVE_DRV_144, 14, 80, 1, },      { FDRIVE_DRV_144, 14, 80, 1, FDRIVE_RATE_250K, },
     /* 1.2 MB 5"1/4 floppy disks */      /* 1.2 MB 5"1/4 floppy disks */
     { FDRIVE_DRV_120, 15, 80, 1, },      { FDRIVE_DRV_120, 15, 80, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_120, 18, 80, 1, },      { FDRIVE_DRV_120, 18, 80, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_120, 18, 82, 1, },      { FDRIVE_DRV_120, 18, 82, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_120, 18, 83, 1, },      { FDRIVE_DRV_120, 18, 83, 1, FDRIVE_RATE_500K, },
     { FDRIVE_DRV_120, 20, 80, 1, },      { FDRIVE_DRV_120, 20, 80, 1, FDRIVE_RATE_500K, },
     /* 720 kB 5"1/4 floppy disks */      /* 720 kB 5"1/4 floppy disks */
     { FDRIVE_DRV_120,  9, 80, 1, },      { FDRIVE_DRV_120,  9, 80, 1, FDRIVE_RATE_250K, },
     { FDRIVE_DRV_120, 11, 80, 1, },      { FDRIVE_DRV_120, 11, 80, 1, FDRIVE_RATE_250K, },
     /* 360 kB 5"1/4 floppy disks */      /* 360 kB 5"1/4 floppy disks */
     { FDRIVE_DRV_120,  9, 40, 1, },      { FDRIVE_DRV_120,  9, 40, 1, FDRIVE_RATE_300K, },
     { FDRIVE_DRV_120,  9, 40, 0, },      { FDRIVE_DRV_120,  9, 40, 0, FDRIVE_RATE_300K, },
     { FDRIVE_DRV_120, 10, 41, 1, },      { FDRIVE_DRV_120, 10, 41, 1, FDRIVE_RATE_300K, },
     { FDRIVE_DRV_120, 10, 42, 1, },      { FDRIVE_DRV_120, 10, 42, 1, FDRIVE_RATE_300K, },
     /* 320 kB 5"1/4 floppy disks */      /* 320 kB 5"1/4 floppy disks */
     { FDRIVE_DRV_120,  8, 40, 1, },      { FDRIVE_DRV_120,  8, 40, 1, FDRIVE_RATE_250K, },
     { FDRIVE_DRV_120,  8, 40, 0, },      { FDRIVE_DRV_120,  8, 40, 0, FDRIVE_RATE_250K, },
     /* 360 kB must match 5"1/4 better than 3"1/2... */      /* 360 kB must match 5"1/4 better than 3"1/2... */
     { FDRIVE_DRV_144,  9, 80, 0, },      { FDRIVE_DRV_144,  9, 80, 0, FDRIVE_RATE_250K, },
     /* end */      /* end */
     { FDRIVE_DRV_NONE, -1, -1, 0, },      { FDRIVE_DRV_NONE, -1, -1, 0, 0, },
 };  };
   
 void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,  void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
                                    int *max_track, int *last_sect,                                     int *max_track, int *last_sect,
                                    FDriveType drive_in, FDriveType *drive)                                     FDriveType drive_in, FDriveType *drive,
                                      FDriveRate *rate)
 {  {
     const FDFormat *parse;      const FDFormat *parse;
     uint64_t nb_sectors, size;      uint64_t nb_sectors, size;
Line 1397  void bdrv_get_floppy_geometry_hint(Block Line 2300  void bdrv_get_floppy_geometry_hint(Block
     bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);      bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
     if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {      if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
         /* User defined disk */          /* User defined disk */
           *rate = FDRIVE_RATE_500K;
     } else {      } else {
         bdrv_get_geometry(bs, &nb_sectors);          bdrv_get_geometry(bs, &nb_sectors);
         match = -1;          match = -1;
Line 1431  void bdrv_get_floppy_geometry_hint(Block Line 2335  void bdrv_get_floppy_geometry_hint(Block
         *max_track = parse->max_track;          *max_track = parse->max_track;
         *last_sect = parse->last_sect;          *last_sect = parse->last_sect;
         *drive = parse->drive;          *drive = parse->drive;
           *rate = parse->rate;
     }      }
 }  }
   
Line 1451  BlockErrorAction bdrv_get_on_error(Block Line 2356  BlockErrorAction bdrv_get_on_error(Block
     return is_read ? bs->on_read_error : bs->on_write_error;      return is_read ? bs->on_read_error : bs->on_write_error;
 }  }
   
 void bdrv_set_removable(BlockDriverState *bs, int removable)  
 {  
     bs->removable = removable;  
     if (removable && bs == bs_snapshots) {  
         bs_snapshots = NULL;  
     }  
 }  
   
 int bdrv_is_removable(BlockDriverState *bs)  
 {  
     return bs->removable;  
 }  
   
 int bdrv_is_read_only(BlockDriverState *bs)  int bdrv_is_read_only(BlockDriverState *bs)
 {  {
     return bs->read_only;      return bs->read_only;
Line 1479  int bdrv_enable_write_cache(BlockDriverS Line 2371  int bdrv_enable_write_cache(BlockDriverS
     return bs->enable_write_cache;      return bs->enable_write_cache;
 }  }
   
 /* XXX: no longer used */  
 void bdrv_set_change_cb(BlockDriverState *bs,  
                         void (*change_cb)(void *opaque, int reason),  
                         void *opaque)  
 {  
     bs->change_cb = change_cb;  
     bs->change_opaque = opaque;  
 }  
   
 int bdrv_is_encrypted(BlockDriverState *bs)  int bdrv_is_encrypted(BlockDriverState *bs)
 {  {
     if (bs->backing_hd && bs->backing_hd->encrypted)      if (bs->backing_hd && bs->backing_hd->encrypted)
Line 1525  int bdrv_set_key(BlockDriverState *bs, c Line 2408  int bdrv_set_key(BlockDriverState *bs, c
     } else if (!bs->valid_key) {      } else if (!bs->valid_key) {
         bs->valid_key = 1;          bs->valid_key = 1;
         /* call the change callback now, we skipped it on open */          /* call the change callback now, we skipped it on open */
         bs->media_changed = 1;          bdrv_dev_change_media_cb(bs, true);
         if (bs->change_cb)  
             bs->change_cb(bs->change_opaque, CHANGE_MEDIA);  
     }      }
     return ret;      return ret;
 }  }
Line 1585  const char *bdrv_get_device_name(BlockDr Line 2466  const char *bdrv_get_device_name(BlockDr
     return bs->device_name;      return bs->device_name;
 }  }
   
 int bdrv_flush(BlockDriverState *bs)  void bdrv_flush_all(void)
 {  
     if (bs->open_flags & BDRV_O_NO_FLUSH) {  
         return 0;  
     }  
   
     if (bs->drv && bs->drv->bdrv_flush) {  
         return bs->drv->bdrv_flush(bs);  
     }  
   
     /*  
      * Some block drivers always operate in either writethrough or unsafe mode  
      * and don't support bdrv_flush therefore. Usually qemu doesn't know how  
      * the server works (because the behaviour is hardcoded or depends on  
      * server-side configuration), so we can't ensure that everything is safe  
      * on disk. Returning an error doesn't work because that would break guests  
      * even if the server operates in writethrough mode.  
      *  
      * Let's hope the user knows what he's doing.  
      */  
     return 0;  
 }  
   
 void bdrv_flush_all(void)  
 {  {
     BlockDriverState *bs;      BlockDriverState *bs;
   
     QTAILQ_FOREACH(bs, &bdrv_states, list) {      QTAILQ_FOREACH(bs, &bdrv_states, list) {
         if (bs->drv && !bdrv_is_read_only(bs) &&          bdrv_flush(bs);
             (!bdrv_is_removable(bs) || bdrv_is_inserted(bs))) {  
             bdrv_flush(bs);  
         }  
     }      }
 }  }
   
Line 1631  int bdrv_has_zero_init(BlockDriverState  Line 2486  int bdrv_has_zero_init(BlockDriverState 
     return 1;      return 1;
 }  }
   
 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)  typedef struct BdrvCoIsAllocatedData {
 {      BlockDriverState *bs;
     if (!bs->drv) {      int64_t sector_num;
         return -ENOMEDIUM;      int nb_sectors;
     }      int *pnum;
     if (!bs->drv->bdrv_discard) {      int ret;
         return 0;      bool done;
     }  } BdrvCoIsAllocatedData;
     return bs->drv->bdrv_discard(bs, sector_num, nb_sectors);  
 }  
   
 /*  /*
  * Returns true iff the specified sector is present in the disk image. Drivers   * Returns true iff the specified sector is present in the disk image. Drivers
  * not implementing the functionality are assumed to not support backing files,   * not implementing the functionality are assumed to not support backing files,
  * hence all their sectors are reported as allocated.   * hence all their sectors are reported as allocated.
  *   *
   *   * If 'sector_num' is beyond the end of the disk image, the return value is 0
    * and 'pnum' is set to 0.
    *
  * 'pnum' is set to the number of sectors (including and immediately following   * 'pnum' is set to the number of sectors (including and immediately following
  * the specified sector) that are known to be in the same   * the specified sector) that are known to be in the same
  * allocated/unallocated state.   * allocated/unallocated state.
  *   *
  * 'nb_sectors' is the max value 'pnum' should be set to.   * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
    * beyond the end of the disk image it will be clamped.
  */   */
 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,  int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
         int *pnum)                                        int nb_sectors, int *pnum)
 {  {
     int64_t n;      int64_t n;
     if (!bs->drv->bdrv_is_allocated) {  
         if (sector_num >= bs->total_sectors) {  
             *pnum = 0;  
             return 0;  
         }  
         n = bs->total_sectors - sector_num;  
         *pnum = (n < nb_sectors) ? (n) : (nb_sectors);  
         return 1;  
     }  
     return bs->drv->bdrv_is_allocated(bs, sector_num, nb_sectors, pnum);  
 }  
   
 void bdrv_mon_event(const BlockDriverState *bdrv,      if (sector_num >= bs->total_sectors) {
                     BlockMonEventAction action, int is_read)          *pnum = 0;
 {          return 0;
     QObject *data;      }
     const char *action_str;  
   
     switch (action) {      n = bs->total_sectors - sector_num;
     case BDRV_ACTION_REPORT:      if (n < nb_sectors) {
         action_str = "report";          nb_sectors = n;
         break;  
     case BDRV_ACTION_IGNORE:  
         action_str = "ignore";  
         break;  
     case BDRV_ACTION_STOP:  
         action_str = "stop";  
         break;  
     default:  
         abort();  
     }      }
   
     data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",      if (!bs->drv->bdrv_co_is_allocated) {
                               bdrv->device_name,          *pnum = nb_sectors;
                               action_str,          return 1;
                               is_read ? "read" : "write");      }
     monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);  
   
     qobject_decref(data);      return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
 }  }
   
 static void bdrv_print_dict(QObject *obj, void *opaque)  /* Coroutine wrapper for bdrv_is_allocated() */
   static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
 {  {
     QDict *bs_dict;      BdrvCoIsAllocatedData *data = opaque;
     Monitor *mon = opaque;      BlockDriverState *bs = data->bs;
   
     bs_dict = qobject_to_qdict(obj);  
   
     monitor_printf(mon, "%s: removable=%d",  
                         qdict_get_str(bs_dict, "device"),  
                         qdict_get_bool(bs_dict, "removable"));  
   
     if (qdict_get_bool(bs_dict, "removable")) {  
         monitor_printf(mon, " locked=%d", qdict_get_bool(bs_dict, "locked"));  
     }  
   
     if (qdict_haskey(bs_dict, "inserted")) {  
         QDict *qdict = qobject_to_qdict(qdict_get(bs_dict, "inserted"));  
   
         monitor_printf(mon, " file=");      data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
         monitor_print_filename(mon, qdict_get_str(qdict, "file"));                                       data->pnum);
         if (qdict_haskey(qdict, "backing_file")) {      data->done = true;
             monitor_printf(mon, " backing_file=");  
             monitor_print_filename(mon, qdict_get_str(qdict, "backing_file"));  
         }  
         monitor_printf(mon, " ro=%d drv=%s encrypted=%d",  
                             qdict_get_bool(qdict, "ro"),  
                             qdict_get_str(qdict, "drv"),  
                             qdict_get_bool(qdict, "encrypted"));  
     } else {  
         monitor_printf(mon, " [not inserted]");  
     }  
   
     monitor_printf(mon, "\n");  
 }  }
   
 void bdrv_info_print(Monitor *mon, const QObject *data)  /*
    * Synchronous wrapper around bdrv_co_is_allocated().
    *
    * See bdrv_co_is_allocated() for details.
    */
   int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                         int *pnum)
 {  {
     qlist_iter(qobject_to_qlist(data), bdrv_print_dict, mon);      Coroutine *co;
       BdrvCoIsAllocatedData data = {
           .bs = bs,
           .sector_num = sector_num,
           .nb_sectors = nb_sectors,
           .pnum = pnum,
           .done = false,
       };
   
       co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
       qemu_coroutine_enter(co, &data);
       while (!data.done) {
           qemu_aio_wait();
       }
       return data.ret;
 }  }
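
To show the wrapper in action, a hypothetical debugging helper (not part of this diff) that walks an image and prints its allocation map; it assumes stdio is available and clamps each query so nb_sectors stays within int range:

    static void example_dump_allocation(BlockDriverState *bs)
    {
        uint64_t total;
        int64_t sector = 0;

        bdrv_get_geometry(bs, &total);
        while (sector < (int64_t)total) {
            int64_t remaining = total - sector;
            int n = remaining > 65536 ? 65536 : (int)remaining;
            int pnum;
            int allocated = bdrv_is_allocated(bs, sector, n, &pnum);

            if (pnum == 0) {
                break;      /* query ran past the end of the image */
            }
            printf("%s: sectors %" PRId64 "..%" PRId64 "\n",
                   allocated ? "allocated" : "unallocated",
                   sector, sector + pnum - 1);
            sector += pnum;
        }
    }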
   
 void bdrv_info(Monitor *mon, QObject **ret_data)  BlockInfoList *qmp_query_block(Error **errp)
 {  {
     QList *bs_list;      BlockInfoList *head = NULL, *cur_item = NULL;
     BlockDriverState *bs;      BlockDriverState *bs;
   
     bs_list = qlist_new();  
   
     QTAILQ_FOREACH(bs, &bdrv_states, list) {      QTAILQ_FOREACH(bs, &bdrv_states, list) {
         QObject *bs_obj;          BlockInfoList *info = g_malloc0(sizeof(*info));
   
         bs_obj = qobject_from_jsonf("{ 'device': %s, 'type': 'unknown', "          info->value = g_malloc0(sizeof(*info->value));
                                     "'removable': %i, 'locked': %i }",          info->value->device = g_strdup(bs->device_name);
                                     bs->device_name, bs->removable,          info->value->type = g_strdup("unknown");
                                     bs->locked);          info->value->locked = bdrv_dev_is_medium_locked(bs);
           info->value->removable = bdrv_dev_has_removable_media(bs);
   
           if (bdrv_dev_has_removable_media(bs)) {
               info->value->has_tray_open = true;
               info->value->tray_open = bdrv_dev_is_tray_open(bs);
           }
   
           if (bdrv_iostatus_is_enabled(bs)) {
               info->value->has_io_status = true;
               info->value->io_status = bs->iostatus;
           }
   
         if (bs->drv) {          if (bs->drv) {
             QObject *obj;              info->value->has_inserted = true;
             QDict *bs_dict = qobject_to_qdict(bs_obj);              info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
               info->value->inserted->file = g_strdup(bs->filename);
               info->value->inserted->ro = bs->read_only;
               info->value->inserted->drv = g_strdup(bs->drv->format_name);
               info->value->inserted->encrypted = bs->encrypted;
               if (bs->backing_file[0]) {
                   info->value->inserted->has_backing_file = true;
                   info->value->inserted->backing_file = g_strdup(bs->backing_file);
               }
   
             obj = qobject_from_jsonf("{ 'file': %s, 'ro': %i, 'drv': %s, "              if (bs->io_limits_enabled) {
                                      "'encrypted': %i }",                  info->value->inserted->bps =
                                      bs->filename, bs->read_only,                                 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
                                      bs->drv->format_name,                  info->value->inserted->bps_rd =
                                      bdrv_is_encrypted(bs));                                 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
             if (bs->backing_file[0] != '\0') {                  info->value->inserted->bps_wr =
                 QDict *qdict = qobject_to_qdict(obj);                                 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
                 qdict_put(qdict, "backing_file",                  info->value->inserted->iops =
                           qstring_from_str(bs->backing_file));                                 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
                   info->value->inserted->iops_rd =
                                  bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
                   info->value->inserted->iops_wr =
                                  bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
             }              }
           }
   
             qdict_put_obj(bs_dict, "inserted", obj);          /* XXX: waiting for the qapi to support GSList */
           if (!cur_item) {
               head = cur_item = info;
           } else {
               cur_item->next = info;
               cur_item = info;
         }          }
         qlist_append_obj(bs_list, bs_obj);  
     }      }
   
     *ret_data = QOBJECT(bs_list);      return head;
 }  
   
 static void bdrv_stats_iter(QObject *data, void *opaque)  
 {  
     QDict *qdict;  
     Monitor *mon = opaque;  
   
     qdict = qobject_to_qdict(data);  
     monitor_printf(mon, "%s:", qdict_get_str(qdict, "device"));  
   
     qdict = qobject_to_qdict(qdict_get(qdict, "stats"));  
     monitor_printf(mon, " rd_bytes=%" PRId64  
                         " wr_bytes=%" PRId64  
                         " rd_operations=%" PRId64  
                         " wr_operations=%" PRId64  
                         "\n",  
                         qdict_get_int(qdict, "rd_bytes"),  
                         qdict_get_int(qdict, "wr_bytes"),  
                         qdict_get_int(qdict, "rd_operations"),  
                         qdict_get_int(qdict, "wr_operations"));  
 }  }
   
  void bdrv_stats_print(Monitor *mon, const QObject *data)  /* Consider exposing this as a full-fledged QMP command */
   static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
 {  {
     qlist_iter(qobject_to_qlist(data), bdrv_stats_iter, mon);      BlockStats *s;
 }  
   
 static QObject* bdrv_info_stats_bs(BlockDriverState *bs)      s = g_malloc0(sizeof(*s));
 {  
     QObject *res;  
     QDict *dict;  
   
     res = qobject_from_jsonf("{ 'stats': {"      if (bs->device_name[0]) {
                              "'rd_bytes': %" PRId64 ","          s->has_device = true;
                              "'wr_bytes': %" PRId64 ","          s->device = g_strdup(bs->device_name);
                              "'rd_operations': %" PRId64 ","  
                              "'wr_operations': %" PRId64 ","  
                              "'wr_highest_offset': %" PRId64  
                              "} }",  
                              bs->rd_bytes, bs->wr_bytes,  
                              bs->rd_ops, bs->wr_ops,  
                              bs->wr_highest_sector *  
                              (uint64_t)BDRV_SECTOR_SIZE);  
     dict  = qobject_to_qdict(res);  
   
     if (*bs->device_name) {  
         qdict_put(dict, "device", qstring_from_str(bs->device_name));  
     }      }
   
       s->stats = g_malloc0(sizeof(*s->stats));
       s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
       s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
       s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
       s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
       s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
       s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
       s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
       s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
       s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
   
     if (bs->file) {      if (bs->file) {
         QObject *parent = bdrv_info_stats_bs(bs->file);          s->has_parent = true;
         qdict_put_obj(dict, "parent", parent);          s->parent = qmp_query_blockstat(bs->file, NULL);
     }      }
   
     return res;      return s;
 }  }
   
 void bdrv_info_stats(Monitor *mon, QObject **ret_data)  BlockStatsList *qmp_query_blockstats(Error **errp)
 {  {
     QObject *obj;      BlockStatsList *head = NULL, *cur_item = NULL;
     QList *devices;  
     BlockDriverState *bs;      BlockDriverState *bs;
   
     devices = qlist_new();  
   
     QTAILQ_FOREACH(bs, &bdrv_states, list) {      QTAILQ_FOREACH(bs, &bdrv_states, list) {
         obj = bdrv_info_stats_bs(bs);          BlockStatsList *info = g_malloc0(sizeof(*info));
         qlist_append_obj(devices, obj);          info->value = qmp_query_blockstat(bs, NULL);
   
           /* XXX: waiting for the qapi to support GSList */
           if (!cur_item) {
               head = cur_item = info;
           } else {
               cur_item->next = info;
               cur_item = info;
           }
     }      }
   
     *ret_data = QOBJECT(devices);      return head;
 }  }
   
 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)  const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
Line 1860  const char *bdrv_get_encrypted_filename( Line 2698  const char *bdrv_get_encrypted_filename(
 void bdrv_get_backing_filename(BlockDriverState *bs,  void bdrv_get_backing_filename(BlockDriverState *bs,
                                char *filename, int filename_size)                                 char *filename, int filename_size)
 {  {
     if (!bs->backing_file) {      pstrcpy(filename, filename_size, bs->backing_file);
         pstrcpy(filename, filename_size, "");  
     } else {  
         pstrcpy(filename, filename_size, bs->backing_file);  
     }  
 }  }
   
 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,  int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
Line 1940  void bdrv_debug_event(BlockDriverState * Line 2774  void bdrv_debug_event(BlockDriverState *
 int bdrv_can_snapshot(BlockDriverState *bs)  int bdrv_can_snapshot(BlockDriverState *bs)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     if (!drv || bdrv_is_removable(bs) || bdrv_is_read_only(bs)) {      if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
         return 0;          return 0;
     }      }
   
Line 2057  int bdrv_snapshot_load_tmp(BlockDriverSt Line 2891  int bdrv_snapshot_load_tmp(BlockDriverSt
     return -ENOTSUP;      return -ENOTSUP;
 }  }
   
   BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
           const char *backing_file)
   {
       if (!bs->drv) {
           return NULL;
       }
   
       if (bs->backing_hd) {
           if (strcmp(bs->backing_file, backing_file) == 0) {
               return bs->backing_hd;
           } else {
               return bdrv_find_backing_image(bs->backing_hd, backing_file);
           }
       }
   
       return NULL;
   }
   
 #define NB_SUFFIXES 4  #define NB_SUFFIXES 4
   
 char *get_human_readable_size(char *buf, int buf_size, int64_t size)  char *get_human_readable_size(char *buf, int buf_size, int64_t size)
Line 2130  char *bdrv_snapshot_dump(char *buf, int  Line 2982  char *bdrv_snapshot_dump(char *buf, int 
     return buf;      return buf;
 }  }
   
   
 /**************************************************************/  /**************************************************************/
 /* async I/Os */  /* async I/Os */
   
Line 2138  BlockDriverAIOCB *bdrv_aio_readv(BlockDr Line 2989  BlockDriverAIOCB *bdrv_aio_readv(BlockDr
                                  QEMUIOVector *qiov, int nb_sectors,                                   QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)                                   BlockDriverCompletionFunc *cb, void *opaque)
 {  {
     BlockDriver *drv = bs->drv;  
     BlockDriverAIOCB *ret;  
   
     trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);      trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
   
     if (!drv)      return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
         return NULL;                                   cb, opaque, false);
     if (bdrv_check_request(bs, sector_num, nb_sectors))  
         return NULL;  
   
     ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,  
                               cb, opaque);  
   
     if (ret) {  
         /* Update stats even though technically transfer has not happened. */  
         bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;  
         bs->rd_ops ++;  
     }  
   
     return ret;  
 }  
   
 typedef struct BlockCompleteData {  
     BlockDriverCompletionFunc *cb;  
     void *opaque;  
     BlockDriverState *bs;  
     int64_t sector_num;  
     int nb_sectors;  
 } BlockCompleteData;  
   
 static void block_complete_cb(void *opaque, int ret)  
 {  
     BlockCompleteData *b = opaque;  
   
     if (b->bs->dirty_bitmap) {  
         set_dirty_bitmap(b->bs, b->sector_num, b->nb_sectors, 1);  
     }  
     b->cb(b->opaque, ret);  
     qemu_free(b);  
 }  
   
 static BlockCompleteData *blk_dirty_cb_alloc(BlockDriverState *bs,  
                                              int64_t sector_num,  
                                              int nb_sectors,  
                                              BlockDriverCompletionFunc *cb,  
                                              void *opaque)  
 {  
     BlockCompleteData *blkdata = qemu_mallocz(sizeof(BlockCompleteData));  
   
     blkdata->bs = bs;  
     blkdata->cb = cb;  
     blkdata->opaque = opaque;  
     blkdata->sector_num = sector_num;  
     blkdata->nb_sectors = nb_sectors;  
   
     return blkdata;  
 }  }
   
 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,  BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                   QEMUIOVector *qiov, int nb_sectors,                                    QEMUIOVector *qiov, int nb_sectors,
                                   BlockDriverCompletionFunc *cb, void *opaque)                                    BlockDriverCompletionFunc *cb, void *opaque)
 {  {
     BlockDriver *drv = bs->drv;  
     BlockDriverAIOCB *ret;  
     BlockCompleteData *blk_cb_data;  
   
     trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);      trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
   
     if (!drv)      return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
         return NULL;                                   cb, opaque, true);
     if (bs->read_only)  
         return NULL;  
     if (bdrv_check_request(bs, sector_num, nb_sectors))  
         return NULL;  
   
     if (bs->dirty_bitmap) {  
         blk_cb_data = blk_dirty_cb_alloc(bs, sector_num, nb_sectors, cb,  
                                          opaque);  
         cb = &block_complete_cb;  
         opaque = blk_cb_data;  
     }  
   
     ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,  
                                cb, opaque);  
   
     if (ret) {  
         /* Update stats even though technically transfer has not happened. */  
         bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;  
         bs->wr_ops ++;  
         if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {  
             bs->wr_highest_sector = sector_num + nb_sectors - 1;  
         }  
     }  
   
     return ret;  
 }  }
   
   
Line 2244  typedef struct MultiwriteCB { Line 3014  typedef struct MultiwriteCB {
         BlockDriverCompletionFunc *cb;          BlockDriverCompletionFunc *cb;
         void *opaque;          void *opaque;
         QEMUIOVector *free_qiov;          QEMUIOVector *free_qiov;
         void *free_buf;  
     } callbacks[];      } callbacks[];
 } MultiwriteCB;  } MultiwriteCB;
   
Line 2257  static void multiwrite_user_cb(Multiwrit Line 3026  static void multiwrite_user_cb(Multiwrit
         if (mcb->callbacks[i].free_qiov) {          if (mcb->callbacks[i].free_qiov) {
             qemu_iovec_destroy(mcb->callbacks[i].free_qiov);              qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
         }          }
         qemu_free(mcb->callbacks[i].free_qiov);          g_free(mcb->callbacks[i].free_qiov);
         qemu_vfree(mcb->callbacks[i].free_buf);  
     }      }
 }  }
   
Line 2275  static void multiwrite_cb(void *opaque,  Line 3043  static void multiwrite_cb(void *opaque, 
     mcb->num_requests--;      mcb->num_requests--;
     if (mcb->num_requests == 0) {      if (mcb->num_requests == 0) {
         multiwrite_user_cb(mcb);          multiwrite_user_cb(mcb);
         qemu_free(mcb);          g_free(mcb);
     }      }
 }  }
   
Line 2315  static int multiwrite_merge(BlockDriverS Line 3083  static int multiwrite_merge(BlockDriverS
         int merge = 0;          int merge = 0;
         int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;          int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
   
         // This handles the cases that are valid for all block drivers, namely          // Handle exactly sequential writes and overlapping writes.
         // exactly sequential writes and overlapping writes.  
         if (reqs[i].sector <= oldreq_last) {          if (reqs[i].sector <= oldreq_last) {
             merge = 1;              merge = 1;
         }          }
   
         // The block driver may decide that it makes sense to combine requests  
         // even if there is a gap of some sectors between them. In this case,  
         // the gap is filled with zeros (therefore only applicable for yet  
         // unused space in format like qcow2).  
         if (!merge && bs->drv->bdrv_merge_requests) {  
             merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);  
         }  
   
         if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {          if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
             merge = 0;              merge = 0;
         }          }
   
         if (merge) {          if (merge) {
             size_t size;              size_t size;
             QEMUIOVector *qiov = qemu_mallocz(sizeof(*qiov));              QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
             qemu_iovec_init(qiov,              qemu_iovec_init(qiov,
                 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);                  reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
   
Line 2344  static int multiwrite_merge(BlockDriverS Line 3103  static int multiwrite_merge(BlockDriverS
             size = (reqs[i].sector - reqs[outidx].sector) << 9;              size = (reqs[i].sector - reqs[outidx].sector) << 9;
             qemu_iovec_concat(qiov, reqs[outidx].qiov, size);              qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
   
              // We might need to add some zeros between the two requests              // We should not need to add any zeros between the two requests
              if (reqs[i].sector > oldreq_last) {              assert(reqs[i].sector <= oldreq_last);
                 size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;  
                 uint8_t *buf = qemu_blockalign(bs, zero_bytes);  
                 memset(buf, 0, zero_bytes);  
                 qemu_iovec_add(qiov, buf, zero_bytes);  
                 mcb->callbacks[i].free_buf = buf;  
             }  
   
             // Add the second request              // Add the second request
             qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);              qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
Line 2387  static int multiwrite_merge(BlockDriverS Line 3140  static int multiwrite_merge(BlockDriverS
  */   */
 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)  int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
 {  {
     BlockDriverAIOCB *acb;  
     MultiwriteCB *mcb;      MultiwriteCB *mcb;
     int i;      int i;
   
Line 2404  int bdrv_aio_multiwrite(BlockDriverState Line 3156  int bdrv_aio_multiwrite(BlockDriverState
     }      }
   
     // Create MultiwriteCB structure      // Create MultiwriteCB structure
     mcb = qemu_mallocz(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));      mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
     mcb->num_requests = 0;      mcb->num_requests = 0;
     mcb->num_callbacks = num_reqs;      mcb->num_callbacks = num_reqs;
   
Line 2418  int bdrv_aio_multiwrite(BlockDriverState Line 3170  int bdrv_aio_multiwrite(BlockDriverState
   
     trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);      trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
   
     /*      /* Run the aio requests. */
      * Run the aio requests. As soon as one request can't be submitted      mcb->num_requests = num_reqs;
      * successfully, fail all requests that are not yet submitted (we must  
      * return failure for all requests anyway)  
      *  
      * num_requests cannot be set to the right value immediately: If  
      * bdrv_aio_writev fails for some request, num_requests would be too high  
      * and therefore multiwrite_cb() would never recognize the multiwrite  
      * request as completed. We also cannot use the loop variable i to set it  
      * when the first request fails because the callback may already have been  
      * called for previously submitted requests. Thus, num_requests must be  
      * incremented for each request that is submitted.  
      *  
      * The problem that callbacks may be called early also means that we need  
      * to take care that num_requests doesn't become 0 before all requests are  
      * submitted - multiwrite_cb() would consider the multiwrite request  
      * completed. A dummy request that is "completed" by a manual call to  
      * multiwrite_cb() takes care of this.  
      */  
     mcb->num_requests = 1;  
   
     // Run the aio requests  
     for (i = 0; i < num_reqs; i++) {      for (i = 0; i < num_reqs; i++) {
         mcb->num_requests++;          bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
         acb = bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,  
             reqs[i].nb_sectors, multiwrite_cb, mcb);              reqs[i].nb_sectors, multiwrite_cb, mcb);
       }
   
         if (acb == NULL) {      return 0;
             // We can only fail the whole thing if no request has been  }
             // submitted yet. Otherwise we'll wait for the submitted AIOs to  
             // complete and report the error in the callback.  void bdrv_aio_cancel(BlockDriverAIOCB *acb)
             if (i == 0) {  {
                 trace_bdrv_aio_multiwrite_earlyfail(mcb);      acb->pool->cancel(acb);
                 goto fail;  }
             } else {  
                 trace_bdrv_aio_multiwrite_latefail(mcb, i);  /* block I/O throttling */
                 multiwrite_cb(mcb, -EIO);  static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
                 break;                   bool is_write, double elapsed_time, uint64_t *wait)
             }  {
       uint64_t bps_limit = 0;
       double   bytes_limit, bytes_base, bytes_res;
       double   slice_time, wait_time;
   
       if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
           bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
       } else if (bs->io_limits.bps[is_write]) {
           bps_limit = bs->io_limits.bps[is_write];
       } else {
           if (wait) {
               *wait = 0;
         }          }
   
           return false;
     }      }
   
     /* Complete the dummy request */      slice_time = bs->slice_end - bs->slice_start;
     multiwrite_cb(mcb, 0);      slice_time /= (NANOSECONDS_PER_SECOND);
       bytes_limit = bps_limit * slice_time;
       bytes_base  = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
       if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
           bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
       }
   
      return 0;      /* bytes_base: the number of bytes already read/written,
         *             obtained from the accounting history.
         * bytes_res: the remaining bytes of data which need to be read/written.
         * (bytes_base + bytes_res) / bps_limit: used to calculate
         *             the total time for completing reading/writing all data.
         */
       bytes_res   = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
   
 fail:      if (bytes_base + bytes_res <= bytes_limit) {
     for (i = 0; i < mcb->num_callbacks; i++) {          if (wait) {
         reqs[i].error = -EIO;              *wait = 0;
           }
   
           return false;
     }      }
     qemu_free(mcb);  
     return -1;      /* Calc approx time to dispatch */
       wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
   
        /* When the I/O rate at runtime exceeds the limits, bs->slice_end
         * needs to be extended so that the current statistics are kept
         * until the timer fires; the amount of the extension was tuned
         * experimentally.
         */
       bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
       bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
       if (wait) {
           *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
       }
   
       return true;
 }  }
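
To make the arithmetic above concrete, consider a hypothetical slice with bps_limit = 1,000,000 bytes/s and slice_time = 0.5 s:

    bytes_limit = 1,000,000 * 0.5               = 500,000 bytes
    bytes_base  = 450,000 bytes already accounted in this slice
    bytes_res   = 128 sectors * 512 bytes       =  65,536 bytes
    bytes_base + bytes_res = 515,536 > 500,000  -> the request must wait
    wait_time   = 515,536 / 1,000,000 - 0.45    =   0.0655 s

With 0.45 s of the slice elapsed, dispatch is therefore delayed by roughly 66 ms; the function scales wait_time by BLOCK_IO_SLICE_TIME * 10 before handing it back through *wait.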
   
 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,  static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
         BlockDriverCompletionFunc *cb, void *opaque)                               double elapsed_time, uint64_t *wait)
 {  {
     BlockDriver *drv = bs->drv;      uint64_t iops_limit = 0;
       double   ios_limit, ios_base;
       double   slice_time, wait_time;
   
     trace_bdrv_aio_flush(bs, opaque);      if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
           iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
       } else if (bs->io_limits.iops[is_write]) {
           iops_limit = bs->io_limits.iops[is_write];
       } else {
           if (wait) {
               *wait = 0;
           }
   
     if (bs->open_flags & BDRV_O_NO_FLUSH) {          return false;
         return bdrv_aio_noop_em(bs, cb, opaque);  
     }      }
   
     if (!drv)      slice_time = bs->slice_end - bs->slice_start;
         return NULL;      slice_time /= (NANOSECONDS_PER_SECOND);
     return drv->bdrv_aio_flush(bs, cb, opaque);      ios_limit  = iops_limit * slice_time;
       ios_base   = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
       if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
           ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
       }
   
       if (ios_base + 1 <= ios_limit) {
           if (wait) {
               *wait = 0;
           }
   
           return false;
       }
   
       /* Calc approx time to dispatch */
       wait_time = (ios_base + 1) / iops_limit;
       if (wait_time > elapsed_time) {
           wait_time = wait_time - elapsed_time;
       } else {
           wait_time = 0;
       }
   
       bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
       bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
       if (wait) {
           *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
       }
   
       return true;
 }  }
   
 void bdrv_aio_cancel(BlockDriverAIOCB *acb)  static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
                              bool is_write, int64_t *wait)
 {  {
     acb->pool->cancel(acb);      int64_t  now, max_wait;
 }      uint64_t bps_wait = 0, iops_wait = 0;
       double   elapsed_time;
       int      bps_ret, iops_ret;
   
       now = qemu_get_clock_ns(vm_clock);
       if ((bs->slice_start < now)
           && (bs->slice_end > now)) {
           bs->slice_end = now + bs->slice_time;
       } else {
           bs->slice_time  =  5 * BLOCK_IO_SLICE_TIME;
           bs->slice_start = now;
           bs->slice_end   = now + bs->slice_time;
   
           bs->io_base.bytes[is_write]  = bs->nr_bytes[is_write];
           bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
   
           bs->io_base.ios[is_write]    = bs->nr_ops[is_write];
           bs->io_base.ios[!is_write]   = bs->nr_ops[!is_write];
       }
   
       elapsed_time  = now - bs->slice_start;
       elapsed_time  /= (NANOSECONDS_PER_SECOND);
   
       bps_ret  = bdrv_exceed_bps_limits(bs, nb_sectors,
                                         is_write, elapsed_time, &bps_wait);
       iops_ret = bdrv_exceed_iops_limits(bs, is_write,
                                         elapsed_time, &iops_wait);
       if (bps_ret || iops_ret) {
           max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
           if (wait) {
               *wait = max_wait;
           }
   
           now = qemu_get_clock_ns(vm_clock);
           if (bs->slice_end < now + max_wait) {
               bs->slice_end = now + max_wait;
           }
   
           return true;
       }
   
       if (wait) {
           *wait = 0;
       }
   
       return false;
   }
   
 /**************************************************************/  /**************************************************************/
 /* async block device emulation */  /* async block device emulation */
Line 2550  static BlockDriverAIOCB *bdrv_aio_rw_vec Line 3405  static BlockDriverAIOCB *bdrv_aio_rw_vec
     acb->is_write = is_write;      acb->is_write = is_write;
     acb->qiov = qiov;      acb->qiov = qiov;
     acb->bounce = qemu_blockalign(bs, qiov->size);      acb->bounce = qemu_blockalign(bs, qiov->size);
       acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
     if (!acb->bh)  
         acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);  
   
     if (is_write) {      if (is_write) {
         qemu_iovec_to_buffer(acb->qiov, acb->bounce);          qemu_iovec_to_buffer(acb->qiov, acb->bounce);
         acb->ret = bdrv_write(bs, sector_num, acb->bounce, nb_sectors);          acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
     } else {      } else {
         acb->ret = bdrv_read(bs, sector_num, acb->bounce, nb_sectors);          acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
     }      }
   
     qemu_bh_schedule(acb->bh);      qemu_bh_schedule(acb->bh);
Line 2580  static BlockDriverAIOCB *bdrv_aio_writev Line 3433  static BlockDriverAIOCB *bdrv_aio_writev
     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);      return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
 }  }
   
 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,  
         BlockDriverCompletionFunc *cb, void *opaque)  typedef struct BlockDriverAIOCBCoroutine {
       BlockDriverAIOCB common;
       BlockRequest req;
       bool is_write;
       QEMUBH* bh;
   } BlockDriverAIOCBCoroutine;
   
   static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
 {  {
     BlockDriverAIOCBSync *acb;      qemu_aio_flush();
   }
   
     acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);  static AIOPool bdrv_em_co_aio_pool = {
     acb->is_write = 1; /* don't bounce in the completion hadler */      .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
     acb->qiov = NULL;      .cancel             = bdrv_aio_co_cancel_em,
     acb->bounce = NULL;  };
     acb->ret = 0;  
   
     if (!acb->bh)  static void bdrv_co_em_bh(void *opaque)
         acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);  {
       BlockDriverAIOCBCoroutine *acb = opaque;
   
     bdrv_flush(bs);      acb->common.cb(acb->common.opaque, acb->req.error);
     qemu_bh_schedule(acb->bh);      qemu_bh_delete(acb->bh);
     return &acb->common;      qemu_aio_release(acb);
 }  }
   
 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,  /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
         BlockDriverCompletionFunc *cb, void *opaque)  static void coroutine_fn bdrv_co_do_rw(void *opaque)
 {  {
     BlockDriverAIOCBSync *acb;      BlockDriverAIOCBCoroutine *acb = opaque;
       BlockDriverState *bs = acb->common.bs;
   
     acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);      if (!acb->is_write) {
     acb->is_write = 1; /* don't bounce in the completion handler */          acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
     acb->qiov = NULL;              acb->req.nb_sectors, acb->req.qiov, 0);
     acb->bounce = NULL;      } else {
     acb->ret = 0;          acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
               acb->req.nb_sectors, acb->req.qiov, 0);
     if (!acb->bh) {  
         acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);  
     }      }
   
       acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
     qemu_bh_schedule(acb->bh);      qemu_bh_schedule(acb->bh);
     return &acb->common;  
 }  }
   
 /**************************************************************/  static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
 /* sync block device emulation */                                                 int64_t sector_num,
                                                  QEMUIOVector *qiov,
                                                  int nb_sectors,
                                                  BlockDriverCompletionFunc *cb,
                                                  void *opaque,
                                                  bool is_write)
   {
       Coroutine *co;
       BlockDriverAIOCBCoroutine *acb;
   
       acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
       acb->req.sector = sector_num;
       acb->req.nb_sectors = nb_sectors;
       acb->req.qiov = qiov;
       acb->is_write = is_write;
   
 static void bdrv_rw_em_cb(void *opaque, int ret)      co = qemu_coroutine_create(bdrv_co_do_rw);
 {      qemu_coroutine_enter(co, acb);
     *(int *)opaque = ret;  
       return &acb->common;
 }  }
   
 #define NOT_DONE 0x7fffffff  static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
   {
       BlockDriverAIOCBCoroutine *acb = opaque;
       BlockDriverState *bs = acb->common.bs;
   
       acb->req.error = bdrv_co_flush(bs);
       acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
       qemu_bh_schedule(acb->bh);
   }
   
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,  BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                         uint8_t *buf, int nb_sectors)          BlockDriverCompletionFunc *cb, void *opaque)
 {  {
     int async_ret;      trace_bdrv_aio_flush(bs, opaque);
     BlockDriverAIOCB *acb;  
     struct iovec iov;  
     QEMUIOVector qiov;  
   
     async_context_push();      Coroutine *co;
       BlockDriverAIOCBCoroutine *acb;
   
     async_ret = NOT_DONE;      acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
     iov.iov_base = (void *)buf;      co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
     iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;      qemu_coroutine_enter(co, acb);
     qemu_iovec_init_external(&qiov, &iov, 1);  
     acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,  
         bdrv_rw_em_cb, &async_ret);  
     if (acb == NULL) {  
         async_ret = -1;  
         goto fail;  
     }  
   
     while (async_ret == NOT_DONE) {      return &acb->common;
         qemu_aio_wait();  }
     }  
   
   static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
   {
       BlockDriverAIOCBCoroutine *acb = opaque;
       BlockDriverState *bs = acb->common.bs;
   
 fail:      acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
     async_context_pop();      acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
     return async_ret;      qemu_bh_schedule(acb->bh);
 }  }
   
 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,  BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
                          const uint8_t *buf, int nb_sectors)          int64_t sector_num, int nb_sectors,
           BlockDriverCompletionFunc *cb, void *opaque)
 {  {
     int async_ret;      Coroutine *co;
     BlockDriverAIOCB *acb;      BlockDriverAIOCBCoroutine *acb;
     struct iovec iov;  
     QEMUIOVector qiov;  
   
     async_context_push();      trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
   
     async_ret = NOT_DONE;      acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
     iov.iov_base = (void *)buf;      acb->req.sector = sector_num;
     iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;      acb->req.nb_sectors = nb_sectors;
     qemu_iovec_init_external(&qiov, &iov, 1);      co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
     acb = bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors,      qemu_coroutine_enter(co, acb);
         bdrv_rw_em_cb, &async_ret);  
     if (acb == NULL) {  
         async_ret = -1;  
         goto fail;  
     }  
     while (async_ret == NOT_DONE) {  
         qemu_aio_wait();  
     }  
   
 fail:      return &acb->common;
     async_context_pop();  
     return async_ret;  
 }  }
   
 void bdrv_init(void)  void bdrv_init(void)
Line 2708  void *qemu_aio_get(AIOPool *pool, BlockD Line 3574  void *qemu_aio_get(AIOPool *pool, BlockD
         acb = pool->free_aiocb;          acb = pool->free_aiocb;
         pool->free_aiocb = acb->next;          pool->free_aiocb = acb->next;
     } else {      } else {
         acb = qemu_mallocz(pool->aiocb_size);          acb = g_malloc0(pool->aiocb_size);
         acb->pool = pool;          acb->pool = pool;
     }      }
     acb->bs = bs;      acb->bs = bs;
Line 2726  void qemu_aio_release(void *p) Line 3592  void qemu_aio_release(void *p)
 }  }
   
 /**************************************************************/  /**************************************************************/
   /* Coroutine block device emulation */
   
   typedef struct CoroutineIOCompletion {
       Coroutine *coroutine;
       int ret;
   } CoroutineIOCompletion;
   
   static void bdrv_co_io_em_complete(void *opaque, int ret)
   {
       CoroutineIOCompletion *co = opaque;
   
       co->ret = ret;
       qemu_coroutine_enter(co->coroutine, NULL);
   }
   
   static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                         int nb_sectors, QEMUIOVector *iov,
                                         bool is_write)
   {
       CoroutineIOCompletion co = {
           .coroutine = qemu_coroutine_self(),
       };
       BlockDriverAIOCB *acb;
   
       if (is_write) {
           acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                          bdrv_co_io_em_complete, &co);
       } else {
           acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                         bdrv_co_io_em_complete, &co);
       }
   
       trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
       if (!acb) {
           return -EIO;
       }
       qemu_coroutine_yield();
   
       return co.ret;
   }
   
   static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                            int64_t sector_num, int nb_sectors,
                                            QEMUIOVector *iov)
   {
       return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
   }
   
   static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                            int64_t sector_num, int nb_sectors,
                                            QEMUIOVector *iov)
   {
       return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
   }
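
The CoroutineIOCompletion bridge above generalizes to any callback-style AIO entry point: submit the request with bdrv_co_io_em_complete() as its callback, yield, and read the result once the callback re-enters the coroutine. A minimal sketch, where hypothetical_aio_op() is a placeholder and not a real function in this file:

    static int coroutine_fn example_co_bridge(BlockDriverState *bs)
    {
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };
        BlockDriverAIOCB *acb;

        /* hypothetical_aio_op() stands in for any callback-based AIO
         * submission function with the usual (cb, opaque) tail. */
        acb = hypothetical_aio_op(bs, bdrv_co_io_em_complete, &co);
        if (!acb) {
            return -EIO;
        }
        qemu_coroutine_yield();    /* resumed by bdrv_co_io_em_complete() */
        return co.ret;
    }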
   
   static void coroutine_fn bdrv_flush_co_entry(void *opaque)
   {
       RwCo *rwco = opaque;
   
       rwco->ret = bdrv_co_flush(rwco->bs);
   }
   
   int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
   {
       int ret;
   
       if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
           return 0;
       }
   
       /* Write back cached data to the OS even with cache=unsafe */
       if (bs->drv->bdrv_co_flush_to_os) {
           ret = bs->drv->bdrv_co_flush_to_os(bs);
           if (ret < 0) {
               return ret;
           }
       }
   
       /* But don't actually force it to the disk with cache=unsafe */
       if (bs->open_flags & BDRV_O_NO_FLUSH) {
           return 0;
       }
   
       if (bs->drv->bdrv_co_flush_to_disk) {
           ret = bs->drv->bdrv_co_flush_to_disk(bs);
       } else if (bs->drv->bdrv_aio_flush) {
           BlockDriverAIOCB *acb;
           CoroutineIOCompletion co = {
               .coroutine = qemu_coroutine_self(),
           };
   
           acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
           if (acb == NULL) {
               ret = -EIO;
           } else {
               qemu_coroutine_yield();
               ret = co.ret;
           }
       } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
           ret = 0;
       }
       if (ret < 0) {
           return ret;
       }
   
       /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
        * in the case of cache=unsafe, so there are no useless flushes.
        */
       return bdrv_co_flush(bs->file);
   }
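
  [Editor's note: bdrv_co_flush() prefers the native coroutine hooks and only falls back to AIO emulation or a silent no-op. On the driver side, a raw/posix-style implementation of the .bdrv_co_flush_to_disk hook it consumes can be as small as an fdatasync() call. A hedged sketch; ExampleDriverState and its fd member are invented for illustration.]

    #include <errno.h>
    #include <unistd.h>

    typedef struct ExampleDriverState {
        int fd;                  /* invented: the backing file descriptor */
    } ExampleDriverState;

    static int coroutine_fn example_co_flush_to_disk(BlockDriverState *bs)
    {
        ExampleDriverState *s = bs->opaque;

        /* Push the host page cache out to stable storage. */
        return fdatasync(s->fd) == 0 ? 0 : -errno;
    }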
   
   void bdrv_invalidate_cache(BlockDriverState *bs)
   {
       if (bs->drv && bs->drv->bdrv_invalidate_cache) {
           bs->drv->bdrv_invalidate_cache(bs);
       }
   }
   
   void bdrv_invalidate_cache_all(void)
   {
       BlockDriverState *bs;
   
       QTAILQ_FOREACH(bs, &bdrv_states, list) {
           bdrv_invalidate_cache(bs);
       }
   }
   
   void bdrv_clear_incoming_migration_all(void)
   {
       BlockDriverState *bs;
   
       QTAILQ_FOREACH(bs, &bdrv_states, list) {
           bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
       }
   }
   
   int bdrv_flush(BlockDriverState *bs)
   {
       Coroutine *co;
       RwCo rwco = {
           .bs = bs,
           .ret = NOT_DONE,
       };
   
       if (qemu_in_coroutine()) {
           /* Fast-path if already in coroutine context */
           bdrv_flush_co_entry(&rwco);
       } else {
           co = qemu_coroutine_create(bdrv_flush_co_entry);
           qemu_coroutine_enter(co, &rwco);
           while (rwco.ret == NOT_DONE) {
               qemu_aio_wait();
           }
       }
   
       return rwco.ret;
   }
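
  [Editor's note: bdrv_flush() shows the standard pattern for exposing a synchronous API on top of coroutine code: call the entry function directly when already in coroutine context, otherwise spawn a coroutine and pump qemu_aio_wait() until the RwCo result changes from NOT_DONE. A hedged usage sketch, iterating devices the same way bdrv_invalidate_cache_all() does above; the loop itself is illustrative, not a qemu function (qemu's own bdrv_flush_all() lives elsewhere in this file).]

    static void example_flush_all(void)
    {
        BlockDriverState *bs;

        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            /* Safe from both coroutine and non-coroutine context. */
            int ret = bdrv_flush(bs);
            if (ret < 0) {
                error_report("flush of %s failed: %d",
                             bdrv_get_device_name(bs), ret);
            }
        }
    }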
   
   static void coroutine_fn bdrv_discard_co_entry(void *opaque)
   {
       RwCo *rwco = opaque;
   
       rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
   }
   
   int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                    int nb_sectors)
   {
       if (!bs->drv) {
           return -ENOMEDIUM;
       } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
           return -EIO;
       } else if (bs->read_only) {
           return -EROFS;
       } else if (bs->drv->bdrv_co_discard) {
           return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
       } else if (bs->drv->bdrv_aio_discard) {
           BlockDriverAIOCB *acb;
           CoroutineIOCompletion co = {
               .coroutine = qemu_coroutine_self(),
           };
   
           acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                           bdrv_co_io_em_complete, &co);
           if (acb == NULL) {
               return -EIO;
           } else {
               qemu_coroutine_yield();
               return co.ret;
           }
       } else {
           return 0;
       }
   }
   
   int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
   {
       Coroutine *co;
       RwCo rwco = {
           .bs = bs,
           .sector_num = sector_num,
           .nb_sectors = nb_sectors,
           .ret = NOT_DONE,
       };
   
       if (qemu_in_coroutine()) {
           /* Fast-path if already in coroutine context */
           bdrv_discard_co_entry(&rwco);
       } else {
           co = qemu_coroutine_create(bdrv_discard_co_entry);
           qemu_coroutine_enter(co, &rwco);
           while (rwco.ret == NOT_DONE) {
               qemu_aio_wait();
           }
       }
   
       return rwco.ret;
   }
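
  [Editor's note: bdrv_discard() works in 512-byte sector units, so callers holding byte quantities must convert with BDRV_SECTOR_BITS. A trivial hypothetical wrapper, assuming offset and bytes are already sector-aligned; example_discard_bytes() is not a qemu API.]

    /* Hypothetical helper: discard an aligned byte range. */
    static int example_discard_bytes(BlockDriverState *bs,
                                     int64_t offset, int64_t bytes)
    {
        return bdrv_discard(bs, offset >> BDRV_SECTOR_BITS,
                            bytes >> BDRV_SECTOR_BITS);
    }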
   
   /**************************************************************/
 /* removable device support */  /* removable device support */
   
 /**  /**
Line 2734  void qemu_aio_release(void *p) Line 3831  void qemu_aio_release(void *p)
 int bdrv_is_inserted(BlockDriverState *bs)  int bdrv_is_inserted(BlockDriverState *bs)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     int ret;  
     if (!drv)      if (!drv)
         return 0;          return 0;
     if (!drv->bdrv_is_inserted)      if (!drv->bdrv_is_inserted)
         return !bs->tray_open;          return 1;
     ret = drv->bdrv_is_inserted(bs);      return drv->bdrv_is_inserted(bs);
     return ret;  
 }  }
   
 /**  /**
  * Return TRUE if the media changed since the last call to this   * Return whether the media changed since the last call to this
  * function. It is currently only used for floppy disks   * function, or -ENOTSUP if we don't know.  Most drivers don't know.
  */   */
 int bdrv_media_changed(BlockDriverState *bs)  int bdrv_media_changed(BlockDriverState *bs)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     int ret;  
   
     if (!drv || !drv->bdrv_media_changed)      if (drv && drv->bdrv_media_changed) {
         ret = -ENOTSUP;          return drv->bdrv_media_changed(bs);
     else      }
         ret = drv->bdrv_media_changed(bs);      return -ENOTSUP;
     if (ret == -ENOTSUP)  
         ret = bs->media_changed;  
     bs->media_changed = 0;  
     return ret;  
 }  }
   
 /**  /**
  * If eject_flag is TRUE, eject the media. Otherwise, close the tray   * If eject_flag is TRUE, eject the media. Otherwise, close the tray
  */   */
 int bdrv_eject(BlockDriverState *bs, int eject_flag)  void bdrv_eject(BlockDriverState *bs, bool eject_flag)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
     int ret;  
   
     if (bs->locked) {      if (drv && drv->bdrv_eject) {
         return -EBUSY;          drv->bdrv_eject(bs, eject_flag);
     }      }
   
     if (!drv || !drv->bdrv_eject) {      if (bs->device_name[0] != '\0') {
         ret = -ENOTSUP;          bdrv_emit_qmp_eject_event(bs, eject_flag);
     } else {  
         ret = drv->bdrv_eject(bs, eject_flag);  
     }  
     if (ret == -ENOTSUP) {  
         ret = 0;  
     }  
     if (ret >= 0) {  
         bs->tray_open = eject_flag;  
     }      }
   
     return ret;  
 }  
   
 int bdrv_is_locked(BlockDriverState *bs)  
 {  
     return bs->locked;  
 }  }
   
 /**  /**
  * Lock or unlock the media (if it is locked, the user won't be able   * Lock or unlock the media (if it is locked, the user won't be able
  * to eject it manually).   * to eject it manually).
  */   */
 void bdrv_set_locked(BlockDriverState *bs, int locked)  void bdrv_lock_medium(BlockDriverState *bs, bool locked)
 {  {
     BlockDriver *drv = bs->drv;      BlockDriver *drv = bs->drv;
   
     trace_bdrv_set_locked(bs, locked);      trace_bdrv_lock_medium(bs, locked);
   
     bs->locked = locked;      if (drv && drv->bdrv_lock_medium) {
     if (drv && drv->bdrv_set_locked) {          drv->bdrv_lock_medium(bs, locked);
         drv->bdrv_set_locked(bs, locked);  
     }      }
 }  }
   
Line 2832  BlockDriverAIOCB *bdrv_aio_ioctl(BlockDr Line 3906  BlockDriverAIOCB *bdrv_aio_ioctl(BlockDr
     return NULL;      return NULL;
 }  }
   
   void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
   {
       bs->buffer_alignment = align;
   }
   
 void *qemu_blockalign(BlockDriverState *bs, size_t size)  void *qemu_blockalign(BlockDriverState *bs, size_t size)
 {  {
Line 2847  void bdrv_set_dirty_tracking(BlockDriver Line 3924  void bdrv_set_dirty_tracking(BlockDriver
     if (enable) {      if (enable) {
         if (!bs->dirty_bitmap) {          if (!bs->dirty_bitmap) {
             bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +              bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
                     BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;                      BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG - 1;
             bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;              bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG;
   
             bs->dirty_bitmap = qemu_mallocz(bitmap_size);              bs->dirty_bitmap = g_new0(unsigned long, bitmap_size);
         }          }
     } else {      } else {
         if (bs->dirty_bitmap) {          if (bs->dirty_bitmap) {
             qemu_free(bs->dirty_bitmap);              g_free(bs->dirty_bitmap);
             bs->dirty_bitmap = NULL;              bs->dirty_bitmap = NULL;
         }          }
     }      }
Line 2895  int bdrv_in_use(BlockDriverState *bs) Line 3972  int bdrv_in_use(BlockDriverState *bs)
     return bs->in_use;      return bs->in_use;
 }  }
   
   void bdrv_iostatus_enable(BlockDriverState *bs)
   {
       bs->iostatus_enabled = true;
       bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
   }
   
   /* The I/O status is only enabled if the drive explicitly
    * enables it _and_ the VM is configured to stop on errors */
   bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
   {
       return (bs->iostatus_enabled &&
              (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
               bs->on_write_error == BLOCK_ERR_STOP_ANY    ||
               bs->on_read_error == BLOCK_ERR_STOP_ANY));
   }
   
   void bdrv_iostatus_disable(BlockDriverState *bs)
   {
       bs->iostatus_enabled = false;
   }
   
   void bdrv_iostatus_reset(BlockDriverState *bs)
   {
       if (bdrv_iostatus_is_enabled(bs)) {
           bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
       }
   }
   
   /* XXX: Today this is set by device models because it makes the implementation
      quite simple. However, the block layer knows about the error, so it's
      possible to implement this without device models being involved */
   void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
   {
       if (bdrv_iostatus_is_enabled(bs) &&
           bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
           assert(error >= 0);
           bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                            BLOCK_DEVICE_IO_STATUS_FAILED;
       }
   }
   
   void
   bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
           enum BlockAcctType type)
   {
       assert(type < BDRV_MAX_IOTYPE);
   
       cookie->bytes = bytes;
       cookie->start_time_ns = get_clock();
       cookie->type = type;
   }
   
   void
   bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
   {
       assert(cookie->type < BDRV_MAX_IOTYPE);
   
       bs->nr_bytes[cookie->type] += cookie->bytes;
       bs->nr_ops[cookie->type]++;
       bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
   }
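
  [Editor's note: a hedged sketch of how a device model might combine the accounting and I/O status helpers introduced above. write_sectors_example() is invented; bdrv_write(), BDRV_ACCT_WRITE and BDRV_SECTOR_SIZE are the existing synchronous helper and block.h constants. Note that bdrv_iostatus_set_err() asserts a positive errno, hence the negation of ret.]

    static int write_sectors_example(BlockDriverState *bs, int64_t sector_num,
                                     const uint8_t *buf, int nb_sectors)
    {
        BlockAcctCookie cookie;
        int ret;

        bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
                        BDRV_ACCT_WRITE);
        ret = bdrv_write(bs, sector_num, buf, nb_sectors);
        if (ret < 0) {
            bdrv_iostatus_set_err(bs, -ret);   /* expects a positive errno */
        } else {
            bdrv_acct_done(bs, &cookie);       /* account completed I/O only */
        }
        return ret;
    }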
   
 int bdrv_img_create(const char *filename, const char *fmt,  int bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,                      const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags)                      char *options, uint64_t img_size, int flags)
Line 2988  int bdrv_img_create(const char *filename Line 4127  int bdrv_img_create(const char *filename
         if (backing_file && backing_file->value.s) {          if (backing_file && backing_file->value.s) {
             uint64_t size;              uint64_t size;
             char buf[32];              char buf[32];
               int back_flags;
   
               /* backing files always opened read-only */
               back_flags =
                   flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
   
             bs = bdrv_new("");              bs = bdrv_new("");
   
             ret = bdrv_open(bs, backing_file->value.s, flags, backing_drv);              ret = bdrv_open(bs, backing_file->value.s, back_flags, backing_drv);
             if (ret < 0) {              if (ret < 0) {
                 error_report("Could not open '%s'", backing_file->value.s);                  error_report("Could not open '%s'", backing_file->value.s);
                 goto out;                  goto out;
Line 3037  out: Line 4181  out:
   
     return ret;      return ret;
 }  }
   
   void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
                          int64_t speed, BlockDriverCompletionFunc *cb,
                          void *opaque, Error **errp)
   {
       BlockJob *job;
   
       if (bs->job || bdrv_in_use(bs)) {
           error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
           return NULL;
       }
       bdrv_set_in_use(bs, 1);
   
       job = g_malloc0(job_type->instance_size);
       job->job_type      = job_type;
       job->bs            = bs;
       job->cb            = cb;
       job->opaque        = opaque;
       job->busy          = true;
       bs->job = job;
   
       /* Only set speed when necessary to avoid NotSupported error */
       if (speed != 0) {
           Error *local_err = NULL;
   
           block_job_set_speed(job, speed, &local_err);
           if (error_is_set(&local_err)) {
               bs->job = NULL;
               g_free(job);
               bdrv_set_in_use(bs, 0);
               error_propagate(errp, local_err);
               return NULL;
           }
       }
       return job;
   }
   
   void block_job_complete(BlockJob *job, int ret)
   {
       BlockDriverState *bs = job->bs;
   
       assert(bs->job == job);
       job->cb(job->opaque, ret);
       bs->job = NULL;
       g_free(job);
       bdrv_set_in_use(bs, 0);
   }
   
   void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
   {
       Error *local_err = NULL;
   
       if (!job->job_type->set_speed) {
           error_set(errp, QERR_NOT_SUPPORTED);
           return;
       }
       job->job_type->set_speed(job, speed, &local_err);
       if (error_is_set(&local_err)) {
           error_propagate(errp, local_err);
           return;
       }
   
       job->speed = speed;
   }
   
   void block_job_cancel(BlockJob *job)
   {
       job->cancelled = true;
       if (job->co && !job->busy) {
           qemu_coroutine_enter(job->co, NULL);
       }
   }
   
   bool block_job_is_cancelled(BlockJob *job)
   {
       return job->cancelled;
   }
   
   struct BlockCancelData {
       BlockJob *job;
       BlockDriverCompletionFunc *cb;
       void *opaque;
       bool cancelled;
       int ret;
   };
   
   static void block_job_cancel_cb(void *opaque, int ret)
   {
       struct BlockCancelData *data = opaque;
   
       data->cancelled = block_job_is_cancelled(data->job);
       data->ret = ret;
       data->cb(data->opaque, ret);
   }
   
   int block_job_cancel_sync(BlockJob *job)
   {
       struct BlockCancelData data;
       BlockDriverState *bs = job->bs;
   
       assert(bs->job == job);
   
       /* Set up our own callback to store the result and chain to
        * the original callback.
        */
       data.job = job;
       data.cb = job->cb;
       data.opaque = job->opaque;
       data.ret = -EINPROGRESS;
       job->cb = block_job_cancel_cb;
       job->opaque = &data;
       block_job_cancel(job);
       while (data.ret == -EINPROGRESS) {
           qemu_aio_wait();
       }
       return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
   }
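
  [Editor's note: block_job_cancel_sync() is what a caller uses when a device must go away immediately rather than eventually. A caller-side sketch; example_detach() is invented for illustration.]

    /* Hypothetical caller: make sure no job still references bs. */
    static void example_detach(BlockDriverState *bs)
    {
        if (bs->job) {
            block_job_cancel_sync(bs->job);    /* pumps qemu_aio_wait() */
        }
    }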
   
   void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
   {
       /* Check cancellation *before* setting busy = false, too!  */
       if (!block_job_is_cancelled(job)) {
           job->busy = false;
           co_sleep_ns(clock, ns);
           job->busy = true;
       }
   }
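
  [Editor's note: tying the job primitives together, a minimal block job would look roughly like the sketch below. ExampleJob, example_job_type, example_job_cb() and example_job_start() are all invented, and the BlockJobType initializer assumes the instance_size/job_type/set_speed layout this version uses; only the block_job_*(), coroutine and rt_clock symbols are real.]

    typedef struct ExampleJob {
        BlockJob common;
        int64_t sector;                  /* invented per-job state */
    } ExampleJob;

    static const BlockJobType example_job_type = {
        .instance_size = sizeof(ExampleJob),
        .job_type      = "example",
    };

    static void example_job_cb(void *opaque, int ret)
    {
        /* Invoked by block_job_complete() as the job tears down. */
    }

    static void coroutine_fn example_job_run(void *opaque)
    {
        ExampleJob *s = opaque;

        while (!block_job_is_cancelled(&s->common)) {
            /* ... process one chunk of work at s->sector ... */
            s->sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
            /* Throttle; block_job_cancel() wakes a sleeping job early. */
            block_job_sleep_ns(&s->common, rt_clock, 10000000);
        }
        /* block_job_cancel_sync() maps ret == 0 plus the cancelled flag
         * to -ECANCELED, so completing with 0 here is fine. */
        block_job_complete(&s->common, 0);
    }

    static BlockJob *example_job_start(BlockDriverState *bs, Error **errp)
    {
        ExampleJob *s = block_job_create(&example_job_type, bs, 0,
                                         example_job_cb, NULL, errp);
        if (!s) {
            return NULL;
        }
        /* block_job_cancel() uses job->co to re-enter a sleeping job. */
        s->common.co = qemu_coroutine_create(example_job_run);
        qemu_coroutine_enter(s->common.co, s);
        return &s->common;
    }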
