File: [Qemu by Fabrice Bellard] / qemu / dma-helpers.c
Revision 1.1.1.1 (vendor branch)
Tue Apr 24 16:50:56 2018 UTC by root
Branches: qemu, MAIN
CVS tags: qemu0102, qemu0101, qemu0100, HEAD
qemu 0.10.0

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

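/* Initialize a scatter/gather list with room for alloc_hint entries. */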
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

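/* Append an entry, growing the entry array geometrically when it is full. */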
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

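/* Free the entry array; the QEMUSGList itself is owned by the caller. */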
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}

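/*
 * Per-request state for a scatter/gather block transfer: the target block
 * device, the AIOCB handed back to the caller, the guest scatter/gather
 * list, the current sector and direction, the current position in the list
 * (entry index and byte offset), the iovec describing the chunk currently
 * in flight, and a bottom half used to restart after a map failure.
 */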
typedef struct {
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
} DMABlockState;

static void dma_bdrv_cb(void *opaque, int ret);

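/* Bottom-half handler: retry the transfer once map space is available. */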
static void reschedule_dma(void *opaque)
{
    DMABlockState *dbs = (DMABlockState *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

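/*
 * Map-client callback, invoked when cpu_physical_memory_map() space frees
 * up: schedule a bottom half that restarts the transfer.
 */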
static void continue_after_map_failure(void *opaque)
{
    DMABlockState *dbs = (DMABlockState *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

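/*
 * Completion callback for each chunk of the transfer.  Unmap the guest
 * memory used by the previous chunk, then either finish the request (end
 * of the scatter/gather list, or an error) or map as much of the remaining
 * list as possible and issue the next aio read/write.  If nothing could be
 * mapped, register a map client and wait for memory to become available.
 */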
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMABlockState *dbs = (DMABlockState *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;
    int i;

    dbs->sector_num += dbs->iov.size / 512;
    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->acb->cb(dbs->acb->opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs->acb);
        qemu_free(dbs);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                       dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
}

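/*
 * Common setup for a scatter/gather read or write: allocate the request
 * state, take an AIOCB for the caller, and kick off the first iteration
 * of dma_bdrv_cb() with ret == 0.
 */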
static BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    BlockDriverCompletionFunc *cb, void *opaque,
    int is_write)
{
    DMABlockState *dbs = qemu_malloc(sizeof(*dbs));

    dbs->bs = bs;
    dbs->acb = qemu_aio_get(bs, cb, opaque);
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return dbs->acb;
}

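/* Start a DMA read from the block device into the guest scatter/gather list. */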
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
}

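/* Start a DMA write of the guest scatter/gather list to the block device. */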
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
}
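
/*
 * Usage sketch (illustrative only, not part of the original file): a device
 * model would typically build a scatter/gather list from guest physical
 * addresses and hand it to dma_bdrv_read() or dma_bdrv_write().  The guest
 * addresses, sector number and callback names below are hypothetical.
 *
 *     QEMUSGList sg;
 *     qemu_sglist_init(&sg, 2);
 *     qemu_sglist_add(&sg, guest_addr0, 4096);
 *     qemu_sglist_add(&sg, guest_addr1, 4096);
 *     dma_bdrv_read(bs, &sg, first_sector, my_dma_complete_cb, my_state);
 *     ... once my_dma_complete_cb() has run, free the list ...
 *     qemu_sglist_destroy(&sg);
 */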
