--- version 1.1.1.2, 2018/04/24 18:24:36
+++ version 1.1.1.3, 2018/04/24 19:17:48
@@ v1.1.1.2 line 31 / v1.1.1.3 line 31 @@ struct qemu_laiocb {
     struct iocb iocb;
     ssize_t ret;
     size_t nbytes;
-    int async_context_id;
+    QEMUIOVector *qiov;
+    bool is_read;
     QLIST_ENTRY(qemu_laiocb) node;
 };
@@ v1.1.1.2 line 39 / v1.1.1.3 line 40 @@ struct qemu_laio_state {
     io_context_t ctx;
     int efd;
     int count;
-    QLIST_HEAD(, qemu_laiocb) completed_reqs;
 };

 static inline ssize_t io_event_ret(struct io_event *ev)
@@ v1.1.1.2 line 49 / v1.1.1.3 line 49 @@ static inline ssize_t io_event_ret(struct io_event *ev)

 /*
  * Completes an AIO request (calls the callback and frees the ACB).
- * Be sure to be in the right AsyncContext before calling this function.
  */
 static void qemu_laio_process_completion(struct qemu_laio_state *s,
     struct qemu_laiocb *laiocb)
@@ v1.1.1.2 line 60 / v1.1.1.3 line 59 @@ static void qemu_laio_process_completion

     ret = laiocb->ret;
     if (ret != -ECANCELED) {
-        if (ret == laiocb->nbytes)
+        if (ret == laiocb->nbytes) {
             ret = 0;
-        else if (ret >= 0)
-            ret = -EINVAL;
+        } else if (ret >= 0) {
+            /* Short reads mean EOF, pad with zeros. */
+            if (laiocb->is_read) {
+                qemu_iovec_memset_skip(laiocb->qiov, 0,
+                                       laiocb->qiov->size - ret, ret);
+            } else {
+                ret = -EINVAL;
+            }
+        }

         laiocb->common.cb(laiocb->common.opaque, ret);
     }
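
Note on the hunk above: in v1.1.1.3 a short read no longer fails with -EINVAL; the unread tail of the request is zero-filled (the comment treats a short read as EOF), while a short write still returns -EINVAL. The sketch below only illustrates that zero-padding idea over a plain struct iovec array; it is not the qemu_iovec_memset_skip() helper the driver actually calls, and pad_short_read() and its parameters are made-up names.

/* Zero-fill everything past the `done` bytes that were actually read.
 * Illustration only; names are hypothetical, not QEMU API. */
#include <string.h>
#include <sys/uio.h>

static void pad_short_read(struct iovec *iov, int niov, size_t done)
{
    size_t pos = 0;                      /* start offset of iov[i] */

    for (int i = 0; i < niov; i++) {
        size_t len = iov[i].iov_len;

        if (pos + len > done) {
            /* This element extends past the data that was read. */
            size_t skip = done > pos ? done - pos : 0;
            memset((char *)iov[i].iov_base + skip, 0, len - skip);
        }
        pos += len;
    }
}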
@@ v1.1.1.2 line 71 / v1.1.1.3 line 77 @@ static void qemu_laio_process_completion
     qemu_aio_release(laiocb);
 }

-/*
- * Processes all queued AIO requests, i.e. requests that have return from OS
- * but their callback was not called yet. Requests that cannot have their
- * callback called in the current AsyncContext, remain in the queue.
- *
- * Returns 1 if at least one request could be completed, 0 otherwise.
- */
-static int qemu_laio_process_requests(void *opaque)
-{
-    struct qemu_laio_state *s = opaque;
-    struct qemu_laiocb *laiocb, *next;
-    int res = 0;
-
-    QLIST_FOREACH_SAFE (laiocb, &s->completed_reqs, node, next) {
-        if (laiocb->async_context_id == get_async_context_id()) {
-            qemu_laio_process_completion(s, laiocb);
-            QLIST_REMOVE(laiocb, node);
-            res = 1;
-        }
-    }
-
-    return res;
-}
-
-/*
- * Puts a request in the completion queue so that its callback is called the
- * next time when it's possible. If we already are in the right AsyncContext,
- * the request is completed immediately instead.
- */
-static void qemu_laio_enqueue_completed(struct qemu_laio_state *s,
-        struct qemu_laiocb* laiocb)
-{
-    if (laiocb->async_context_id == get_async_context_id()) {
-        qemu_laio_process_completion(s, laiocb);
-    } else {
-        QLIST_INSERT_HEAD(&s->completed_reqs, laiocb, node);
-    }
-}
-
 static void qemu_laio_completion_cb(void *opaque)
 {
     struct qemu_laio_state *s = opaque;
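
The block removed above is the AsyncContext plumbing: in v1.1.1.2, a completion that arrives under the wrong context is parked on s->completed_reqs and replayed later by qemu_laio_process_requests(); in v1.1.1.3 every completion is handled immediately (see the next hunk, where the eventfd callback calls qemu_laio_process_completion() directly). A minimal, self-contained sketch of that deferral pattern, using a hand-rolled singly linked list instead of QLIST and entirely hypothetical names:

/* Sketch of the deferred-completion pattern that v1.1.1.3 removes.
 * Not QEMU code; every name here is hypothetical. */
#include <stdlib.h>

struct request {
    int context_id;                  /* context the callback must run in */
    void (*cb)(struct request *);
    struct request *next;
};

static struct request *pending;      /* finished, callback not yet called */

/* Complete at once if we are already in the right context,
 * otherwise park the request on the pending list. */
static void enqueue_completed(struct request *req, int current_context)
{
    if (req->context_id == current_context) {
        req->cb(req);
        free(req);
    } else {
        req->next = pending;
        pending = req;
    }
}

/* Replay every parked request whose context matches; the rest stay queued.
 * Returns 1 if at least one request could be completed, 0 otherwise. */
static int process_requests(int current_context)
{
    struct request **p = &pending;
    int res = 0;

    while (*p) {
        struct request *req = *p;

        if (req->context_id == current_context) {
            *p = req->next;          /* unlink, then run the callback */
            req->cb(req);
            free(req);
            res = 1;
        } else {
            p = &req->next;
        }
    }
    return res;
}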
@@ v1.1.1.2 line 141 / v1.1.1.3 line 108 @@ static void qemu_laio_completion_cb(void *opaque)
                     container_of(iocb, struct qemu_laiocb, iocb);

             laiocb->ret = io_event_ret(&events[i]);
-            qemu_laio_enqueue_completed(s, laiocb);
+            qemu_laio_process_completion(s, laiocb);
         }
     }
 }
@@ v1.1.1.2 line 204 / v1.1.1.3 line 171 @@ BlockDriverAIOCB *laio_submit
     laiocb->nbytes = nb_sectors * 512;
     laiocb->ctx = s;
     laiocb->ret = -EINPROGRESS;
-    laiocb->async_context_id = get_async_context_id();
+    laiocb->is_read = (type == QEMU_AIO_READ);
+    laiocb->qiov = qiov;

     iocbs = &laiocb->iocb;
@@ v1.1.1.2 line 215 / v1.1.1.3 line 183 @@ BlockDriverAIOCB *laio_submit
     case QEMU_AIO_READ:
         io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
         break;
+    /* Currently Linux kernel does not support other operations */
     default:
         fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                 __func__, type);
@@ v1.1.1.2 line 227 / v1.1.1.3 line 196 @@ BlockDriverAIOCB *laio_submit
         goto out_dec_count;
     return &laiocb->common;

-out_free_aiocb:
-    qemu_aio_release(laiocb);
 out_dec_count:
     s->count--;
+out_free_aiocb:
+    qemu_aio_release(laiocb);
     return NULL;
 }
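
Swapping the two cleanup labels changes what each error path undoes: a jump to out_dec_count (the io_submit() failure path kept as context above) now falls through into out_free_aiocb, so the ACB is released as well as the counter decremented, while a jump to out_free_aiocb alone no longer touches the counter. A small self-contained sketch of this fall-through cleanup idiom, with hypothetical names and stand-in failure conditions:

/* Fall-through cleanup labels, in the same order as the v1.1.1.3 code.
 * struct ctx, submit_one() and the failure checks are hypothetical. */
#include <stdlib.h>

struct ctx { int count; };

static void *submit_one(struct ctx *s, size_t len)
{
    void *req = malloc(len ? len : 1);   /* stand-in for getting an ACB */
    if (!req) {
        return NULL;
    }
    if (len == 0) {
        goto out_free_req;               /* nothing counted yet */
    }

    s->count++;                          /* request is now accounted for */
    if (len > 4096) {                    /* stand-in for io_submit() failing */
        goto out_dec_count;
    }
    return req;

out_dec_count:
    s->count--;                          /* falls through: req is freed too */
out_free_req:
    free(req);
    return NULL;
}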
@@ v1.1.1.2 line 238 / v1.1.1.3 line 207 @@ void *laio_init(void)
 {
     struct qemu_laio_state *s;

-    s = qemu_mallocz(sizeof(*s));
-    QLIST_INIT(&s->completed_reqs);
+    s = g_malloc0(sizeof(*s));
     s->efd = eventfd(0, 0);
     if (s->efd == -1)
         goto out_free_state;
@@ v1.1.1.2 line 249 / v1.1.1.3 line 217 @@ void *laio_init(void)
         goto out_close_efd;

     qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, NULL,
-        qemu_laio_flush_cb, qemu_laio_process_requests, s);
+        qemu_laio_flush_cb, NULL, s);

     return s;

 out_close_efd:
     close(s->efd);
 out_free_state:
-    qemu_free(s);
+    g_free(s);
     return NULL;
 }
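
laio_init() keeps the same eventfd setup; what changes are the allocation helpers (qemu_mallocz/qemu_free to g_malloc0/g_free), the dropped completed_reqs initialisation, and the last-but-one qemu_aio_set_fd_handler() argument, since there is no longer a process-requests callback to register. For background, a standalone demo of the eventfd counter semantics this completion path relies on: writes add to a 64-bit counter and a read drains it. This is a throwaway program, not QEMU code; in the real driver the kernel does the writing and, presumably (it is not part of this excerpt), qemu_laio_completion_cb does the read before reaping events.

/* Standalone eventfd demo, unrelated to the QEMU sources themselves. */
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
    uint64_t val = 3;
    int efd = eventfd(0, 0);             /* same call laio_init() makes */

    if (efd == -1) {
        perror("eventfd");
        return 1;
    }

    /* Adding to the counter is what signals "completions are pending". */
    if (write(efd, &val, sizeof(val)) != sizeof(val)) {
        perror("write");
    }

    /* A read returns the accumulated count and resets it to zero. */
    if (read(efd, &val, sizeof(val)) == sizeof(val)) {
        printf("completions signalled: %llu\n", (unsigned long long)val);
    }

    close(efd);
    return 0;
}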