File:  [Qemu by Fabrice Bellard] / qemu / posix-aio-compat.c
Revision 1.1.1.7 (vendor branch)
Tue Apr 24 19:17:04 2018 UTC by root
Branches: qemu, MAIN
CVS tags: qemu1001, HEAD
qemu 1.0.1

    1: /*
    2:  * QEMU posix-aio emulation
    3:  *
    4:  * Copyright IBM, Corp. 2008
    5:  *
    6:  * Authors:
    7:  *  Anthony Liguori   <aliguori@us.ibm.com>
    8:  *
    9:  * This work is licensed under the terms of the GNU GPL, version 2.  See
   10:  * the COPYING file in the top-level directory.
   11:  *
   12:  */
   13: 
   14: #include <sys/ioctl.h>
   15: #include <sys/types.h>
   16: #include <pthread.h>
   17: #include <unistd.h>
   18: #include <errno.h>
   19: #include <time.h>
   20: #include <string.h>
   21: #include <stdlib.h>
   22: #include <stdio.h>
   23: 
   24: #include "qemu-queue.h"
   25: #include "osdep.h"
   26: #include "sysemu.h"
   27: #include "qemu-common.h"
   28: #include "trace.h"
   29: #include "block_int.h"
   30: 
   31: #include "block/raw-posix-aio.h"
   32: 
   33: static void do_spawn_thread(void);
   34: 
   35: struct qemu_paiocb {
   36:     BlockDriverAIOCB common;
   37:     int aio_fildes;
   38:     union {
   39:         struct iovec *aio_iov;
   40:         void *aio_ioctl_buf;
   41:     };
   42:     int aio_niov;
   43:     size_t aio_nbytes;
   44: #define aio_ioctl_cmd   aio_nbytes /* for QEMU_AIO_IOCTL */
   45:     off_t aio_offset;
   46: 
   47:     QTAILQ_ENTRY(qemu_paiocb) node;
   48:     int aio_type;
   49:     ssize_t ret;
   50:     int active;
   51:     struct qemu_paiocb *next;
   52: };
   53: 
   54: typedef struct PosixAioState {
   55:     int rfd, wfd;
   56:     struct qemu_paiocb *first_aio;
   57: } PosixAioState;
   58: 
   59: 
   60: static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
   61: static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
   62: static pthread_t thread_id;
   63: static pthread_attr_t attr;
   64: static int max_threads = 64;
   65: static int cur_threads = 0;
   66: static int idle_threads = 0;
   67: static int new_threads = 0;     /* backlog of threads we need to create */
   68: static int pending_threads = 0; /* threads created but not running yet */
   69: static QEMUBH *new_thread_bh;
   70: static QTAILQ_HEAD(, qemu_paiocb) request_list;
   71: 
   72: #ifdef CONFIG_PREADV
   73: static int preadv_present = 1;
   74: #else
   75: static int preadv_present = 0;
   76: #endif
   77: 
   78: static void die2(int err, const char *what)
   79: {
   80:     fprintf(stderr, "%s failed: %s\n", what, strerror(err));
   81:     abort();
   82: }
   83: 
   84: static void die(const char *what)
   85: {
   86:     die2(errno, what);
   87: }
   88: 
   89: static void mutex_lock(pthread_mutex_t *mutex)
   90: {
   91:     int ret = pthread_mutex_lock(mutex);
   92:     if (ret) die2(ret, "pthread_mutex_lock");
   93: }
   94: 
   95: static void mutex_unlock(pthread_mutex_t *mutex)
   96: {
   97:     int ret = pthread_mutex_unlock(mutex);
   98:     if (ret) die2(ret, "pthread_mutex_unlock");
   99: }
  100: 
  101: static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
  102:                            struct timespec *ts)
  103: {
  104:     int ret = pthread_cond_timedwait(cond, mutex, ts);
  105:     if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
  106:     return ret;
  107: }
  108: 
  109: static void cond_signal(pthread_cond_t *cond)
  110: {
  111:     int ret = pthread_cond_signal(cond);
  112:     if (ret) die2(ret, "pthread_cond_signal");
  113: }
  114: 
  115: static void thread_create(pthread_t *thread, pthread_attr_t *attr,
  116:                           void *(*start_routine)(void*), void *arg)
  117: {
  118:     int ret = pthread_create(thread, attr, start_routine, arg);
  119:     if (ret) die2(ret, "pthread_create");
  120: }
  121: 
  122: static ssize_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb)
  123: {
  124:     int ret;
  125: 
  126:     ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf);
  127:     if (ret == -1)
  128:         return -errno;
  129: 
  130:     /*
  131:      * This looks weird, but the aio code only considers a request
  132:      * successful if it has written the full number of bytes.
  133:      *
  134:      * Now we overload aio_nbytes as aio_ioctl_cmd for the ioctl command,
  135:      * so in fact we return the ioctl command here to make posix_aio_read()
  136:      * happy..
  137:      */
  138:     return aiocb->aio_nbytes;
  139: }
  140: 
  141: static ssize_t handle_aiocb_flush(struct qemu_paiocb *aiocb)
  142: {
  143:     int ret;
  144: 
  145:     ret = qemu_fdatasync(aiocb->aio_fildes);
  146:     if (ret == -1)
  147:         return -errno;
  148:     return 0;
  149: }
  150: 
  151: #ifdef CONFIG_PREADV
  152: 
  153: static ssize_t
  154: qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
  155: {
  156:     return preadv(fd, iov, nr_iov, offset);
  157: }
  158: 
  159: static ssize_t
  160: qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
  161: {
  162:     return pwritev(fd, iov, nr_iov, offset);
  163: }
  164: 
  165: #else
  166: 
  167: static ssize_t
  168: qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
  169: {
  170:     return -ENOSYS;
  171: }
  172: 
  173: static ssize_t
  174: qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
  175: {
  176:     return -ENOSYS;
  177: }
  178: 
  179: #endif
  180: 
  181: static ssize_t handle_aiocb_rw_vector(struct qemu_paiocb *aiocb)
  182: {
  183:     ssize_t len;
  184: 
  185:     do {
  186:         if (aiocb->aio_type & QEMU_AIO_WRITE)
  187:             len = qemu_pwritev(aiocb->aio_fildes,
  188:                                aiocb->aio_iov,
  189:                                aiocb->aio_niov,
  190:                                aiocb->aio_offset);
  191:          else
  192:             len = qemu_preadv(aiocb->aio_fildes,
  193:                               aiocb->aio_iov,
  194:                               aiocb->aio_niov,
  195:                               aiocb->aio_offset);
  196:     } while (len == -1 && errno == EINTR);
  197: 
  198:     if (len == -1)
  199:         return -errno;
  200:     return len;
  201: }
  202: 
  203: /*
  204:  * Read/writes the data to/from a given linear buffer.
  205:  *
  206:  * Returns the number of bytes handled or -errno in case of an error. Short
  207:  * reads are only returned if the end of the file is reached.
  208:  */
  209: static ssize_t handle_aiocb_rw_linear(struct qemu_paiocb *aiocb, char *buf)
  210: {
  211:     ssize_t offset = 0;
  212:     ssize_t len;
  213: 
  214:     while (offset < aiocb->aio_nbytes) {
  215:          if (aiocb->aio_type & QEMU_AIO_WRITE)
  216:              len = pwrite(aiocb->aio_fildes,
  217:                           (const char *)buf + offset,
  218:                           aiocb->aio_nbytes - offset,
  219:                           aiocb->aio_offset + offset);
  220:          else
  221:              len = pread(aiocb->aio_fildes,
  222:                          buf + offset,
  223:                          aiocb->aio_nbytes - offset,
  224:                          aiocb->aio_offset + offset);
  225: 
  226:          if (len == -1 && errno == EINTR)
  227:              continue;
  228:          else if (len == -1) {
  229:              offset = -errno;
  230:              break;
  231:          } else if (len == 0)
  232:              break;
  233: 
  234:          offset += len;
  235:     }
  236: 
  237:     return offset;
  238: }
  239: 
  240: static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
  241: {
  242:     ssize_t nbytes;
  243:     char *buf;
  244: 
  245:     if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
  246:         /*
  247:          * If there is just a single buffer and it is properly aligned,
  248:          * we can just use plain pread/pwrite without any problems.
  249:          */
  250:         if (aiocb->aio_niov == 1)
  251:              return handle_aiocb_rw_linear(aiocb, aiocb->aio_iov->iov_base);
  252: 
  253:         /*
  254:          * We have more than one iovec, and all are properly aligned.
  255:          *
  256:          * Try preadv/pwritev first and fall back to linearizing the
  257:          * buffer if it's not supported.
  258:          */
  259:         if (preadv_present) {
  260:             nbytes = handle_aiocb_rw_vector(aiocb);
  261:             if (nbytes == aiocb->aio_nbytes)
  262:                 return nbytes;
  263:             if (nbytes < 0 && nbytes != -ENOSYS)
  264:                 return nbytes;
  265:             preadv_present = 0;
  266:         }
  267: 
  268:         /*
  269:          * XXX(hch): short read/write.  no easy way to handle the remainder
  270:          * using these interfaces.  For now retry using plain
  271:          * pread/pwrite?
  272:          */
  273:     }
  274: 
  275:     /*
  276:      * Ok, we have to do it the hard way, copy all segments into
  277:      * a single aligned buffer.
  278:      */
  279:     buf = qemu_blockalign(aiocb->common.bs, aiocb->aio_nbytes);
  280:     if (aiocb->aio_type & QEMU_AIO_WRITE) {
  281:         char *p = buf;
  282:         int i;
  283: 
  284:         for (i = 0; i < aiocb->aio_niov; ++i) {
  285:             memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
  286:             p += aiocb->aio_iov[i].iov_len;
  287:         }
  288:     }
  289: 
  290:     nbytes = handle_aiocb_rw_linear(aiocb, buf);
  291:     if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
  292:         char *p = buf;
  293:         size_t count = aiocb->aio_nbytes, copy;
  294:         int i;
  295: 
  296:         for (i = 0; i < aiocb->aio_niov && count; ++i) {
  297:             copy = count;
  298:             if (copy > aiocb->aio_iov[i].iov_len)
  299:                 copy = aiocb->aio_iov[i].iov_len;
  300:             memcpy(aiocb->aio_iov[i].iov_base, p, copy);
  301:             p     += copy;
  302:             count -= copy;
  303:         }
  304:     }
  305:     qemu_vfree(buf);
  306: 
  307:     return nbytes;
  308: }
  309: 
  310: static void posix_aio_notify_event(void);
  311: 
  312: static void *aio_thread(void *unused)
  313: {
  314:     mutex_lock(&lock);
  315:     pending_threads--;
  316:     mutex_unlock(&lock);
  317:     do_spawn_thread();
  318: 
  319:     while (1) {
  320:         struct qemu_paiocb *aiocb;
  321:         ssize_t ret = 0;
  322:         qemu_timeval tv;
  323:         struct timespec ts;
  324: 
  325:         qemu_gettimeofday(&tv);
  326:         ts.tv_sec = tv.tv_sec + 10;
  327:         ts.tv_nsec = 0;
  328: 
  329:         mutex_lock(&lock);
  330: 
  331:         while (QTAILQ_EMPTY(&request_list) &&
  332:                !(ret == ETIMEDOUT)) {
  333:             idle_threads++;
  334:             ret = cond_timedwait(&cond, &lock, &ts);
  335:             idle_threads--;
  336:         }
  337: 
  338:         if (QTAILQ_EMPTY(&request_list))
  339:             break;
  340: 
  341:         aiocb = QTAILQ_FIRST(&request_list);
  342:         QTAILQ_REMOVE(&request_list, aiocb, node);
  343:         aiocb->active = 1;
  344:         mutex_unlock(&lock);
  345: 
  346:         switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
  347:         case QEMU_AIO_READ:
  348:             ret = handle_aiocb_rw(aiocb);
  349:             if (ret >= 0 && ret < aiocb->aio_nbytes && aiocb->common.bs->growable) {
  350:                 /* A short read means that we have reached EOF. Pad the buffer
  351:                  * with zeros for bytes after EOF. */
  352:                 QEMUIOVector qiov;
  353: 
  354:                 qemu_iovec_init_external(&qiov, aiocb->aio_iov,
  355:                                          aiocb->aio_niov);
  356:                 qemu_iovec_memset_skip(&qiov, 0, aiocb->aio_nbytes - ret, ret);
  357: 
  358:                 ret = aiocb->aio_nbytes;
  359:             }
  360:             break;
  361:         case QEMU_AIO_WRITE:
  362:             ret = handle_aiocb_rw(aiocb);
  363:             break;
  364:         case QEMU_AIO_FLUSH:
  365:             ret = handle_aiocb_flush(aiocb);
  366:             break;
  367:         case QEMU_AIO_IOCTL:
  368:             ret = handle_aiocb_ioctl(aiocb);
  369:             break;
  370:         default:
  371:             fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
  372:             ret = -EINVAL;
  373:             break;
  374:         }
  375: 
  376:         mutex_lock(&lock);
  377:         aiocb->ret = ret;
  378:         mutex_unlock(&lock);
  379: 
  380:         posix_aio_notify_event();
  381:     }
  382: 
  383:     cur_threads--;
  384:     mutex_unlock(&lock);
  385: 
  386:     return NULL;
  387: }
  388: 
  389: static void do_spawn_thread(void)
  390: {
  391:     sigset_t set, oldset;
  392: 
  393:     mutex_lock(&lock);
  394:     if (!new_threads) {
  395:         mutex_unlock(&lock);
  396:         return;
  397:     }
  398: 
  399:     new_threads--;
  400:     pending_threads++;
  401: 
  402:     mutex_unlock(&lock);
  403: 
  404:     /* block all signals */
  405:     if (sigfillset(&set)) die("sigfillset");
  406:     if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");
  407: 
  408:     thread_create(&thread_id, &attr, aio_thread, NULL);
  409: 
  410:     if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore");
  411: }
  412: 
  413: static void spawn_thread_bh_fn(void *opaque)
  414: {
  415:     do_spawn_thread();
  416: }
  417: 
  418: static void spawn_thread(void)
  419: {
  420:     cur_threads++;
  421:     new_threads++;
  422:     /* If there are threads being created, they will spawn new workers, so
  423:      * we don't spend time creating many threads in a loop holding a mutex or
  424:      * starving the current vcpu.
  425:      *
  426:      * If there are no idle threads, ask the main thread to create one, so we
  427:      * inherit the correct affinity instead of the vcpu affinity.
  428:      */
  429:     if (!pending_threads) {
  430:         qemu_bh_schedule(new_thread_bh);
  431:     }
  432: }
  433: 
  434: static void qemu_paio_submit(struct qemu_paiocb *aiocb)
  435: {
  436:     aiocb->ret = -EINPROGRESS;
  437:     aiocb->active = 0;
  438:     mutex_lock(&lock);
  439:     if (idle_threads == 0 && cur_threads < max_threads)
  440:         spawn_thread();
  441:     QTAILQ_INSERT_TAIL(&request_list, aiocb, node);
  442:     mutex_unlock(&lock);
  443:     cond_signal(&cond);
  444: }
  445: 
  446: static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
  447: {
  448:     ssize_t ret;
  449: 
  450:     mutex_lock(&lock);
  451:     ret = aiocb->ret;
  452:     mutex_unlock(&lock);
  453: 
  454:     return ret;
  455: }
  456: 
  457: static int qemu_paio_error(struct qemu_paiocb *aiocb)
  458: {
  459:     ssize_t ret = qemu_paio_return(aiocb);
  460: 
  461:     if (ret < 0)
  462:         ret = -ret;
  463:     else
  464:         ret = 0;
  465: 
  466:     return ret;
  467: }
  468: 
  469: static int posix_aio_process_queue(void *opaque)
  470: {
  471:     PosixAioState *s = opaque;
  472:     struct qemu_paiocb *acb, **pacb;
  473:     int ret;
  474:     int result = 0;
  475: 
  476:     for(;;) {
  477:         pacb = &s->first_aio;
  478:         for(;;) {
  479:             acb = *pacb;
  480:             if (!acb)
  481:                 return result;
  482: 
  483:             ret = qemu_paio_error(acb);
  484:             if (ret == ECANCELED) {
  485:                 /* remove the request */
  486:                 *pacb = acb->next;
  487:                 qemu_aio_release(acb);
  488:                 result = 1;
  489:             } else if (ret != EINPROGRESS) {
  490:                 /* end of aio */
  491:                 if (ret == 0) {
  492:                     ret = qemu_paio_return(acb);
  493:                     if (ret == acb->aio_nbytes)
  494:                         ret = 0;
  495:                     else
  496:                         ret = -EINVAL;
  497:                 } else {
  498:                     ret = -ret;
  499:                 }
  500: 
  501:                 trace_paio_complete(acb, acb->common.opaque, ret);
  502: 
  503:                 /* remove the request */
  504:                 *pacb = acb->next;
  505:                 /* call the callback */
  506:                 acb->common.cb(acb->common.opaque, ret);
  507:                 qemu_aio_release(acb);
  508:                 result = 1;
  509:                 break;
  510:             } else {
  511:                 pacb = &acb->next;
  512:             }
  513:         }
  514:     }
  515: 
  516:     return result;
  517: }
  518: 
  519: static void posix_aio_read(void *opaque)
  520: {
  521:     PosixAioState *s = opaque;
  522:     ssize_t len;
  523: 
  524:     /* read all bytes from signal pipe */
  525:     for (;;) {
  526:         char bytes[16];
  527: 
  528:         len = read(s->rfd, bytes, sizeof(bytes));
  529:         if (len == -1 && errno == EINTR)
  530:             continue; /* try again */
  531:         if (len == sizeof(bytes))
  532:             continue; /* more to read */
  533:         break;
  534:     }
  535: 
  536:     posix_aio_process_queue(s);
  537: }
  538: 
  539: static int posix_aio_flush(void *opaque)
  540: {
  541:     PosixAioState *s = opaque;
  542:     return !!s->first_aio;
  543: }
  544: 
  545: static PosixAioState *posix_aio_state;
  546: 
  547: static void posix_aio_notify_event(void)
  548: {
  549:     char byte = 0;
  550:     ssize_t ret;
  551: 
  552:     ret = write(posix_aio_state->wfd, &byte, sizeof(byte));
  553:     if (ret < 0 && errno != EAGAIN)
  554:         die("write()");
  555: }
  556: 
  557: static void paio_remove(struct qemu_paiocb *acb)
  558: {
  559:     struct qemu_paiocb **pacb;
  560: 
  561:     /* remove the callback from the queue */
  562:     pacb = &posix_aio_state->first_aio;
  563:     for(;;) {
  564:         if (*pacb == NULL) {
  565:             fprintf(stderr, "paio_remove: aio request not found!\n");
  566:             break;
  567:         } else if (*pacb == acb) {
  568:             *pacb = acb->next;
  569:             qemu_aio_release(acb);
  570:             break;
  571:         }
  572:         pacb = &(*pacb)->next;
  573:     }
  574: }
  575: 
  576: static void paio_cancel(BlockDriverAIOCB *blockacb)
  577: {
  578:     struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb;
  579:     int active = 0;
  580: 
  581:     trace_paio_cancel(acb, acb->common.opaque);
  582: 
  583:     mutex_lock(&lock);
  584:     if (!acb->active) {
  585:         QTAILQ_REMOVE(&request_list, acb, node);
  586:         acb->ret = -ECANCELED;
  587:     } else if (acb->ret == -EINPROGRESS) {
  588:         active = 1;
  589:     }
  590:     mutex_unlock(&lock);
  591: 
  592:     if (active) {
  593:         /* fail safe: if the aio could not be canceled, we wait for
  594:            it */
  595:         while (qemu_paio_error(acb) == EINPROGRESS)
  596:             ;
  597:     }
  598: 
  599:     paio_remove(acb);
  600: }
  601: 
  602: static AIOPool raw_aio_pool = {
  603:     .aiocb_size         = sizeof(struct qemu_paiocb),
  604:     .cancel             = paio_cancel,
  605: };
  606: 
  607: BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
  608:         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
  609:         BlockDriverCompletionFunc *cb, void *opaque, int type)
  610: {
  611:     struct qemu_paiocb *acb;
  612: 
  613:     acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
  614:     if (!acb)
  615:         return NULL;
  616:     acb->aio_type = type;
  617:     acb->aio_fildes = fd;
  618: 
  619:     if (qiov) {
  620:         acb->aio_iov = qiov->iov;
  621:         acb->aio_niov = qiov->niov;
  622:     }
  623:     acb->aio_nbytes = nb_sectors * 512;
  624:     acb->aio_offset = sector_num * 512;
  625: 
  626:     acb->next = posix_aio_state->first_aio;
  627:     posix_aio_state->first_aio = acb;
  628: 
  629:     trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
  630:     qemu_paio_submit(acb);
  631:     return &acb->common;
  632: }
  633: 
  634: BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
  635:         unsigned long int req, void *buf,
  636:         BlockDriverCompletionFunc *cb, void *opaque)
  637: {
  638:     struct qemu_paiocb *acb;
  639: 
  640:     acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
  641:     if (!acb)
  642:         return NULL;
  643:     acb->aio_type = QEMU_AIO_IOCTL;
  644:     acb->aio_fildes = fd;
  645:     acb->aio_offset = 0;
  646:     acb->aio_ioctl_buf = buf;
  647:     acb->aio_ioctl_cmd = req;
  648: 
  649:     acb->next = posix_aio_state->first_aio;
  650:     posix_aio_state->first_aio = acb;
  651: 
  652:     qemu_paio_submit(acb);
  653:     return &acb->common;
  654: }
  655: 
  656: int paio_init(void)
  657: {
  658:     PosixAioState *s;
  659:     int fds[2];
  660:     int ret;
  661: 
  662:     if (posix_aio_state)
  663:         return 0;
  664: 
  665:     s = g_malloc(sizeof(PosixAioState));
  666: 
  667:     s->first_aio = NULL;
  668:     if (qemu_pipe(fds) == -1) {
  669:         fprintf(stderr, "failed to create pipe\n");
  670:         g_free(s);
  671:         return -1;
  672:     }
  673: 
  674:     s->rfd = fds[0];
  675:     s->wfd = fds[1];
  676: 
  677:     fcntl(s->rfd, F_SETFL, O_NONBLOCK);
  678:     fcntl(s->wfd, F_SETFL, O_NONBLOCK);
  679: 
  680:     qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush,
  681:         posix_aio_process_queue, s);
  682: 
  683:     ret = pthread_attr_init(&attr);
  684:     if (ret)
  685:         die2(ret, "pthread_attr_init");
  686: 
  687:     ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  688:     if (ret)
  689:         die2(ret, "pthread_attr_setdetachstate");
  690: 
  691:     QTAILQ_INIT(&request_list);
  692:     new_thread_bh = qemu_bh_new(spawn_thread_bh_fn, NULL);
  693: 
  694:     posix_aio_state = s;
  695:     return 0;
  696: }
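
The interface above is consumed by QEMU's raw POSIX block driver. As a minimal sketch (the caller name and structure here are hypothetical; only paio_init(), paio_submit() and the QEMU_AIO_* flags come from this file), a vectored read request would be issued like this:

/* Hypothetical caller, illustrating the submission path only. */
static BlockDriverAIOCB *example_aio_readv(BlockDriverState *bs, int fd,
                                           int64_t sector_num,
                                           QEMUIOVector *qiov, int nb_sectors,
                                           BlockDriverCompletionFunc *cb,
                                           void *opaque)
{
    /* paio_init() is idempotent: it creates the notification pipe, registers
     * its read handler with the main loop and prepares the thread pool. */
    if (paio_init() < 0) {
        return NULL;
    }

    /* Pass QEMU_AIO_WRITE instead to issue a write of the same iovec. */
    return paio_submit(bs, fd, sector_num, qiov, nb_sectors,
                       cb, opaque, QEMU_AIO_READ);
}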

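Flush and ioctl requests go through the same worker-thread queue. Under the same assumptions (hypothetical caller names), a sketch:

/* Hypothetical callers, flush and ioctl paths. */
static BlockDriverAIOCB *example_aio_flush(BlockDriverState *bs, int fd,
                                           BlockDriverCompletionFunc *cb,
                                           void *opaque)
{
    /* No I/O vector and zero length: the worker thread only runs
     * qemu_fdatasync() on the descriptor (see handle_aiocb_flush above). */
    return paio_submit(bs, fd, 0, NULL, 0, cb, opaque, QEMU_AIO_FLUSH);
}

static BlockDriverAIOCB *example_aio_ioctl(BlockDriverState *bs, int fd,
                                           unsigned long req, void *buf,
                                           BlockDriverCompletionFunc *cb,
                                           void *opaque)
{
    /* The request code is stored in aio_nbytes (aliased as aio_ioctl_cmd),
     * which is why handle_aiocb_ioctl() returns aio_nbytes on success. */
    return paio_ioctl(bs, fd, req, buf, cb, opaque);
}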