File: [Qemu by Fabrice Bellard] / qemu / qemu-thread-win32.c
Revision 1.1.1.2 (vendor branch)
Tue Apr 24 19:17:47 2018 UTC by root
Branches: qemu, MAIN
CVS tags: qemu1001, HEAD
qemu 1.0.1

/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "qemu-thread.h"
#include <process.h>
#include <assert.h>
#include <limits.h>

static void error_exit(int err, const char *msg)
{
    char *pstr;

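    /* Ask Windows for a human-readable message for err; with
     * FORMAT_MESSAGE_ALLOCATE_BUFFER the system allocates pstr for us,
     * which is why it is released below with LocalFree.  */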
    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
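    /* owner records the locking thread's id; it exists only to back
     * the ownership assertions in the functions below.  */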
    mutex->owner = 0;
    InitializeCriticalSection(&mutex->lock);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    assert(mutex->owner == 0);
    DeleteCriticalSection(&mutex->lock);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);

    /* Win32 CRITICAL_SECTIONs are recursive.  Assert that we're not
     * using them as such.
     */
    assert(mutex->owner == 0);
    mutex->owner = GetCurrentThreadId();
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    int owned;

    owned = TryEnterCriticalSection(&mutex->lock);
    if (owned) {
        assert(mutex->owner == 0);
        mutex->owner = GetCurrentThreadId();
    }
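    /* Follow the pthread_mutex_trylock convention: 0 on success,
     * nonzero when the mutex is already locked.  */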
    return !owned;
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    assert(mutex->owner == GetCurrentThreadId());
    mutex->owner = 0;
    LeaveCriticalSection(&mutex->lock);
}

void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));

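    /* Each post on this semaphore is one wake-up "slice" for exactly
     * one waiter; LONG_MAX makes the count effectively unbounded.  */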
    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
    if (!cond->sema) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = CreateEvent(NULL,    /* security */
                                       FALSE,   /* auto-reset */
                                       FALSE,   /* not signaled */
                                       NULL);   /* name */
    if (!cond->continue_event) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_cond_destroy(QemuCond *cond)
{
    BOOL result;
    result = CloseHandle(cond->continue_event);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = 0;
    result = CloseHandle(cond->sema);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }
    cond->sema = 0;
}

void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters.  cond->waiters is
     * incremented by qemu_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only if another thread is executing qemu_cond_broadcast and
     * has the mutex.  So, it also cannot be decremented concurrently
     * with this particular access.
     */
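    /* One waiter will consume the single slice posted below, leaving
     * waiters - 1 behind; the waiter whose decrement reaches this
     * target signals continue_event so we can return.  */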
    cond->target = cond->waiters - 1;
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_cond_broadcast(QemuCond *cond)
{
    BOOL result;
    /*
     * As in qemu_cond_signal, access to cond->waiters and
     * cond->target is locked via the external mutex.
     */
    if (cond->waiters == 0) {
        return;
    }

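    /* Every current waiter must drain; we may resume only once the
     * atomic waiter count has fallen all the way to zero.  */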
    cond->target = 0;
    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }

    /*
     * At this point all waiters continue. Each one takes its
     * slice of the semaphore. Now it's our turn to wait: Since
     * the external mutex is held, no thread can leave qemu_cond_wait
     * yet. For this reason, we can be sure that no thread gets
     * a chance to eat *more* than one slice. OTOH, it means
     * that the last waiter must send us a wake-up.
     */
    WaitForSingleObject(cond->continue_event, INFINITE);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendezvous with the signaling thread and
     * let it continue.  For qemu_cond_broadcast this has heavy
     * contention and triggers the thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}

struct QemuThreadData {
    QemuThread *thread;
    void *(*start_routine)(void *);
    void *arg;
};

static int qemu_thread_tls_index = TLS_OUT_OF_INDEXES;

static unsigned __stdcall win32_start_routine(void *arg)
{
    struct QemuThreadData data = *(struct QemuThreadData *) arg;
    QemuThread *thread = data.thread;

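    /* The QemuThreadData was allocated with g_malloc in
     * qemu_thread_create; it is copied out above and released with the
     * matching g_free before the user's start routine runs.  */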
    g_free(arg);
    TlsSetValue(qemu_thread_tls_index, thread);

    /*
     * Use DuplicateHandle instead of assigning thread->thread in the
     * creating thread to avoid races.  It's simpler this way than with
     * synchronization.
     */
    DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                    GetCurrentProcess(), &thread->thread,
                    0, FALSE, DUPLICATE_SAME_ACCESS);

    qemu_thread_exit(data.start_routine(data.arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
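    /* Record the return value in the QemuThread, then drop the handle
     * duplicated in win32_start_routine before terminating.  */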
    QemuThread *thread = TlsGetValue(qemu_thread_tls_index);
    thread->ret = arg;
    CloseHandle(thread->thread);
    thread->thread = NULL;
    ExitThread(0);
}

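/* Lazily allocate the TLS slot that maps each thread back to its
 * QemuThread structure.  */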
static inline void qemu_thread_init(void)
{
    if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
        qemu_thread_tls_index = TlsAlloc();
        if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
            error_exit(ERROR_NO_SYSTEM_RESOURCES, __func__);
        }
    }
}

void qemu_thread_create(QemuThread *thread,
                        void *(*start_routine)(void *),
                        void *arg)
{
    HANDLE hThread;

    struct QemuThreadData *data;
    qemu_thread_init();
    data = g_malloc(sizeof *data);
    data->thread = thread;
    data->start_routine = start_routine;
    data->arg = arg;

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, NULL);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
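    /* The new thread duplicates its own handle into thread->thread
     * (see win32_start_routine), so the creation-time handle is not
     * needed and can be closed immediately.  */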
    CloseHandle(hThread);
}

void qemu_thread_get_self(QemuThread *thread)
{
    if (!thread->thread) {
        /* In the main thread of the process.  Initialize the QemuThread
           pointer in TLS, and use the dummy GetCurrentThread handle as
           the identifier for qemu_thread_is_self.  */
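        /* GetCurrentThread() returns a constant pseudo-handle rather
         * than a real handle; it needs no CloseHandle and does not
         * collide with the handles duplicated in win32_start_routine,
         * so the equality check in qemu_thread_is_self stays valid.  */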
        qemu_thread_init();
        TlsSetValue(qemu_thread_tls_index, thread);
        thread->thread = GetCurrentThread();
    }
}

int qemu_thread_is_self(QemuThread *thread)
{
    QemuThread *this_thread = TlsGetValue(qemu_thread_tls_index);
    return this_thread->thread == thread->thread;
}
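
/*
 * A minimal usage sketch of the condition-variable protocol above: a
 * hypothetical producer/consumer pair, assuming a QemuMutex `lock`, a
 * QemuCond `cond` and a shared flag `ready`, all initialized elsewhere
 * with qemu_mutex_init()/qemu_cond_init():
 *
 *     // consumer
 *     qemu_mutex_lock(&lock);
 *     while (!ready) {
 *         qemu_cond_wait(&cond, &lock);    // drops lock while blocked
 *     }
 *     qemu_mutex_unlock(&lock);
 *
 *     // producer
 *     qemu_mutex_lock(&lock);
 *     ready = 1;
 *     qemu_cond_signal(&cond);    // no-op when nobody is waiting
 *     qemu_mutex_unlock(&lock);
 *
 * As with pthreads, the wait sits in a loop and the signaling side
 * holds the mutex; the waiters/target accounting above relies on
 * exactly that locking discipline.
 */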
