diff --git a/server/tracy_benaphore.h b/server/tracy_benaphore.h
new file mode 100644
index 00000000..ff66fd36
--- /dev/null
+++ b/server/tracy_benaphore.h
@@ -0,0 +1,142 @@
+//---------------------------------------------------------
+// For conditions of distribution and use, see
+// https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE
+//---------------------------------------------------------
+
+#ifndef __CPP11OM_BENAPHORE_H__
+#define __CPP11OM_BENAPHORE_H__
+
+#include <cassert>
+#include <thread>
+#include <atomic>
+#include "tracy_sema.h"
+
+
+//---------------------------------------------------------
+// NonRecursiveBenaphore
+//---------------------------------------------------------
+class NonRecursiveBenaphore
+{
+private:
+    std::atomic<int> m_contentionCount;
+    DefaultSemaphoreType m_sema;
+
+public:
+    NonRecursiveBenaphore() : m_contentionCount(0) {}
+
+    void lock()
+    {
+        if (m_contentionCount.fetch_add(1, std::memory_order_acquire) > 0)
+        {
+            m_sema.wait();
+        }
+    }
+
+    bool tryLock()
+    {
+        if (m_contentionCount.load(std::memory_order_relaxed) != 0)
+            return false;
+        int expected = 0;
+        return m_contentionCount.compare_exchange_strong(expected, 1, std::memory_order_acquire);
+    }
+
+    void unlock()
+    {
+        int oldCount = m_contentionCount.fetch_sub(1, std::memory_order_release);
+        assert(oldCount > 0);
+        if (oldCount > 1)
+        {
+            m_sema.signal();
+        }
+    }
+};
+
+
+//---------------------------------------------------------
+// RecursiveBenaphore
+//---------------------------------------------------------
+class RecursiveBenaphore
+{
+private:
+    std::atomic<int> m_contentionCount;
+    std::atomic<std::thread::id> m_owner;
+    int m_recursion;
+    DefaultSemaphoreType m_sema;
+
+public:
+    RecursiveBenaphore()
+        : m_contentionCount(0)
+// Apple LLVM 6.0 (in Xcode 6.1) refuses to initialize m_owner from a std::thread::id.
+// "error: no viable conversion from 'std::__1::__thread_id' to '_Atomic(std::__1::__thread_id)'"
+// (Note: On Linux, as of April 11, 2015, Clang 3.7 & libc++ don't have this problem.)
+// Prefer atomic_init (below) when Apple LLVM is detected.
+#if !(defined(__llvm__) && defined(__APPLE__))
+        , m_owner(std::thread::id())
+#endif
+        , m_recursion(0)
+    {
+// GCC 4.7.2's libstdc++-v3 doesn't implement atomic_init.
+// "warning: inline function 'void std::atomic_init(std::atomic<_ITp>*, _ITp) [with _ITp = std::thread::id]' used but never defined [enabled by default]"
+// Using the constructor (above) in that case.
+#if (defined(__llvm__) && defined(__APPLE__))
+        std::atomic_init(&m_owner, std::thread::id());
+#endif
+
+        // If this assert fails on your system, you'll have to replace std::thread::id with a
+        // more compact, platform-specific thread ID, or just comment the assert and live with
+        // the extra overhead.
+        assert(m_owner.is_lock_free());
+    }
+
+    void lock()
+    {
+        std::thread::id tid = std::this_thread::get_id();
+        if (m_contentionCount.fetch_add(1, std::memory_order_acquire) > 0)
+        {
+            if (tid != m_owner.load(std::memory_order_relaxed))
+                m_sema.wait();
+        }
+        //--- We are now inside the lock ---
+        m_owner.store(tid, std::memory_order_relaxed);
+        m_recursion++;
+    }
+
+    bool tryLock()
+    {
+        std::thread::id tid = std::this_thread::get_id();
+        if (m_owner.load(std::memory_order_relaxed) == tid)
+        {
+            // Already inside the lock
+            m_contentionCount.fetch_add(1, std::memory_order_relaxed);
+        }
+        else
+        {
+            if (m_contentionCount.load(std::memory_order_relaxed) != 0)
+                return false;
+            int expected = 0;
+            if (!m_contentionCount.compare_exchange_strong(expected, 1, std::memory_order_acquire))
+                return false;
+            //--- We are now inside the lock ---
+            m_owner.store(tid, std::memory_order_relaxed);
+        }
+        m_recursion++;
+        return true;
+    }
+
+    void unlock()
+    {
+        assert(std::this_thread::get_id() == m_owner.load(std::memory_order_relaxed));
+        int recur = --m_recursion;
+        if (recur == 0)
+            m_owner.store(std::thread::id(), std::memory_order_relaxed);
+        if (m_contentionCount.fetch_sub(1, std::memory_order_release) > 1)
+        {
+            if (recur == 0)
+                m_sema.signal();
+        }
+        //--- We are now outside the lock ---
+    }
+};
+
+
+#endif // __CPP11OM_BENAPHORE_H__
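
Usage note (not part of the patch): NonRecursiveBenaphore keeps its contention count in user space and only falls back to the semaphore from tracy_sema.h when the lock is actually contended, while RecursiveBenaphore additionally records the owning thread so the same thread may re-enter. A minimal sketch of the non-recursive variant, assuming tracy_benaphore.h is on the include path; the names s_lock, s_value, Add and TryAdd are illustrative only:

#include "tracy_benaphore.h"
#include <mutex>   // std::lock_guard

static NonRecursiveBenaphore s_lock;
static int s_value = 0;

void Add(int n)
{
    // lock()/unlock() satisfy BasicLockable, so std::lock_guard can manage the scope.
    std::lock_guard<NonRecursiveBenaphore> guard(s_lock);
    s_value += n;
}

bool TryAdd(int n)
{
    // tryLock() is not spelled try_lock(), so call it directly rather than via std::unique_lock.
    if (!s_lock.tryLock())
        return false;
    s_value += n;
    s_lock.unlock();
    return true;
}
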
diff --git a/server/tracy_sema.h b/server/tracy_sema.h
new file mode 100644
index 00000000..62d17ba7
--- /dev/null
+++ b/server/tracy_sema.h
@@ -0,0 +1,225 @@
+//---------------------------------------------------------
+// For conditions of distribution and use, see
+// https://github.com/preshing/cpp11-on-multicore/blob/master/LICENSE
+//---------------------------------------------------------
+
+#ifndef __CPP11OM_SEMAPHORE_H__
+#define __CPP11OM_SEMAPHORE_H__
+
+#include <atomic>
+#include <cassert>
+
+
+#if defined(_WIN32)
+//---------------------------------------------------------
+// Semaphore (Windows)
+//---------------------------------------------------------
+
+#include <windows.h>
+#undef min
+#undef max
+
+class Semaphore
+{
+private:
+    HANDLE m_hSema;
+
+    Semaphore(const Semaphore& other) = delete;
+    Semaphore& operator=(const Semaphore& other) = delete;
+
+public:
+    Semaphore(int initialCount = 0)
+    {
+        assert(initialCount >= 0);
+        m_hSema = CreateSemaphore(NULL, initialCount, MAXLONG, NULL);
+    }
+
+    ~Semaphore()
+    {
+        CloseHandle(m_hSema);
+    }
+
+    void wait()
+    {
+        WaitForSingleObject(m_hSema, INFINITE);
+    }
+
+    void signal(int count = 1)
+    {
+        ReleaseSemaphore(m_hSema, count, NULL);
+    }
+};
+
+
+#elif defined(__MACH__)
+//---------------------------------------------------------
+// Semaphore (Apple iOS and OSX)
+// Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
+//---------------------------------------------------------
+
+#include <mach/mach.h>
+
+class Semaphore
+{
+private:
+    semaphore_t m_sema;
+
+    Semaphore(const Semaphore& other) = delete;
+    Semaphore& operator=(const Semaphore& other) = delete;
+
+public:
+    Semaphore(int initialCount = 0)
+    {
+        assert(initialCount >= 0);
+        semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
+    }
+
+    ~Semaphore()
+    {
+        semaphore_destroy(mach_task_self(), m_sema);
+    }
+
+    void wait()
+    {
+        semaphore_wait(m_sema);
+    }
+
+    void signal()
+    {
+        semaphore_signal(m_sema);
+    }
+
+    void signal(int count)
+    {
+        while (count-- > 0)
+        {
+            semaphore_signal(m_sema);
+        }
+    }
+};
+
+
+#elif defined(__unix__)
+//---------------------------------------------------------
+// Semaphore (POSIX, Linux)
+//---------------------------------------------------------
+
+#include <semaphore.h>
+
+class Semaphore
+{
+private:
+    sem_t m_sema;
+
+    Semaphore(const Semaphore& other) = delete;
+    Semaphore& operator=(const Semaphore& other) = delete;
+
+public:
+    Semaphore(int initialCount = 0)
+    {
+        assert(initialCount >= 0);
+        sem_init(&m_sema, 0, initialCount);
+    }
+
+    ~Semaphore()
+    {
+        sem_destroy(&m_sema);
+    }
+
+    void wait()
+    {
+        // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
+        int rc;
+        do
+        {
+            rc = sem_wait(&m_sema);
+        }
+        while (rc == -1 && errno == EINTR);
+    }
+
+    void signal()
+    {
+        sem_post(&m_sema);
+    }
+
+    void signal(int count)
+    {
+        while (count-- > 0)
+        {
+            sem_post(&m_sema);
+        }
+    }
+};
+
+
+#else
+
+#error Unsupported platform!
+
+#endif
+
+
+//---------------------------------------------------------
+// LightweightSemaphore
+//---------------------------------------------------------
+class LightweightSemaphore
+{
+private:
+    std::atomic<int> m_count;
+    Semaphore m_sema;
+
+    void waitWithPartialSpinning()
+    {
+        int oldCount;
+        // Is there a better way to set the initial spin count?
+        // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
+        // as threads start hitting the kernel semaphore.
+        int spin = 10000;
+        while (spin--)
+        {
+            oldCount = m_count.load(std::memory_order_relaxed);
+            if ((oldCount > 0) && m_count.compare_exchange_strong(oldCount, oldCount - 1, std::memory_order_acquire))
+                return;
+            std::atomic_signal_fence(std::memory_order_acquire);  // Prevent the compiler from collapsing the loop.
+        }
+        oldCount = m_count.fetch_sub(1, std::memory_order_acquire);
+        if (oldCount <= 0)
+        {
+            m_sema.wait();
+        }
+    }
+
+public:
+    LightweightSemaphore(int initialCount = 0) : m_count(initialCount)
+    {
+        assert(initialCount >= 0);
+    }
+
+    bool tryWait()
+    {
+        int oldCount = m_count.load(std::memory_order_relaxed);
+        return (oldCount > 0 && m_count.compare_exchange_strong(oldCount, oldCount - 1, std::memory_order_acquire));
+    }
+
+    void wait()
+    {
+        if (!tryWait())
+            waitWithPartialSpinning();
+    }
+
+    void signal(int count = 1)
+    {
+        int oldCount = m_count.fetch_add(count, std::memory_order_release);
+        int toRelease = -oldCount < count ? -oldCount : count;
+        if (toRelease > 0)
+        {
+            m_sema.signal(toRelease);
+        }
+    }
+};
+
+
+typedef LightweightSemaphore DefaultSemaphoreType;
+
+
+#endif // __CPP11OM_SEMAPHORE_H__
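
Usage note (not part of the patch): LightweightSemaphore spins briefly in user space and only falls back to the platform Semaphore when it has to block; DefaultSemaphoreType aliases it and is what both benaphores above instantiate. A minimal sketch of a one-way handoff between two threads, assuming tracy_sema.h is on the include path; the payload/producer names are illustrative only:

#include "tracy_sema.h"
#include <cstdio>
#include <thread>

int main()
{
    DefaultSemaphoreType sem;      // count starts at 0
    int payload = 0;

    std::thread producer([&] {
        payload = 42;              // written before the signal...
        sem.signal();              // ...which publishes with memory_order_release
    });

    sem.wait();                    // acquires, so the write to payload is visible here
    std::printf("payload = %d\n", payload);

    producer.join();
    return 0;
}

signal() increments the count with memory_order_release and wait() decrements it with memory_order_acquire (or blocks on the kernel semaphore), so data written before signal() is visible to the thread that returns from the matching wait().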