diff --git a/indra/aistatemachine/aistatemachine.cpp b/indra/aistatemachine/aistatemachine.cpp index fe7b4a3bd..134461e09 100644 --- a/indra/aistatemachine/aistatemachine.cpp +++ b/indra/aistatemachine/aistatemachine.cpp @@ -440,7 +440,7 @@ void AIStateMachine::multiplex(event_type event) // our need to run (by us having set need_run), so there is no need to run // ourselves. llassert(!mMultiplexMutex.isSelfLocked()); // We may never enter recursively! - if (!mMultiplexMutex.tryLock()) + if (!mMultiplexMutex.try_lock()) { Dout(dc::statemachine(mSMDebug), "Leaving because it is already being run [" << (void*)this << "]"); return; @@ -762,7 +762,7 @@ void AIStateMachine::multiplex(event_type event) //========================================= // Release the lock on mMultiplexMutex *first*, before releasing the lock on mState, - // to avoid to ever call the tryLock() and fail, while this thread isn't still + // to avoid to ever call the try_lock() and fail, while this thread isn't still // BEFORE the critical area of mState! mMultiplexMutex.unlock(); @@ -1262,7 +1262,7 @@ void AIStateMachine::abort(void) multiplex(insert_abort); } // Block until the current run finished. - if (!mRunMutex.tryLock()) + if (!mRunMutex.try_lock()) { llwarns << "AIStateMachine::abort() blocks because the statemachine is still executing code in another thread." 
<< llendl; mRunMutex.lock(); diff --git a/indra/llcommon/CMakeLists.txt b/indra/llcommon/CMakeLists.txt index 79674c5a0..ccc48860d 100644 --- a/indra/llcommon/CMakeLists.txt +++ b/indra/llcommon/CMakeLists.txt @@ -284,6 +284,8 @@ target_link_libraries( ${WINDOWS_LIBRARIES} ${Boost_CONTEXT_LIBRARY} ${Boost_REGEX_LIBRARY} + ${Boost_THREAD_LIBRARY} + ${Boost_SYSTEM_LIBRARY} ${CORESERVICES_LIBRARY} ) diff --git a/indra/llcommon/aiframetimer.cpp b/indra/llcommon/aiframetimer.cpp index 3c102a9c7..4e5c104c1 100644 --- a/indra/llcommon/aiframetimer.cpp +++ b/indra/llcommon/aiframetimer.cpp @@ -65,7 +65,7 @@ static F64 const NEVER = 1e16; // 317 million years. F64 AIFrameTimer::sNextExpiration; AIFrameTimer::timer_list_type AIFrameTimer::sTimerList; -LLMutex AIFrameTimer::sMutex; +LLGlobalMutex AIFrameTimer::sMutex; // Notes on thread-safety of AIRunningFrameTimer (continued from aiframetimer.h) // @@ -80,11 +80,10 @@ LLMutex AIFrameTimer::sMutex; void AIFrameTimer::create(F64 expiration, signal_type::slot_type const& slot) { AIRunningFrameTimer new_timer(expiration, this); - sMutex.lock(); + LLMutexLock lock(sMutex); llassert(mHandle.mRunningTimer == sTimerList.end()); // Create may only be called when the timer isn't already running. mHandle.init(sTimerList.insert(new_timer), slot); sNextExpiration = sTimerList.begin()->expiration(); - sMutex.unlock(); } void AIFrameTimer::cancel(void) @@ -95,18 +94,19 @@ void AIFrameTimer::cancel(void) // mHandle.mMutex lock), we start with trying to obtain // it here and as such wait till the callback function // returned. - mHandle.mMutex.lock(); + mHandle.mMutex.lock(); // Next we have to grab this lock in order to stop // AIFrameTimer::handleExpiration from even entering // in the case we manage to get it first. - sMutex.lock(); - if (mHandle.mRunningTimer != sTimerList.end()) { - sTimerList.erase(mHandle.mRunningTimer); - mHandle.mRunningTimer = sTimerList.end(); - sNextExpiration = sTimerList.empty() ? 
NEVER : sTimerList.begin()->expiration(); + LLMutexLock lock(sMutex); + if (mHandle.mRunningTimer != sTimerList.end()) + { + sTimerList.erase(mHandle.mRunningTimer); + mHandle.mRunningTimer = sTimerList.end(); + sNextExpiration = sTimerList.empty() ? NEVER : sTimerList.begin()->expiration(); + } } - sMutex.unlock(); mHandle.mMutex.unlock(); } @@ -164,7 +164,7 @@ void AIFrameTimer::handleExpiration(F64 current_frame_time) // // Note that if the other thread actually obtained the sMutex then we // can't be here: this is still inside the critical area of sMutex. - if (handle.mMutex.tryLock()) // If this fails then another thread is in the process of cancelling this timer, so do nothing. + if (handle.mMutex.try_lock()) // If this fails then another thread is in the process of cancelling this timer, so do nothing. { sMutex.unlock(); running_timer->do_callback(); // May not throw exceptions. diff --git a/indra/llcommon/aiframetimer.h b/indra/llcommon/aiframetimer.h index 366581c49..880b492ee 100644 --- a/indra/llcommon/aiframetimer.h +++ b/indra/llcommon/aiframetimer.h @@ -96,7 +96,7 @@ class LL_COMMON_API AIFrameTimer typedef std::multiset timer_list_type; - static LLMutex sMutex; // Mutex for the two global variables below. + static LLGlobalMutex sMutex; // Mutex for the two global variables below. static timer_list_type sTimerList; // List with all running timers. static F64 sNextExpiration; // Cache of smallest value in sTimerList. friend class LLFrameTimer; // Access to sNextExpiration. diff --git a/indra/llcommon/aithreadsafe.h b/indra/llcommon/aithreadsafe.h index bb0ac7e05..93b38246f 100644 --- a/indra/llcommon/aithreadsafe.h +++ b/indra/llcommon/aithreadsafe.h @@ -239,7 +239,7 @@ protected: // For use by AIThreadSafeDC AIThreadSafe(void) { } - AIThreadSafe(LLAPRPool& parent) : mRWLock(parent) { } + MUTEX_POOL(AIThreadSafe(LLAPRPool& parent) : mRWLock(parent){ }) public: // Only for use by AITHREADSAFE, see below. 
@@ -473,7 +473,7 @@ protected: friend struct AIRegisteredStateMachinesList; // For use by AIThreadSafeSimpleDC and AIRegisteredStateMachinesList. AIThreadSafeSimple(void) { } - AIThreadSafeSimple(LLAPRPool& parent) : mMutex(parent) { } + MUTEX_POOL(AIThreadSafeSimple(LLAPRPool& parent) : mMutex(parent) { }) public: // Only for use by AITHREADSAFESIMPLE, see below. @@ -551,7 +551,7 @@ public: protected: // For use by AIThreadSafeSimpleDCRootPool - AIThreadSafeSimpleDC(LLAPRRootPool& parent) : AIThreadSafeSimple(parent) { new (AIThreadSafeSimple::ptr()) T; } + AIThreadSafeSimpleDC(LLAPRRootPool& parent) : AIThreadSafeSimple(MUTEX_POOL(parent)) { new (AIThreadSafeSimple::ptr()) T; } }; // Helper class for AIThreadSafeSimpleDCRootPool to assure initialization of @@ -585,7 +585,7 @@ public: // as opposed to allocated from the current threads root pool. AIThreadSafeSimpleDCRootPool(void) : AIThreadSafeSimpleDCRootPool_pbase(), - AIThreadSafeSimpleDC(mRootPool) { } + AIThreadSafeSimpleDC(MUTEX_POOL(mRootPool)) { } }; /** diff --git a/indra/llcommon/llapr.cpp b/indra/llcommon/llapr.cpp index 4b86fecdf..28e8db756 100644 --- a/indra/llcommon/llapr.cpp +++ b/indra/llcommon/llapr.cpp @@ -30,60 +30,6 @@ #include "llapr.h" #include "llscopedvolatileaprpool.h" -LLFastTimer::DeclareTimer FT_WAIT_FOR_SCOPEDLOCK("LLScopedLock"); - -//--------------------------------------------------------------------- -// -// LLScopedLock -// -LLScopedLock::LLScopedLock(apr_thread_mutex_t* mutex) : mMutex(mutex) -{ - mLocked = !!mutex; - if (LL_LIKELY(mutex)) - { - apr_status_t status = apr_thread_mutex_trylock(mMutex); - while (LL_UNLIKELY(status != APR_SUCCESS)) - { - if (APR_STATUS_IS_EBUSY(status)) - { - if (AIThreadID::in_main_thread_inline()) - { - LLFastTimer ft1(FT_WAIT_FOR_SCOPEDLOCK); - status = apr_thread_mutex_lock(mMutex); - } - else - { - status = apr_thread_mutex_lock(mMutex); - } - } - else - { - ll_apr_warn_status(status); - mLocked = false; - return; - } - } - } -} - 
-LLScopedLock::~LLScopedLock() -{ - unlock(); -} - -void LLScopedLock::unlock() -{ - if(mLocked) - { - if(!ll_apr_warn_status(apr_thread_mutex_unlock(mMutex))) - { - mLocked = false; - } - } -} - -//--------------------------------------------------------------------- - bool ll_apr_warn_status(apr_status_t status) { if(APR_SUCCESS == status) return false; diff --git a/indra/llcommon/llapr.h b/indra/llcommon/llapr.h index 97aa96e0b..9baddef5f 100644 --- a/indra/llcommon/llapr.h +++ b/indra/llcommon/llapr.h @@ -36,55 +36,12 @@ #include #include "apr_thread_proc.h" -#include "apr_thread_mutex.h" #include "apr_getopt.h" -#include "apr_signal.h" -#include "apr_atomic.h" #include "llstring.h" class LLAPRPool; class LLVolatileAPRPool; -/** - * @class LLScopedLock - * @brief Small class to help lock and unlock mutexes. - * - * This class is used to have a stack level lock once you already have - * an apr mutex handy. The constructor handles the lock, and the - * destructor handles the unlock. Instances of this class are - * not thread safe. - */ -class LL_COMMON_API LLScopedLock : private boost::noncopyable -{ -public: - /** - * @brief Constructor which accepts a mutex, and locks it. - * - * @param mutex An allocated APR mutex. If you pass in NULL, - * this wrapper will not lock. - */ - LLScopedLock(apr_thread_mutex_t* mutex); - - /** - * @brief Destructor which unlocks the mutex if still locked. - */ - ~LLScopedLock(); - - /** - * @brief Check lock. - */ - bool isLocked() const { return mLocked; } - - /** - * @brief This method unlocks the mutex. - */ - void unlock(); - -protected: - bool mLocked; - apr_thread_mutex_t* mMutex; -}; - // File IO convenience functions. 
// Returns NULL if the file fails to open, sets *sizep to file size if not NULL // abbreviated flags diff --git a/indra/llcommon/llaprpool.cpp b/indra/llcommon/llaprpool.cpp index 3dffa8300..b065a5801 100644 --- a/indra/llcommon/llaprpool.cpp +++ b/indra/llcommon/llaprpool.cpp @@ -37,6 +37,7 @@ #include "llerror.h" #include "llaprpool.h" +#include "llatomic.h" #include "llthread.h" // Create a subpool from parent. @@ -110,23 +111,18 @@ LLAPRInitialization::LLAPRInitialization(void) } bool LLAPRRootPool::sCountInitialized = false; -apr_uint32_t volatile LLAPRRootPool::sCount; - -apr_thread_mutex_t* gLogMutexp; -apr_thread_mutex_t* gCallStacksLogMutexp; +LLAtomicS32 LLAPRRootPool::sCount; LLAPRRootPool::LLAPRRootPool(void) : LLAPRInitialization(), LLAPRPool(0) { // sCountInitialized don't need locking because when we get here there is still only a single thread. if (!sCountInitialized) { - // Initialize the logging mutex - apr_thread_mutex_create(&gLogMutexp, APR_THREAD_MUTEX_UNNESTED, mPool); - apr_thread_mutex_create(&gCallStacksLogMutexp, APR_THREAD_MUTEX_UNNESTED, mPool); - +#ifdef NEEDS_APR_ATOMICS apr_status_t status = apr_atomic_init(mPool); llassert_always(status == APR_SUCCESS); - apr_atomic_set32(&sCount, 1); // Set to 1 to account for the global root pool. +#endif + sCount = 1; // Set to 1 to account for the global root pool. sCountInitialized = true; // Initialize thread-local APR pool support. @@ -134,33 +130,16 @@ LLAPRRootPool::LLAPRRootPool(void) : LLAPRInitialization(), LLAPRPool(0) // it must be done last, so that sCount is already initialized. LLThreadLocalData::init(); } - apr_atomic_inc32(&sCount); + sCount++; } LLAPRRootPool::~LLAPRRootPool() { - if (!apr_atomic_dec32(&sCount)) + if (!--sCount) { // The last pool was destructed. Cleanup remainder of APR. LL_INFOS("APR") << "Cleaning up APR" << LL_ENDL; - if (gLogMutexp) - { - // Clean up the logging mutex - - // All other threads NEED to be done before we clean up APR, so this is okay. 
- apr_thread_mutex_destroy(gLogMutexp); - gLogMutexp = NULL; - } - if (gCallStacksLogMutexp) - { - // Clean up the logging mutex - - // All other threads NEED to be done before we clean up APR, so this is okay. - apr_thread_mutex_destroy(gCallStacksLogMutexp); - gCallStacksLogMutexp = NULL; - } - // Must destroy ALL, and therefore this last LLAPRRootPool, before terminating APR. static_cast(this)->destroy(); diff --git a/indra/llcommon/llaprpool.h b/indra/llcommon/llaprpool.h index 74af351e4..d3606d396 100644 --- a/indra/llcommon/llaprpool.h +++ b/indra/llcommon/llaprpool.h @@ -47,9 +47,11 @@ #include "apr_portable.h" #include "apr_pools.h" +#include "llatomic.h" #include "llerror.h" #include "aithreadid.h" + extern void ll_init_apr(); /** @@ -104,8 +106,11 @@ public: // NEVER destroy a pool that is returned by this function! apr_pool_t* operator()(void) const { - llassert(mPool); - llassert(mOwner.equals_current_thread()); + if (mParent) + { + llassert(mPool); + llassert(mOwner.equals_current_thread()); + } return mPool; } @@ -181,7 +186,7 @@ private: private: // Keep track of how many root pools exist and when the last one is destructed. static bool sCountInitialized; - static apr_uint32_t volatile sCount; + static LLAtomicS32 sCount; public: // Return a global root pool that is independent of LLThreadLocalData. diff --git a/indra/llcommon/llatomic.h b/indra/llcommon/llatomic.h index 3cc5bcf38..0cfe99c77 100644 --- a/indra/llcommon/llatomic.h +++ b/indra/llcommon/llatomic.h @@ -34,8 +34,29 @@ #ifndef LL_LLATOMIC_H #define LL_LLATOMIC_H -#include "apr_atomic.h" +#define USE_BOOST_ATOMIC +//Internal definitions +#define NEEDS_APR_ATOMICS do_not_define_manually_thanks +#undef NEEDS_APR_ATOMICS + +#if defined(USE_BOOST_ATOMIC) +#include "boost/version.hpp" +#endif + +//Prefer boost over stl over apr. 
+ +#if defined(USE_BOOST_ATOMIC) && (BOOST_VERSION >= 105200) +#include "boost/atomic.hpp" +template +struct impl_atomic_type { typedef boost::atomic type; }; +#elif defined(USE_STD_ATOMIC) && (__cplusplus >= 201103L || _MSC_VER >= 1800) +#include +template +struct impl_atomic_type { typedef std::atomic type; }; +#else +#include "apr_atomic.h" +#define NEEDS_APR_ATOMICS template class LLAtomic32 { public: @@ -54,6 +75,27 @@ public: private: apr_uint32_t mData; }; +#endif +#if !defined(NEEDS_APR_ATOMICS) +template class LLAtomic32 +{ +public: + LLAtomic32(void) { } + LLAtomic32(LLAtomic32 const& atom) { mData = Type(atom.mData); } + LLAtomic32(Type x) { mData = x; } + LLAtomic32& operator=(LLAtomic32 const& atom) { mData = Type(atom.mData); return *this; } + + operator Type() const { return mData; } + void operator=(Type x) { mData = x; } + void operator-=(Type x) { mData -= x; } + void operator+=(Type x) { mData += x; } + Type operator++(int) { return mData++; } // Type++ + bool operator--() { return --mData; } // Returns (--Type != 0) + +private: + typename impl_atomic_type::type mData; +}; +#endif typedef LLAtomic32 LLAtomicU32; typedef LLAtomic32 LLAtomicS32; diff --git a/indra/llcommon/llerror.cpp b/indra/llcommon/llerror.cpp index 8b40edb40..4155885d8 100644 --- a/indra/llcommon/llerror.cpp +++ b/indra/llcommon/llerror.cpp @@ -874,8 +874,8 @@ You get: */ -extern apr_thread_mutex_t* gLogMutexp; -extern apr_thread_mutex_t* gCallStacksLogMutexp; +LLGlobalMutex gLogMutex; +LLGlobalMutex gCallStacksLogMutex; namespace { bool checkLevelMap(const LevelMap& map, const std::string& key, @@ -905,17 +905,15 @@ namespace { LogLock::LogLock() : mLocked(false), mOK(false) { - if (!gLogMutexp) + if (!gLogMutex.isInitalized()) { mOK = true; return; } - const int MAX_RETRIES = 5; for (int attempts = 0; attempts < MAX_RETRIES; ++attempts) { - apr_status_t s = apr_thread_mutex_trylock(gLogMutexp); - if (!APR_STATUS_IS_EBUSY(s)) + if (gLogMutex.try_lock()) { mLocked = true; mOK = 
true; @@ -937,7 +935,7 @@ namespace { { if (mLocked) { - apr_thread_mutex_unlock(gLogMutexp); + gLogMutex.unlock(); } } } @@ -1299,17 +1297,16 @@ namespace LLError CallStacksLogLock::CallStacksLogLock() : mLocked(false), mOK(false) { - if (!gCallStacksLogMutexp) + if (!gCallStacksLogMutex.isInitalized()) { mOK = true; return; } - + const int MAX_RETRIES = 5; for (int attempts = 0; attempts < MAX_RETRIES; ++attempts) { - apr_status_t s = apr_thread_mutex_trylock(gCallStacksLogMutexp); - if (!APR_STATUS_IS_EBUSY(s)) + if (gCallStacksLogMutex.try_lock()) { mLocked = true; mOK = true; @@ -1328,7 +1325,7 @@ namespace LLError { if (mLocked) { - apr_thread_mutex_unlock(gCallStacksLogMutexp); + gCallStacksLogMutex.unlock(); } } diff --git a/indra/llcommon/llframetimer.cpp b/indra/llcommon/llframetimer.cpp index c214f03a4..94f0c1dee 100644 --- a/indra/llcommon/llframetimer.cpp +++ b/indra/llcommon/llframetimer.cpp @@ -47,12 +47,14 @@ S32 LLFrameTimer::sFrameCount = 0; // Current frame number (number of fr U64 LLFrameTimer::sPrevTotalTime = LLFrameTimer::sStartTotalTime; // Previous (frame) time in microseconds since epoch, updated once per frame. U64 LLFrameTimer::sFrameDeltaTime = 0; // Microseconds between last two calls to LLFrameTimer::updateFrameTimeAndCount. // Mutex for the above. 
-apr_thread_mutex_t* LLFrameTimer::sGlobalMutex; +LLGlobalMutex LLFrameTimer::sGlobalMutex; + +bool LLFrameTimer::sFirstFrameTimerCreated; // static void LLFrameTimer::global_initialization(void) { - apr_thread_mutex_create(&sGlobalMutex, APR_THREAD_MUTEX_UNNESTED, LLAPRRootPool::get()()); + sFirstFrameTimerCreated = true; AIFrameTimer::sNextExpiration = NEVER; } @@ -63,9 +65,9 @@ void LLFrameTimer::updateFrameTime(void) sTotalTime = totalTime(); sTotalSeconds = U64_to_F64(sTotalTime) * USEC_TO_SEC_F64; F64 new_frame_time = U64_to_F64(sTotalTime - sStartTotalTime) * USEC_TO_SEC_F64; - apr_thread_mutex_lock(sGlobalMutex); + sGlobalMutex.lock(); sFrameTime = new_frame_time; - apr_thread_mutex_unlock(sGlobalMutex); + sGlobalMutex.unlock(); } // static diff --git a/indra/llcommon/llframetimer.h b/indra/llcommon/llframetimer.h index 30bc58dd9..5ebf14247 100644 --- a/indra/llcommon/llframetimer.h +++ b/indra/llcommon/llframetimer.h @@ -36,16 +36,13 @@ #include "lltimer.h" #include "timing.h" -#include -#ifdef SHOW_ASSERT -#include "aithreadid.h" // is_main_thread() -#endif +#include "llthread.h" class LL_COMMON_API LLFrameTimer { public: // Create an LLFrameTimer and start it. After creation it is running and in the state expired (hasExpired will return true). - LLFrameTimer(void) : mExpiry(0), mRunning(true), mPaused(false) { if (!sGlobalMutex) global_initialization(); setAge(0.0); } + LLFrameTimer(void) : mExpiry(0), mRunning(true), mPaused(false) { if (!sFirstFrameTimerCreated) global_initialization(); setAge(0.0); } // void copy(LLFrameTimer const& timer) { mStartTime = timer.mStartTime; mExpiry = timer.mExpiry; mRunning = timer.mRunning; mPaused = timer.mPaused; } @@ -57,9 +54,9 @@ public: static F64 getElapsedSeconds(void) { // Loses msec precision after ~4.5 hours... 
- apr_thread_mutex_lock(sGlobalMutex); + sGlobalMutex.lock(); F64 res = sFrameTime; - apr_thread_mutex_unlock(sGlobalMutex); + sGlobalMutex.unlock(); return res; } @@ -68,9 +65,9 @@ public: { // sTotalTime is only accessed by the main thread, so no locking is necessary. llassert(is_main_thread()); - //apr_thread_mutex_lock(sGlobalMutex); + //sGlobalMutex.lock(); U64 res = sTotalTime; - //apr_thread_mutex_unlock(sGlobalMutex); + //sGlobalMutex.unlock(); llassert(res); return res; } @@ -80,9 +77,9 @@ public: { // sTotalSeconds is only accessed by the main thread, so no locking is necessary. llassert(is_main_thread()); - //apr_thread_mutex_lock(sGlobalMutex); + //sGlobalMutex.lock(); F64 res = sTotalSeconds; - //apr_thread_mutex_unlock(sGlobalMutex); + //sGlobalMutex.unlock(); return res; } @@ -91,9 +88,9 @@ public: { // sFrameCount is only accessed by the main thread, so no locking is necessary. llassert(is_main_thread()); - //apr_thread_mutex_lock(sGlobalMutex); + //sGlobalMutex.lock(); U32 res = sFrameCount; - //apr_thread_mutex_unlock(sGlobalMutex); + //sGlobalMutex.unlock(); return res; } @@ -170,7 +167,7 @@ protected: // // More than one thread are accessing (some of) these variables, therefore we need locking. - static apr_thread_mutex_t* sGlobalMutex; + static LLGlobalMutex sGlobalMutex; // Current time in seconds since application start, updated together with sTotalTime. static F64 sFrameTime; @@ -190,6 +187,8 @@ protected: // Current frame number (number of frames since application start). 
static S32 sFrameCount; + static bool sFirstFrameTimerCreated; + // // Member data // diff --git a/indra/llcommon/llsingleton.h b/indra/llcommon/llsingleton.h index a780a6a24..aa8efb1e0 100644 --- a/indra/llcommon/llsingleton.h +++ b/indra/llcommon/llsingleton.h @@ -33,10 +33,10 @@ /// @brief A global registry of all singletons to prevent duplicate allocations /// across shared library boundaries -class LL_COMMON_API LLSingletonRegistry { +class LLSingletonRegistry { private: typedef std::map TypeMap; - static TypeMap * sSingletonMap; + static LL_COMMON_API TypeMap * sSingletonMap; static void checkInit() { diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp index 1c3fec608..7ab4b1f39 100644 --- a/indra/llcommon/llthread.cpp +++ b/indra/llcommon/llthread.cpp @@ -364,138 +364,133 @@ LLThreadLocalData& LLThreadLocalData::tldata(void) //============================================================================ -LLCondition::LLCondition(LLAPRPool& parent) : LLMutex(parent) +#if defined(NEEDS_MUTEX_IMPL) +#if defined(USE_WIN32_THREAD) +LLMutexImpl::LLMutexImpl() { - apr_thread_cond_create(&mAPRCondp, mPool()); + InitializeCriticalSection(&mMutexImpl); //can throw STATUS_NO_MEMORY +} +LLMutexImpl::~LLMutexImpl() +{ + DeleteCriticalSection(&mMutexImpl); //nothrow +} +void LLMutexImpl::lock() +{ + EnterCriticalSection(&mMutexImpl); //can throw EXCEPTION_POSSIBLE_DEADLOCK +} +void LLMutexImpl::unlock() +{ + LeaveCriticalSection(&mMutexImpl); //nothrow +} +bool LLMutexImpl::try_lock() +{ + return !!TryEnterCriticalSection(&mMutexImpl); //nothrow +} +LLConditionVariableImpl::LLConditionVariableImpl() +{ + InitializeConditionVariable(&mConditionVariableImpl); +} +LLConditionVariableImpl::~LLConditionVariableImpl() +{ + //There is no DeleteConditionVariable +} +void LLConditionVariableImpl::notify_one() +{ + WakeConditionVariable(&mConditionVariableImpl); +} +void LLConditionVariableImpl::notify_all() +{ + WakeAllConditionVariable(&mConditionVariableImpl); 
+} +void LLConditionVariableImpl::wait(LLMutex& lock) +{ + LLMutex::ImplAdoptMutex impl_adopted_mutex(lock); + SleepConditionVariableCS(&mConditionVariableImpl, &lock.native_handle(), INFINITE); +} +#else + +void APRExceptionThrower(apr_status_t status) +{ + if(status != APR_SUCCESS) + { + static char buf[256]; + throw std::logic_error(apr_strerror(status,buf,sizeof(buf))); + } } - -LLCondition::~LLCondition() +LLMutexImpl::LLMutexImpl(native_pool_type& pool) : mPool(pool), mMutexImpl(NULL) { - apr_thread_cond_destroy(mAPRCondp); - mAPRCondp = NULL; + APRExceptionThrower(apr_thread_mutex_create(&mMutexImpl, APR_THREAD_MUTEX_UNNESTED, mPool())); +} +LLMutexImpl::~LLMutexImpl() +{ + APRExceptionThrower(apr_thread_mutex_destroy(mMutexImpl)); + mMutexImpl = NULL; +} +void LLMutexImpl::lock() +{ + APRExceptionThrower(apr_thread_mutex_lock(mMutexImpl)); +} +void LLMutexImpl::unlock() +{ + APRExceptionThrower(apr_thread_mutex_unlock(mMutexImpl)); +} +bool LLMutexImpl::try_lock() +{ + apr_status_t status = apr_thread_mutex_trylock(mMutexImpl); + if(APR_STATUS_IS_EBUSY(status)) + return false; + APRExceptionThrower(status); + return true; +} +LLConditionVariableImpl::LLConditionVariableImpl(native_pool_type& pool) : mPool(pool), mConditionVariableImpl(NULL) +{ + APRExceptionThrower(apr_thread_cond_create(&mConditionVariableImpl, mPool())); +} +LLConditionVariableImpl::~LLConditionVariableImpl() +{ + APRExceptionThrower(apr_thread_cond_destroy(mConditionVariableImpl)); +} +void LLConditionVariableImpl::notify_one() +{ + APRExceptionThrower(apr_thread_cond_signal(mConditionVariableImpl)); +} +void LLConditionVariableImpl::notify_all() +{ + APRExceptionThrower(apr_thread_cond_broadcast(mConditionVariableImpl)); +} +void LLConditionVariableImpl::wait(LLMutex& lock) +{ + LLMutex::ImplAdoptMutex impl_adopted_mutex(lock); + APRExceptionThrower(apr_thread_cond_wait(mConditionVariableImpl, lock.native_handle())); +} +#endif +#endif + +LLFastTimer::DeclareTimer 
FT_WAIT_FOR_MUTEX("LLMutex::lock()"); +void LLMutex::lock_main(LLFastTimer::DeclareTimer* timer) +{ + llassert(!isSelfLocked()); + LLFastTimer ft1(timer ? *timer : FT_WAIT_FOR_MUTEX); + LLMutexImpl::lock(); } LLFastTimer::DeclareTimer FT_WAIT_FOR_CONDITION("LLCondition::wait()"); - -void LLCondition::wait() +void LLCondition::wait_main() { - if (AIThreadID::in_main_thread_inline()) + llassert(isSelfLocked()); + LLFastTimer ft1(FT_WAIT_FOR_CONDITION); + LLConditionVariableImpl::wait(*this); + llassert(isSelfLocked()); +} + +LLFastTimer::DeclareTimer FT_WAIT_FOR_MUTEXLOCK("LLMutexLock::lock()"); +void LLMutexLock::lock() +{ + if (mMutex) { - LLFastTimer ft1(FT_WAIT_FOR_CONDITION); - apr_thread_cond_wait(mAPRCondp, mAPRMutexp); - } - else - { - apr_thread_cond_wait(mAPRCondp, mAPRMutexp); + mMutex->lock(&FT_WAIT_FOR_MUTEXLOCK); } } -void LLCondition::signal() -{ - apr_thread_cond_signal(mAPRCondp); -} - -void LLCondition::broadcast() -{ - apr_thread_cond_broadcast(mAPRCondp); -} - -//============================================================================ -LLMutexBase::LLMutexBase() : - mCount(0), - mLockingThread(AIThreadID::sNone) -{ -} - -bool LLMutexBase::isSelfLocked() const -{ - return mLockingThread.equals_current_thread_inline(); -} - -LLFastTimer::DeclareTimer FT_WAIT_FOR_MUTEX("LLMutexBase::lock()"); - -void LLMutexBase::lock() -{ - if (mLockingThread.equals_current_thread_inline()) - { //redundant lock - mCount++; - return; - } - - if (APR_STATUS_IS_EBUSY(apr_thread_mutex_trylock(mAPRMutexp))) - { - if (AIThreadID::in_main_thread_inline()) - { - LLFastTimer ft1(FT_WAIT_FOR_MUTEX); - apr_thread_mutex_lock(mAPRMutexp); - } - else - { - apr_thread_mutex_lock(mAPRMutexp); - } - } - - mLockingThread.reset_inline(); -} - -bool LLMutexBase::tryLock() -{ - if (mLockingThread.equals_current_thread_inline()) - { //redundant lock - mCount++; - return true; - } - bool success = !APR_STATUS_IS_EBUSY(apr_thread_mutex_trylock(mAPRMutexp)); - if (success) - { - 
mLockingThread.reset_inline(); - } - return success; -} - -// non-blocking, but does do a lock/unlock so not free -bool LLMutexBase::isLocked() const -{ - if (mLockingThread.equals_current_thread_inline()) - return false; // A call to lock() won't block. - if (APR_STATUS_IS_EBUSY(apr_thread_mutex_trylock(mAPRMutexp))) - return true; - apr_thread_mutex_unlock(mAPRMutexp); - return false; -} - -void LLMutexBase::unlock() -{ - if (mCount > 0) - { //not the root unlock - mCount--; - return; - } - mLockingThread = AIThreadID::sNone; - - apr_thread_mutex_unlock(mAPRMutexp); -} - -//---------------------------------------------------------------------------- - -LLThreadSafeRefCount::LLThreadSafeRefCount() : - mRef(0) -{ -} - -LLThreadSafeRefCount::~LLThreadSafeRefCount() -{ - if (mRef != 0) - { - llerrs << "deleting non-zero reference" << llendl; - } -} - -//============================================================================ - -LLResponder::~LLResponder() -{ -} - -//============================================================================ +//---------------------------------------------------------------------------- \ No newline at end of file diff --git a/indra/llcommon/llthread.h b/indra/llcommon/llthread.h index 7d0cc593c..a3f95e6d8 100644 --- a/indra/llcommon/llthread.h +++ b/indra/llcommon/llthread.h @@ -27,6 +27,8 @@ #ifndef LL_LLTHREAD_H #define LL_LLTHREAD_H +#define USE_BOOST_MUTEX + #if LL_GNUC // Needed for is_main_thread() when compiling with optimization (relwithdebinfo). // It doesn't hurt to just always specify it though. 
@@ -35,10 +37,9 @@ #include "llapp.h" #include "llapr.h" -#include "llmemory.h" -#include "apr_thread_cond.h" #include "llaprpool.h" #include "llatomic.h" +#include "llmemory.h" #include "aithreadid.h" class LLThread; @@ -177,126 +178,294 @@ protected: //============================================================================ -#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO) +#define MUTEX_POOL(arg) -#ifdef MUTEX_DEBUG -// We really shouldn't be using recursive locks. Make sure of that in debug mode. -#define MUTEX_FLAG APR_THREAD_MUTEX_UNNESTED +//Internal definitions +#define NEEDS_MUTEX_IMPL do_not_define_manually_thanks +#undef NEEDS_MUTEX_IMPL +#define NEEDS_MUTEX_RECURSION do_not_define_manually_thanks +#undef NEEDS_MUTEX_RECURSION + +//Prefer boost over stl over windows over apr. + +#if defined(USE_BOOST_MUTEX) && (BOOST_VERSION >= 103400) //condition_variable_any was added in boost 1.34 +//Define BOOST_SYSTEM_NO_DEPRECATED to avoid system_category() and generic_category() dependencies, as those won't be exported. +#define BOOST_SYSTEM_NO_DEPRECATED +#include +#include +#include +#include +typedef boost::recursive_mutex LLMutexImpl; +typedef boost::condition_variable_any LLConditionVariableImpl; +#elif defined(USE_STD_MUTEX) && (__cplusplus >= 201103L || _MSC_VER >= 1800) +#include +typedef std::recursive_mutex LLMutexImpl; +typedef std::condition_variable_any LLConditionVariableImpl; +#elif defined(USE_WIN32_MUTEX) +typedef CRITICAL_SECTION impl_mutex_handle_type; +typedef CONDITION_VARIABLE impl_cond_handle_type; +#define NEEDS_MUTEX_IMPL +#define NEEDS_MUTEX_RECURSION #else -// Use the fastest platform-optimal lock behavior (can be recursive or non-recursive). 
-#define MUTEX_FLAG APR_THREAD_MUTEX_DEFAULT +//----APR specific------ +#include "apr_thread_cond.h" +#include "apr_thread_mutex.h" +#include "apr_signal.h" +typedef LLAPRPool native_pool_type; +typedef apr_thread_mutex_t* impl_mutex_handle_type; +typedef apr_thread_cond_t* impl_cond_handle_type; +#undef MUTEX_POOL +#undef DEFAULT_POOL +#define MUTEX_POOL(arg) arg +#define NEEDS_MUTEX_IMPL +#define NEEDS_MUTEX_RECURSION +//END #endif -class LL_COMMON_API LLMutexBase +#ifdef NEEDS_MUTEX_IMPL + +//Impl classes are not meant to be accessed directly. They must be utilized by a parent classes. +// They are designed to be 'clones' of their stl counterparts to facilitate simple drop-in +// replacement of underlying implementation (boost,std,apr,critical_sections,etc) +// Members and member functions are all private. +class LL_COMMON_API LLMutexImpl : private boost::noncopyable { -public: - LLMutexBase() ; - - void lock(); // blocks + friend class LLMutex; + friend class LLCondition; + friend class LLConditionVariableImpl; + + typedef impl_mutex_handle_type native_handle_type; + + LLMutexImpl(MUTEX_POOL(native_pool_type& pool)); + virtual ~LLMutexImpl(); + void lock(); void unlock(); - // Returns true if lock was obtained successfully. - bool tryLock(); - - // Returns true if a call to lock() would block (returns false if self-locked()). - bool isLocked() const; - - // Returns true if locked by this thread. - bool isSelfLocked() const; - -protected: - // mAPRMutexp is initialized and uninitialized in the derived class. - apr_thread_mutex_t* mAPRMutexp; - mutable U32 mCount; - mutable AIThreadID mLockingThread; + bool try_lock(); + native_handle_type& native_handle() { return mMutexImpl; } private: - // Disallow copy construction and assignment. 
- LLMutexBase(LLMutexBase const&); - LLMutexBase& operator=(LLMutexBase const&); + native_handle_type mMutexImpl; + MUTEX_POOL(native_pool_type mPool); }; -class LL_COMMON_API LLMutex : public LLMutexBase -{ -public: - LLMutex(LLAPRPool& parent = LLThread::tldata().mRootPool) : mPool(parent) - { - apr_thread_mutex_create(&mAPRMutexp, MUTEX_FLAG, mPool()); - } - ~LLMutex() - { - //this assertion erroneously triggers whenever an LLCondition is destroyed - //llassert(!isLocked()); // better not be locked! - apr_thread_mutex_destroy(mAPRMutexp); - mAPRMutexp = NULL; - } - -protected: - LLAPRPool mPool; -}; - -#if APR_HAS_THREADS -// No need to use a root pool in this case. -typedef LLMutex LLMutexRootPool; -#else // APR_HAS_THREADS -class LL_COMMON_API LLMutexRootPool : public LLMutexBase -{ -public: - LLMutexRootPool(void) - { - apr_thread_mutex_create(&mAPRMutexp, MUTEX_FLAG, mRootPool()); - } - ~LLMutexRootPool() - { -#if APR_POOL_DEBUG - // It is allowed to destruct root pools from a different thread. - mRootPool.grab_ownership(); #endif - llassert(!isLocked()); // better not be locked! 
- apr_thread_mutex_destroy(mAPRMutexp); - mAPRMutexp = NULL; + +class LL_COMMON_API LLMutex : public LLMutexImpl +{ +#ifdef NEEDS_MUTEX_IMPL + friend class LLConditionVariableImpl; +#endif +public: + LLMutex(MUTEX_POOL(native_pool_type& pool = LLThread::tldata().mRootPool)) : LLMutexImpl(MUTEX_POOL(pool)), +#ifdef NEEDS_MUTEX_RECURSION + mLockDepth(0), +#endif + mLockingThread(AIThreadID::sNone) + {} + ~LLMutex() + {} + + void lock(LLFastTimer::DeclareTimer* timer = NULL) // blocks + { + if (inc_lock_if_recursive()) + return; + if (AIThreadID::in_main_thread_inline() && LLApp::isRunning()) + { + if (!LLMutexImpl::try_lock()) + { + lock_main(timer); + } + } + else + { + LLMutexImpl::lock(); + } + mLockingThread.reset_inline(); } -protected: - LLAPRRootPool mRootPool; + void unlock() + { +#ifdef NEEDS_MUTEX_RECURSION + if (mLockDepth > 0) + { + --mLockDepth; + return; + } +#endif + mLockingThread = AIThreadID::sNone; + LLMutexImpl::unlock(); + } + + // Returns true if lock was obtained successfully. + bool try_lock() + { + if (inc_lock_if_recursive()) + return true; + if (!LLMutexImpl::try_lock()) + return false; + mLockingThread.reset_inline(); + return true; + } + + // Returns true if locked not by this thread + bool isLocked() + { + if (isSelfLocked()) + return false; + if (LLMutexImpl::try_lock()) + { + LLMutexImpl::unlock(); + return false; + } + return true; + } + // Returns true if locked by this thread. + bool isSelfLocked() const + { + return mLockingThread.equals_current_thread_inline(); + } + +#ifdef NEEDS_MUTEX_IMPL + //This is important for libraries that we cannot pass LLMutex into. + //For example, apr wait. apr wait unlocks and re-locks the thread, however + // it has no knowledge of LLMutex::mLockingThread and LLMutex::mLockDepth, + // and thus will leave those member variables set even after the wait internally releases the lock. + // Leaving those two variables set even when mutex has actually been unlocked via apr is BAD. 
+ friend class ImplAdoptMutex; + class ImplAdoptMutex + { + friend class LLConditionVariableImpl; + ImplAdoptMutex(LLMutex& mutex) : mMutex(mutex), +#ifdef NEEDS_MUTEX_RECURSION + mLockDepth(mutex.mLockDepth), +#endif + mLockingThread(mutex.mLockingThread) + + { + mMutex.mLockingThread = AIThreadID::sNone; +#ifdef NEEDS_MUTEX_RECURSION + mMutex.mLockDepth = 0; +#endif + } + ~ImplAdoptMutex() + { + mMutex.mLockingThread = mLockingThread; +#ifdef NEEDS_MUTEX_RECURSION + mMutex.mLockDepth = mLockDepth; +#endif + } + LLMutex& mMutex; + AIThreadID mLockingThread; +#ifdef NEEDS_MUTEX_RECURSION + S32 mLockDepth; +#endif + }; +#endif + +private: + void lock_main(LLFastTimer::DeclareTimer* timer); + + bool inc_lock_if_recursive() + { +#ifdef NEEDS_MUTEX_RECURSION + if (isSelfLocked()) + { + mLockDepth++; + return true; + } +#endif + return false; + } + + mutable AIThreadID mLockingThread; +#ifdef NEEDS_MUTEX_RECURSION + LLAtomicS32 mLockDepth; +#endif }; -#endif // APR_HAS_THREADS + +class LLGlobalMutex : public LLMutex +{ +public: + LLGlobalMutex() : LLMutex(MUTEX_POOL(LLAPRRootPool::get())), mbInitalized(true) + {} + bool isInitalized() const + { + return mbInitalized; + } +private: + bool mbInitalized; +}; + +#ifdef NEEDS_MUTEX_IMPL +class LL_COMMON_API LLConditionVariableImpl : private boost::noncopyable +{ + friend class LLCondition; + + typedef impl_cond_handle_type native_handle_type; + + LLConditionVariableImpl(MUTEX_POOL(native_pool_type& pool)); + virtual ~LLConditionVariableImpl(); + void notify_one(); + void notify_all(); + void wait(LLMutex& lock); + native_handle_type& native_handle() { return mConditionVariableImpl; } + + native_handle_type mConditionVariableImpl; + MUTEX_POOL(native_pool_type mPool); +}; +#endif + +typedef LLMutex LLMutexRootPool; // Actually a condition/mutex pair (since each condition needs to be associated with a mutex). 
-class LL_COMMON_API LLCondition : public LLMutex +class LLCondition : public LLConditionVariableImpl, public LLMutex { public: - LLCondition(LLAPRPool& parent = LLThread::tldata().mRootPool); - ~LLCondition(); - - void wait(); // blocks - void signal(); - void broadcast(); - -protected: - apr_thread_cond_t *mAPRCondp; + LLCondition(MUTEX_POOL(native_pool_type& pool = LLThread::tldata().mRootPool)) : + LLMutex(MUTEX_POOL(pool)), + LLConditionVariableImpl(MUTEX_POOL(pool)) + {} + ~LLCondition() + {} + void LL_COMMON_API wait() + { + if (AIThreadID::in_main_thread_inline()) + wait_main(); + else LLConditionVariableImpl::wait(*this); + } + void signal() { LLConditionVariableImpl::notify_one(); } + void broadcast() { LLConditionVariableImpl::notify_all(); } +private: + void wait_main(); //Cannot be inline. Uses internal fasttimer. }; -class LL_COMMON_API LLMutexLock +class LLMutexLock { public: - LLMutexLock(LLMutexBase* mutex) + LLMutexLock(LLMutex* mutex) { mMutex = mutex; - if(mMutex) mMutex->lock(); + lock(); + } + LLMutexLock(LLMutex& mutex) + { + mMutex = &mutex; + lock(); } ~LLMutexLock() { - if(mMutex) mMutex->unlock(); + if (mMutex) mMutex->unlock(); } private: - LLMutexBase* mMutex; + LL_COMMON_API void lock(); //Cannot be inline. Uses internal fasttimer. + LLMutex* mMutex; }; -class LL_COMMON_API AIRWLock +class AIRWLock { public: AIRWLock(LLAPRPool& parent = LLThread::tldata().mRootPool) : - mWriterWaitingMutex(parent), mNoHoldersCondition(parent), mHoldersCount(0), mWriterIsWaiting(false) { } + mWriterWaitingMutex(MUTEX_POOL(parent)), mNoHoldersCondition(MUTEX_POOL(parent)), mHoldersCount(0), mWriterIsWaiting(false) { } private: LLMutex mWriterWaitingMutex; //!< This mutex is locked while some writer is waiting for access. 
@@ -398,7 +567,7 @@ public: }; #if LL_DEBUG -class LL_COMMON_API AINRLock +class AINRLock { private: int read_locked; @@ -409,15 +578,15 @@ private: void accessed(void) const { - if (!mAccessed) - { - mAccessed = true; - mTheadID.reset(); - } - else - { - llassert_always(mTheadID.equals_current_thread()); - } + if (!mAccessed) + { + mAccessed = true; + mTheadID.reset(); + } + else + { + llassert_always(mTheadID.equals_current_thread()); + } } public: @@ -451,22 +620,29 @@ void LLThread::unlockData() // see llmemory.h for LLPointer<> definition -class LL_COMMON_API LLThreadSafeRefCount +class LLThreadSafeRefCount { private: LLThreadSafeRefCount(const LLThreadSafeRefCount&); // not implemented LLThreadSafeRefCount&operator=(const LLThreadSafeRefCount&); // not implemented protected: - virtual ~LLThreadSafeRefCount(); // use unref() - + virtual ~LLThreadSafeRefCount() // use unref() + { + if (mRef != 0) + { + llerrs << "deleting non-zero reference" << llendl; + } + } + public: - LLThreadSafeRefCount(); - + LLThreadSafeRefCount() : mRef(0) + {} + void ref() { - mRef++; - } + mRef++; + } void unref() { diff --git a/indra/llmath/llcalcparser.h b/indra/llmath/llcalcparser.h index e3eb2bded..696e4a105 100644 --- a/indra/llmath/llcalcparser.h +++ b/indra/llmath/llcalcparser.h @@ -55,44 +55,53 @@ T max_glue(T a, T b) return std::max(a, b); } +template struct lazy_pow_ { - template - struct result { typedef X type; }; - - template - X operator()(X x, Y y) const + template struct result; + template + struct result + { + typedef T type; + }; + + T operator()(T x, T y) const { return std::pow(x, y); } }; - + +template struct lazy_ufunc_ { - template - struct result { typedef A1 type; }; - - template - A1 operator()(F f, A1 a1) const + template struct result; + template + struct result { - return f(a1); + typedef T type; + }; + + T operator()(T(*fn)(T), T x) const + { + return fn(x); } }; - + +template struct lazy_bfunc_ { - template - struct result { typedef A1 type; }; - - 
template - A1 operator()(F f, A1 a1, A2 a2) const + template struct result; + template + struct result { - return f(a1, a2); + typedef T type; + }; + T operator()(T(*fn)(T, T), T x, T y) const + { + return fn(x, y); } }; - -//} // end namespace anonymous - + template struct grammar : boost::spirit::qi::grammar< @@ -178,9 +187,9 @@ struct grammar using boost::spirit::qi::no_case; using boost::spirit::qi::_val; - boost::phoenix::function lazy_pow; - boost::phoenix::function lazy_ufunc; - boost::phoenix::function lazy_bfunc; + boost::phoenix::function< lazy_pow_ > lazy_pow; + boost::phoenix::function< lazy_ufunc_ > lazy_ufunc; + boost::phoenix::function< lazy_bfunc_ > lazy_bfunc; expression = term [_val = _1] diff --git a/indra/llmessage/aicurl.cpp b/indra/llmessage/aicurl.cpp index 99683e0c5..68328c394 100644 --- a/indra/llmessage/aicurl.cpp +++ b/indra/llmessage/aicurl.cpp @@ -1300,7 +1300,7 @@ static int const HTTP_REDIRECTS_DEFAULT = 16; // Singu note: I've seen up to 10 // This limit is only here to avoid a redirect loop (infinite redirections). LLChannelDescriptors const BufferedCurlEasyRequest::sChannels; -LLMutex BufferedCurlEasyRequest::sResponderCallbackMutex; +LLGlobalMutex BufferedCurlEasyRequest::sResponderCallbackMutex; bool BufferedCurlEasyRequest::sShuttingDown = false; AIAverage BufferedCurlEasyRequest::sHTTPBandwidth(25); diff --git a/indra/llmessage/aicurlprivate.h b/indra/llmessage/aicurlprivate.h index be3d90e7d..9af27a567 100644 --- a/indra/llmessage/aicurlprivate.h +++ b/indra/llmessage/aicurlprivate.h @@ -430,7 +430,7 @@ class BufferedCurlEasyRequest : public CurlEasyRequest { public: static LLChannelDescriptors const sChannels; // Channel object for mInput (channel out()) and mOutput (channel in()). - static LLMutex sResponderCallbackMutex; // Locked while calling back any overridden ResponderBase::finished and/or accessing sShuttingDown.
+ static LLGlobalMutex sResponderCallbackMutex; // Locked while calling back any overridden ResponderBase::finished and/or accessing sShuttingDown. static bool sShuttingDown; // If true, no additional calls to ResponderBase::finished will be made anymore. static AIAverage sHTTPBandwidth; // HTTP bandwidth usage of all services combined. diff --git a/indra/llmessage/aicurlthread.cpp b/indra/llmessage/aicurlthread.cpp index 19f190b6b..b7057fe11 100644 --- a/indra/llmessage/aicurlthread.cpp +++ b/indra/llmessage/aicurlthread.cpp @@ -1098,10 +1098,10 @@ void AICurlThread::wakeup_thread(bool stop_thread) if (stop_thread) mRunning = false; // Thread-safe because all other threads were already stopped. - // Note, we do not want this function to be blocking the calling thread; therefore we only use tryLock()s. + // Note, we do not want this function to be blocking the calling thread; therefore we only use try_lock()s. // Stop two threads running the following code concurrently. - if (!mWakeUpMutex.tryLock()) + if (!mWakeUpMutex.try_lock()) { // If we failed to obtain mWakeUpMutex then another thread is (or was) in AICurlThread::wakeup_thread, // or curl was holding the lock for a micro second at the start of process_commands. @@ -1115,7 +1115,7 @@ void AICurlThread::wakeup_thread(bool stop_thread) } // Try if curl thread is still awake and if so, pass the new commands directly. - if (mWakeUpFlagMutex.tryLock()) + if (mWakeUpFlagMutex.try_lock()) { mWakeUpFlag = true; mWakeUpFlagMutex.unlock(); diff --git a/indra/llmessage/llpumpio.cpp b/indra/llmessage/llpumpio.cpp index 588265f97..a2b63e8e1 100644 --- a/indra/llmessage/llpumpio.cpp +++ b/indra/llmessage/llpumpio.cpp @@ -56,7 +56,7 @@ // constants for poll timeout. if we are threading, we want to have a // longer poll timeout. 
-#if LL_THREADS_APR +#if LL_THREADS_PUMPIO static const S32 DEFAULT_POLL_TIMEOUT = 1000; #else static const S32 DEFAULT_POLL_TIMEOUT = 0; @@ -168,24 +168,21 @@ LLPumpIO::LLPumpIO(void) : mPollset(NULL), mPollsetClientID(0), mNextLock(0), +#if LL_THREADS_PUMPIO + mPool(), + mChainsMutex(initPool()), + mCallbackMutex(initPool()), +#endif mCurrentPoolReallocCount(0), - mChainsMutex(NULL), - mCallbackMutex(NULL), mCurrentChain(mRunningChains.end()) { - mCurrentChain = mRunningChains.end(); - - initialize(); +#if !LL_THREADS_PUMPIO + initPool(); +#endif } LLPumpIO::~LLPumpIO() { -#if LL_THREADS_APR - if (mChainsMutex) apr_thread_mutex_destroy(mChainsMutex); - if (mCallbackMutex) apr_thread_mutex_destroy(mCallbackMutex); -#endif - mChainsMutex = NULL; - mCallbackMutex = NULL; if(mPollset) { // lldebugs << "cleaning up pollset" << llendl; @@ -218,8 +215,8 @@ bool LLPumpIO::addChain(chain_t const& chain, F32 timeout) info.mChainLinks.push_back(link); } -#if LL_THREADS_APR - LLScopedLock lock(mChainsMutex); +#if LL_THREADS_PUMPIO + LLMutexLock lock(mChainsMutex); #endif mPendingChains.push_back(info); return true; @@ -258,8 +255,8 @@ bool LLPumpIO::addChain( break; } } -#if LL_THREADS_APR - LLScopedLock lock(mChainsMutex); +#if LL_THREADS_PUMPIO + LLMutexLock lock(mChainsMutex); #endif mPendingChains.push_back(info); return true; @@ -403,8 +400,8 @@ void LLPumpIO::clearLock(S32 key) // therefore won't be treading into deleted memory. I think we can // also clear the lock on the chain safely since the pump only // reads that value. -#if LL_THREADS_APR - LLScopedLock lock(mChainsMutex); +#if LL_THREADS_PUMPIO + LLMutexLock lock(mChainsMutex); #endif mClearLocks.insert(key); } @@ -469,8 +466,8 @@ void LLPumpIO::pump(const S32& poll_timeout) PUMP_DEBUG; if(true) { -#if LL_THREADS_APR - LLScopedLock lock(mChainsMutex); +#if LL_THREADS_PUMPIO + LLMutexLock lock(mChainsMutex); #endif // bail if this pump is paused.
if(PAUSING == mState) @@ -738,8 +735,8 @@ void LLPumpIO::pump(const S32& poll_timeout) //bool LLPumpIO::respond(const chain_t& pipes) //{ -//#if LL_THREADS_APR -// LLScopedLock lock(mCallbackMutex); +//#if LL_THREADS_PUMPIO +// LLMutexLock lock(mCallbackMutex); //#endif // LLChainInfo info; // links_t links; @@ -752,8 +749,8 @@ bool LLPumpIO::respond(LLIOPipe* pipe) { if(NULL == pipe) return false; -#if LL_THREADS_APR - LLScopedLock lock(mCallbackMutex); +#if LL_THREADS_PUMPIO + LLMutexLock lock(mCallbackMutex); #endif LLChainInfo info; LLLinkInfo link; @@ -773,8 +770,8 @@ bool LLPumpIO::respond( if(!data) return false; if(links.empty()) return false; -#if LL_THREADS_APR - LLScopedLock lock(mCallbackMutex); +#if LL_THREADS_PUMPIO + LLMutexLock lock(mCallbackMutex); #endif // Add the callback response @@ -793,8 +790,8 @@ void LLPumpIO::callback() //llinfos << "LLPumpIO::callback()" << llendl; if(true) { -#if LL_THREADS_APR - LLScopedLock lock(mCallbackMutex); +#if LL_THREADS_PUMPIO + LLMutexLock lock(mCallbackMutex); #endif std::copy( mPendingCallbacks.begin(), @@ -821,8 +818,8 @@ void LLPumpIO::callback() void LLPumpIO::control(LLPumpIO::EControl op) { -#if LL_THREADS_APR - LLScopedLock lock(mChainsMutex); +#if LL_THREADS_PUMPIO + LLMutexLock lock(mChainsMutex); #endif switch(op) { @@ -838,14 +835,11 @@ void LLPumpIO::control(LLPumpIO::EControl op) } } -void LLPumpIO::initialize(void) +LLAPRPool& LLPumpIO::initPool() { - mPool.create(); -#if LL_THREADS_APR - // SJB: Windows defaults to NESTED and OSX defaults to UNNESTED, so use UNNESTED explicitly. 
- apr_thread_mutex_create(&mChainsMutex, APR_THREAD_MUTEX_UNNESTED, mPool()); - apr_thread_mutex_create(&mCallbackMutex, APR_THREAD_MUTEX_UNNESTED, mPool()); -#endif + if (!mPool) + mPool.create(); + return mPool; } void LLPumpIO::rebuildPollset() diff --git a/indra/llmessage/llpumpio.h b/indra/llmessage/llpumpio.h index 0d1387257..309792c80 100644 --- a/indra/llmessage/llpumpio.h +++ b/indra/llmessage/llpumpio.h @@ -42,7 +42,7 @@ #include "llrun.h" // Define this to enable use with the APR thread library. -//#define LL_THREADS_APR 1 +//#define LL_THREADS_PUMPIO 1 // some simple constants to help with timeouts extern const F32 DEFAULT_CHAIN_EXPIRY_SECS; @@ -382,16 +382,13 @@ protected: LLAPRPool mCurrentPool; S32 mCurrentPoolReallocCount; -#if LL_THREADS_APR - apr_thread_mutex_t* mChainsMutex; - apr_thread_mutex_t* mCallbackMutex; -#else - int* mChainsMutex; - int* mCallbackMutex; +#if LL_THREADS_PUMPIO + LLMutex mChainsMutex; + LLMutex mCallbackMutex; #endif protected: - void initialize(); + LLAPRPool& initPool(); current_chain_t removeRunningChain(current_chain_t& chain) ; /**