libstdc++
// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus <= 201103L
# include <bits/c++14_warning.h>
#else

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus > 201402L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      if (__ret == ENOMEM)
        __throw_bad_alloc();
      else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
        __ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
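
  // For reference, an illustrative summary (not part of this header) of how
  // the members above map onto the underlying POSIX calls:
  //
  //   lock()            -> pthread_rwlock_wrlock
  //   try_lock()        -> pthread_rwlock_trywrlock  (EBUSY => false)
  //   unlock()          -> pthread_rwlock_unlock
  //   lock_shared()     -> pthread_rwlock_rdlock     (retried on EAGAIN)
  //   try_lock_shared() -> pthread_rwlock_tryrdlock  (EBUSY/EAGAIN => false)
  //   unlock_shared()   -> pthread_rwlock_unlock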
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable _M_gate2;
    // The write-entered flag and reader count.
    unsigned _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
        {
          _M_state = _S_write_entered;
          return true;
        }
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
        return false;
      if (_M_state < _S_max_readers)
        {
          ++_M_state;
          return true;
        }
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
        {
          // Wake the queued writer if there are no more readers.
          if (_M_readers() == 0)
            _M_gate2.notify_one();
          // No need to notify gate1 because we give priority to the queued
          // writer, and that writer will eventually notify gate1 after it
          // clears the write-entered flag.
        }
      else
        {
          // Wake any thread that was blocked on reader overflow.
          if (__prev == _S_max_readers)
            _M_gate1.notify_one();
        }
    }
  };
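
  // Worked example of the _M_state encoding used by __shared_mutex_cv above,
  // assuming a 32-bit unsigned int (illustrative only, not part of this
  // header):
  //
  //   _S_write_entered == 0x80000000  // high bit: writer holds or is queued
  //   _S_max_readers   == 0x7fffffff  // low bits: count of reader locks
  //
  //   _M_state == 0x00000000          // free
  //   _M_state == 0x00000003          // three readers, no writer queued
  //   _M_state == 0x80000003          // writer queued, waiting on 3 readers
  //   _M_state == 0x80000000          // writer owns the lock exclusively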
#endif

#if __cplusplus > 201402L
  /// The standard shared mutex type.
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17
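
  // Illustrative usage sketch for std::shared_mutex (not part of this header;
  // requires C++17).  The cache, lookup and update names below are
  // hypothetical.
  //
  //   #include <shared_mutex>
  //   #include <mutex>
  //   #include <map>
  //   #include <string>
  //
  //   std::shared_mutex cache_mutex;          // guards cache
  //   std::map<std::string, int> cache;
  //
  //   int lookup(const std::string& key)      // many readers may run at once
  //   {
  //     std::shared_lock<std::shared_mutex> lk(cache_mutex);
  //     auto it = cache.find(key);
  //     return it == cache.end() ? -1 : it->second;
  //   }
  //
  //   void update(const std::string& key, int value)  // writers are exclusive
  //   {
  //     std::unique_lock<std::shared_mutex> lk(cache_mutex);
  //     cache[key] = value;
  //   }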

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif

  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
    typedef chrono::system_clock __clock_t;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_until(__clock_t::now() + __rel_time);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        // DR 887 - Sync unknown clock to known clock.
        const typename _Clock::time_point __c_entry = _Clock::now();
        const __clock_t::time_point __s_entry = __clock_t::now();
        const auto __delta = __abs_time - __c_entry;
        const auto __s_atime = __s_entry + __delta;
        return try_lock_until(__s_atime);
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret;
        // Unlike for lock(), we are not allowed to throw an exception so if
        // the maximum number of read locks has been exceeded, or we would
        // deadlock, we just try to acquire the lock again (and will time out
        // eventually).
        // In cases where we would exceed the maximum number of read locks
        // throughout the whole time until the timeout, we will fail to
        // acquire the lock even if it would be logically free; however, this
        // is allowed by the standard, and we made a "strong effort"
        // (see C++14 30.4.1.4p26).
        // For cases where the implementation detects a deadlock we
        // intentionally block and timeout so that an early return isn't
        // mistaken for a spurious failure, which might help users realise
        // there is a deadlock.
        do
          __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
        while (__ret == EAGAIN || __ret == EDEADLK);
        if (__ret == ETIMEDOUT)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        // DR 887 - Sync unknown clock to known clock.
        const typename _Clock::time_point __c_entry = _Clock::now();
        const __clock_t::time_point __s_entry = __clock_t::now();
        const auto __delta = __abs_time - __c_entry;
        const auto __s_atime = __s_entry + __delta;
        return try_lock_shared_until(__s_atime);
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return !_M_write_entered(); }))
          {
            return false;
          }
        _M_state |= _S_write_entered;
        if (!_M_gate2.wait_until(__lk, __abs_time,
                                 [=]{ return _M_readers() == 0; }))
          {
            _M_state ^= _S_write_entered;
            // Wake all threads blocked while the write-entered flag was set.
            _M_gate1.notify_all();
            return false;
          }
        return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return _M_state < _S_max_readers; }))
          {
            return false;
          }
        ++_M_state;
        return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
#endif // _GLIBCXX_HAS_GTHREADS

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(&__m), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(&__m), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __abs_time)
        : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rel_time)
        : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
        if (_M_owns)
          _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
        shared_lock(std::move(__sl)).swap(*this);
        return *this;
      }

      void
      lock()
      {
        _M_lockable();
        _M_pm->lock_shared();
        _M_owns = true;
      }

      bool
      try_lock()
      {
        _M_lockable();
        return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
        }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        _M_pm->unlock_shared();
        _M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
        std::swap(_M_pm, __u._M_pm);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
        _M_owns = false;
        return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
        if (_M_pm == nullptr)
          __throw_system_error(int(errc::operation_not_permitted));
        if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool        _M_owns;
    };

  /// Swap specialization for shared_lock
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }

#endif // _GLIBCXX_USE_C99_STDINT_TR1

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX
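
// Illustrative usage sketch for std::shared_timed_mutex and std::shared_lock
// (not part of this header; C++14).  The table data, the helper names and the
// 50-millisecond budget below are hypothetical.
//
//   #include <shared_mutex>
//   #include <chrono>
//   #include <vector>
//
//   std::shared_timed_mutex table_mutex;
//   std::vector<int> table;
//
//   // Reader: wait at most 50ms for shared ownership, else give up.
//   bool try_snapshot(std::vector<int>& out)
//   {
//     std::shared_lock<std::shared_timed_mutex> lk(table_mutex,
//                                                  std::chrono::milliseconds(50));
//     if (!lk.owns_lock())
//       return false;          // timed out; lock was not acquired
//     out = table;
//     return true;
//   }
//
//   // Writer: wait at most 50ms for exclusive ownership.
//   bool try_append(int value)
//   {
//     if (!table_mutex.try_lock_for(std::chrono::milliseconds(50)))
//       return false;
//     table.push_back(value);
//     table_mutex.unlock();
//     return true;
//   }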