author    Tobias Markmann <tm@ayena.de>  2014-10-19 20:22:58 (GMT)
committer Tobias Markmann <tm@ayena.de>  2014-10-20 13:49:33 (GMT)
commit    6b22dfcf59474dd016a0355a3102a1dd3692d92c (patch)
tree      2b1fd33be433a91e81fee84fdc2bf1b52575d934 /3rdParty/Boost/src/boost/atomic
parent    38b0cb785fea8eae5e48fae56440695fdfd10ee1 (diff)
download  swift-6b22dfcf59474dd016a0355a3102a1dd3692d92c.zip
          swift-6b22dfcf59474dd016a0355a3102a1dd3692d92c.tar.bz2
Update Boost in 3rdParty to version 1.56.0.

This updates Boost in our 3rdParty directory to version 1.56.0, updates our
update.sh script to stop on error, and changes error reporting in
SwiftTools/CrashReporter.cpp to SWIFT_LOG, since <iostream> is no longer
included transitively with the newer Boost.

Change-Id: I4b35c77de951333979a524097f35f5f83d325edc
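The CrashReporter.cpp change described above is outside this diffstat (which is
limited to 3rdParty/Boost/src/boost/atomic), so its hunk does not appear below. As a
hedged sketch only — hypothetical lines, not the actual hunk — the logging switch
amounts to:

    // Before: relied on <iostream> arriving transitively via the old Boost headers.
    std::cerr << "Failed to start crash reporter" << std::endl;

    // After: use the project's SWIFT_LOG macro instead of raw std::cerr.
    SWIFT_LOG(error) << "Failed to start crash reporter";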
Diffstat (limited to '3rdParty/Boost/src/boost/atomic')
-rw-r--r--  3rdParty/Boost/src/boost/atomic/atomic.hpp | 93
-rw-r--r--  3rdParty/Boost/src/boost/atomic/atomic_flag.hpp | 33
-rw-r--r--  3rdParty/Boost/src/boost/atomic/capabilities.hpp | 160
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/atomic_flag.hpp | 70
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/atomic_template.hpp | 774
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_gcc_alpha.hpp | 34
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_gcc_arm.hpp | 56
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_gcc_atomic.hpp | 134
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_gcc_ppc.hpp | 36
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_gcc_sparc.hpp | 34
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_gcc_sync.hpp | 62
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_gcc_x86.hpp | 52
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_linux_arm.hpp | 35
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_msvc_arm.hpp | 34
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_msvc_x86.hpp | 50
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/caps_windows.hpp | 33
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/casts.hpp | 64
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/config.hpp | 24
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/int_sizes.hpp | 140
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/interlocked.hpp | 451
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/link.hpp | 58
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/lockpool.hpp | 51
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/operations.hpp | 24
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/operations_fwd.hpp | 34
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/operations_lockfree.hpp | 30
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_cas_based.hpp | 91
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_emulated.hpp | 149
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_extending_cas_based.hpp | 65
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_gcc_alpha.hpp | 874
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_gcc_arm.hpp | 971
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_gcc_atomic.hpp | 238
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_gcc_ppc.hpp | 775
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_gcc_sparc.hpp | 245
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_gcc_sync.hpp | 237
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_gcc_x86.hpp | 510
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_gcc_x86_dcas.hpp | 308
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_linux_arm.hpp | 177
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_msvc_arm.hpp | 820
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_msvc_common.hpp | 38
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_msvc_x86.hpp | 877
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/ops_windows.hpp | 215
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/pause.hpp | 43
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/platform.hpp | 115
-rw-r--r--  3rdParty/Boost/src/boost/atomic/detail/storage_type.hpp | 168
-rw-r--r--  3rdParty/Boost/src/boost/atomic/fences.hpp | 67
45 files changed, 9549 insertions, 0 deletions
diff --git a/3rdParty/Boost/src/boost/atomic/atomic.hpp b/3rdParty/Boost/src/boost/atomic/atomic.hpp
new file mode 100644
index 0000000..8b0bdd1
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/atomic.hpp
@@ -0,0 +1,93 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/atomic.hpp
+ *
+ * This header contains the definition of the \c atomic template and \c atomic_flag.
+ */
+
+#ifndef BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_
+
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/fences.hpp>
+#include <boost/atomic/atomic_flag.hpp>
+#include <boost/atomic/detail/atomic_template.hpp>
+#include <boost/atomic/detail/operations.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+
+using atomics::atomic;
+
+using atomics::atomic_char;
+using atomics::atomic_uchar;
+using atomics::atomic_schar;
+using atomics::atomic_uint8_t;
+using atomics::atomic_int8_t;
+using atomics::atomic_ushort;
+using atomics::atomic_short;
+using atomics::atomic_uint16_t;
+using atomics::atomic_int16_t;
+using atomics::atomic_uint;
+using atomics::atomic_int;
+using atomics::atomic_uint32_t;
+using atomics::atomic_int32_t;
+using atomics::atomic_ulong;
+using atomics::atomic_long;
+using atomics::atomic_uint64_t;
+using atomics::atomic_int64_t;
+#ifdef BOOST_HAS_LONG_LONG
+using atomics::atomic_ullong;
+using atomics::atomic_llong;
+#endif
+using atomics::atomic_address;
+using atomics::atomic_bool;
+using atomics::atomic_wchar_t;
+#if !defined(BOOST_NO_CXX11_CHAR16_T)
+using atomics::atomic_char16_t;
+#endif
+#if !defined(BOOST_NO_CXX11_CHAR32_T)
+using atomics::atomic_char32_t;
+#endif
+
+using atomics::atomic_int_least8_t;
+using atomics::atomic_uint_least8_t;
+using atomics::atomic_int_least16_t;
+using atomics::atomic_uint_least16_t;
+using atomics::atomic_int_least32_t;
+using atomics::atomic_uint_least32_t;
+using atomics::atomic_int_least64_t;
+using atomics::atomic_uint_least64_t;
+using atomics::atomic_int_fast8_t;
+using atomics::atomic_uint_fast8_t;
+using atomics::atomic_int_fast16_t;
+using atomics::atomic_uint_fast16_t;
+using atomics::atomic_int_fast32_t;
+using atomics::atomic_uint_fast32_t;
+using atomics::atomic_int_fast64_t;
+using atomics::atomic_uint_fast64_t;
+using atomics::atomic_intmax_t;
+using atomics::atomic_uintmax_t;
+
+using atomics::atomic_size_t;
+using atomics::atomic_ptrdiff_t;
+
+#if defined(BOOST_HAS_INTPTR_T)
+using atomics::atomic_intptr_t;
+using atomics::atomic_uintptr_t;
+#endif
+
+} // namespace boost
+
+#endif // BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_
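For orientation (not part of the diff): atomic.hpp above pulls the atomic types into
namespace boost, so they are used much like std::atomic. A minimal sketch, assuming
only what this header declares:

    #include <boost/atomic/atomic.hpp>

    boost::atomic_int counter(0);

    void increment()
    {
        // Atomic read-modify-write; relaxed ordering is enough for a plain counter.
        counter.fetch_add(1, boost::memory_order_relaxed);
    }

    int snapshot()
    {
        return counter.load(boost::memory_order_acquire);
    }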
diff --git a/3rdParty/Boost/src/boost/atomic/atomic_flag.hpp b/3rdParty/Boost/src/boost/atomic/atomic_flag.hpp
new file mode 100644
index 0000000..ac296bc
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/atomic_flag.hpp
@@ -0,0 +1,33 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/atomic_flag.hpp
+ *
+ * This header contains the definition of \c atomic_flag.
+ */
+
+#ifndef BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_
+#define BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_
+
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/operations.hpp>
+#include <boost/atomic/detail/atomic_flag.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+
+using atomics::atomic_flag;
+
+} // namespace boost
+
+#endif // BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/capabilities.hpp b/3rdParty/Boost/src/boost/atomic/capabilities.hpp
new file mode 100644
index 0000000..658dd22
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/capabilities.hpp
@@ -0,0 +1,160 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/capabilities.hpp
+ *
+ * This header defines feature capabilities macros.
+ */
+
+#ifndef BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
+#define BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+
+#if !defined(BOOST_ATOMIC_EMULATED)
+#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/caps_)
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#ifndef BOOST_ATOMIC_INT8_LOCK_FREE
+#define BOOST_ATOMIC_INT8_LOCK_FREE 0
+#endif
+
+#ifndef BOOST_ATOMIC_INT16_LOCK_FREE
+#define BOOST_ATOMIC_INT16_LOCK_FREE 0
+#endif
+
+#ifndef BOOST_ATOMIC_INT32_LOCK_FREE
+#define BOOST_ATOMIC_INT32_LOCK_FREE 0
+#endif
+
+#ifndef BOOST_ATOMIC_INT64_LOCK_FREE
+#define BOOST_ATOMIC_INT64_LOCK_FREE 0
+#endif
+
+#ifndef BOOST_ATOMIC_INT128_LOCK_FREE
+#define BOOST_ATOMIC_INT128_LOCK_FREE 0
+#endif
+
+
+#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
+#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
+#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
+#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_INT_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_LONG_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
+#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8
+#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4
+#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#else
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
+#endif
+#endif
+
+#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
+
+#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
+#define BOOST_ATOMIC_BOOL_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
+#define BOOST_ATOMIC_FLAG_LOCK_FREE BOOST_ATOMIC_BOOL_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_THREAD_FENCE
+#define BOOST_ATOMIC_THREAD_FENCE 0
+#endif
+
+#ifndef BOOST_ATOMIC_SIGNAL_FENCE
+#define BOOST_ATOMIC_SIGNAL_FENCE 0
+#endif
+
+#endif // BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
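The macros above follow the same 0/1/2 convention as C++11's ATOMIC_..._LOCK_FREE
macros (0 = never lock-free, 1 = sometimes, 2 = always), which makes them usable for
compile-time dispatch. An illustrative sketch — the typedef choice is hypothetical:

    #include <boost/atomic.hpp>

    #if BOOST_ATOMIC_INT64_LOCK_FREE == 2
    // 64-bit atomics are always lock-free on this target.
    typedef boost::atomic_uint64_t event_counter;
    #else
    // 64-bit operations may go through the lock pool; stay at 32 bits instead.
    typedef boost::atomic_uint32_t event_counter;
    #endif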
diff --git a/3rdParty/Boost/src/boost/atomic/detail/atomic_flag.hpp b/3rdParty/Boost/src/boost/atomic/detail/atomic_flag.hpp
new file mode 100644
index 0000000..6a6667d
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/atomic_flag.hpp
@@ -0,0 +1,70 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/atomic_flag.hpp
+ *
+ * This header contains the interface definition of \c atomic_flag.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
+
+#include <boost/assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/operations_lockfree.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+/*
+ * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
+ * see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
+ */
+
+namespace boost {
+namespace atomics {
+
+#if defined(BOOST_NO_CXX11_CONSTEXPR) || defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX)
+#define BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
+#else
+#define BOOST_ATOMIC_FLAG_INIT {}
+#endif
+
+struct atomic_flag
+{
+ typedef atomics::detail::operations< 1u, false > operations;
+ typedef operations::storage_type storage_type;
+
+ storage_type m_storage;
+
+ BOOST_FORCEINLINE BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0)
+ {
+ }
+
+ BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return operations::test_and_set(m_storage, order);
+ }
+
+ BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+ operations::clear(m_storage, order);
+ }
+
+ BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&))
+ BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&))
+};
+
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
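The test_and_set/clear pair above is the classic building block for a spinlock; note
that the asserts forbid acquire and acq_rel ordering on clear(). A minimal sketch
using this interface, assuming a C++11 compiler so BOOST_ATOMIC_FLAG_INIT is defined:

    #include <boost/atomic.hpp>

    boost::atomic_flag guard = BOOST_ATOMIC_FLAG_INIT;

    void lock()
    {
        // Spin until the previous value was false, i.e. the flag was clear.
        while (guard.test_and_set(boost::memory_order_acquire))
        {
        }
    }

    void unlock()
    {
        guard.clear(boost::memory_order_release); // release, never acquire/acq_rel
    }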
diff --git a/3rdParty/Boost/src/boost/atomic/detail/atomic_template.hpp b/3rdParty/Boost/src/boost/atomic/detail/atomic_template.hpp
new file mode 100644
index 0000000..4fd6d79
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/atomic_template.hpp
@@ -0,0 +1,774 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/atomic_template.hpp
+ *
+ * This header contains the interface definition of the \c atomic template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/assert.hpp>
+#include <boost/type_traits/is_signed.hpp>
+#include <boost/type_traits/is_integral.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/casts.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// 'boost::atomics::atomic<T>' : multiple assignment operators specified
+#pragma warning(disable: 4522)
+#endif
+
+/*
+ * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
+ * see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
+ */
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order deduce_failure_order(memory_order order) BOOST_NOEXCEPT
+{
+ return order == memory_order_acq_rel ? memory_order_acquire : (order == memory_order_release ? memory_order_relaxed : order);
+}
+
+BOOST_FORCEINLINE BOOST_CONSTEXPR bool cas_failure_order_must_not_be_stronger_than_success_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+{
+ // 15 == (memory_order_seq_cst | memory_order_consume), see memory_order.hpp
+ // Given the enum values we can test the strength of memory order requirements with this single condition.
+ return (failure_order & 15u) <= (success_order & 15u);
+}
+
+template< typename T, bool IsInt = boost::is_integral< T >::value >
+struct classify
+{
+ typedef void type;
+};
+
+template< typename T >
+struct classify< T, true > { typedef int type; };
+
+template< typename T >
+struct classify< T*, false > { typedef void* type; };
+
+template< typename T, typename Kind >
+class base_atomic;
+
+//! Implementation for integers
+template< typename T >
+class base_atomic< T, int >
+{
+private:
+ typedef T value_type;
+ typedef T difference_type;
+ typedef atomics::detail::operations< storage_size_of< value_type >::value, boost::is_signed< T >::value > operations;
+
+protected:
+ typedef value_type value_arg_type;
+
+public:
+ typedef typename operations::storage_type storage_type;
+
+protected:
+ storage_type m_storage;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
+ BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}
+
+ BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ operations::store(m_storage, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return static_cast< value_type >(operations::load(m_storage, order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = static_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = static_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return static_cast< value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
+ {
+ return operations::is_lock_free(m_storage);
+ }
+
+ BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
+ {
+ return fetch_add(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT
+ {
+ return fetch_add(1) + 1;
+ }
+
+ BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT
+ {
+ return fetch_sub(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT
+ {
+ return fetch_sub(1) - 1;
+ }
+
+ BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
+ {
+ return fetch_add(v) + v;
+ }
+
+ BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
+ {
+ return fetch_sub(v) - v;
+ }
+
+ BOOST_FORCEINLINE value_type operator&=(value_type v) volatile BOOST_NOEXCEPT
+ {
+ return fetch_and(v) & v;
+ }
+
+ BOOST_FORCEINLINE value_type operator|=(value_type v) volatile BOOST_NOEXCEPT
+ {
+ return fetch_or(v) | v;
+ }
+
+ BOOST_FORCEINLINE value_type operator^=(value_type v) volatile BOOST_NOEXCEPT
+ {
+ return fetch_xor(v) ^ v;
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
+ BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+};
+
+//! Implementation for bool
+template< >
+class base_atomic< bool, int >
+{
+private:
+ typedef bool value_type;
+ typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
+
+protected:
+ typedef value_type value_arg_type;
+
+public:
+ typedef operations::storage_type storage_type;
+
+protected:
+ storage_type m_storage;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
+ BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}
+
+ BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ operations::store(m_storage, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return !!operations::load(m_storage, order);
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return !!operations::exchange(m_storage, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = !!old_value;
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = !!old_value;
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
+ {
+ return operations::is_lock_free(m_storage);
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
+ BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+};
+
+
+//! Implementation for user-defined types, such as structs and enums
+template< typename T >
+class base_atomic< T, void >
+{
+private:
+ typedef T value_type;
+ typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
+
+protected:
+ typedef value_type const& value_arg_type;
+
+public:
+ typedef typename operations::storage_type storage_type;
+
+protected:
+ storage_type m_storage;
+
+public:
+ BOOST_FORCEINLINE explicit base_atomic(value_type const& v = value_type()) BOOST_NOEXCEPT : m_storage(atomics::detail::memcpy_cast< storage_type >(v))
+ {
+ }
+
+ BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ operations::store(m_storage, atomics::detail::memcpy_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::memcpy_cast< value_type >(operations::load(m_storage, order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::memcpy_cast< value_type >(operations::exchange(m_storage, atomics::detail::memcpy_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = atomics::detail::memcpy_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::memcpy_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::memcpy_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = atomics::detail::memcpy_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::memcpy_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::memcpy_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
+ {
+ return operations::is_lock_free(m_storage);
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
+ BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+};
+
+
+//! Implementation for pointers
+template< typename T >
+class base_atomic< T*, void* >
+{
+private:
+ typedef T* value_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
+
+protected:
+ typedef value_type value_arg_type;
+
+public:
+ typedef typename operations::storage_type storage_type;
+
+protected:
+ storage_type m_storage;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
+ BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v))
+ {
+ }
+
+ BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::union_cast< value_type >(operations::load(m_storage, order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v * sizeof(T)), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v * sizeof(T)), order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::union_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::union_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
+ {
+ return operations::is_lock_free(m_storage);
+ }
+
+ BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
+ {
+ return fetch_add(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT
+ {
+ return fetch_add(1) + 1;
+ }
+
+ BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT
+ {
+ return fetch_sub(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT
+ {
+ return fetch_sub(1) - 1;
+ }
+
+ BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
+ {
+ return fetch_add(v) + v;
+ }
+
+ BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
+ {
+ return fetch_sub(v) - v;
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
+ BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+};
+
+
+//! Implementation for void pointers
+template< >
+class base_atomic< void*, void* >
+{
+private:
+ typedef void* value_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
+
+protected:
+ typedef value_type value_arg_type;
+
+public:
+ typedef operations::storage_type storage_type;
+
+protected:
+ storage_type m_storage;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
+ BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v))
+ {
+ }
+
+ BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::union_cast< value_type >(operations::load(m_storage, order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::union_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::union_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
+ {
+ return operations::is_lock_free(m_storage);
+ }
+
+ BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
+ {
+ return fetch_add(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT
+ {
+ return (char*)fetch_add(1) + 1;
+ }
+
+ BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT
+ {
+ return fetch_sub(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT
+ {
+ return (char*)fetch_sub(1) - 1;
+ }
+
+ BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
+ {
+ return (char*)fetch_add(v) + v;
+ }
+
+ BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
+ {
+ return (char*)fetch_sub(v) - v;
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
+ BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+};
+
+} // namespace detail
+
+template< typename T >
+class atomic :
+ public atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type >
+{
+private:
+ typedef T value_type;
+ typedef atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type > base_type;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+public:
+ typedef typename base_type::storage_type storage_type;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(atomic(), BOOST_NOEXCEPT {})
+
+ // NOTE: The constructor is made explicit because gcc 4.7 complains that
+ // operator=(value_arg_type) is considered ambiguous with operator=(atomic const&)
+ // in assignment expressions, even though conversion to atomic<> is less preferred
+ // than conversion to value_arg_type.
+ BOOST_FORCEINLINE explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {}
+
+ BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT
+ {
+ this->store(v);
+ return v;
+ }
+
+ BOOST_FORCEINLINE operator value_type() volatile const BOOST_NOEXCEPT
+ {
+ return this->load();
+ }
+
+ BOOST_FORCEINLINE storage_type& storage() BOOST_NOEXCEPT { return this->m_storage; }
+ BOOST_FORCEINLINE storage_type volatile& storage() volatile BOOST_NOEXCEPT { return this->m_storage; }
+ BOOST_FORCEINLINE storage_type const& storage() const BOOST_NOEXCEPT { return this->m_storage; }
+ BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return this->m_storage; }
+
+ BOOST_DELETED_FUNCTION(atomic(atomic const&))
+ BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&))
+ BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&) volatile)
+};
+
+typedef atomic< char > atomic_char;
+typedef atomic< unsigned char > atomic_uchar;
+typedef atomic< signed char > atomic_schar;
+typedef atomic< uint8_t > atomic_uint8_t;
+typedef atomic< int8_t > atomic_int8_t;
+typedef atomic< unsigned short > atomic_ushort;
+typedef atomic< short > atomic_short;
+typedef atomic< uint16_t > atomic_uint16_t;
+typedef atomic< int16_t > atomic_int16_t;
+typedef atomic< unsigned int > atomic_uint;
+typedef atomic< int > atomic_int;
+typedef atomic< uint32_t > atomic_uint32_t;
+typedef atomic< int32_t > atomic_int32_t;
+typedef atomic< unsigned long > atomic_ulong;
+typedef atomic< long > atomic_long;
+typedef atomic< uint64_t > atomic_uint64_t;
+typedef atomic< int64_t > atomic_int64_t;
+#ifdef BOOST_HAS_LONG_LONG
+typedef atomic< boost::ulong_long_type > atomic_ullong;
+typedef atomic< boost::long_long_type > atomic_llong;
+#endif
+typedef atomic< void* > atomic_address;
+typedef atomic< bool > atomic_bool;
+typedef atomic< wchar_t > atomic_wchar_t;
+#if !defined(BOOST_NO_CXX11_CHAR16_T)
+typedef atomic< char16_t > atomic_char16_t;
+#endif
+#if !defined(BOOST_NO_CXX11_CHAR32_T)
+typedef atomic< char32_t > atomic_char32_t;
+#endif
+
+typedef atomic< int_least8_t > atomic_int_least8_t;
+typedef atomic< uint_least8_t > atomic_uint_least8_t;
+typedef atomic< int_least16_t > atomic_int_least16_t;
+typedef atomic< uint_least16_t > atomic_uint_least16_t;
+typedef atomic< int_least32_t > atomic_int_least32_t;
+typedef atomic< uint_least32_t > atomic_uint_least32_t;
+typedef atomic< int_least64_t > atomic_int_least64_t;
+typedef atomic< uint_least64_t > atomic_uint_least64_t;
+typedef atomic< int_fast8_t > atomic_int_fast8_t;
+typedef atomic< uint_fast8_t > atomic_uint_fast8_t;
+typedef atomic< int_fast16_t > atomic_int_fast16_t;
+typedef atomic< uint_fast16_t > atomic_uint_fast16_t;
+typedef atomic< int_fast32_t > atomic_int_fast32_t;
+typedef atomic< uint_fast32_t > atomic_uint_fast32_t;
+typedef atomic< int_fast64_t > atomic_int_fast64_t;
+typedef atomic< uint_fast64_t > atomic_uint_fast64_t;
+typedef atomic< intmax_t > atomic_intmax_t;
+typedef atomic< uintmax_t > atomic_uintmax_t;
+
+typedef atomic< std::size_t > atomic_size_t;
+typedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;
+
+#if defined(BOOST_HAS_INTPTR_T)
+typedef atomic< intptr_t > atomic_intptr_t;
+typedef atomic< uintptr_t > atomic_uintptr_t;
+#endif
+
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
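Two details of the template above are easy to miss: the single-order compare_exchange
overloads derive the failure order via deduce_failure_order (acq_rel becomes acquire,
release becomes relaxed), and on failure 'expected' is overwritten with the value that
was actually observed. A usage sketch of the weak form in its intended retry loop:

    boost::atomic<int> value(0);

    void add_one()
    {
        int expected = value.load(boost::memory_order_relaxed);
        // Failure order is deduced as acquire from memory_order_acq_rel.
        while (!value.compare_exchange_weak(expected, expected + 1, boost::memory_order_acq_rel))
        {
            // 'expected' now holds the current value; just retry.
        }
    }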
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_alpha.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_alpha.hpp
new file mode 100644
index 0000000..861432f
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_alpha.hpp
@@ -0,0 +1,34 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_alpha.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_arm.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_arm.hpp
new file mode 100644
index 0000000..b827c64
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_arm.hpp
@@ -0,0 +1,56 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2009 Phil Endecott
+ * Copyright (c) 2013 Tim Blechmann
+ * ARM Code by Phil Endecott, based on other architectures.
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_arm.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__))
+// ARMv7 and later have dmb instruction
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1
+#endif
+
+#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
+// ARMv6k and ARMv7 have 8 and 16 ldrex/strex variants
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
+#if !(((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7M__))
+// ARMv6k and ARMv7 except ARMv7-M have 64-bit ldrex/strex variants.
+// Unfortunately, GCC (at least 4.7.3 on Ubuntu) does not allocate register pairs properly when targeting ARMv6k Thumb,
+// which is required for ldrexd/strexd instructions, so we disable 64-bit support. When targeting ARMv6k ARM
+// or ARMv7 (both ARM and Thumb 2) it works as expected.
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
+#endif
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_atomic.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_atomic.hpp
new file mode 100644
index 0000000..8299ad0
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_atomic.hpp
@@ -0,0 +1,134 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_atomic.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#if __GCC_ATOMIC_BOOL_LOCK_FREE == 2
+#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_FLAG_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_CHAR_LOCK_FREE == 2
+#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_CHAR_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_CHAR16_T_LOCK_FREE == 2
+#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_CHAR32_T_LOCK_FREE == 2
+#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_SHORT_LOCK_FREE == 2
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_INT_LOCK_FREE == 2
+#define BOOST_ATOMIC_INT_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_INT_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_LONG_LOCK_FREE == 2
+#define BOOST_ATOMIC_LONG_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_LONG_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_LLONG_LOCK_FREE == 2
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
+#endif
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_INT128_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_POINTER_LOCK_FREE == 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
+#endif
+#if __GCC_ATOMIC_BOOL_LOCK_FREE == 2
+#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_BOOL_LOCK_FREE 0
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_CHAR_LOCK_FREE
+
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT16_LOCK_FREE 0
+#endif
+
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT32_LOCK_FREE 0
+#endif
+
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT64_LOCK_FREE 0
+#endif
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_ppc.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_ppc.hpp
new file mode 100644
index 0000000..6dbdde8
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_ppc.hpp
@@ -0,0 +1,36 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_ppc.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#if defined(__powerpc64__)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_sparc.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_sparc.hpp
new file mode 100644
index 0000000..5806684
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_sparc.hpp
@@ -0,0 +1,34 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2010 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_sparc.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_sync.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_sync.hpp
new file mode 100644
index 0000000..7fac07a
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_sync.hpp
@@ -0,0 +1,62 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_sync.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#endif
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#endif
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#endif
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#endif
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
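
[Annotation, not part of the patch] The cascade above encodes one idea: a compare-and-swap of width N also serves every narrower width, because a small object can be updated by CAS'ing the aligned wider word that contains it (the approach behind ops_extending_cas_based.hpp). A simplified, little-endian-only sketch of that trick, built on the same __sync builtin family as this backend:

    #include <stdint.h>

    // Byte-wide fetch_add emulated through a 32-bit CAS. Illustrative only.
    uint8_t fetch_add_u8(volatile uint8_t* p, uint8_t v)
    {
        // Address of the naturally aligned 32-bit word containing *p.
        volatile uint32_t* w = (volatile uint32_t*)((uintptr_t)p & ~(uintptr_t)3);
        unsigned shift = (unsigned)((uintptr_t)p & 3u) * 8u;  // little-endian byte offset
        uint32_t mask = (uint32_t)0xFF << shift;
        uint32_t old_w = *w;
        for (;;)
        {
            uint8_t old_b = (uint8_t)((old_w & mask) >> shift);
            uint32_t new_w = (old_w & ~mask) | ((uint32_t)(uint8_t)(old_b + v) << shift);
            uint32_t prev = __sync_val_compare_and_swap(w, old_w, new_w);
            if (prev == old_w)
                return old_b;  // our byte was installed atomically
            old_w = prev;      // another thread changed the word; retry
        }
    }
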
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_x86.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_x86.hpp
new file mode 100644
index 0000000..0696bf1
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_gcc_x86.hpp
@@ -0,0 +1,52 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2013 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_x86.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__i386__) &&\
+ (\
+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
+ defined(__i586__) || defined(__i686__) || defined(__pentium4__) || defined(__nocona__) || defined(__core2__) || defined(__corei7__) ||\
+ defined(__k6__) || defined(__athlon__) || defined(__k8__) || defined(__amdfam10__) || defined(__bdver1__) || defined(__bdver2__) || defined(__bdver3__) || defined(__btver1__) || defined(__btver2__)\
+ )
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
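
[Annotation, not part of the patch] On 32-bit x86 the 64-bit tier hinges on cmpxchg8b, which the header detects either from GCC's __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 predefine or from a CPU model known to be i586 or later. Where it is present, the builtin below suffices for a lock-free 64-bit CAS (the actual backend uses inline assembly in ops_gcc_x86_dcas.hpp):

    #include <stdint.h>

    // Compiles to "lock cmpxchg8b" on i586+ 32-bit x86; on a plain i486
    // target the compiler would emit a library call instead, which is why
    // the capability check above matters.
    bool cas64(volatile uint64_t* p, uint64_t expected, uint64_t desired)
    {
        return __sync_bool_compare_and_swap(p, expected, desired);
    }
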
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_linux_arm.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_linux_arm.hpp
new file mode 100644
index 0000000..abe6fb8
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_linux_arm.hpp
@@ -0,0 +1,35 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009, 2011 Helge Bahmann
+ * Copyright (c) 2009 Phil Endecott
+ * Copyright (c) 2013 Tim Blechmann
+ * Linux-specific code by Phil Endecott
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_linux_arm.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_msvc_arm.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_msvc_arm.hpp
new file mode 100644
index 0000000..6b3c61f
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_msvc_arm.hpp
@@ -0,0 +1,34 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2012 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_msvc_arm.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_msvc_x86.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_msvc_x86.hpp
new file mode 100644
index 0000000..5661a5b
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_msvc_x86.hpp
@@ -0,0 +1,50 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2012 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_msvc_x86.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(_M_IX86) && _M_IX86 >= 500
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if _MSC_VER >= 1500 && defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+
+#if defined(_M_AMD64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#endif
+
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/caps_windows.hpp b/3rdParty/Boost/src/boost/atomic/detail/caps_windows.hpp
new file mode 100644
index 0000000..1cc0ded
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/caps_windows.hpp
@@ -0,0 +1,33 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2012 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_windows.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/casts.hpp b/3rdParty/Boost/src/boost/atomic/detail/casts.hpp
new file mode 100644
index 0000000..db28bc2
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/casts.hpp
@@ -0,0 +1,64 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2013 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/casts.hpp
+ *
+ * This header defines \c union_cast and \c memcpy_cast used to convert between storage and value types
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CASTS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CASTS_HPP_INCLUDED_
+
+#include <cstring>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename To, typename From >
+BOOST_FORCEINLINE To union_cast(From const& from) BOOST_NOEXCEPT
+{
+ union
+ {
+ To as_to;
+ From as_from;
+ }
+ caster = {};
+ caster.as_from = from;
+ return caster.as_to;
+}
+
+template< typename To, typename From >
+BOOST_FORCEINLINE To memcpy_cast(From const& from) BOOST_NOEXCEPT
+{
+ struct
+ {
+ To to;
+ }
+ value = {};
+ std::memcpy
+ (
+ &reinterpret_cast< char& >(value.to),
+ &reinterpret_cast< const char& >(from),
+ (sizeof(From) < sizeof(To) ? sizeof(From) : sizeof(To))
+ );
+ return value.to;
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_CASTS_HPP_INCLUDED_
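
[Annotation, not part of the patch] Both helpers reinterpret a value's bytes without the strict-aliasing hazards of casting pointers directly, and both zero-initialize the destination first, so a widening conversion has defined padding. A usage sketch with the functions declared above:

    #include <boost/atomic/detail/casts.hpp>
    #include <boost/cstdint.hpp>

    // How a float value would be moved into the 32-bit integer storage that
    // the lock-free backends operate on, and back out again.
    boost::uint32_t to_storage(float f)
    {
        return boost::atomics::detail::memcpy_cast< boost::uint32_t >(f);
    }

    float from_storage(boost::uint32_t s)
    {
        return boost::atomics::detail::memcpy_cast< float >(s);
    }
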
diff --git a/3rdParty/Boost/src/boost/atomic/detail/config.hpp b/3rdParty/Boost/src/boost/atomic/detail/config.hpp
new file mode 100644
index 0000000..d03ec6a
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/config.hpp
@@ -0,0 +1,24 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2012 Hartmut Kaiser
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/config.hpp
+ *
+ * This header defines configuration macros for Boost.Atomic
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
+
+#include <boost/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/int_sizes.hpp b/3rdParty/Boost/src/boost/atomic/detail/int_sizes.hpp
new file mode 100644
index 0000000..d06ed42
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/int_sizes.hpp
@@ -0,0 +1,140 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/int_sizes.hpp
+ *
+ * This header defines macros for testing builtin integer type sizes
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// GCC and compatible compilers define internal macros with builtin type traits
+#if defined(__SIZEOF_SHORT__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT __SIZEOF_SHORT__
+#endif
+#if defined(__SIZEOF_INT__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT __SIZEOF_INT__
+#endif
+#if defined(__SIZEOF_LONG__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG __SIZEOF_LONG__
+#endif
+#if defined(__SIZEOF_LONG_LONG__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG __SIZEOF_LONG_LONG__
+#endif
+#if defined(__SIZEOF_WCHAR_T__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T __SIZEOF_WCHAR_T__
+#endif
+#if defined(__SIZEOF_POINTER__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER __SIZEOF_POINTER__
+#elif defined(_MSC_VER)
+#if defined(_M_AMD64) || defined(_M_IA64)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 8
+#else
+#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 4
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
+ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)
+
+// Try to deduce sizes from limits
+#include <limits.h>
+#include <boost/cstdint.hpp>
+
+#if (USHRT_MAX + 0) == 0xff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 1
+#elif (USHRT_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 2
+#elif (USHRT_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 4
+#elif (USHRT_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 8
+#endif
+
+#if (UINT_MAX + 0) == 0xff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 1
+#elif (UINT_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 2
+#elif (UINT_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 4
+#elif (UINT_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 8
+#endif
+
+#if (ULONG_MAX + 0) == 0xff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 1
+#elif (ULONG_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 2
+#elif (ULONG_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 4
+#elif (ULONG_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 8
+#endif
+
+#if defined(__hpux) // HP-UX's value of ULONG_LONG_MAX is unusable in preprocessor expressions
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
+#else
+
+// The list of non-standard macros (all except ULLONG_MAX) is taken from cstdint.hpp
+#if defined(ULLONG_MAX)
+#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULLONG_MAX
+#elif defined(ULONG_LONG_MAX)
+#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONG_LONG_MAX
+#elif defined(ULONGLONG_MAX)
+#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONGLONG_MAX
+#elif defined(_LLONG_MAX) // strangely enough, this one seems to be holding the limit for the unsigned integer
+#define BOOST_ATOMIC_DETAIL_ULLONG_MAX _LLONG_MAX
+#endif
+
+#if (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 1
+#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 2
+#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 4
+#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
+#endif
+
+#endif // defined(__hpux)
+
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)
+
+#include <wchar.h>
+#include <boost/cstdint.hpp>
+
+#if defined(_MSC_VER) && _MSC_VER <= 1310
+// MSVC 7.1 defines WCHAR_MAX to a value not suitable for constant expressions
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2
+#elif (WCHAR_MAX + 0) == 0xff || (WCHAR_MAX + 0) == 0x7f
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 1
+#elif (WCHAR_MAX + 0) == 0xffff || (WCHAR_MAX + 0) == 0x7fff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2
+#elif (WCHAR_MAX + 0) == 0xffffffff || (WCHAR_MAX + 0) == 0x7fffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 4
+#elif (WCHAR_MAX + 0) == UINT64_C(0xffffffffffffffff) || (WCHAR_MAX + 0) == INT64_C(0x7fffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 8
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
+ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) ||\
+ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)
+#error Boost.Atomic: Failed to determine builtin integer sizes, the target platform is not supported. Please report this to the developers.
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
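
[Annotation, not part of the patch] The deduced macros feed the storage-type selection, so they must agree exactly with the real type sizes. A compile-time sanity check makes the contract visible (illustrative only):

    #include <boost/atomic/detail/int_sizes.hpp>
    #include <boost/static_assert.hpp>

    // If any of these ever failed, atomic<T> would pick a storage type of
    // the wrong width for the corresponding builtin type.
    BOOST_STATIC_ASSERT(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == sizeof(short));
    BOOST_STATIC_ASSERT(BOOST_ATOMIC_DETAIL_SIZEOF_INT == sizeof(int));
    BOOST_STATIC_ASSERT(BOOST_ATOMIC_DETAIL_SIZEOF_LONG == sizeof(long));
    BOOST_STATIC_ASSERT(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == sizeof(wchar_t));
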
diff --git a/3rdParty/Boost/src/boost/atomic/detail/interlocked.hpp b/3rdParty/Boost/src/boost/atomic/detail/interlocked.hpp
new file mode 100644
index 0000000..fa11bef
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/interlocked.hpp
@@ -0,0 +1,451 @@
+#ifndef BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
+#define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+// Copyright (c) 2012 - 2014 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(_WIN32_WCE)
+
+#if _WIN32_WCE >= 0x600
+
+extern "C" long __cdecl _InterlockedCompareExchange( long volatile *, long, long );
+extern "C" long __cdecl _InterlockedExchangeAdd( long volatile *, long );
+extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+
+#else // _WIN32_WCE >= 0x600
+
+extern "C" long __cdecl InterlockedCompareExchange( long*, long, long );
+extern "C" long __cdecl InterlockedExchangeAdd( long*, long );
+extern "C" long __cdecl InterlockedExchange( long*, long );
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
+
+#endif // _WIN32_WCE >= 0x600
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))
+
+#elif defined(_MSC_VER) && _MSC_VER >= 1310
+
+#if _MSC_VER < 1400
+
+extern "C" long __cdecl _InterlockedCompareExchange( long volatile *, long, long );
+extern "C" long __cdecl _InterlockedExchangeAdd( long volatile *, long );
+extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
+
+#pragma intrinsic(_InterlockedCompareExchange)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))
+
+#else // _MSC_VER < 1400
+
+#include <intrin.h>
+
+#pragma intrinsic(_InterlockedCompareExchange)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#pragma intrinsic(_InterlockedXor)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))
+
+#if (defined(_M_IX86) && _M_IX86 >= 500) || defined(_M_AMD64) || defined(_M_IA64)
+#pragma intrinsic(_InterlockedCompareExchange64)
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#endif
+
+#if _MSC_VER >= 1500 && defined(_M_AMD64)
+#pragma intrinsic(_InterlockedCompareExchange128)
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(dest, exchange, compare) _InterlockedCompareExchange128((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))
+#endif
+
+#if _MSC_VER >= 1600
+
+// MSVC 2010 and later provide intrinsics for 8 and 16 bit integers.
+// Note that for each bit count these macros must be either all defined or all not defined.
+// Otherwise atomic<> operations will be implemented inconsistently.
+
+#pragma intrinsic(_InterlockedCompareExchange8)
+#pragma intrinsic(_InterlockedExchangeAdd8)
+#pragma intrinsic(_InterlockedExchange8)
+#pragma intrinsic(_InterlockedAnd8)
+#pragma intrinsic(_InterlockedOr8)
+#pragma intrinsic(_InterlockedXor8)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(dest, exchange, compare) _InterlockedCompareExchange8((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) _InterlockedExchangeAdd8((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) _InterlockedExchange8((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND8(dest, arg) _InterlockedAnd8((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR8(dest, arg) _InterlockedOr8((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR8(dest, arg) _InterlockedXor8((char*)(dest), (char)(arg))
+
+#pragma intrinsic(_InterlockedCompareExchange16)
+#pragma intrinsic(_InterlockedExchangeAdd16)
+#pragma intrinsic(_InterlockedExchange16)
+#pragma intrinsic(_InterlockedAnd16)
+#pragma intrinsic(_InterlockedOr16)
+#pragma intrinsic(_InterlockedXor16)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(dest, exchange, compare) _InterlockedCompareExchange16((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(dest, addend) _InterlockedExchangeAdd16((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) _InterlockedExchange16((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND16(dest, arg) _InterlockedAnd16((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16(dest, arg) _InterlockedOr16((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16(dest, arg) _InterlockedXor16((short*)(dest), (short)(arg))
+
+#endif // _MSC_VER >= 1600
+
+#if defined(_M_AMD64) || defined(_M_IA64)
+
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#pragma intrinsic(_InterlockedExchange64)
+#pragma intrinsic(_InterlockedAnd64)
+#pragma intrinsic(_InterlockedOr64)
+#pragma intrinsic(_InterlockedXor64)
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))
+
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+#pragma intrinsic(_InterlockedExchangePointer)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((long*)(dest), byte_offset))
+
+#elif defined(_M_IX86)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
+
+#endif
+
+#if _MSC_VER >= 1700 && defined(_M_ARM)
+
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#pragma intrinsic(_InterlockedExchange64)
+#pragma intrinsic(_InterlockedAnd64)
+#pragma intrinsic(_InterlockedOr64)
+#pragma intrinsic(_InterlockedXor64)
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))
+
+#pragma intrinsic(_InterlockedCompareExchange8_nf)
+#pragma intrinsic(_InterlockedCompareExchange8_acq)
+#pragma intrinsic(_InterlockedCompareExchange8_rel)
+#pragma intrinsic(_InterlockedCompareExchange16_nf)
+#pragma intrinsic(_InterlockedCompareExchange16_acq)
+#pragma intrinsic(_InterlockedCompareExchange16_rel)
+#pragma intrinsic(_InterlockedCompareExchange_nf)
+#pragma intrinsic(_InterlockedCompareExchange_acq)
+#pragma intrinsic(_InterlockedCompareExchange_rel)
+#pragma intrinsic(_InterlockedCompareExchange64)
+#pragma intrinsic(_InterlockedCompareExchange64_nf)
+#pragma intrinsic(_InterlockedCompareExchange64_acq)
+#pragma intrinsic(_InterlockedCompareExchange64_rel)
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+#pragma intrinsic(_InterlockedCompareExchangePointer_nf)
+#pragma intrinsic(_InterlockedCompareExchangePointer_acq)
+#pragma intrinsic(_InterlockedCompareExchangePointer_rel)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(dest, exchange, compare) _InterlockedCompareExchange8_nf((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange8_acq((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(dest, exchange, compare) _InterlockedCompareExchange8_rel((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(dest, exchange, compare) _InterlockedCompareExchange16_nf((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange16_acq((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(dest, exchange, compare) _InterlockedCompareExchange16_rel((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(dest, exchange, compare) _InterlockedCompareExchange_nf((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange_acq((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(dest, exchange, compare) _InterlockedCompareExchange_rel((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(dest, exchange, compare) _InterlockedCompareExchange64_nf((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange64_acq((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(dest, exchange, compare) _InterlockedCompareExchange64_rel((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELAXED(dest, exchange, compare) _InterlockedCompareExchangePointer_nf((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchangePointer_acq((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELEASE(dest, exchange, compare) _InterlockedCompareExchangePointer_rel((void**)(dest), (void*)(exchange), (void*)(compare))
+
+#pragma intrinsic(_InterlockedExchangeAdd8_nf)
+#pragma intrinsic(_InterlockedExchangeAdd8_acq)
+#pragma intrinsic(_InterlockedExchangeAdd8_rel)
+#pragma intrinsic(_InterlockedExchangeAdd16_nf)
+#pragma intrinsic(_InterlockedExchangeAdd16_acq)
+#pragma intrinsic(_InterlockedExchangeAdd16_rel)
+#pragma intrinsic(_InterlockedExchangeAdd_nf)
+#pragma intrinsic(_InterlockedExchangeAdd_acq)
+#pragma intrinsic(_InterlockedExchangeAdd_rel)
+#pragma intrinsic(_InterlockedExchangeAdd64_nf)
+#pragma intrinsic(_InterlockedExchangeAdd64_acq)
+#pragma intrinsic(_InterlockedExchangeAdd64_rel)
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(dest, addend) _InterlockedExchangeAdd8_nf((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(dest, addend) _InterlockedExchangeAdd8_acq((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(dest, addend) _InterlockedExchangeAdd8_rel((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(dest, addend) _InterlockedExchangeAdd16_nf((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(dest, addend) _InterlockedExchangeAdd16_acq((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(dest, addend) _InterlockedExchangeAdd16_rel((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(dest, addend) _InterlockedExchangeAdd_nf((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(dest, addend) _InterlockedExchangeAdd_acq((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(dest, addend) _InterlockedExchangeAdd_rel((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(dest, addend) _InterlockedExchangeAdd64_nf((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(dest, addend) _InterlockedExchangeAdd64_acq((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(dest, addend) _InterlockedExchangeAdd64_rel((__int64*)(dest), (__int64)(addend))
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELAXED(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED((long*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_ACQUIRE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE((long*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELEASE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE((long*)(dest), byte_offset))
+
+#pragma intrinsic(_InterlockedExchange8_nf)
+#pragma intrinsic(_InterlockedExchange8_acq)
+#pragma intrinsic(_InterlockedExchange16_nf)
+#pragma intrinsic(_InterlockedExchange16_acq)
+#pragma intrinsic(_InterlockedExchange_nf)
+#pragma intrinsic(_InterlockedExchange_acq)
+#pragma intrinsic(_InterlockedExchange64_nf)
+#pragma intrinsic(_InterlockedExchange64_acq)
+#pragma intrinsic(_InterlockedExchangePointer)
+#pragma intrinsic(_InterlockedExchangePointer_nf)
+#pragma intrinsic(_InterlockedExchangePointer_acq)
+#if _MSC_VER >= 1800
+#pragma intrinsic(_InterlockedExchange8_rel)
+#pragma intrinsic(_InterlockedExchange16_rel)
+#pragma intrinsic(_InterlockedExchange_rel)
+#pragma intrinsic(_InterlockedExchange64_rel)
+#pragma intrinsic(_InterlockedExchangePointer_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(dest, newval) _InterlockedExchange8_nf((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(dest, newval) _InterlockedExchange8_acq((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(dest, newval) _InterlockedExchange16_nf((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(dest, newval) _InterlockedExchange16_acq((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(dest, newval) _InterlockedExchange_nf((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(dest, newval) _InterlockedExchange_acq((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(dest, newval) _InterlockedExchange64_nf((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(dest, newval) _InterlockedExchange64_acq((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELAXED(dest, newval) _InterlockedExchangePointer_nf((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_ACQUIRE(dest, newval) _InterlockedExchangePointer_acq((void**)(dest), (void*)(newval))
+
+#if _MSC_VER >= 1800
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) _InterlockedExchange8_rel((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) _InterlockedExchange16_rel((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) _InterlockedExchange_rel((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) _InterlockedExchange64_rel((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) _InterlockedExchangePointer_rel((void**)(dest), (void*)(newval))
+#else
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval)
+#endif
+
+#pragma intrinsic(_InterlockedAnd8_nf)
+#pragma intrinsic(_InterlockedAnd8_acq)
+#pragma intrinsic(_InterlockedAnd8_rel)
+#pragma intrinsic(_InterlockedAnd16_nf)
+#pragma intrinsic(_InterlockedAnd16_acq)
+#pragma intrinsic(_InterlockedAnd16_rel)
+#pragma intrinsic(_InterlockedAnd_nf)
+#pragma intrinsic(_InterlockedAnd_acq)
+#pragma intrinsic(_InterlockedAnd_rel)
+#pragma intrinsic(_InterlockedAnd64_nf)
+#pragma intrinsic(_InterlockedAnd64_acq)
+#pragma intrinsic(_InterlockedAnd64_rel)
+
+#define BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(dest, arg) _InterlockedAnd8_nf((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(dest, arg) _InterlockedAnd8_acq((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(dest, arg) _InterlockedAnd8_rel((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(dest, arg) _InterlockedAnd16_nf((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(dest, arg) _InterlockedAnd16_acq((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(dest, arg) _InterlockedAnd16_rel((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(dest, arg) _InterlockedAnd_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(dest, arg) _InterlockedAnd_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(dest, arg) _InterlockedAnd_rel((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(dest, arg) _InterlockedAnd64_nf((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(dest, arg) _InterlockedAnd64_acq((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(dest, arg) _InterlockedAnd64_rel((__int64*)(dest), (__int64)(arg))
+
+#pragma intrinsic(_InterlockedOr8_nf)
+#pragma intrinsic(_InterlockedOr8_acq)
+#pragma intrinsic(_InterlockedOr8_rel)
+#pragma intrinsic(_InterlockedOr16_nf)
+#pragma intrinsic(_InterlockedOr16_acq)
+#pragma intrinsic(_InterlockedOr16_rel)
+#pragma intrinsic(_InterlockedOr_nf)
+#pragma intrinsic(_InterlockedOr_acq)
+#pragma intrinsic(_InterlockedOr_rel)
+#pragma intrinsic(_InterlockedOr64_nf)
+#pragma intrinsic(_InterlockedOr64_acq)
+#pragma intrinsic(_InterlockedOr64_rel)
+
+#define BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(dest, arg) _InterlockedOr8_nf((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(dest, arg) _InterlockedOr8_acq((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(dest, arg) _InterlockedOr8_rel((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(dest, arg) _InterlockedOr16_nf((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(dest, arg) _InterlockedOr16_acq((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(dest, arg) _InterlockedOr16_rel((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(dest, arg) _InterlockedOr_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(dest, arg) _InterlockedOr_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(dest, arg) _InterlockedOr_rel((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(dest, arg) _InterlockedOr64_nf((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(dest, arg) _InterlockedOr64_acq((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(dest, arg) _InterlockedOr64_rel((__int64*)(dest), (__int64)(arg))
+
+#pragma intrinsic(_InterlockedXor8_nf)
+#pragma intrinsic(_InterlockedXor8_acq)
+#pragma intrinsic(_InterlockedXor8_rel)
+#pragma intrinsic(_InterlockedXor16_nf)
+#pragma intrinsic(_InterlockedXor16_acq)
+#pragma intrinsic(_InterlockedXor16_rel)
+#pragma intrinsic(_InterlockedXor_nf)
+#pragma intrinsic(_InterlockedXor_acq)
+#pragma intrinsic(_InterlockedXor_rel)
+#pragma intrinsic(_InterlockedXor64_nf)
+#pragma intrinsic(_InterlockedXor64_acq)
+#pragma intrinsic(_InterlockedXor64_rel)
+
+#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(dest, arg) _InterlockedXor8_nf((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(dest, arg) _InterlockedXor8_acq((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(dest, arg) _InterlockedXor8_rel((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(dest, arg) _InterlockedXor16_nf((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(dest, arg) _InterlockedXor16_acq((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(dest, arg) _InterlockedXor16_rel((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(dest, arg) _InterlockedXor_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(dest, arg) _InterlockedXor_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(dest, arg) _InterlockedXor_rel((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(dest, arg) _InterlockedXor64_nf((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(dest, arg) _InterlockedXor64_acq((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(dest, arg) _InterlockedXor64_rel((__int64*)(dest), (__int64)(arg))
+
+#endif // _MSC_VER >= 1700 && defined(_M_ARM)
+
+#endif // _MSC_VER < 1400
+
+#else // defined(_MSC_VER) && _MSC_VER >= 1310
+
+#if defined(BOOST_USE_WINDOWS_H)
+
+#include <windows.h>
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
+
+#if defined(_WIN64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
+
+#else // defined(_WIN64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
+
+#endif // defined(_WIN64)
+
+#else // defined(BOOST_USE_WINDOWS_H)
+
+#if defined(__MINGW64__)
+#define BOOST_ATOMIC_INTERLOCKED_IMPORT
+#else
+#define BOOST_ATOMIC_INTERLOCKED_IMPORT __declspec(dllimport)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+extern "C" {
+
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long);
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long);
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long);
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend))
+
+#if defined(_WIN64)
+
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64);
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64);
+
+BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile *, void*, void*);
+BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile *, void*);
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) boost::atomics::detail::InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) boost::atomics::detail::InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
+
+#else // defined(_WIN64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
+
+#endif // defined(_WIN64)
+
+} // extern "C"
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#undef BOOST_ATOMIC_INTERLOCKED_IMPORT
+
+#endif // defined(BOOST_USE_WINDOWS_H)
+
+#endif // defined(_MSC_VER)
+
+#endif
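
[Annotation, not part of the patch] Whatever the source — compiler intrinsic, <windows.h>, or a hand-written import declaration — every variant ends up behind the same `(dest, exchange, compare)`-returns-previous-value convention, so the layers above can build retry loops without caring which branch was taken. A sketch of such a loop (Windows-only, since the macros exist only there):

    #include <boost/atomic/detail/interlocked.hpp>

    // fetch_or built on the 32-bit CAS macro; the MSVC backends follow the
    // same shape for operations that lack a dedicated intrinsic.
    long fetch_or_sketch(long volatile* dest, long bits)
    {
        long old_val = *dest;
        for (;;)
        {
            long prev = BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, old_val | bits, old_val);
            if (prev == old_val)
                return prev;   // we installed old_val | bits
            old_val = prev;    // another thread won the race; retry against its value
        }
    }
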
diff --git a/3rdParty/Boost/src/boost/atomic/detail/link.hpp b/3rdParty/Boost/src/boost/atomic/detail/link.hpp
new file mode 100644
index 0000000..4f522ac
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/link.hpp
@@ -0,0 +1,58 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2012 Hartmut Kaiser
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/link.hpp
+ *
+ * This header defines macros for linking with compiled library of Boost.Atomic
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// Set up dll import/export options
+#if (defined(BOOST_ATOMIC_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) && \
+ !defined(BOOST_ATOMIC_STATIC_LINK)
+
+#if defined(BOOST_ATOMIC_SOURCE)
+#define BOOST_ATOMIC_DECL BOOST_SYMBOL_EXPORT
+#define BOOST_ATOMIC_BUILD_DLL
+#else
+#define BOOST_ATOMIC_DECL BOOST_SYMBOL_IMPORT
+#endif
+
+#endif // building a shared library
+
+#ifndef BOOST_ATOMIC_DECL
+#define BOOST_ATOMIC_DECL
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// Auto library naming
+#if !defined(BOOST_ATOMIC_SOURCE) && !defined(BOOST_ALL_NO_LIB) && \
+ !defined(BOOST_ATOMIC_NO_LIB)
+
+#define BOOST_LIB_NAME boost_atomic
+
+// tell the auto-link code to select a dll when required:
+#if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_ATOMIC_DYN_LINK)
+#define BOOST_DYN_LINK
+#endif
+
+#include <boost/config/auto_link.hpp>
+
+#endif // auto-linking disabled
+
+#endif
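
[Annotation, not part of the patch] From the consumer side this machinery is driven entirely by configuration macros; nothing in user code changes. Typical usage, assuming MSVC-style auto-linking:

    // Dynamic linking: define before any Boost.Atomic include (or on the
    // command line) so BOOST_ATOMIC_DECL becomes dllimport and auto-linking
    // selects the import library.
    #define BOOST_ATOMIC_DYN_LINK
    #include <boost/atomic.hpp>

    // Alternatively, define BOOST_ATOMIC_NO_LIB (or BOOST_ALL_NO_LIB) to
    // suppress the #pragma comment(lib, ...) emitted via auto_link.hpp and
    // link boost_atomic manually.
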
diff --git a/3rdParty/Boost/src/boost/atomic/detail/lockpool.hpp b/3rdParty/Boost/src/boost/atomic/detail/lockpool.hpp
new file mode 100644
index 0000000..4e249aa
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/lockpool.hpp
@@ -0,0 +1,51 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013-2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/lockpool.hpp
+ *
+ * This header contains the declaration of the lockpool used to emulate atomic ops.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/link.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct lockpool
+{
+ class scoped_lock
+ {
+ void* m_lock;
+
+ public:
+ explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr) BOOST_NOEXCEPT;
+ BOOST_ATOMIC_DECL ~scoped_lock() BOOST_NOEXCEPT;
+
+ BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
+ BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
+ };
+
+ static BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
+ static BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_
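
[Annotation, not part of the patch] The implementation behind these declarations lives in the compiled boost_atomic library; the idea is a fixed table of locks indexed by a hash of the object's address, so two unrelated non-lock-free atomics contend only when their addresses collide. A simplified illustration of the addressing scheme (assumed shape, not the shipped code):

    #include <cstddef>
    #include <mutex>   // stand-in; the real pool uses lightweight spinlocks

    namespace lockpool_sketch {

    static const std::size_t pool_size = 64;   // assumption: illustrative size
    static std::mutex locks[pool_size];

    inline std::mutex& lock_for(const volatile void* addr)
    {
        // Drop the low bits (objects within one cache line share a slot
        // anyway), then fold the address into the table.
        std::size_t h = reinterpret_cast< std::size_t >(addr);
        return locks[(h >> 4) % pool_size];
    }

    } // namespace lockpool_sketch
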
diff --git a/3rdParty/Boost/src/boost/atomic/detail/operations.hpp b/3rdParty/Boost/src/boost/atomic/detail/operations.hpp
new file mode 100644
index 0000000..d81399a
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/operations.hpp
@@ -0,0 +1,24 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/operations.hpp
+ *
+ * This header defines atomic operations, including the emulated version.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/operations_lockfree.hpp>
+#include <boost/atomic/detail/ops_emulated.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/operations_fwd.hpp b/3rdParty/Boost/src/boost/atomic/detail/operations_fwd.hpp
new file mode 100644
index 0000000..69049e4
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/operations_fwd.hpp
@@ -0,0 +1,34 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/operations_fwd.hpp
+ *
+ * This header contains the forward declaration of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< unsigned int Size, bool Signed >
+struct operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/operations_lockfree.hpp b/3rdParty/Boost/src/boost/atomic/detail/operations_lockfree.hpp
new file mode 100644
index 0000000..b465403
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/operations_lockfree.hpp
@@ -0,0 +1,30 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/operations_lockfree.hpp
+ *
+ * This header defines lockfree atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+
+#if !defined(BOOST_ATOMIC_EMULATED)
+#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_)
+#else
+#include <boost/atomic/detail/operations_fwd.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_
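
[Annotation, not part of the patch] The unusual `#include BOOST_ATOMIC_DETAIL_HEADER(...)` line relies on platform.hpp (not part of this hunk) defining BOOST_ATOMIC_DETAIL_PLATFORM as a tag such as gcc_x86 and pasting it onto the given prefix, so the directive resolves to e.g. <boost/atomic/detail/ops_gcc_x86.hpp>. The underlying preprocessor idiom, with assumed names:

    // Two-level indirection so the platform tag expands before ## and # apply.
    #define SKETCH_PLATFORM gcc_x86
    #define SKETCH_CONCAT_I(a, b) a ## b
    #define SKETCH_CONCAT(a, b) SKETCH_CONCAT_I(a, b)
    #define SKETCH_STRINGIZE_I(x) #x
    #define SKETCH_STRINGIZE(x) SKETCH_STRINGIZE_I(x)
    #define SKETCH_HEADER(prefix) SKETCH_STRINGIZE(SKETCH_CONCAT(prefix, SKETCH_PLATFORM).hpp)

    // SKETCH_HEADER(boost/atomic/detail/ops_) expands to the string literal
    // "boost/atomic/detail/ops_gcc_x86.hpp", usable as a computed #include.
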
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_cas_based.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_cas_based.hpp
new file mode 100644
index 0000000..7f8d288
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_cas_based.hpp
@@ -0,0 +1,91 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_cas_based.hpp
+ *
+ * This header contains a CAS-based implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct cas_based_operations :
+ public Base
+{
+ typedef typename Base::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::load(storage, memory_order_relaxed);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::load(storage, memory_order_relaxed);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::load(storage, memory_order_relaxed);
+ while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::load(storage, memory_order_relaxed);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::load(storage, memory_order_relaxed);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::load(storage, memory_order_relaxed);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::store(storage, (storage_type)0, order);
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_emulated.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_emulated.hpp
new file mode 100644
index 0000000..597490f
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_emulated.hpp
@@ -0,0 +1,149 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_emulated.hpp
+ *
+ * This header contains a lockpool-based implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/lockpool.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
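+// Note: every operation below serializes through a lock taken from the lock
+// pool, selected based on the address of the storage; this is why
+// is_lock_free() unconditionally returns false for this backend.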
+template< typename T >
+struct emulated_operations
+{
+ typedef T storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ lockpool::scoped_lock lock(&storage);
+ const_cast< storage_type& >(storage) = v;
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ lockpool::scoped_lock lock(&storage);
+ return const_cast< storage_type const& >(storage);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s += v;
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s -= v;
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s = v;
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ const bool res = old_val == expected;
+ if (res)
+ s = desired;
+ expected = old_val;
+
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s &= v;
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s |= v;
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s ^= v;
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return false;
+ }
+};
+
+template< unsigned int Size, bool Signed >
+struct operations :
+ public emulated_operations< typename make_storage_type< Size, Signed >::type >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_extending_cas_based.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_extending_cas_based.hpp
new file mode 100644
index 0000000..d7f3c5f
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_extending_cas_based.hpp
@@ -0,0 +1,65 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_extending_cas_based.hpp
+ *
+ * This header contains boilerplate for \c operations template implementations that require sign/zero extension in arithmetic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
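+// Note: the Base operations work on a wider storage_type, so a plain add or
+// subtract would leave the upper bits stale. The overrides below therefore
+// perform the arithmetic in the narrower emulated_storage_type, letting the
+// conversions sign- or zero-extend the result before it is stored via CAS.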
+template< typename Base, unsigned int Size, bool Signed >
+struct extending_cas_based_operations :
+ public Base
+{
+ typedef typename Base::storage_type storage_type;
+ typedef typename make_storage_type< Size, Signed >::type emulated_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::load(storage, memory_order_relaxed);
+ emulated_storage_type new_val;
+ do
+ {
+ new_val = static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v);
+ }
+ while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed));
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::load(storage, memory_order_relaxed);
+ emulated_storage_type new_val;
+ do
+ {
+ new_val = static_cast< emulated_storage_type >(old_val) - static_cast< emulated_storage_type >(v);
+ }
+ while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed));
+ return old_val;
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_alpha.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_alpha.hpp
new file mode 100644
index 0000000..d17c61d
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_alpha.hpp
@@ -0,0 +1,874 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_alpha.hpp
+ *
+ * This header contains an implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*
+ Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
+ (HP OpenVMS systems documentation) and the Alpha Architecture Reference Manual.
+ */
+
+/*
+ NB: The most natural thing would be to write the increment/decrement
+ operators along the following lines:
+
+ __asm__ __volatile__
+ (
+ "1: ldl_l %0,%1 \n"
+ "addl %0,1,%0 \n"
+ "stl_c %0,%1 \n"
+ "beq %0,1b\n"
+ : "=&b" (tmp)
+ : "m" (value)
+ : "cc"
+ );
+
+ However, according to the comments on the HP website and matching
+ comments in the Linux kernel sources, this defeats branch prediction,
+ as the CPU assumes that backward branches are always taken; so we
+ instead copy the trick from the Linux kernel and introduce a forward
+ branch and back again.
+
+ I have, however, had a hard time measuring the difference between
+ the two versions in microbenchmarks -- I am leaving the trick in
+ nevertheless, as it apparently does not hurt either way.
+*/
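+
+/*
+ The forward-branch form used throughout this file therefore looks like
+ this (an illustrative sketch of the same increment):
+
+ "1: ldl_l %0,%1 \n"
+ "addl %0,1,%0 \n"
+ "stl_c %0,%1 \n"
+ "beq %0,2f \n"
+
+ ".subsection 2 \n"
+ "2: br 1b \n"
+ ".previous \n"
+*/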
+
+struct gcc_alpha_operations_base
+{
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & memory_order_release) != 0)
+ __asm__ __volatile__ ("mb" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ __asm__ __volatile__ ("mb" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("mb" ::: "memory");
+ }
+};
+
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_alpha_operations_base
+{
+ typedef typename make_storage_type< 4u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "mov %3, %1\n"
+ "ldl_l %0, %2\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (tmp) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ int success;
+ storage_type current;
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %2, %4\n" // current = *(&storage)
+ "cmpeq %2, %0, %3\n" // success = current == expected
+ "mov %2, %0\n" // expected = current
+ "beq %3, 2f\n" // if (success == 0) goto end
+ "stl_c %1, %4\n" // storage = desired; desired = store succeeded
+ "mov %1, %3\n" // success = desired
+ "2:\n"
+ : "+&r" (expected), // %0
+ "+&r" (desired), // %1
+ "=&r" (current), // %2
+ "=&r" (success) // %3
+ : "m" (storage) // %4
+ :
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ storage_type current, tmp;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "mov %5, %1\n" // tmp = desired
+ "ldl_l %2, %4\n" // current = *(&storage)
+ "cmpeq %2, %0, %3\n" // success = current == expected
+ "mov %2, %0\n" // expected = current
+ "beq %3, 2f\n" // if (success == 0) goto end
+ "stl_c %1, %4\n" // storage = tmp; tmp = store succeeded
+ "beq %1, 3f\n" // if (tmp == 0) goto retry
+ "mov %1, %3\n" // success = tmp
+ "2:\n"
+
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous\n"
+
+ : "+&r" (expected), // %0
+ "=&r" (tmp), // %1
+ "=&r" (current), // %2
+ "=&r" (success) // %3
+ : "m" (storage), // %4
+ "r" (desired) // %5
+ :
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "and %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "bis %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "xor %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+
+template< >
+struct operations< 1u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "zapnot %1, #1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "zapnot %1, #1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 1u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "sextb %1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "sextb %1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+
+template< >
+struct operations< 2u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "zapnot %1, #3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "zapnot %1, #3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 2u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "sextw %1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "sextw %1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_alpha_operations_base
+{
+ typedef typename make_storage_type< 8u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "mov %3, %1\n"
+ "ldq_l %0, %2\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (tmp) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ int success;
+ storage_type current;
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %2, %4\n" // current = *(&storage)
+ "cmpeq %2, %0, %3\n" // success = current == expected
+ "mov %2, %0\n" // expected = current
+ "beq %3, 2f\n" // if (success == 0) goto end
+ "stq_c %1, %4\n" // storage = desired; desired = store succeeded
+ "mov %1, %3\n" // success = desired
+ "2:\n"
+ : "+&r" (expected), // %0
+ "+&r" (desired), // %1
+ "=&r" (current), // %2
+ "=&r" (success) // %3
+ : "m" (storage) // %4
+ :
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ storage_type current, tmp;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "mov %5, %1\n" // tmp = desired
+ "ldq_l %2, %4\n" // current = *(&storage)
+ "cmpeq %2, %0, %3\n" // success = current == expected
+ "mov %2, %0\n" // expected = current
+ "beq %3, 2f\n" // if (success == 0) goto end
+ "stq_c %1, %4\n" // storage = tmp; tmp = store succeeded
+ "beq %1, 3f\n" // if (tmp == 0) goto retry
+ "mov %1, %3\n" // success = tmp
+ "2:\n"
+
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous\n"
+
+ : "+&r" (expected), // %0
+ "=&r" (tmp), // %1
+ "=&r" (current), // %2
+ "=&r" (success) // %3
+ : "m" (storage), // %4
+ "r" (desired) // %5
+ :
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "addq %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "subq %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "and %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "bis %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "xor %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("mb" ::: "memory");
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_arm.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_arm.hpp
new file mode 100644
index 0000000..29e1e5a
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_arm.hpp
@@ -0,0 +1,971 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_arm.hpp
+ *
+ * This header contains an implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// From the ARM Architecture Reference Manual for architecture v6:
+//
+// LDREX{<cond>} <Rd>, [<Rn>]
+// <Rd> Specifies the destination register for the memory word addressed by <Rn>
+// <Rn> Specifies the register containing the address.
+//
+// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
+// <Rd> Specifies the destination register for the returned status value.
+// 0 if the operation updates memory
+// 1 if the operation fails to update memory
+// <Rm> Specifies the register containing the word to be stored to memory.
+// <Rn> Specifies the register containing the address.
+// Rd must not be the same register as Rm or Rn.
+//
+// ARM v7 is like ARM v6 plus:
+// There are half-word and byte versions of the LDREX and STREX instructions,
+// LDREXH, LDREXB, STREXH and STREXB.
+// There are also double-word versions, LDREXD and STREXD.
+// (Actually it looks like these are available from version 6k onwards.)
+// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
+// I think you can supply an immediate offset to the address.
+//
+// A memory barrier is effected using a "co-processor 15" instruction,
+// though a separate assembler mnemonic is available for it in v7.
+//
+// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
+// doesn't include all instructions and in particular it doesn't include the co-processor
+// instruction used for the memory barrier or the load-locked/store-conditional
+// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
+// asm blocks with code to temporarily change to ARM mode.
+//
+// You can only change between ARM and Thumb modes when branching using the bx instruction.
+// bx takes an address specified in a register. The least significant bit of the address
+// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
+// A temporary register is needed for the address and is passed as an argument to these
+// macros. It must be one of the "low" registers accessible to Thumb code, specified
+// using the "l" attribute in the asm statement.
+//
+// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
+// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
+// "Thumb 2" mode, but its architecture manual is no longer available, referring to v7.)
+// So in v7 we don't need to change to ARM mode; we can write "universal
+// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
+// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
+// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
+// so they can always be present.
+
+// A note about memory_order_consume. Technically, this architecture allows avoiding an
+// unnecessary memory barrier after a consume load, since it supports data dependency
+// ordering. However, some compiler optimizations may break seemingly valid code that
+// relies on data dependency tracking by injecting bogus branches to aid out-of-order
+// execution. This may happen not only in Boost.Atomic code but also in the user's code,
+// over which we have no control. See this thread:
+// http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
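+//
+// An illustrative consequence for this backend: a consume load such as
+// load(storage, memory_order_consume) takes exactly the same path as
+// load(storage, memory_order_acquire), since fence_after() emits the same
+// barrier for both orders.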
+
+#if defined(__thumb__) && !defined(__thumb2__)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "8:\n"
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "9:\n"
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
+#else
+// The tmpreg may be wasted in this case, which is non-optimal.
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&r" (var)
+#endif
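+
+// Usage sketch: each asm block below is bracketed as
+//
+// BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+// "...ARM instructions..."
+// BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+//
+// where tmp is a scratch register declared with the "=&l" constraint (or with
+// BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT), so that "Thumb 1" builds
+// can switch to ARM mode for the duration of the block and back again. In
+// other modes BOOST_ATOMIC_DETAIL_ARM_ASM_START/END expand to nothing.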
+
+struct gcc_arm_operations_base
+{
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & memory_order_release) != 0)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
+ // Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the "dmb" instruction such as "ish" or "#11".
+ // As a workaround we have to inject encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.
+ // Since we cannot detect the binutils version at compile time, we always have to use this hack.
+ __asm__ __volatile__
+ (
+#if defined(__thumb2__)
+ ".short 0xF3BF, 0x8F5B\n" // dmb ish
+#else
+ ".word 0xF57FF05B\n" // dmb ish
+#endif
+ :
+ :
+ : "memory"
+ );
+#else
+ int tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "mcr\tp15, 0, r0, c7, c10, 5\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : "=&l" (tmp)
+ :
+ : "memory"
+ );
+#endif
+ }
+};
+
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_arm_operations_base
+{
+ typedef typename make_storage_type< 4u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // load the original value
+ "strex %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n" // check if store succeeded
+ "bne 1b\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
+ : [value] "r" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "r" (expected), // %4
+ [desired] "r" (desired) // %5
+ : "cc"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = original;
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "bne 2f\n" // if (!flags.equal) goto end
+ "strex %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n" // if (flags.equal) goto retry
+ "2:\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "r" (expected), // %4
+ [desired] "r" (desired) // %5
+ : "cc"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = original;
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+
+template< >
+struct operations< 1u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 1u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+
+template< >
+struct operations< 2u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "uxth %[result], %[result]\n" // zero extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "uxth %[result], %[result]\n" // zero extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 2u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "sxth %[result], %[result]\n" // sign extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "sxth %[result], %[result]\n" // sign extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "r" (v) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+// Unlike 32-bit operations, for 64-bit loads and stores we must use ldrexd/strexd.
+// Any other instructions result in a non-atomic sequence of 32-bit accesses.
+// See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition",
+// Section A3.5.3 "Atomicity in the ARM architecture".
+
+// In the asm blocks below we have to use 32-bit register pairs to compose 64-bit values.
+// In order to pass the 64-bit operands to/from asm blocks, we use undocumented gcc feature:
+// the lower half (Rt) of the operand is accessible normally, via the numbered placeholder (e.g. %0),
+// and the upper half (Rt2) - via the same placeholder with an 'H' after the '%' sign (e.g. %H0).
+// See: http://hardwarebug.org/2010/07/06/arm-inline-asm-secrets/
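+//
+// For example, with a 64-bit operand bound to placeholder %1, the instruction
+// "ldrexd %1, %H1, [%3]" loads the low word into the register allocated for
+// %1 and the high word into its pair register.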
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_arm_operations_base
+{
+ typedef typename make_storage_type< 8u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ exchange(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "ldrexd %1, %H1, [%2]\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original) // %1
+ : "r" (&storage) // %2
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // load the original value
+ "strexd %0, %2, %H2, [%3]\n" // store the replacement, tmp = store failed
+ "teq %0, #0\n" // check if store succeeded
+ "bne 1b\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original) // %1
+ : "r" (v), // %2
+ "r" (&storage) // %3
+ : "cc", "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t tmp;
+ storage_type original, old_val = expected;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "cmp %1, %2\n" // flags = original.lo==old_val.lo
+ "ittt eq\n" // [hint that the following 3 instructions are conditional on flags.equal]
+ "cmpeq %H1, %H2\n" // if (flags.equal) flags = original.hi==old_val.hi
+ "strexdeq %0, %4, %H4, [%3]\n" // if (flags.equal) *(&storage) = desired, tmp = store failed
+ "teqeq %0, #0\n" // if (flags.equal) flags = tmp==0
+ "ite eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "moveq %2, #1\n" // if (flags.equal) old_val.lo = 1
+ "movne %2, #0\n" // if (!flags.equal) old_val.lo = 0
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "+r" (old_val) // %2
+ : "r" (&storage), // %3
+ "r" (desired) // %4
+ : "cc", "memory"
+ );
+ const uint32_t success = (uint32_t)old_val;
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = original;
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t tmp;
+ storage_type original, old_val = expected;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "cmp %1, %2\n" // flags = original.lo==old_val.lo
+ "it eq\n" // [hint that the following instruction is conditional on flags.equal]
+ "cmpeq %H1, %H2\n" // if (flags.equal) flags = original.hi==old_val.hi
+ "bne 2f\n" // if (!flags.equal) goto end
+ "strexd %0, %4, %H4, [%3]\n" // *(&storage) = desired, tmp = store failed
+ "teq %0, #0\n" // flags.equal = tmp == 0
+ "bne 1b\n" // if (flags.equal) goto retry
+ "2:\n"
+ "ite eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "moveq %2, #1\n" // if (flags.equal) old_val.lo = 1
+ "movne %2, #0\n" // if (!flags.equal) old_val.lo = 0
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "+r" (old_val) // %2
+ : "r" (&storage), // %3
+ "r" (desired) // %4
+ : "cc", "memory"
+ );
+ const uint32_t success = (uint32_t)old_val;
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = original;
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "adds %2, %1, %4\n" // result = original + value
+ "adc %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : "cc", "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "subs %2, %1, %4\n" // result = original - value
+ "sbc %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : "cc", "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "and %2, %1, %4\n" // result = original & value
+ "and %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : "cc", "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "orr %2, %1, %4\n" // result = original | value
+ "orr %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : "cc", "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "eor %2, %1, %4\n" // result = original ^ value
+ "eor %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : "cc", "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ gcc_arm_operations_base::hardware_full_fence();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_atomic.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_atomic.hpp
new file mode 100644
index 0000000..2297791
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_atomic.hpp
@@ -0,0 +1,238 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_atomic.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
+#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__INTEL_COMPILER)
+// This is used to suppress warning #32013 described below for Intel Compiler.
+// In debug builds the compiler does not inline any functions, so basically
+// every atomic function call results in this warning. I don't know any other
+// way to selectively disable just this one warning.
+#pragma system_header
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*!
+ * The function converts \c boost::memory_order values to the compiler-specific constants.
+ *
+ * NOTE: The intention is that the function is optimized away by the compiler, and the
+ * compiler-specific constants are passed to the intrinsics. I know constexpr doesn't
+ * work in this case because the standard atomics interface requires memory ordering
+ * constants to be passed as function arguments, at which point they stop being constexpr.
+ * However, it is crucial that the compiler sees constants and not runtime values,
+ * because otherwise it just ignores the ordering value and always uses seq_cst.
+ * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
+ * gcc 4.8.2. Intel Compiler issues a warning in this case:
+ *
+ * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
+ *
+ * while gcc acts silently.
+ *
+ * To mitigate the problem ALL functions, including the atomic<> members, must be
+ * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
+ * all functions are called with constant orderings and call intrinsics properly.
+ *
+ * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
+ * inline functions even when marked with BOOST_FORCEINLINE. In this case all atomic
+ * operations will be executed with seq_cst semantics.
+ */
+BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
+{
+ return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
+ (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
+ (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
+}
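+
+// A minimal usage sketch (hypothetical caller, for illustration only): with a literal
+// ordering the conversion folds to a compile-time constant, so the intrinsic receives
+// __ATOMIC_ACQUIRE rather than a runtime value.
+//
+//   BOOST_FORCEINLINE int load_acquire(int const volatile& x) BOOST_NOEXCEPT
+//   {
+//       return __atomic_load_n(&x, convert_memory_order_to_gcc(memory_order_acquire));
+//   }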
+
+template< typename T >
+struct gcc_atomic_operations
+{
+ typedef T storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return __atomic_compare_exchange_n
+ (
+ &storage, &expected, desired, false,
+ atomics::detail::convert_memory_order_to_gcc(success_order),
+ atomics::detail::convert_memory_order_to_gcc(failure_order)
+ );
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return __atomic_compare_exchange_n
+ (
+ &storage, &expected, desired, true,
+ atomics::detail::convert_memory_order_to_gcc(success_order),
+ atomics::detail::convert_memory_order_to_gcc(failure_order)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile& storage) BOOST_NOEXCEPT
+ {
+ return __atomic_is_lock_free(sizeof(storage_type), &storage);
+ }
+};
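+
+// A hedged usage sketch (hypothetical, mirroring the specializations below): the
+// template is instantiated with a raw storage type and exposes static operations.
+//
+//   typedef gcc_atomic_operations< make_storage_type< 4u, false >::type > ops;
+//   ops::storage_type s = 0;
+//   ops::store(s, 42, memory_order_release);
+//   ops::storage_type v = ops::load(s, memory_order_acquire); // v == 42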
+
+#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public gcc_atomic_operations< typename make_storage_type< 1u, Signed >::type >
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
+#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public cas_based_operations< gcc_dcas_x86< Signed > >
+{
+};
+
+#else
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >
+{
+};
+
+#endif
+#endif
+
+#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
+#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
+// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public cas_based_operations< gcc_dcas_x86_64< Signed > >
+{
+};
+
+#else
+
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >
+{
+};
+
+#endif
+#endif
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_ppc.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_ppc.hpp
new file mode 100644
index 0000000..1a1fbb7
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_ppc.hpp
@@ -0,0 +1,775 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_ppc.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*
+ Refer to: Motorola: "Programming Environments Manual for 32-Bit
+ Implementations of the PowerPC Architecture", Appendix E:
+ "Synchronization Programming Examples" for an explanation of what is
+ going on here (can be found on the web at various places by the
+ name "MPCFPE32B.pdf", Google is your friend...)
+
+ Most of the atomic operations map to instructions in a relatively
+ straightforward fashion, but "load"s may at first glance appear
+ a bit strange as they map to:
+
+ lwz %rX, addr
+ cmpw %rX, %rX
+ bne- 1f
+ 1:
+
+ That is, the CPU is forced to perform a branch that "formally" depends
+ on the value retrieved from memory. This scheme has an overhead of
+ about 1-2 clock cycles per load, but it allows mapping "acquire" to
+ the "isync" instruction instead of "sync" uniformly and for all types
+ of atomic operations. Since "isync" has a cost of about 15 clock
+ cycles, while "sync" has a cost of about 50 clock cycles, the small
+ penalty to atomic loads more than compensates for this.
+
+ Byte- and halfword-sized atomic values are realized by encoding the
+ value to be represented into a word, performing sign/zero extension
+ as appropriate. This means that after add/sub operations the value
+ needs fixing up to accurately preserve the wrap-around semantics of
+ the smaller type. (Nothing special needs to be done for the bit-wise
+ and the "exchange type" operators as the compiler already sees to
+ it that values carried in registers are extended appropriately and
+ everything falls into place naturally).
+
+ The register constraint "b" instructs gcc to use any register
+ except r0; this is sometimes required because the encoding for
+ r0 is used to signify "constant zero" in a number of instructions,
+ making r0 unusable in this place. For simplicity this constraint
+ is used everywhere since I am too lazy to look this up on a
+ per-instruction basis, and ppc has enough registers for this not
+ to pose a problem.
+*/
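+
+// For example, after an 8-bit unsigned add the 32-bit register may hold 0x100 for
+// 0xFF + 1; the byte specializations below therefore mask the result back with
+// "rlwinm %1, %1, 0, 0xff" so the stored value wraps to 0x00 as the smaller type
+// requires.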
+
+// A note about memory_order_consume. Technically, this architecture allows avoiding an
+// unnecessary memory barrier after a consume load since it supports data dependency ordering.
+// However, some compiler optimizations may break seemingly valid code that relies on data
+// dependency tracking by injecting bogus branches to aid out-of-order execution.
+// This may happen not only in Boost.Atomic code but also in the user's code, which we have
+// no control over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
+
+struct gcc_ppc_operations_base
+{
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(__powerpc64__)
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ else if ((order & memory_order_release) != 0)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+#else
+ if ((order & memory_order_release) != 0)
+ __asm__ __volatile__ ("sync" ::: "memory");
+#endif
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ __asm__ __volatile__ ("isync" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ }
+};
+
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_ppc_operations_base
+{
+ typedef typename make_storage_type< 4u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "stw %1, %0\n"
+ : "+m" (storage)
+ : "r" (v)
+ );
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ __asm__ __volatile__
+ (
+ "lwz %0, %1\n"
+ "cmpw %0, %0\n"
+ "bne- 1f\n"
+ "1:\n"
+ : "=&r" (v)
+ : "m" (storage)
+ : "cr0"
+ );
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y1\n"
+ "stwcx. %2,%y1\n"
+ "bne- 1b\n"
+ : "=&b" (original), "+Z" (storage)
+ : "b" (v)
+ : "cr0"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n"
+ "lwarx %0,%y2\n"
+ "cmpw %0, %3\n"
+ "bne- 1f\n"
+ "stwcx. %4,%y2\n"
+ "bne- 1f\n"
+ "li %1, 1\n"
+ "1:"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n"
+ "0: lwarx %0,%y2\n"
+ "cmpw %0, %3\n"
+ "bne- 1f\n"
+ "stwcx. %4,%y2\n"
+ "bne- 0b\n"
+ "li %1, 1\n"
+ "1:"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "add %1,%0,%3\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "sub %1,%0,%3\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "and %1,%0,%3\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "or %1,%0,%3\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "xor %1,%0,%3\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+
+template< >
+struct operations< 1u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "add %1,%0,%3\n"
+ "rlwinm %1, %1, 0, 0xff\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "sub %1,%0,%3\n"
+ "rlwinm %1, %1, 0, 0xff\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 1u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "add %1,%0,%3\n"
+ "extsb %1, %1\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "sub %1,%0,%3\n"
+ "extsb %1, %1\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+
+template< >
+struct operations< 2u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "add %1,%0,%3\n"
+ "rlwinm %1, %1, 0, 0xffff\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "sub %1,%0,%3\n"
+ "rlwinm %1, %1, 0, 0xffff\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 2u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "add %1,%0,%3\n"
+ "extsh %1, %1\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "lwarx %0,%y2\n"
+ "sub %1,%0,%3\n"
+ "extsh %1, %1\n"
+ "stwcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+
+#if defined(__powerpc64__)
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_ppc_operations_base
+{
+ typedef typename make_storage_type< 8u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "std %1, %0\n"
+ : "+m" (storage)
+ : "r" (v)
+ );
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ __asm__ __volatile__
+ (
+ "ld %0, %1\n"
+ "cmpd %0, %0\n"
+ "bne- 1f\n"
+ "1:\n"
+ : "=&b" (v)
+ : "m" (storage)
+ : "cr0"
+ );
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldarx %0,%y1\n"
+ "stdcx. %2,%y1\n"
+ "bne- 1b\n"
+ : "=&b" (original), "+Z" (storage)
+ : "b" (v)
+ : "cr0"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n"
+ "ldarx %0,%y2\n"
+ "cmpd %0, %3\n"
+ "bne- 1f\n"
+ "stdcx. %4,%y2\n"
+ "bne- 1f\n"
+ "li %1, 1\n"
+ "1:"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n"
+ "0: ldarx %0,%y2\n"
+ "cmpd %0, %3\n"
+ "bne- 1f\n"
+ "stdcx. %4,%y2\n"
+ "bne- 0b\n"
+ "li %1, 1\n"
+ "1:"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldarx %0,%y2\n"
+ "add %1,%0,%3\n"
+ "stdcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldarx %0,%y2\n"
+ "sub %1,%0,%3\n"
+ "stdcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldarx %0,%y2\n"
+ "and %1,%0,%3\n"
+ "stdcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldarx %0,%y2\n"
+ "or %1,%0,%3\n"
+ "stdcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldarx %0,%y2\n"
+ "xor %1,%0,%3\n"
+ "stdcx. %1,%y2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+#endif // defined(__powerpc64__)
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ switch (order)
+ {
+ case memory_order_consume:
+ case memory_order_acquire:
+ __asm__ __volatile__ ("isync" ::: "memory");
+ break;
+ case memory_order_release:
+#if defined(__powerpc64__)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+ break;
+#endif
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("sync" ::: "memory");
+ break;
+ default:;
+ }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_sparc.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_sparc.hpp
new file mode 100644
index 0000000..ea6df91
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_sparc.hpp
@@ -0,0 +1,245 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2010 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_sparc.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct gcc_sparc_cas_base
+{
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ else if ((order & memory_order_release) != 0)
+ __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ else if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ }
+};
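+
+// The "cas [address], rs2, rd" instruction used below compares the word at the given
+// address with rs2 and, if they match, swaps it with rd; in either case rd ends up
+// holding the old memory value, which is why success is detected below as
+// (desired == previous).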
+
+template< bool Signed >
+struct gcc_sparc_cas32 :
+ public gcc_sparc_cas_base
+{
+ typedef typename make_storage_type< 4u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ storage_type previous = expected;
+ __asm__ __volatile__
+ (
+ "cas [%1], %2, %0"
+ : "+r" (desired)
+ : "r" (&storage), "r" (previous)
+ : "memory"
+ );
+ const bool success = (desired == previous);
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = desired;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public cas_based_operations< gcc_sparc_cas32< Signed > >
+{
+ typedef cas_based_operations< gcc_sparc_cas32< Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "swap [%1], %0"
+ : "+r" (v)
+ : "r" (&storage)
+ : "memory"
+ );
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+{
+};
+
+template< bool Signed >
+struct gcc_sparc_cas64 :
+ public gcc_sparc_cas_base
+{
+ typedef typename make_storage_type< 8u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ storage_type previous = expected;
+ __asm__ __volatile__
+ (
+ "casx [%1], %2, %0"
+ : "+r" (desired)
+ : "r" (&storage), "r" (previous)
+ : "memory"
+ );
+ const bool success = (desired == previous);
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = desired;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public cas_based_operations< gcc_sparc_cas64< Signed > >
+{
+};
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ switch (order)
+ {
+ case memory_order_release:
+ __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
+ break;
+ case memory_order_acq_rel:
+ __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
+ break;
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ break;
+ case memory_order_relaxed:
+ default:
+ break;
+ }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_sync.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_sync.hpp
new file mode 100644
index 0000000..f4fc333
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_sync.hpp
@@ -0,0 +1,237 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_sync.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct gcc_sync_operations_base
+{
+ static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & memory_order_release) != 0)
+ __sync_synchronize();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & (memory_order_acquire | memory_order_consume)) != 0)
+ __sync_synchronize();
+ }
+};
+
+template< typename T >
+struct gcc_sync_operations :
+ public gcc_sync_operations_base
+{
+ typedef T storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before_store(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_add(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_sub(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
+ // std::atomic<> uses this intrinsic unconditionally. We do so as well. If some architectures actually don't support this, we can always
+ // add a check here and fall back to a CAS loop.
+ if ((order & memory_order_release) != 0)
+ __sync_synchronize();
+ return __sync_lock_test_and_set(&storage, v);
+ }
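+
+ // A hedged sketch of the CAS-loop fallback mentioned above, should it ever be
+ // needed (hypothetical, not enabled for any architecture):
+ //
+ //   storage_type old_val;
+ //   do { old_val = storage; }
+ //   while (!__sync_bool_compare_and_swap(&storage, old_val, v));
+ //   return old_val;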
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type expected2 = expected;
+ storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);
+
+ if (old_val == expected2)
+ {
+ return true;
+ }
+ else
+ {
+ expected = old_val;
+ return false;
+ }
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_and(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_or(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_xor(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & memory_order_release) != 0)
+ __sync_synchronize();
+ return !!__sync_lock_test_and_set(&storage, 1);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ __sync_lock_release(&storage);
+ if (order == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
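+
+// The specializations below pick the smallest storage width for which the compiler
+// advertises native __sync support (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_N) and widen
+// narrower atomics onto it via extending_cas_based_operations.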
+
+#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 1u, Signed > :
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
+ public gcc_sync_operations< typename make_storage_type< 1u, Signed >::type >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >
+#else
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
+#endif
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 2u, Signed > :
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
+ public gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >
+#else
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
+#endif
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 4u, Signed > :
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ public gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >
+#else
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
+#endif
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 8u, Signed > :
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ public gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >
+#else
+ public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
+#endif
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >
+{
+};
+#endif
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __sync_synchronize();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_x86.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_x86.hpp
new file mode 100644
index 0000000..f18227f
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_x86.hpp
@@ -0,0 +1,510 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_x86.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__x86_64__)
+#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "rdx"
+#else
+#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "edx"
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct gcc_x86_operations_base
+{
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & memory_order_release) != 0)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & memory_order_acquire) != 0)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
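+
+// On x86 these fences reduce to compiler barriers: the hardware's TSO memory model
+// already gives loads acquire semantics and stores release semantics, so only
+// memory_order_seq_cst stores need a real atomic instruction, which is why store()
+// below delegates to exchange() in that case.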
+
+template< typename T, typename Derived >
+struct gcc_x86_operations :
+ public gcc_x86_operations_base
+{
+ typedef T storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_seq_cst)
+ {
+ fence_before(order);
+ storage = v;
+ fence_after(order);
+ }
+ else
+ {
+ Derived::exchange(storage, v, order);
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return Derived::fetch_add(storage, -v, order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
+{
+ typedef gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddb %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : "cc", "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgb %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgb %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "q" (desired)
+ : "cc", "memory"
+ );
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ __asm__ __volatile__\
+ (\
+ "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
+ ".align 16\n\t"\
+ "1: movb %[arg], %%dl\n\t"\
+ op " %%al, %%dl\n\t"\
+ "lock; cmpxchgb %%dl, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage)\
+ : [arg] "q" (argument)\
+ : "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+ )
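+
+ // The macro expands to a cmpxchg retry loop: %al holds the current value, %dl is
+ // loaded with op(argument, current), and "lock; cmpxchgb" publishes it; on a
+ // mismatch cmpxchg reloads %al with the fresh value and the loop retries.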
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
+{
+ typedef gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddw %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : "cc", "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgw %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgw %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "q" (desired)
+ : "cc", "memory"
+ );
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ __asm__ __volatile__\
+ (\
+ "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
+ ".align 16\n\t"\
+ "1: movw %[arg], %%dx\n\t"\
+ op " %%ax, %%dx\n\t"\
+ "lock; cmpxchgw %%dx, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage)\
+ : [arg] "q" (argument)\
+ : "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
+{
+ typedef gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddl %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : "cc", "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgl %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgl %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "r" (desired)
+ : "cc", "memory"
+ );
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ __asm__ __volatile__\
+ (\
+ "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
+ ".align 16\n\t"\
+ "1: movl %[arg], %%edx\n\t"\
+ op " %%eax, %%edx\n\t"\
+ "lock; cmpxchgl %%edx, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage)\
+ : [arg] "r" (argument)\
+ : "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public cas_based_operations< gcc_dcas_x86< Signed > >
+{
+};
+
+#elif defined(__x86_64__)
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
+{
+ typedef gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddq %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : "cc", "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgq %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgq %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "r" (desired)
+ : "cc", "memory"
+ );
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ __asm__ __volatile__\
+ (\
+ "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
+ ".align 16\n\t"\
+ "1: movq %[arg], %%rdx\n\t"\
+ op " %%rax, %%rdx\n\t"\
+ "lock; cmpxchgq %%rdx, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage)\
+ : [arg] "r" (argument)\
+ : "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public cas_based_operations< gcc_dcas_x86_64< Signed > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order == memory_order_seq_cst)
+ {
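+        // mfence requires SSE2; on older CPUs a lock-prefixed read-modify-write
+        // on the top of the stack acts as an equivalent full memory barrier.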
+ __asm__ __volatile__
+ (
+#if defined(__x86_64__) || defined(__SSE2__)
+ "mfence\n"
+#else
+ "lock; addl $0, (%%esp)\n"
+#endif
+ ::: "memory"
+ );
+ }
+ else if ((order & (memory_order_acquire | memory_order_release)) != 0)
+ {
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#undef BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_x86_dcas.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_x86_dcas.hpp
new file mode 100644
index 0000000..5e00535
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_gcc_x86_dcas.hpp
@@ -0,0 +1,308 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_x86_dcas.hpp
+ *
+ * This header contains implementation of the double-width CAS primitive for x86.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+template< bool Signed >
+struct gcc_dcas_x86
+{
+ typedef typename make_storage_type< 8u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
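+        // An 8-byte SSE or x87 move is performed as a single atomic access on
+        // x86 only when the operand is naturally aligned, hence the alignment
+        // check; unaligned storage falls back to the cmpxchg8b loop below.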
+ if ((((uint32_t)&storage) & 0x00000007) == 0)
+ {
+#if defined(__SSE2__)
+ __asm__ __volatile__
+ (
+#if defined(__AVX__)
+ "vmovq %1, %%xmm4\n\t"
+ "vmovq %%xmm4, %0\n\t"
+#else
+ "movq %1, %%xmm4\n\t"
+ "movq %%xmm4, %0\n\t"
+#endif
+ : "=m" (storage)
+ : "m" (v)
+ : "memory", "xmm4"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "fildll %1\n\t"
+ "fistpll %0\n\t"
+ : "=m" (storage)
+ : "m" (v)
+ : "memory"
+ );
+#endif
+ }
+ else
+ {
+#if defined(__PIC__)
+ uint32_t scratch;
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %[scratch]\n\t"
+ "movl %[value_lo], %%ebx\n\t"
+ "movl 0(%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b 0(%[dest])\n\t"
+ "jne 1b\n\t"
+ "movl %[scratch], %%ebx"
+ : [scratch] "=m,m" (scratch)
+ : [value_lo] "a,a" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
+ : "cc", "edx", "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "movl 0(%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b 0(%[dest])\n\t"
+ "jne 1b\n\t"
+ :
+ : [value_lo] "b,b" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
+ : "cc", "eax", "edx", "memory"
+ );
+#endif
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type value;
+
+ if ((((uint32_t)&storage) & 0x00000007) == 0)
+ {
+#if defined(__SSE2__)
+ __asm__ __volatile__
+ (
+#if defined(__AVX__)
+ "vmovq %1, %%xmm4\n\t"
+ "vmovq %%xmm4, %0\n\t"
+#else
+ "movq %1, %%xmm4\n\t"
+ "movq %%xmm4, %0\n\t"
+#endif
+ : "=m" (value)
+ : "m" (storage)
+ : "memory", "xmm4"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "fildll %1\n\t"
+ "fistpll %0\n\t"
+ : "=m" (value)
+ : "m" (storage)
+ : "memory"
+ );
+#endif
+ }
+ else
+ {
+#if defined(__clang__)
+        // Clang cannot allocate eax:edx register pairs, but it does provide the __sync intrinsics
+ value = __sync_val_compare_and_swap(&storage, (storage_type)0, (storage_type)0);
+#else
+        // We don't care about the comparison result here; the previous value will be stored into value anyway.
+        // Likewise, the ebx and ecx contents don't matter; they just have to equal eax and edx before the cmpxchg8b.
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %%eax\n\t"
+ "movl %%ecx, %%edx\n\t"
+ "lock; cmpxchg8b %[storage]"
+ : "=&A" (value)
+ : [storage] "m" (storage)
+ : "cc", "memory"
+ );
+#endif
+ }
+
+ return value;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+#if defined(__clang__)
+        // Clang cannot allocate eax:edx register pairs, but it does provide the __sync intrinsics
+ storage_type old_expected = expected;
+ expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
+ return expected == old_expected;
+#elif defined(__PIC__)
+        // Make sure ebx is saved and restored properly in case
+        // of position independent code. To make this work, the
+        // register constraints are set up so that ebx cannot be
+        // picked by accident, e.g. as the base address of the variable
+        // to be modified. Accessing "scratch" is always safe,
+        // as it can only be placed on the stack (and is therefore
+        // accessed through ebp or esp only).
+        //
+        // In theory, we could push/pop ebx onto/off the stack, but moves
+        // to a prepared stack slot turn out to be faster.
+
+ uint32_t scratch;
+ bool success;
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %[scratch]\n\t"
+ "movl %[desired_lo], %%ebx\n\t"
+ "lock; cmpxchg8b %[dest]\n\t"
+ "movl %[scratch], %%ebx\n\t"
+ "sete %[success]"
+ : "+A,A,A,A,A,A" (expected), [dest] "+m,m,m,m,m,m" (storage), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success)
+ : [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32))
+ : "cc", "memory"
+ );
+ return success;
+#else
+ bool success;
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg8b %[dest]\n\t"
+ "sete %[success]"
+ : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
+ : "b,b" ((uint32_t)desired), "c,c" ((uint32_t)(desired >> 32))
+ : "cc", "memory"
+ );
+ return success;
+#endif
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+template< bool Signed >
+struct gcc_dcas_x86_64
+{
+ typedef typename make_storage_type< 16u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
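+        // x86-64 has no 16-byte atomic store instruction, so the store is
+        // implemented as a cmpxchg16b loop that retries until the previous
+        // value has been replaced with the new one.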
+ uint64_t const* p_value = (uint64_t const*)&v;
+ __asm__ __volatile__
+ (
+ "movq 0(%[dest]), %%rax\n\t"
+ "movq 8(%[dest]), %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b 0(%[dest])\n\t"
+ "jne 1b"
+ :
+ : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage)
+ : "cc", "rax", "rdx", "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+#if defined(__clang__)
+        // Clang cannot allocate rax:rdx register pairs, but it does provide the __sync intrinsics
+ storage_type value = storage_type();
+ return __sync_val_compare_and_swap(&storage, value, value);
+#else
+ storage_type value;
+
+        // We don't care about the comparison result here; the previous value will be stored into value anyway.
+        // Likewise, the rbx and rcx contents don't matter; they just have to equal rax and rdx before the cmpxchg16b.
+ __asm__ __volatile__
+ (
+ "movq %%rbx, %%rax\n\t"
+ "movq %%rcx, %%rdx\n\t"
+ "lock; cmpxchg16b %[storage]"
+ : "=&A" (value)
+ : [storage] "m" (storage)
+ : "cc", "memory"
+ );
+
+ return value;
+#endif
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+#if defined(__clang__)
+        // Clang cannot allocate rax:rdx register pairs, but it does provide the __sync intrinsics
+ storage_type old_expected = expected;
+ expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
+ return expected == old_expected;
+#else
+ uint64_t const* p_desired = (uint64_t const*)&desired;
+ bool success;
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg16b %[dest]\n\t"
+ "sete %[success]"
+ : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
+ : "b,b" (p_desired[0]), "c,c" (p_desired[1])
+ : "cc", "memory"
+ );
+ return success;
+#endif
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_linux_arm.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_linux_arm.hpp
new file mode 100644
index 0000000..25167b1
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_linux_arm.hpp
@@ -0,0 +1,177 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009, 2011 Helge Bahmann
+ * Copyright (c) 2009 Phil Endecott
+ * Copyright (c) 2013 Tim Blechmann
+ * Linux-specific code by Phil Endecott
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_linux_arm.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// Different ARM processors have different atomic instructions. In particular,
+// architecture versions before v6 (which are still in widespread use, e.g. the
+// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
+// On Linux the kernel provides some support that lets us abstract away from
+// these differences: it provides emulated CAS and barrier functions at special
+// addresses that are guaranteed not to be interrupted by the kernel. Using
+// this facility is slightly slower than inline assembler would be, but much
+// faster than a system call.
+//
+// While this emulated CAS is "strong" in the sense that it does not fail
+// "spuriously" (i.e.: it never fails to perform the exchange when the value
+// found equals the value expected), it does not return the found value on
+// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
+// return the found value on failure, and we have to manually load this value
+// after the emulated CAS reports failure. This in turn introduces a race
+// between the CAS failing (due to the "wrong" value being found) and subsequently
+// loading (which might turn up the "right" value). From an application's
+// point of view this looks like "spurious failure", and therefore the
+// emulated CAS is only good enough to provide compare_exchange_weak
+// semantics.
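+//
+// For illustration only (and not part of Boost), a minimal sketch of how these
+// kernel-provided helpers are invoked directly; the entry addresses are the
+// documented kuser helper addresses also used by the code below:
+//
+//   typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int* ptr);
+//   typedef void (*kuser_dmb_t)(void);
+//
+//   inline bool kuser_cas(volatile int* p, int expected, int desired)
+//   {
+//       // __kuser_cmpxchg returns zero when the exchange was performed
+//       return ((kuser_cmpxchg_t)0xffff0fc0)(expected, desired, p) == 0;
+//   }
+//
+//   inline void kuser_barrier()
+//   {
+//       ((kuser_dmb_t)0xffff0fa0)(); // __kuser_memory_barrier
+//   }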
+
+struct linux_arm_cas_base
+{
+ static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & memory_order_release) != 0)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ typedef void (*kernel_dmb_t)(void);
+ ((kernel_dmb_t)0xffff0fa0)();
+ }
+};
+
+template< bool Signed >
+struct linux_arm_cas :
+ public linux_arm_cas_base
+{
+ typedef typename make_storage_type< 4u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before_store(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
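+        // The kernel-provided CAS may appear to fail spuriously (see the note
+        // at the top of this header), so retry while the observed value still
+        // equals the expected one; only a genuine mismatch is reported.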
+ while (true)
+ {
+ storage_type tmp = expected;
+ if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order))
+ return true;
+ if (tmp != expected)
+ {
+ expected = tmp;
+ return false;
+ }
+ }
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ typedef storage_type (*kernel_cmpxchg32_t)(storage_type oldval, storage_type newval, volatile storage_type* ptr);
+
+ if (((kernel_cmpxchg32_t)0xffff0fc0)(expected, desired, &storage) == 0)
+ {
+ return true;
+ }
+ else
+ {
+ expected = storage;
+ return false;
+ }
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 1u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 2u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public cas_based_operations< linux_arm_cas< Signed > >
+{
+};
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ linux_arm_cas_base::hardware_full_fence();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_arm.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_arm.hpp
new file mode 100644
index 0000000..349f7a5
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_arm.hpp
@@ -0,0 +1,820 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_msvc_arm.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
+
+#include <intrin.h>
+#include <boost/memory_order.hpp>
+#include <boost/type_traits/make_signed.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_DETAIL_ARM_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p))
+#define BOOST_ATOMIC_DETAIL_ARM_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p))
+#define BOOST_ATOMIC_DETAIL_ARM_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p))
+#define BOOST_ATOMIC_DETAIL_ARM_LOAD64(p) __iso_volatile_load64((const volatile __int64*)(p))
+#define BOOST_ATOMIC_DETAIL_ARM_STORE8(p, v) __iso_volatile_store8((volatile __int8*)(p), (__int8)(v))
+#define BOOST_ATOMIC_DETAIL_ARM_STORE16(p, v) __iso_volatile_store16((volatile __int16*)(p), (__int16)(v))
+#define BOOST_ATOMIC_DETAIL_ARM_STORE32(p, v) __iso_volatile_store32((volatile __int32*)(p), (__int32)(v))
+#define BOOST_ATOMIC_DETAIL_ARM_STORE64(p, v) __iso_volatile_store64((volatile __int64*)(p), (__int64)(v))
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// A note about memory_order_consume. Technically, this architecture makes it possible to omit
+// the memory barrier after a consume load, since it supports data dependency ordering.
+// However, some compiler optimizations may break seemingly valid code that relies on data
+// dependency tracking, by injecting bogus branches to aid out-of-order execution.
+// This can happen not only in Boost.Atomic code but also in the user's code, which we have no
+// control over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
+
+struct msvc_arm_operations_base
+{
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ __dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later
+ }
+
+ static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ if ((order & memory_order_release) != 0)
+ hardware_full_fence();
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ if (order == memory_order_seq_cst)
+ hardware_full_fence();
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ hardware_full_fence();
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order cas_common_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ // Combine order flags together and promote memory_order_consume to memory_order_acquire
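+        // In Boost's memory_order enumeration memory_order_acquire is the bit
+        // directly above memory_order_consume, so shifting the consume bit left
+        // by one promotes a consume request to acquire.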
+ return static_cast< memory_order >(((failure_order | success_order) & ~memory_order_consume) | (((failure_order | success_order) & memory_order_consume) << 1u));
+ }
+};
+
+template< typename T, typename Derived >
+struct msvc_arm_operations :
+ public msvc_arm_operations_base
+{
+ typedef T storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ typedef typename make_signed< storage_type >::type signed_storage_type;
+ return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ Derived::store(storage, (storage_type)0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
+{
+ typedef msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before_store(order);
+ BOOST_ATOMIC_DETAIL_ARM_STORE8(&storage, v);
+ base_type::fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD8(&storage);
+ base_type::fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected, old_val;
+
+ switch (cas_common_order(success_order, failure_order))
+ {
+ case memory_order_relaxed:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(&storage, desired, previous));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(&storage, desired, previous));
+ break;
+ case memory_order_release:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(&storage, desired, previous));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
+ break;
+ }
+ expected = old_val;
+
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
+ break;
+ }
+ return v;
+ }
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
+{
+ typedef msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before_store(order);
+ BOOST_ATOMIC_DETAIL_ARM_STORE16(&storage, v);
+ base_type::fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD16(&storage);
+ base_type::fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected, old_val;
+
+ switch (cas_common_order(success_order, failure_order))
+ {
+ case memory_order_relaxed:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(&storage, desired, previous));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(&storage, desired, previous));
+ break;
+ case memory_order_release:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(&storage, desired, previous));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
+ break;
+ }
+ expected = old_val;
+
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
+ break;
+ }
+ return v;
+ }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
+{
+ typedef msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before_store(order);
+ BOOST_ATOMIC_DETAIL_ARM_STORE32(&storage, v);
+ base_type::fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD32(&storage);
+ base_type::fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected, old_val;
+
+ switch (cas_common_order(success_order, failure_order))
+ {
+ case memory_order_relaxed:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(&storage, desired, previous));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(&storage, desired, previous));
+ break;
+ case memory_order_release:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(&storage, desired, previous));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
+ break;
+ }
+ expected = old_val;
+
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
+ break;
+ }
+ return v;
+ }
+};
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
+{
+ typedef msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before_store(order);
+ BOOST_ATOMIC_DETAIL_ARM_STORE64(&storage, v);
+ base_type::fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD64(&storage);
+ base_type::fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected, old_val;
+
+ switch (cas_common_order(success_order, failure_order))
+ {
+ case memory_order_relaxed:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(&storage, desired, previous));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(&storage, desired, previous));
+ break;
+ case memory_order_release:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(&storage, desired, previous));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
+ break;
+ }
+ expected = old_val;
+
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
+ break;
+ }
+ return v;
+ }
+};
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ if (order != memory_order_relaxed)
+ msvc_arm_operations_base::hardware_full_fence();
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#undef BOOST_ATOMIC_DETAIL_ARM_LOAD8
+#undef BOOST_ATOMIC_DETAIL_ARM_LOAD16
+#undef BOOST_ATOMIC_DETAIL_ARM_LOAD32
+#undef BOOST_ATOMIC_DETAIL_ARM_LOAD64
+#undef BOOST_ATOMIC_DETAIL_ARM_STORE8
+#undef BOOST_ATOMIC_DETAIL_ARM_STORE16
+#undef BOOST_ATOMIC_DETAIL_ARM_STORE32
+#undef BOOST_ATOMIC_DETAIL_ARM_STORE64
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_common.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_common.hpp
new file mode 100644
index 0000000..53628f3
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_common.hpp
@@ -0,0 +1,38 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_msvc_common.hpp
+ *
+ * This header contains common tools for MSVC implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// Define compiler barriers
+#if defined(__INTEL_COMPILER)
+#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() __memory_barrier()
+#elif defined(_MSC_VER) && !defined(_WIN32_WCE)
+extern "C" void _ReadWriteBarrier(void);
+#pragma intrinsic(_ReadWriteBarrier)
+#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() _ReadWriteBarrier()
+#endif
+
+#ifndef BOOST_ATOMIC_DETAIL_COMPILER_BARRIER
+#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER()
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_x86.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_x86.hpp
new file mode 100644
index 0000000..51db0fe
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_msvc_x86.hpp
@@ -0,0 +1,877 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_msvc_x86.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/type_traits/make_signed.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#endif
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16))
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// frame pointer register 'ebx' modified by inline assembly code. See the note below.
+#pragma warning(disable: 4731)
+#endif
+
+#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
+extern "C" void _mm_mfence(void);
+#pragma intrinsic(_mm_mfence)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*
+ * Implementation note for asm blocks.
+ *
+ * http://msdn.microsoft.com/en-us/data/k1a8ss06%28v=vs.105%29
+ *
+ * Some SSE types require eight-byte stack alignment, forcing the compiler to emit dynamic stack-alignment code.
+ * To be able to access both the local variables and the function parameters after the alignment, the compiler
+ * maintains two frame pointers. If the compiler performs frame pointer omission (FPO), it will use EBP and ESP.
+ * If the compiler does not perform FPO, it will use EBX and EBP. To ensure code runs correctly, do not modify EBX
+ * in asm code if the function requires dynamic stack alignment, as that would clobber the frame pointer.
+ * Either move the eight-byte aligned types out of the function, or avoid using EBX.
+ *
+ * Since we have no way of knowing whether the compiler uses FPO, we always have to save and restore ebx
+ * whenever we have to clobber it. Additionally, we disable warning C4731 above so that the compiler
+ * doesn't complain about the ebx use.
+ */
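+
+/*
+ * For illustration only (not part of Boost): the ebx save/restore pattern the
+ * note above prescribes, sketched with hypothetical names. A stack slot is
+ * used instead of push/pop, matching the operations in this header.
+ *
+ *   uint32_t backup;
+ *   __asm
+ *   {
+ *       mov backup, ebx   ; preserve ebx, which may serve as a frame pointer
+ *       ...               ; sequence that clobbers ebx, e.g. lock cmpxchg8b
+ *       mov ebx, backup   ; restore ebx before leaving the asm block
+ *   }
+ */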
+
+struct msvc_x86_operations_base
+{
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
+ // Use mfence only if SSE2 is available
+ _mm_mfence();
+#else
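+        // Without SSE2 there is no mfence; a locked exchange on a dummy
+        // variable provides an equivalent full fence.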
+ long tmp;
+ BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
+#endif
+ }
+
+ static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_load(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ // On x86 and x86_64 there is no need for a hardware barrier,
+ // even if seq_cst memory order is requested, because all
+ // seq_cst writes are implemented with lock-prefixed operations
+        // or xchg, which has an implied lock prefix. Therefore, normal loads
+ // are already ordered with seq_cst stores on these architectures.
+ }
+};
+
+template< typename T, typename Derived >
+struct msvc_x86_operations :
+ public msvc_x86_operations_base
+{
+ typedef T storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
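+        // Anything weaker than seq_cst maps to a plain store between compiler
+        // barriers; a seq_cst store is routed through exchange so that it
+        // becomes a locked xchg and is globally ordered.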
+ if (order != memory_order_seq_cst)
+ {
+ fence_before(order);
+ storage = v;
+ fence_after(order);
+ }
+ else
+ {
+ Derived::exchange(storage, v, order);
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ typedef typename make_signed< storage_type >::type signed_storage_type;
+ return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
+{
+ typedef msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
+ expected = old_val;
+ return (previous == old_val);
+ }
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
+ }
+#else
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
+ return res;
+ }
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
+ }
+#else
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
+ return res;
+ }
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
+ }
+#else
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
+ return res;
+ }
+#endif
+};
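+
+/*
+ * Note on the CAS-loop fallbacks above: on failure, compare_exchange_strong
+ * reloads "res" with the value currently in storage, so every iteration
+ * retries against a fresh snapshot, and "res" ends up holding the value that
+ * was in place immediately before the successful update.
+ */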
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
+{
+ typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
+ expected = old_val;
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
+ }
+};
+
+#elif defined(_M_IX86)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
+{
+ typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xadd byte ptr [edx], al
+ mov v, al
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ xchg byte ptr [edx], al
+ mov v, al
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(success_order);
+ bool success;
+ __asm
+ {
+ mov esi, expected
+ mov edi, storage
+ movzx eax, byte ptr [esi]
+ movzx edx, desired
+ lock cmpxchg byte ptr [edi], dl
+ mov byte ptr [esi], al
+ sete success
+ };
+ // The success and failure fences are equivalent anyway
+ base_type::fence_after(success_order);
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ xor edx, edx
+ mov edi, storage
+ movzx ebx, v
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ and dl, bl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, al
+ mov ebx, backup
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ xor edx, edx
+ mov edi, storage
+ movzx ebx, v
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ or dl, bl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, al
+ mov ebx, backup
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ xor edx, edx
+ mov edi, storage
+ movzx ebx, v
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ xor dl, bl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, al
+ mov ebx, backup
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+};
+
+#else
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
+{
+ typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
+ expected = old_val;
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
+ }
+};
+
+#elif defined(_M_IX86)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
+{
+ typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xadd word ptr [edx], ax
+ mov v, ax
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ xchg word ptr [edx], ax
+ mov v, ax
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(success_order);
+ bool success;
+ __asm
+ {
+ mov esi, expected
+ mov edi, storage
+ movzx eax, word ptr [esi]
+ movzx edx, desired
+ lock cmpxchg word ptr [edi], dx
+ mov word ptr [esi], ax
+ sete success
+ };
+ // The success and failure fences are equivalent anyway
+ base_type::fence_after(success_order);
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ xor edx, edx
+ mov edi, storage
+ movzx ebx, v
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ and dx, bx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, ax
+ mov ebx, backup
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ xor edx, edx
+ mov edi, storage
+ movzx ebx, v
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ or dx, bx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, ax
+ mov ebx, backup
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ xor edx, edx
+ mov edi, storage
+ movzx ebx, v
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ xor dx, bx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, ax
+ mov ebx, backup
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+};
+
+#else
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+{
+};
+
+#endif
+
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+template< bool Signed >
+struct msvc_dcas_x86
+{
+ typedef typename make_storage_type< 8u, Signed >::type storage_type;
+
+ // Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
+ //
+ // The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically:
+ // * Reading or writing a quadword aligned on a 64-bit boundary
+ //
+ // Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64-bit native types for storage and dynamic memory allocations
+ // have at least 8-byte alignment. The only unfortunate case is when an atomic<> is placed on the stack and is not 8-byte aligned (as on 32-bit Windows).
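+ //
+ // In principle the required alignment could be forced with BOOST_ALIGNMENT(8)
+ // on the variable (illustrative sketch only: BOOST_ALIGNMENT(8) storage_type value;),
+ // but this header instead detects misalignment at run time and falls back to a
+ // lock cmpxchg8b loop, which is atomic regardless of alignment.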
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type volatile* p = &storage;
+ if (((uint32_t)p & 0x00000007) == 0)
+ {
+#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
+#if defined(__AVX__)
+ __asm
+ {
+ mov edx, p
+ vmovq xmm4, v
+ vmovq qword ptr [edx], xmm4
+ };
+#else
+ __asm
+ {
+ mov edx, p
+ movq xmm4, v
+ movq qword ptr [edx], xmm4
+ };
+#endif
+#else
+ __asm
+ {
+ mov edx, p
+ fild v
+ fistp qword ptr [edx]
+ };
+#endif
+ }
+ else
+ {
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ mov edi, p
+ mov ebx, dword ptr [v]
+ mov ecx, dword ptr [v + 4]
+ mov eax, dword ptr [edi]
+ mov edx, dword ptr [edi + 4]
+ align 16
+ again:
+ lock cmpxchg8b qword ptr [edi]
+ jne again
+ mov ebx, backup
+ };
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type const volatile* p = &storage;
+ storage_type value;
+
+ if (((uint32_t)p & 0x00000007) == 0)
+ {
+#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
+#if defined(__AVX__)
+ __asm
+ {
+ mov edx, p
+ vmovq xmm4, qword ptr [edx]
+ vmovq value, xmm4
+ };
+#else
+ __asm
+ {
+ mov edx, p
+ movq xmm4, qword ptr [edx]
+ movq value, xmm4
+ };
+#endif
+#else
+ __asm
+ {
+ mov edx, p
+ fild qword ptr [edx]
+ fistp value
+ };
+#endif
+ }
+ else
+ {
+ // We don't care about the comparison result here; the previous value will be stored into value anyway.
+ // Nor do the initial ebx and ecx values matter, as long as ebx equals eax and ecx equals edx before
+ // cmpxchg8b, so that a successful comparison writes back the value that is already there.
+ __asm
+ {
+ mov edi, p
+ mov eax, ebx
+ mov edx, ecx
+ lock cmpxchg8b qword ptr [edi]
+ mov dword ptr [value], eax
+ mov dword ptr [value + 4], edx
+ };
+ }
+
+ return value;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type volatile* p = &storage;
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+ const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected);
+ const bool result = (old_val == expected);
+ expected = old_val;
+ return result;
+#else
+ bool result;
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ mov edi, p
+ mov esi, expected
+ mov ebx, dword ptr [desired]
+ mov ecx, dword ptr [desired + 4]
+ mov eax, dword ptr [esi]
+ mov edx, dword ptr [esi + 4]
+ lock cmpxchg8b qword ptr [edi]
+ mov dword ptr [esi], eax
+ mov dword ptr [esi + 4], edx
+ mov ebx, backup
+ sete result
+ };
+ return result;
+#endif
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public cas_based_operations< msvc_dcas_x86< Signed > >
+{
+};
+
+#elif defined(_M_AMD64)
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
+{
+ typedef msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
+ expected = old_val;
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
+ }
+};
+
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+template< bool Signed >
+struct msvc_dcas_x86_64
+{
+ typedef typename make_storage_type< 16u, Signed >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type value = const_cast< storage_type& >(storage);
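+ // On failure, the interlocked operation refreshes "value" with the current
+ // contents of storage, so each iteration retries with an up-to-date comparand.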
+ while (!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, v, &value)) {}
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type value = storage_type();
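+ // The comparand ("value", initially zero) is always updated with the current
+ // contents of storage; if the comparison happens to succeed, zero is written
+ // back over zero, so this is a pure load either way.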
+ BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, value, &value);
+ return value;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, desired, &expected);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public cas_based_operations< msvc_dcas_x86_64< Signed > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ if (order == memory_order_seq_cst)
+ msvc_x86_operations_base::hardware_full_fence();
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/ops_windows.hpp b/3rdParty/Boost/src/boost/atomic/detail/ops_windows.hpp
new file mode 100644
index 0000000..1b4b04c
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/ops_windows.hpp
@@ -0,0 +1,215 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_windows.hpp
+ *
+ * This header contains the implementation of the \c operations template.
+ *
+ * This implementation is the most basic version for Windows. It should
+ * work for any non-MSVC-like compiler as long as the Interlocked WinAPI
+ * functions are available. This version is also used for WinCE.
+ *
+ * Notably, this implementation is not as efficient as other
+ * versions based on compiler intrinsics.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/type_traits/make_signed.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct windows_operations_base
+{
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
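+ // There is no portable fence intrinsic in this fallback; an interlocked
+ // operation on a dummy variable provides the full barrier instead.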
+ long tmp;
+ BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
+ }
+
+ static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+};
+
+template< typename T, typename Derived >
+struct windows_operations :
+ public windows_operations_base
+{
+ typedef T storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Derived::exchange(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
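+ // There is no plain interlocked load, so the current value is read by
+ // adding zero: fetch_add returns the previous value and implies a full barrier.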
+ return Derived::fetch_add(const_cast< storage_type volatile& >(storage), (storage_type)0, order);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ typedef typename make_signed< storage_type >::type signed_storage_type;
+ return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
+{
+ typedef windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ base_type::fence_before(success_order);
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
+ expected = old_val;
+ // The success and failure fences are the same anyway
+ base_type::fence_after(success_order);
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
+ base_type::fence_after(order);
+ return v;
+#else
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
+ return res;
+#endif
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
+ base_type::fence_after(order);
+ return v;
+#else
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
+ return res;
+#endif
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
+ base_type::fence_after(order);
+ return v;
+#else
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
+ return res;
+#endif
+ }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+{
+};
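+
+/*
+ * 8- and 16-bit operations are emulated on top of the 32-bit ones: roughly,
+ * extending_cas_based_operations widens the value to the 32-bit storage and
+ * runs compare-and-swap loops, truncating the arithmetic to the narrow width
+ * (see ops_extending_cas_based.hpp).
+ */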
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ if (order == memory_order_seq_cst)
+ windows_operations_base::hardware_full_fence();
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/pause.hpp b/3rdParty/Boost/src/boost/atomic/detail/pause.hpp
new file mode 100644
index 0000000..15d7a02
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/pause.hpp
@@ -0,0 +1,43 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * (C) Copyright 2013 Tim Blechmann
+ * (C) Copyright 2013 Andrey Semashev
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))
+extern "C" void _mm_pause(void);
+#pragma intrinsic(_mm_pause)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+BOOST_FORCEINLINE void pause() BOOST_NOEXCEPT
+{
+#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))
+ _mm_pause();
+
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+ __asm__ __volatile__("pause;");
+
+#endif
+}
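+
+/*
+ * Illustrative usage sketch (hypothetical caller, not part of this header):
+ * spin-wait loops call pause() between attempts to reduce pipeline flushes
+ * and power consumption on x86, e.g.
+ *
+ *   while (flag.test_and_set(boost::memory_order_acquire))
+ *       boost::atomics::detail::pause();
+ */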
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/platform.hpp b/3rdParty/Boost/src/boost/atomic/detail/platform.hpp
new file mode 100644
index 0000000..76ad4eb
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/platform.hpp
@@ -0,0 +1,115 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/platform.hpp
+ *
+ * This header defines macros for target platform detection.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
+
+// Compiler-based backends
+#if ((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\
+ (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))) &&\
+ (\
+ (__GCC_ATOMIC_BOOL_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_CHAR_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_SHORT_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_INT_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_LONG_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_LLONG_LOCK_FREE + 0) == 2\
+ )
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic
+
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86
+
+#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc
+
+// This list of ARM architecture versions comes from Apple's arm/arch.h header.
+// I don't know how complete it is.
+#elif defined(__GNUC__) &&\
+ (\
+ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||\
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) ||\
+ defined(__ARM_ARCH_6ZK__) ||\
+ defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\
+ defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)\
+ )
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_arm
+
+#elif defined(__GNUC__) && defined(__sparc_v9__)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparc
+
+#elif defined(__GNUC__) && defined(__alpha__)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_alpha
+
+#elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) &&\
+ (\
+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) ||\
+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) ||\
+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) ||\
+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\
+ )
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sync
+
+#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86
+
+#elif defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_M_ARM)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_arm
+
+#endif
+
+// OS-based backends
+#if !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+
+#if defined(__linux__) && defined(__arm__)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM linux_arm
+
+#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM windows
+
+#endif
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+
+#endif // !defined(BOOST_ATOMIC_FORCE_FALLBACK)
+
+#if !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+#define BOOST_ATOMIC_DETAIL_PLATFORM emulated
+#define BOOST_ATOMIC_EMULATED
+#endif
+
+#define BOOST_ATOMIC_DETAIL_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_PLATFORM).hpp>
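+
+// For example (sketch): with BOOST_ATOMIC_DETAIL_PLATFORM defined as msvc_x86,
+// BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_) expands to
+// <boost/atomic/detail/ops_msvc_x86.hpp>, which is how e.g.
+// operations_lockfree.hpp selects the backend header to #include.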
+
+#endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/detail/storage_type.hpp b/3rdParty/Boost/src/boost/atomic/detail/storage_type.hpp
new file mode 100644
index 0000000..a024f1d
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/detail/storage_type.hpp
@@ -0,0 +1,168 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2013 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/storage_type.hpp
+ *
+ * This header defines the underlying types used as storage.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
+
+#include <cstring>
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< unsigned int Size >
+struct buffer_storage
+{
+ unsigned char data[Size];
+
+ BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
+ {
+ bool result = true;
+ for (unsigned int i = 0; i < Size && result; ++i)
+ {
+ result &= data[i] == 0;
+ }
+ return result;
+ }
+
+ BOOST_FORCEINLINE bool operator== (buffer_storage const& that) const BOOST_NOEXCEPT
+ {
+ return std::memcmp(data, that.data, Size) == 0;
+ }
+
+ BOOST_FORCEINLINE bool operator!= (buffer_storage const& that) const BOOST_NOEXCEPT
+ {
+ return std::memcmp(data, that.data, Size) != 0;
+ }
+};
+
+template< unsigned int Size, bool Signed >
+struct make_storage_type
+{
+ typedef buffer_storage< Size > type;
+};
+
+template< >
+struct make_storage_type< 1u, false >
+{
+ typedef boost::uint8_t type;
+};
+
+template< >
+struct make_storage_type< 1u, true >
+{
+ typedef boost::int8_t type;
+};
+
+template< >
+struct make_storage_type< 2u, false >
+{
+ typedef boost::uint16_t type;
+};
+
+template< >
+struct make_storage_type< 2u, true >
+{
+ typedef boost::int16_t type;
+};
+
+template< >
+struct make_storage_type< 4u, false >
+{
+ typedef boost::uint32_t type;
+};
+
+template< >
+struct make_storage_type< 4u, true >
+{
+ typedef boost::int32_t type;
+};
+
+template< >
+struct make_storage_type< 8u, false >
+{
+ typedef boost::uint64_t type;
+};
+
+template< >
+struct make_storage_type< 8u, true >
+{
+ typedef boost::int64_t type;
+};
+
+#if defined(BOOST_HAS_INT128)
+
+template< >
+struct make_storage_type< 16u, false >
+{
+ typedef boost::uint128_type type;
+};
+
+template< >
+struct make_storage_type< 16u, true >
+{
+ typedef boost::int128_type type;
+};
+
+#elif !defined(BOOST_NO_ALIGNMENT)
+
+struct BOOST_ALIGNMENT(16) storage128_t
+{
+ boost::uint64_t data[2];
+
+ BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
+ {
+ return data[0] == 0 && data[1] == 0;
+ }
+};
+
+BOOST_FORCEINLINE bool operator== (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT
+{
+ return left.data[0] == right.data[0] && left.data[1] == right.data[1];
+}
+BOOST_FORCEINLINE bool operator!= (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT
+{
+ return !(left == right);
+}
+
+template< bool Signed >
+struct make_storage_type< 16u, Signed >
+{
+ typedef storage128_t type;
+};
+
+#endif
+
+template< typename T >
+struct storage_size_of
+{
+ enum _
+ {
+ size = sizeof(T),
+ value = (size == 3 ? 4 : (size >= 5 && size <= 7 ? 8 : (size >= 9 && size <= 15 ? 16 : size)))
+ };
+};
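+
+// Worked examples of the rounding above: a 3-byte type is given 4 bytes of
+// storage, 5..7-byte types get 8 bytes, and 9..15-byte types get 16 bytes;
+// all other sizes are used as-is.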
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
diff --git a/3rdParty/Boost/src/boost/atomic/fences.hpp b/3rdParty/Boost/src/boost/atomic/fences.hpp
new file mode 100644
index 0000000..31e3040
--- /dev/null
+++ b/3rdParty/Boost/src/boost/atomic/fences.hpp
@@ -0,0 +1,67 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/fences.hpp
+ *
+ * This header contains the definitions of the \c atomic_thread_fence and \c atomic_signal_fence functions.
+ */
+
+#ifndef BOOST_ATOMIC_FENCES_HPP_INCLUDED_
+#define BOOST_ATOMIC_FENCES_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/operations.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+/*
+ * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE;
+ * see the comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
+ */
+
+namespace boost {
+
+namespace atomics {
+
+#if BOOST_ATOMIC_THREAD_FENCE > 0
+BOOST_FORCEINLINE void atomic_thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ detail::thread_fence(order);
+}
+#else
+BOOST_FORCEINLINE void atomic_thread_fence(memory_order) BOOST_NOEXCEPT
+{
+ detail::lockpool::thread_fence();
+}
+#endif
+
+#if BOOST_ATOMIC_SIGNAL_FENCE > 0
+BOOST_FORCEINLINE void atomic_signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ detail::signal_fence(order);
+}
+#else
+BOOST_FORCEINLINE void atomic_signal_fence(memory_order) BOOST_NOEXCEPT
+{
+ detail::lockpool::signal_fence();
+}
+#endif
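+
+/*
+ * Illustrative usage sketch (hypothetical variables "data" and "flag"): a
+ * release/acquire handoff expressed with standalone fences instead of
+ * per-operation memory orders.
+ *
+ *   // producer thread:
+ *   data = 42;
+ *   boost::atomic_thread_fence(boost::memory_order_release);
+ *   flag.store(true, boost::memory_order_relaxed);
+ *
+ *   // consumer thread:
+ *   while (!flag.load(boost::memory_order_relaxed)) {}
+ *   boost::atomic_thread_fence(boost::memory_order_acquire);
+ *   assert(data == 42);
+ */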
+
+} // namespace atomics
+
+using atomics::atomic_thread_fence;
+using atomics::atomic_signal_fence;
+
+} // namespace boost
+
+#endif // BOOST_ATOMIC_FENCES_HPP_INCLUDED_