Diffstat (limited to '3rdParty/Boost/src/libs/atomic/doc')
-rw-r--r-- | 3rdParty/Boost/src/libs/atomic/doc/Jamfile.v2   |   26
-rw-r--r-- | 3rdParty/Boost/src/libs/atomic/doc/atomic.hpp   |  547
-rw-r--r-- | 3rdParty/Boost/src/libs/atomic/doc/atomic.qbk   |  828
-rw-r--r-- | 3rdParty/Boost/src/libs/atomic/doc/examples.qbk |  398
-rw-r--r-- | 3rdParty/Boost/src/libs/atomic/doc/logo.png     |  bin | 0 -> 11094 bytes
-rw-r--r-- | 3rdParty/Boost/src/libs/atomic/doc/logo.svg     | 1053
-rw-r--r-- | 3rdParty/Boost/src/libs/atomic/doc/platform.qbk |  312
7 files changed, 3164 insertions, 0 deletions
diff --git a/3rdParty/Boost/src/libs/atomic/doc/Jamfile.v2 b/3rdParty/Boost/src/libs/atomic/doc/Jamfile.v2
new file mode 100644
index 0000000..c293a66
--- /dev/null
+++ b/3rdParty/Boost/src/libs/atomic/doc/Jamfile.v2
@@ -0,0 +1,26 @@
+# Boost.Atomic library documentation Jamfile
+#
+# Copyright Helge Bahmann 2011.
+# Copyright Tim Blechmann 2012.
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import quickbook ;
+import boostbook : boostbook ;
+
+xml atomic : atomic.qbk ;
+
+boostbook standalone
+  : atomic
+  : <xsl:param>boost.root=../../../..
+    <xsl:param>boost.libraries=../../../libraries.htm
+    <format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/libs/atomic/doc/html
+  ;
+
+install css : [ glob $(BOOST_ROOT)/doc/src/*.css ]
+  : <location>html ;
+install images : [ glob $(BOOST_ROOT)/doc/src/images/*.png ]
+  : <location>html/images ;
+explicit css ;
+explicit images ;
diff --git a/3rdParty/Boost/src/libs/atomic/doc/atomic.hpp b/3rdParty/Boost/src/libs/atomic/doc/atomic.hpp
new file mode 100644
index 0000000..60e61c2
--- /dev/null
+++ b/3rdParty/Boost/src/libs/atomic/doc/atomic.hpp
@@ -0,0 +1,547 @@
+/** \file boost/atomic.hpp */
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/* This is just a pseudo-header file fed to doxygen to more easily
+generate the class documentation; it will be replaced by proper
+documentation down the road. */
+
+namespace boost {
+
+/**
+  \brief Memory ordering constraints
+
+  This defines the relative order of one atomic operation
+  and other memory operations (loads, stores, other atomic operations)
+  executed by the same thread.
+
+  The order of operations specified by the programmer in the
+  source code ("program order") does not necessarily match
+  the order in which they are actually executed on the target system:
+  both the compiler and the processor may reorder operations
+  quite arbitrarily. <B>Specifying the wrong ordering
+  constraint will therefore generally result in an incorrect program.</B>
+*/
+enum memory_order {
+  /**
+    \brief No constraint
+    Atomic operation and other memory operations may be reordered freely.
+  */
+  memory_order_relaxed,
+  /**
+    \brief Data dependence constraint
+    Atomic operation must strictly precede any memory operation that
+    computationally depends on the outcome of the atomic operation.
+  */
+  memory_order_consume,
+  /**
+    \brief Acquire memory
+    Atomic operation must strictly precede all memory operations that
+    follow in program order.
+  */
+  memory_order_acquire,
+  /**
+    \brief Release memory
+    Atomic operation must strictly follow all memory operations that
+    precede it in program order.
+  */
+  memory_order_release,
+  /**
+    \brief Acquire and release memory
+    Combines the effects of \ref memory_order_acquire and \ref memory_order_release
+  */
+  memory_order_acq_rel,
+  /**
+    \brief Sequentially consistent
+    Produces the same result as \ref memory_order_acq_rel, but additionally
+    enforces globally sequentially consistent execution
+  */
+  memory_order_seq_cst
+};
+
+/**
+  \brief Atomic datatype
+
+  An atomic variable. Provides methods to modify this variable atomically.
+  Valid template parameters are:
+
+  - integral data types (char, short, int, ...)
+  - pointer data types
+  - any other data type that has a non-throwing default
+    constructor and that can be copied via <TT>memcpy</TT>
+
+  Unless specified otherwise, any memory ordering constraint can be used
+  with any of the atomic operations.
+*/
+template<typename Type>
+class atomic {
+public:
+  /**
+    \brief Create uninitialized atomic variable
+    Creates an atomic variable. Its initial value is undefined.
+  */
+  atomic();
+  /**
+    \brief Create and initialize atomic variable
+    \param value Initial value
+    Creates and initializes an atomic variable.
+  */
+  explicit atomic(Type value);
+
+  /**
+    \brief Read the current value of the atomic variable
+    \param order Memory ordering constraint, see \ref memory_order
+    \return Current value of the variable
+
+    Valid memory ordering constraints are:
+    - @c memory_order_relaxed
+    - @c memory_order_consume
+    - @c memory_order_acquire
+    - @c memory_order_seq_cst
+  */
+  Type load(memory_order order=memory_order_seq_cst) const;
+
+  /**
+    \brief Write new value to atomic variable
+    \param value New value
+    \param order Memory ordering constraint, see \ref memory_order
+
+    Valid memory ordering constraints are:
+    - @c memory_order_relaxed
+    - @c memory_order_release
+    - @c memory_order_seq_cst
+  */
+  void store(Type value, memory_order order=memory_order_seq_cst);
+
+  /**
+    \brief Atomically compare and exchange variable
+    \param expected Expected old value
+    \param desired Desired new value
+    \param order Memory ordering constraint, see \ref memory_order
+    \return @c true if value was changed
+
+    Atomically performs the following operation
+
+    \code
+    if (variable==expected) {
+      variable=desired;
+      return true;
+    } else {
+      expected=variable;
+      return false;
+    }
+    \endcode
+
+    This operation may fail "spuriously", i.e. the state of the variable
+    is unchanged even though the expected value was found (this is the
+    case on architectures using "load-linked"/"store-conditional" to
+    implement the operation).
+
+    The established memory order will be @c order if the operation
+    is successful. If the operation is unsuccessful, the
+    memory order will be
+
+    - @c memory_order_relaxed if @c order is @c memory_order_acquire,
+      @c memory_order_relaxed or @c memory_order_consume
+    - @c memory_order_release if @c order is @c memory_order_acq_rel
+      or @c memory_order_release
+    - @c memory_order_seq_cst if @c order is @c memory_order_seq_cst
+  */
+  bool compare_exchange_weak(
+    Type &expected,
+    Type desired,
+    memory_order order=memory_order_seq_cst);
+
+  /**
+    \brief Atomically compare and exchange variable
+    \param expected Expected old value
+    \param desired Desired new value
+    \param success_order Memory ordering constraint if operation
+    is successful
+    \param failure_order Memory ordering constraint if operation is unsuccessful
+    \return @c true if value was changed
+
+    Atomically performs the following operation
+
+    \code
+    if (variable==expected) {
+      variable=desired;
+      return true;
+    } else {
+      expected=variable;
+      return false;
+    }
+    \endcode
+
+    This operation may fail "spuriously", i.e. the state of the variable
+    is unchanged even though the expected value was found (this is the
+    case on architectures using "load-linked"/"store-conditional" to
+    implement the operation).
+
+    The constraint imposed by @c success_order may not be
+    weaker than the constraint imposed by @c failure_order.
+  */
+  bool compare_exchange_weak(
+    Type &expected,
+    Type desired,
+    memory_order success_order,
+    memory_order failure_order);
+  /**
+    \brief Atomically compare and exchange variable
+    \param expected Expected old value
+    \param desired Desired new value
+    \param order Memory ordering constraint, see \ref memory_order
+    \return @c true if value was changed
+
+    Atomically performs the following operation
+
+    \code
+    if (variable==expected) {
+      variable=desired;
+      return true;
+    } else {
+      expected=variable;
+      return false;
+    }
+    \endcode
+
+    In contrast to \ref compare_exchange_weak, this operation will never
+    fail spuriously. Since compare-and-swap must generally be retried
+    in a loop, implementors are advised to prefer \ref compare_exchange_weak
+    where feasible.
+
+    The established memory order will be @c order if the operation
+    is successful. If the operation is unsuccessful, the
+    memory order will be
+
+    - @c memory_order_relaxed if @c order is @c memory_order_acquire,
+      @c memory_order_relaxed or @c memory_order_consume
+    - @c memory_order_release if @c order is @c memory_order_acq_rel
+      or @c memory_order_release
+    - @c memory_order_seq_cst if @c order is @c memory_order_seq_cst
+  */
+  bool compare_exchange_strong(
+    Type &expected,
+    Type desired,
+    memory_order order=memory_order_seq_cst);
+
+  /**
+    \brief Atomically compare and exchange variable
+    \param expected Expected old value
+    \param desired Desired new value
+    \param success_order Memory ordering constraint if operation
+    is successful
+    \param failure_order Memory ordering constraint if operation is unsuccessful
+    \return @c true if value was changed
+
+    Atomically performs the following operation
+
+    \code
+    if (variable==expected) {
+      variable=desired;
+      return true;
+    } else {
+      expected=variable;
+      return false;
+    }
+    \endcode
+
+    In contrast to \ref compare_exchange_weak, this operation will never
+    fail spuriously. Since compare-and-swap must generally be retried
+    in a loop, implementors are advised to prefer \ref compare_exchange_weak
+    where feasible.
+
+    The constraint imposed by @c success_order may not be
+    weaker than the constraint imposed by @c failure_order.
+  */
+  bool compare_exchange_strong(
+    Type &expected,
+    Type desired,
+    memory_order success_order,
+    memory_order failure_order);
+  /**
+    \brief Atomically exchange variable
+    \param value New value
+    \param order Memory ordering constraint, see \ref memory_order
+    \return Old value of the variable
+
+    Atomically exchanges the value of the variable with the new
+    value and returns its old value.
+  */
+  Type exchange(Type value, memory_order order=memory_order_seq_cst);
+
+  /**
+    \brief Atomically add and return old value
+    \param operand Operand
+    \param order Memory ordering constraint, see \ref memory_order
+    \return Old value of the variable
+
+    Atomically adds operand to the variable and returns its
+    old value.
+
+    This method is available only if \c Type is an integral type
+    or a non-void pointer type. If it is a pointer type,
+    @c operand is of type @c ptrdiff_t and the operation
+    is performed following the rules for pointer arithmetic
+    in C++.
+  */
+  Type fetch_add(Type operand, memory_order order=memory_order_seq_cst);
+  /**
+    \brief Atomically subtract and return old value
+    \param operand Operand
+    \param order Memory ordering constraint, see \ref memory_order
+    \return Old value of the variable
+
+    Atomically subtracts operand from the variable and returns its
+    old value.
+
+    This method is available only if \c Type is an integral type
+    or a non-void pointer type. If it is a pointer type,
+    @c operand is of type @c ptrdiff_t and the operation
+    is performed following the rules for pointer arithmetic
+    in C++.
+  */
+  Type fetch_sub(Type operand, memory_order order=memory_order_seq_cst);
+
+  /**
+    \brief Atomically perform bitwise "AND" and return old value
+    \param operand Operand
+    \param order Memory ordering constraint, see \ref memory_order
+    \return Old value of the variable
+
+    Atomically performs bitwise "AND" with the variable and returns its
+    old value.
+
+    This method is available only if \c Type is an integral type.
+  */
+  Type fetch_and(Type operand, memory_order order=memory_order_seq_cst);
+
+  /**
+    \brief Atomically perform bitwise "OR" and return old value
+    \param operand Operand
+    \param order Memory ordering constraint, see \ref memory_order
+    \return Old value of the variable
+
+    Atomically performs bitwise "OR" with the variable and returns its
+    old value.
+
+    This method is available only if \c Type is an integral type.
+  */
+  Type fetch_or(Type operand, memory_order order=memory_order_seq_cst);
+
+  /**
+    \brief Atomically perform bitwise "XOR" and return old value
+    \param operand Operand
+    \param order Memory ordering constraint, see \ref memory_order
+    \return Old value of the variable
+
+    Atomically performs bitwise "XOR" with the variable and returns its
+    old value.
+
+    This method is available only if \c Type is an integral type.
+  */
+  Type fetch_xor(Type operand, memory_order order=memory_order_seq_cst);
+
+  /**
+    \brief Implicit load
+    \return Current value of the variable
+
+    The same as <tt>load(memory_order_seq_cst)</tt>. Avoid using
+    the implicit conversion operator, use \ref load with
+    an explicit memory ordering constraint.
+  */
+  operator Type(void) const;
+  /**
+    \brief Implicit store
+    \param v New value
+    \return Copy of @c v
+
+    The same as <tt>store(v, memory_order_seq_cst)</tt>. Avoid using
+    the implicit conversion operator, use \ref store with
+    an explicit memory ordering constraint.
+  */
+  Type operator=(Type v);
+
+  /**
+    \brief Atomically perform bitwise "AND" and return new value
+    \param operand Operand
+    \return New value of the variable
+
+    The same as <tt>fetch_and(operand, memory_order_seq_cst)&operand</tt>.
+    Avoid using the implicit bitwise "AND" operator, use \ref fetch_and
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type.
+  */
+  Type operator&=(Type operand);
+
+  /**
+    \brief Atomically perform bitwise "OR" and return new value
+    \param operand Operand
+    \return New value of the variable
+
+    The same as <tt>fetch_or(operand, memory_order_seq_cst)|operand</tt>.
+    Avoid using the implicit bitwise "OR" operator, use \ref fetch_or
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type.
+  */
+  Type operator|=(Type operand);
+
+  /**
+    \brief Atomically perform bitwise "XOR" and return new value
+    \param operand Operand
+    \return New value of the variable
+
+    The same as <tt>fetch_xor(operand, memory_order_seq_cst)^operand</tt>.
+    Avoid using the implicit bitwise "XOR" operator, use \ref fetch_xor
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type.
+  */
+  Type operator^=(Type operand);
+
+  /**
+    \brief Atomically add and return new value
+    \param operand Operand
+    \return New value of the variable
+
+    The same as <tt>fetch_add(operand, memory_order_seq_cst)+operand</tt>.
+    Avoid using the implicit add operator, use \ref fetch_add
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type
+    or a non-void pointer type. If it is a pointer type,
+    @c operand is of type @c ptrdiff_t and the operation
+    is performed following the rules for pointer arithmetic
+    in C++.
+  */
+  Type operator+=(Type operand);
+
+  /**
+    \brief Atomically subtract and return new value
+    \param operand Operand
+    \return New value of the variable
+
+    The same as <tt>fetch_sub(operand, memory_order_seq_cst)-operand</tt>.
+    Avoid using the implicit subtract operator, use \ref fetch_sub
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type
+    or a non-void pointer type. If it is a pointer type,
+    @c operand is of type @c ptrdiff_t and the operation
+    is performed following the rules for pointer arithmetic
+    in C++.
+  */
+  Type operator-=(Type operand);
+
+  /**
+    \brief Atomically increment and return new value
+    \return New value of the variable
+
+    The same as <tt>fetch_add(1, memory_order_seq_cst)+1</tt>.
+    Avoid using the implicit increment operator, use \ref fetch_add
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type
+    or a non-void pointer type. If it is a pointer type,
+    the operation is performed following the rules for pointer
+    arithmetic in C++.
+  */
+  Type operator++(void);
+  /**
+    \brief Atomically increment and return old value
+    \return Old value of the variable
+
+    The same as <tt>fetch_add(1, memory_order_seq_cst)</tt>.
+    Avoid using the implicit increment operator, use \ref fetch_add
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type
+    or a non-void pointer type. If it is a pointer type,
+    the operation is performed following the rules for pointer
+    arithmetic in C++.
+  */
+  Type operator++(int);
+  /**
+    \brief Atomically decrement and return new value
+    \return New value of the variable
+
+    The same as <tt>fetch_sub(1, memory_order_seq_cst)-1</tt>.
+    Avoid using the implicit decrement operator, use \ref fetch_sub
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type
+    or a non-void pointer type. If it is a pointer type,
+    the operation is performed following the rules for pointer
+    arithmetic in C++.
+  */
+  Type operator--(void);
+  /**
+    \brief Atomically decrement and return old value
+    \return Old value of the variable
+
+    The same as <tt>fetch_sub(1, memory_order_seq_cst)</tt>.
+    Avoid using the implicit decrement operator, use \ref fetch_sub
+    with an explicit memory ordering constraint.
+
+    This method is available only if \c Type is an integral type
+    or a non-void pointer type. If it is a pointer type,
+    the operation is performed following the rules for pointer
+    arithmetic in C++.
+  */
+  Type operator--(int);
+
+  /** \brief Deleted copy constructor */
+  atomic(const atomic &) = delete;
+  /** \brief Deleted copy assignment */
+  const atomic & operator=(const atomic &) = delete;
+};
+
+/**
+  \brief Insert explicit fence for thread synchronization
+  \param order Memory ordering constraint
+
+  Inserts an explicit fence.
+  The exact semantics depend on the type of fence inserted:
+
+  - \c memory_order_relaxed: No operation
+  - \c memory_order_release: Performs a "release" operation
+  - \c memory_order_acquire or \c memory_order_consume: Performs an
+    "acquire" operation
+  - \c memory_order_acq_rel: Performs both an "acquire" and a "release"
+    operation
+  - \c memory_order_seq_cst: Performs both an "acquire" and a "release"
+    operation and, in addition, there exists a global total order of
+    all \c memory_order_seq_cst operations
+*/
+void atomic_thread_fence(memory_order order);
+
+/**
+  \brief Insert explicit fence for synchronization with a signal handler
+  \param order Memory ordering constraint
+
+  Inserts an explicit fence to synchronize with a signal handler called within
+  the context of the same thread. The fence ensures the corresponding operations
+  around it are complete and/or not started. The exact semantics depend on the
+  type of fence inserted:
+
+  - \c memory_order_relaxed: No operation
+  - \c memory_order_release: Ensures the operations before the fence are complete
+  - \c memory_order_acquire or \c memory_order_consume: Ensures the operations
+    after the fence are not started
+  - \c memory_order_acq_rel or \c memory_order_seq_cst: Ensures the operations
+    around the fence do not cross it
+
+  Note that this call does not affect the visibility order of the memory operations
+  to other threads. It is functionally similar to \c atomic_thread_fence, except that
+  it does not generate any instructions to synchronize hardware threads.
+*/
+void atomic_signal_fence(memory_order order);
+
+}
diff --git a/3rdParty/Boost/src/libs/atomic/doc/atomic.qbk b/3rdParty/Boost/src/libs/atomic/doc/atomic.qbk
new file mode 100644
index 0000000..880fab5
--- /dev/null
+++ b/3rdParty/Boost/src/libs/atomic/doc/atomic.qbk
@@ -0,0 +1,828 @@
+[/
+ / Copyright (c) 2009 Helge Bahmann
+ / Copyright (c) 2014 Andrey Semashev
+ /
+ / Distributed under the Boost Software License, Version 1.0. (See accompanying
+ / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ /]
+
+[library Boost.Atomic
+    [quickbook 1.4]
+    [authors [Bahmann, Helge][Semashev, Andrey]]
+    [copyright 2011 Helge Bahmann]
+    [copyright 2012 Tim Blechmann]
+    [copyright 2013 Andrey Semashev]
+    [id atomic]
+    [dirname atomic]
+    [purpose Atomic operations]
+    [license
+        Distributed under the Boost Software License, Version 1.0.
+        (See accompanying file LICENSE_1_0.txt or copy at
+        [@http://www.boost.org/LICENSE_1_0.txt])
+    ]
+]
+
+[section:introduction Introduction]
+
+[section:introduction_presenting Presenting Boost.Atomic]
+
+[*Boost.Atomic] is a library that provides [^atomic]
+data types and operations on these data types, as well as memory
+ordering constraints required for coordinating multiple threads through
+atomic variables. It implements the interface as defined by the C++11
+standard, but makes this functionality available for platforms lacking
+system/compiler support for this particular C++11 feature.
+
+Users of this library should already be familiar with concurrency
+in general, as well as elementary concepts such as "mutual exclusion".
+
+The implementation makes use of processor-specific instructions where
+possible (via inline assembler, platform libraries or compiler
+intrinsics), and falls back to "emulating" atomic operations through
+locking.
+
+[endsect]
+
+[section:introduction_purpose Purpose]
+
+Operations on "ordinary" variables are not guaranteed to be atomic.
+This means that with [^int n=0] initially, two threads concurrently
+executing
+
+[c++]
+
+    void function()
+    {
+        n ++;
+    }
+
+might result in [^n==1] instead of 2: Each thread will read the
+old value into a processor register, increment it and write the result
+back. Both threads may therefore write [^1], unaware that the other thread
+is doing likewise.
+
+If [^atomic<int> n=0] is declared instead, the same operations on
+this variable will always result in [^n==2], as each operation on this
+variable is ['atomic]: This means that each operation behaves as if it
+were strictly sequentialized with respect to the other.
+
+Atomic variables are useful for two purposes:
+
+* as a means for coordinating multiple threads via custom
+  coordination protocols
+* as faster alternatives to "locked" access to simple variables
+
+Take a look at the [link atomic.usage_examples examples] section
+for common patterns.
+
+[endsect]
+
+[endsect]
+
+[section:thread_coordination Thread coordination using Boost.Atomic]
+
+The most common use of [*Boost.Atomic] is to realize custom
+thread synchronization protocols: The goal is to coordinate
+accesses of threads to shared variables in order to avoid
+"conflicts". The
+programmer must be aware of the fact that
+compilers, CPUs and the cache
+hierarchies may generally reorder memory references at will.
+As a consequence a program such as:
+
+[c++]
+
+    int x = 0, y = 0;
+
+    thread1:
+        x = 1;
+        y = 1;
+
+    thread2:
+        if (y == 1) {
+            assert(x == 1);
+        }
+
+might indeed fail, as there is no guarantee that the read of `x`
+by thread2 "sees" the write by thread1.
+
+[*Boost.Atomic] uses a synchronisation concept based on the
+['happens-before] relation to describe the guarantees under
+which situations such as the above one cannot occur.
+
+The remainder of this section will discuss ['happens-before] in
+a "hands-on" way instead of giving a fully formalized definition.
+The reader is encouraged to additionally have a
+look at the discussion of the correctness of a few of the
+[link atomic.usage_examples examples] afterwards.
+
+[section:mutex Enforcing ['happens-before] through mutual exclusion]
+
+As an introductory example to understand how arguing using
+['happens-before] works, consider two threads synchronizing
+using a common mutex:
+
+[c++]
+
+    mutex m;
+
+    thread1:
+        m.lock();
+        ... /* A */
+        m.unlock();
+
+    thread2:
+        m.lock();
+        ... /* B */
+        m.unlock();
+
+The "lockset-based intuition" would be to argue that A and B
+cannot be executed concurrently, as the code paths require a
+common lock to be held.
+
+One can however also arrive at the same conclusion using
+['happens-before]: Either thread1 or thread2 will succeed first
+at [^m.lock()]. If this is thread1, then as a consequence,
+thread2 cannot succeed at [^m.lock()] before thread1 has executed
+[^m.unlock()]; consequently A ['happens-before] B in this case.
+By symmetry, if thread2 succeeds at [^m.lock()] first, we can
+conclude B ['happens-before] A.
+
+Since this already exhausts all options, we can conclude that
+either A ['happens-before] B or B ['happens-before] A must
+always hold. Obviously, one cannot state ['which] of the two relationships
+holds, but either one is sufficient to conclude that A and B
+cannot conflict.
+
+Compare the [link boost_atomic.usage_examples.example_spinlock spinlock]
+implementation to see how the mutual exclusion concept can be
+mapped to [*Boost.Atomic].
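+
+As a concrete illustration, the pseudocode above could be rendered as the
+following compilable sketch (the function names and the protected variable
+[^shared_value] are illustrative, not part of the library):
+
+[c++]
+
+    #include <boost/thread/thread.hpp>
+    #include <boost/thread/mutex.hpp>
+
+    boost::mutex m;
+    int shared_value = 0; // accessed only while holding m
+
+    void thread1_func()
+    {
+        boost::mutex::scoped_lock guard(m);
+        shared_value = 42;           /* A */
+    }
+
+    void thread2_func()
+    {
+        boost::mutex::scoped_lock guard(m);
+        int observed = shared_value; /* B */
+        (void)observed;
+    }
+
+    int main()
+    {
+        boost::thread t1(&thread1_func);
+        boost::thread t2(&thread2_func);
+        t1.join();
+        t2.join();
+        return 0;
+    }
+
+Whichever thread acquires [^m] second is guaranteed to observe the effects
+of the other thread's critical section, exactly as argued above.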
+
+[endsect]
+
+[section:release_acquire ['happens-before] through [^release] and [^acquire]]
+
+The most basic pattern for coordinating threads via [*Boost.Atomic]
+uses [^release] and [^acquire] on an atomic variable for coordination: If ...
+
+* ... thread1 performs an operation A,
+* ... thread1 subsequently writes (or atomically
+  modifies) an atomic variable with [^release] semantic,
+* ... thread2 reads (or atomically reads-and-modifies)
+  this value from the same atomic variable with
+  [^acquire] semantic, and
+* ... thread2 subsequently performs an operation B,
+
+... then A ['happens-before] B.
+
+Consider the following example:
+
+[c++]
+
+    atomic<int> a(0);
+
+    thread1:
+        ... /* A */
+        a.fetch_add(1, memory_order_release);
+
+    thread2:
+        int tmp = a.load(memory_order_acquire);
+        if (tmp == 1) {
+            ... /* B */
+        } else {
+            ... /* C */
+        }
+
+In this example, two avenues for execution are possible:
+
+* The [^store] operation by thread1 precedes the [^load] by thread2:
+  In this case thread2 will execute B, and "A ['happens-before] B"
+  holds as all of the criteria above are satisfied.
+* The [^load] operation by thread2 precedes the [^store] by thread1:
+  In this case, thread2 will execute C, but "A ['happens-before] C"
+  does ['not] hold: thread2 does not read the value written by
+  thread1 through [^a].
+
+Therefore, A and B cannot conflict, but A and C ['can] conflict.
+
+[endsect]
+
+[section:fences Fences]
+
+Ordering constraints are generally specified together with an access to
+an atomic variable. It is however also possible to issue "fence"
+operations in isolation; in this case the fence operates in
+conjunction with preceding (for `acquire`, `consume` or `seq_cst`
+operations) or succeeding (for `release` or `seq_cst`) atomic
+operations.
+
+The example from the previous section could also be written in
+the following way:
+
+[c++]
+
+    atomic<int> a(0);
+
+    thread1:
+        ... /* A */
+        atomic_thread_fence(memory_order_release);
+        a.fetch_add(1, memory_order_relaxed);
+
+    thread2:
+        int tmp = a.load(memory_order_relaxed);
+        if (tmp == 1) {
+            atomic_thread_fence(memory_order_acquire);
+            ... /* B */
+        } else {
+            ... /* C */
+        }
+
+This provides the same ordering guarantees as previously, but
+elides a (possibly expensive) memory ordering operation in
+the case where C is executed.
+
+[endsect]
+
+[section:release_consume ['happens-before] through [^release] and [^consume]]
+
+The second pattern for coordinating threads via [*Boost.Atomic]
+uses [^release] and [^consume] on an atomic variable for coordination: If ...
+
+* ... thread1 performs an operation A,
+* ... thread1 subsequently writes (or atomically modifies) an
+  atomic variable with [^release] semantic,
+* ... thread2 reads (or atomically reads-and-modifies)
+  this value from the same atomic variable with [^consume] semantic, and
+* ... thread2 subsequently performs an operation B that is ['computationally
+  dependent on the value of the atomic variable],
+
+... then A ['happens-before] B.
+
+Consider the following example:
+
+[c++]
+
+    atomic<int> a(0);
+    complex_data_structure data[2];
+
+    thread1:
+        data[1] = ...; /* A */
+        a.store(1, memory_order_release);
+
+    thread2:
+        int index = a.load(memory_order_consume);
+        complex_data_structure tmp = data[index]; /* B */
+
+In this example, two avenues for execution are possible:
+
+* The [^store] operation by thread1 precedes the [^load] by thread2:
+  In this case thread2 will read [^data\[1\]], and "A ['happens-before] B"
+  holds as all of the criteria above are satisfied.
+* The [^load] operation by thread2 precedes the [^store] by thread1:
+  In this case thread2 will read [^data\[0\]], and "A ['happens-before] B"
+  does ['not] hold: thread2 does not read the value written by
+  thread1 through [^a].
+
+Here, the ['happens-before] relationship helps ensure that any
+accesses (presumably writes) to [^data\[1\]] by thread1 happen before
+the accesses (presumably reads) to [^data\[1\]] by thread2:
+Lacking this relationship, thread2 might see stale/inconsistent
+data.
+
+Note that in this example it is essential that operation B is computationally
+dependent on the atomic variable; a variant that replaces the data dependency
+with a control dependency, such as the following program, would therefore
+be erroneous:
+
+[c++]
+
+    atomic<int> a(0);
+    complex_data_structure data[2];
+
+    thread1:
+        data[1] = ...; /* A */
+        a.store(1, memory_order_release);
+
+    thread2:
+        int index = a.load(memory_order_consume);
+        complex_data_structure tmp;
+        if (index == 0)
+            tmp = data[0];
+        else
+            tmp = data[1];
+
+[^consume] is most commonly (and most safely! see
+[link atomic.limitations limitations]) used with
+pointers; compare for example the
+[link boost_atomic.usage_examples.singleton singleton with double-checked locking].
+
+[endsect]
+
+[section:seq_cst Sequential consistency]
+
+The third pattern for coordinating threads via [*Boost.Atomic]
+uses [^seq_cst] for coordination: If ...
+
+* ... thread1 performs an operation A,
+* ... thread1 subsequently performs any operation with [^seq_cst],
+* ... thread1 subsequently performs an operation B,
+* ... thread2 performs an operation C,
+* ... thread2 subsequently performs any operation with [^seq_cst], and
+* ... thread2 subsequently performs an operation D,
+
+then either "A ['happens-before] D" or "C ['happens-before] B" holds.
+
+In this case it does not matter whether thread1 and thread2 operate
+on the same or different atomic variables, or use a "stand-alone"
+[^atomic_thread_fence] operation.
+
+[endsect]
+
+[endsect]
+
+[section:interface Programming interfaces]
+
+[section:configuration Configuration and building]
+
+The library contains header-only and compiled parts. The library is
+header-only for lock-free cases but requires a separate binary to
+implement the lock-based emulation. Users are able to detect whether
+linking to the compiled part is required by checking the
+[link atomic.interface.feature_macros feature macros].
+
+The following macros affect library behavior:
+
+[table
+    [[Macro] [Description]]
+    [[`BOOST_ATOMIC_NO_CMPXCHG16B`] [Affects 64-bit x86 MSVC builds. When defined,
+      the library assumes the target CPU does not support the `cmpxchg16b` instruction used
+      to support 128-bit atomic operations. This is the case with some early 64-bit AMD CPUs;
+      all Intel CPUs and current AMD CPUs support this instruction. The library does not
+      perform runtime detection of this instruction, so running code that uses 128-bit
+      atomics on such CPUs will result in crashes, unless this macro is defined.
+      Note that the macro does not affect GCC and compatible compilers, because the library
+      infers this information from the compiler-defined macros.]]
+    [[`BOOST_ATOMIC_FORCE_FALLBACK`] [When defined, all operations are implemented with locks.
+      This is mostly used for testing and should not be used in real-world projects.]]
+    [[`BOOST_ATOMIC_DYN_LINK` and `BOOST_ALL_DYN_LINK`] [Control library linking. If defined,
+      the library assumes dynamic linking, otherwise static. The latter macro affects all Boost
+      libraries, not just [*Boost.Atomic].]]
+    [[`BOOST_ATOMIC_NO_LIB` and `BOOST_ALL_NO_LIB`] [Control library auto-linking on Windows.
+      When defined, disables auto-linking. The latter macro affects all Boost libraries,
+      not just [*Boost.Atomic].]]
+]
+
+Besides macros, it is important to specify the correct compiler options for the target CPU.
+With GCC and compatible compilers this affects whether particular atomic operations are
+lock-free or not.
+
+The Boost building process is described in the
+[@http://www.boost.org/doc/libs/release/more/getting_started/ Getting Started guide].
+For example, you can build [*Boost.Atomic] with the following command line:
+
+[pre
+    bjam --with-atomic variant=release instruction-set=core2 stage
+]
+
+[endsect]
+
+[section:interface_memory_order Memory order]
+
+    #include <boost/memory_order.hpp>
+
+The enumeration [^boost::memory_order] defines the following
+values to represent memory ordering constraints:
+
+[table
+    [[Constant] [Description]]
+    [[`memory_order_relaxed`] [No ordering constraint.
+      Informally speaking, succeeding operations may be reordered before the
+      atomic operation, and preceding operations may be reordered after it.
+      This constraint is suitable only when
+      either a) further operations do not depend on the outcome
+      of the atomic operation, or b) ordering is enforced through
+      stand-alone `atomic_thread_fence` operations. The operation on
+      the atomic value itself is still atomic, though.
+    ]]
+    [[`memory_order_release`] [
+      Perform a `release` operation. Informally speaking,
+      prevents all preceding memory operations from being reordered
+      past this point.
+    ]]
+    [[`memory_order_acquire`] [
+      Perform an `acquire` operation. Informally speaking,
+      prevents succeeding memory operations from being reordered
+      before this point.
+    ]]
+    [[`memory_order_consume`] [
+      Perform a `consume` operation. More relaxed (and
+      on some architectures more efficient) than `memory_order_acquire`,
+      as it only affects succeeding operations that are
+      computationally dependent on the value retrieved from
+      an atomic variable.
+    ]]
+    [[`memory_order_acq_rel`] [Perform both a `release` and an `acquire` operation.]]
+    [[`memory_order_seq_cst`] [
+      Enforce sequential consistency. Implies `memory_order_acq_rel`, but
+      additionally enforces a total order for all operations so qualified.
+    ]]
+]
+
+See the section on [link atomic.thread_coordination ['happens-before]] for an explanation
+of the various ordering constraints.
+
+[endsect]
+
+[section:interface_atomic_object Atomic objects]
+
+    #include <boost/atomic/atomic.hpp>
+
+[^boost::atomic<['T]>] provides methods for atomically accessing
+variables of a suitable type [^['T]]. The type is suitable if
+it is /trivially copyable/ (3.9/9 \[basic.types\]). The following are
+examples of types compatible with this requirement:
+
+* a scalar type (e.g. integer, boolean, enum or pointer type)
+* a [^class] or [^struct] that has no non-trivial copy or move
+  constructors or assignment operators, has a trivial destructor,
+  and that is comparable via [^memcmp].
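+
+For illustration, a user-defined type satisfying these requirements could
+look like the following sketch (the type [^position] is hypothetical, not
+part of the library):
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    // Trivially copyable: no user-defined copy/move constructors or
+    // assignment operators, a trivial destructor, no virtual functions,
+    // and (on common ABIs) no padding, so instances compare via memcmp.
+    struct position
+    {
+        short x;
+        short y;
+    };
+
+    boost::atomic<position> pos; // a valid instantiation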
+
+Note that classes with virtual functions or virtual base classes
+do not satisfy the requirements. Also be warned
+that structures with "padding" between data members may compare
+non-equal via [^memcmp] even though all members are equal.
+
+[section:interface_atomic_generic [^boost::atomic<['T]>] template class]
+
+All atomic objects support the following operations:
+
+[table
+    [[Syntax] [Description]]
+    [
+        [`atomic()`]
+        [Initialize to an unspecified value]
+    ]
+    [
+        [`atomic(T initial_value)`]
+        [Initialize to [^initial_value]]
+    ]
+    [
+        [`bool is_lock_free()`]
+        [Checks if the atomic object is lock-free]
+    ]
+    [
+        [`T load(memory_order order)`]
+        [Return current value]
+    ]
+    [
+        [`void store(T value, memory_order order)`]
+        [Write new value to atomic variable]
+    ]
+    [
+        [`T exchange(T new_value, memory_order order)`]
+        [Exchange current value with `new_value`, returning current value]
+    ]
+    [
+        [`bool compare_exchange_weak(T & expected, T desired, memory_order order)`]
+        [Compare current value with `expected`, change it to `desired` if it matches.
+         Returns `true` if an exchange has been performed, and always writes the
+         previous value back to `expected`. May fail spuriously, so must generally be
+         retried in a loop.]
+    ]
+    [
+        [`bool compare_exchange_weak(T & expected, T desired, memory_order success_order, memory_order failure_order)`]
+        [Compare current value with `expected`, change it to `desired` if it matches.
+         Returns `true` if an exchange has been performed, and always writes the
+         previous value back to `expected`. May fail spuriously, so must generally be
+         retried in a loop.]
+    ]
+    [
+        [`bool compare_exchange_strong(T & expected, T desired, memory_order order)`]
+        [Compare current value with `expected`, change it to `desired` if it matches.
+         Returns `true` if an exchange has been performed, and always writes the
+         previous value back to `expected`.]
+    ]
+    [
+        [`bool compare_exchange_strong(T & expected, T desired, memory_order success_order, memory_order failure_order)`]
+        [Compare current value with `expected`, change it to `desired` if it matches.
+         Returns `true` if an exchange has been performed, and always writes the
+         previous value back to `expected`.]
+    ]
+]
+
+The `order` argument defaults to `memory_order_seq_cst`.
+
+The `compare_exchange_weak`/`compare_exchange_strong` variants
+taking four parameters differ from the three-parameter variants
+in that they allow a different memory ordering constraint to
+be specified in case the operation fails.
+
+In addition to these explicit operations, each
+[^atomic<['T]>] object also supports
+implicit [^store] and [^load] through the use of "assignment"
+and "conversion to [^T]" operators. Avoid using these operators,
+as they do not allow explicit specification of a memory ordering
+constraint.
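+
+As noted above, `compare_exchange_weak` may fail spuriously and is therefore
+typically used in a retry loop; the same loop is the usual way to build
+read-modify-write operations that have no dedicated `fetch_xxx` method. The
+following sketch is illustrative (the saturating increment is not a library
+operation):
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    // Atomically increment `value`, but never beyond `limit`.
+    int saturating_increment(boost::atomic<int> & value, int limit)
+    {
+        int expected = value.load(boost::memory_order_relaxed);
+        int desired;
+        do {
+            desired = expected < limit ? expected + 1 : limit;
+            // On failure, `expected` is reloaded with the current value
+            // and `desired` is recomputed; spurious failures just retry.
+        } while (!value.compare_exchange_weak(expected, desired,
+                                              boost::memory_order_acq_rel,
+                                              boost::memory_order_relaxed));
+        return desired;
+    }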
+
+[endsect]
+
+[section:interface_atomic_integral [^boost::atomic<['integral]>] template class]
+
+In addition to the operations listed in the previous section,
+[^boost::atomic<['I]>] for integral
+types [^['I]] supports the following operations:
+
+[table
+    [[Syntax] [Description]]
+    [
+        [`T fetch_add(T v, memory_order order)`]
+        [Add `v` to variable, returning previous value]
+    ]
+    [
+        [`T fetch_sub(T v, memory_order order)`]
+        [Subtract `v` from variable, returning previous value]
+    ]
+    [
+        [`T fetch_and(T v, memory_order order)`]
+        [Apply bit-wise "and" with `v` to variable, returning previous value]
+    ]
+    [
+        [`T fetch_or(T v, memory_order order)`]
+        [Apply bit-wise "or" with `v` to variable, returning previous value]
+    ]
+    [
+        [`T fetch_xor(T v, memory_order order)`]
+        [Apply bit-wise "xor" with `v` to variable, returning previous value]
+    ]
+]
+
+The `order` argument defaults to `memory_order_seq_cst`.
+
+In addition to these explicit operations, each
+[^boost::atomic<['I]>] object also
+supports implicit pre-/post- increment/decrement, as well
+as the operators `+=`, `-=`, `&=`, `|=` and `^=`.
+Avoid using these operators,
+as they do not allow explicit specification of a memory ordering
+constraint.
+
+[endsect]
+
+[section:interface_atomic_pointer [^boost::atomic<['pointer]>] template class]
+
+In addition to the operations applicable to all atomic objects,
+[^boost::atomic<['P]>] for pointer
+types [^['P]] (other than [^void] pointers) supports the following operations:
+
+[table
+    [[Syntax] [Description]]
+    [
+        [`T fetch_add(ptrdiff_t v, memory_order order)`]
+        [Add `v` to variable, returning previous value]
+    ]
+    [
+        [`T fetch_sub(ptrdiff_t v, memory_order order)`]
+        [Subtract `v` from variable, returning previous value]
+    ]
+]
+
+The `order` argument defaults to `memory_order_seq_cst`.
+
+In addition to these explicit operations, each
+[^boost::atomic<['P]>] object also
+supports implicit pre-/post- increment/decrement, as well
+as the operators `+=`, `-=`. Avoid using these operators,
+as they do not allow explicit specification of a memory ordering
+constraint.
+
+[endsect]
+
+[endsect]
+
+[section:interface_fences Fences]
+
+    #include <boost/atomic/fences.hpp>
+
+[table
+    [[Syntax] [Description]]
+    [
+        [`void atomic_thread_fence(memory_order order)`]
+        [Issue fence for coordination with other threads.]
+    ]
+    [
+        [`void atomic_signal_fence(memory_order order)`]
+        [Issue fence for coordination with a signal handler (only in the same thread).]
+    ]
+]
+
+[endsect]
+
+[section:feature_macros Feature testing macros]
+
+    #include <boost/atomic/capabilities.hpp>
+
+[*Boost.Atomic] defines a number of macros that allow compile-time
+detection of whether an atomic data type is implemented using
+"true" atomic operations, or whether an internal "lock" is
+used to provide atomicity.
+The following macros will be
+defined to `0` if operations on the data type always
+require a lock, to `1` if operations on the data type may
+sometimes require a lock, and to `2` if they are always lock-free:
+
+[table
+    [[Macro] [Description]]
+    [
+        [`BOOST_ATOMIC_FLAG_LOCK_FREE`]
+        [Indicate whether `atomic_flag` is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_BOOL_LOCK_FREE`]
+        [Indicate whether `atomic<bool>` is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_CHAR_LOCK_FREE`]
+        [Indicate whether `atomic<char>` (including signed/unsigned variants) is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_CHAR16_T_LOCK_FREE`]
+        [Indicate whether `atomic<char16_t>` is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_CHAR32_T_LOCK_FREE`]
+        [Indicate whether `atomic<char32_t>` is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_WCHAR_T_LOCK_FREE`]
+        [Indicate whether `atomic<wchar_t>` is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_SHORT_LOCK_FREE`]
+        [Indicate whether `atomic<short>` (including signed/unsigned variants) is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_INT_LOCK_FREE`]
+        [Indicate whether `atomic<int>` (including signed/unsigned variants) is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_LONG_LOCK_FREE`]
+        [Indicate whether `atomic<long>` (including signed/unsigned variants) is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_LLONG_LOCK_FREE`]
+        [Indicate whether `atomic<long long>` (including signed/unsigned variants) is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_ADDRESS_LOCK_FREE` or `BOOST_ATOMIC_POINTER_LOCK_FREE`]
+        [Indicate whether `atomic<T *>` is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_THREAD_FENCE`]
+        [Indicate whether the `atomic_thread_fence` function is lock-free]
+    ]
+    [
+        [`BOOST_ATOMIC_SIGNAL_FENCE`]
+        [Indicate whether the `atomic_signal_fence` function is lock-free]
+    ]
+]
+
+In addition to these standard macros, [*Boost.Atomic] also defines a number of extension macros,
+which can also be useful. Like the standard ones, these macros are defined to values `0`, `1` and `2`
+to indicate whether the corresponding operations are lock-free or not.
+
+[table
+    [[Macro] [Description]]
+    [
+        [`BOOST_ATOMIC_INT8_LOCK_FREE`]
+        [Indicate whether `atomic<int8_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_INT16_LOCK_FREE`]
+        [Indicate whether `atomic<int16_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_INT32_LOCK_FREE`]
+        [Indicate whether `atomic<int32_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_INT64_LOCK_FREE`]
+        [Indicate whether `atomic<int64_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_INT128_LOCK_FREE`]
+        [Indicate whether `atomic<int128_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT`]
+        [Defined after including `atomic_flag.hpp`, if the implementation
+         does not support the `BOOST_ATOMIC_FLAG_INIT` macro for static
+         initialization of `atomic_flag`. This macro is typically defined
+         for pre-C++11 compilers.]
+    ]
+]
+ +[endsect] + +[endsect] + +[section:usage_examples Usage examples] + +[include examples.qbk] + +[endsect] + +[/ +[section:platform_support Implementing support for additional platforms] + +[include platform.qbk] + +[endsect] +] + +[/ [xinclude autodoc.xml] ] + +[section:limitations Limitations] + +While [*Boost.Atomic] strives to implement the atomic operations +from C++11 as faithfully as possible, there are a few +limitations that cannot be lifted without compiler support: + +* [*Using non-POD-classes as template parameter to `atomic<T>` results + in undefined behavior]: This means that any class containing a + constructor, destructor, virtual methods or access control + specifications is not a valid argument in C++98. C++11 relaxes + this slightly by allowing "trivial" classes containing only + empty constructors. [*Advise]: Use only POD types. +* [*C++98 compilers may transform computation- to control-dependency]: + Crucially, `memory_order_consume` only affects computationally-dependent + operations, but in general there is nothing preventing a compiler + from transforming a computation dependency into a control dependency. + A C++11 compiler would be forbidden from such a transformation. + [*Advise]: Use `memory_order_consume` only in conjunction with + pointer values, as the compiler cannot speculate and transform + these into control dependencies. +* [*Fence operations enforce "too strong" compiler ordering]: + Semantically, `memory_order_acquire`/`memory_order_consume` + and `memory_order_release` need to restrain reordering of + memory operations only in one direction. Since there is no + way to express this constraint to the compiler, these act + as "full compiler barriers" in this implementation. In corner + cases this may result in a less efficient code than a C++11 compiler + could generate. +* [*No interprocess fallback]: using `atomic<T>` in shared memory only works + correctly, if `atomic<T>::is_lock_free() == true`. + +[endsect] + +[section:porting Porting] + +[section:unit_tests Unit tests] + +[*Boost.Atomic] provides a unit test suite to verify that the +implementation behaves as expected: + +* [*fallback_api.cpp] verifies that the fallback-to-locking aspect + of [*Boost.Atomic] compiles and has correct value semantics. +* [*native_api.cpp] verifies that all atomic operations have correct + value semantics (e.g. "fetch_add" really adds the desired value, + returning the previous). It is a rough "smoke-test" to help weed + out the most obvious mistakes (for example width overflow, + signed/unsigned extension, ...). +* [*lockfree.cpp] verifies that the [*BOOST_ATOMIC_*_LOCKFREE] macros + are set properly according to the expectations for a given + platform, and that they match up with the [*is_lock_free] member + functions of the [*atomic] object instances. +* [*atomicity.cpp] lets two threads race against each other modifying + a shared variable, verifying that the operations behave atomic + as appropriate. By nature, this test is necessarily stochastic, and + the test self-calibrates to yield 99% confidence that a + positive result indicates absence of an error. This test is + very useful on uni-processor systems with preemption already. +* [*ordering.cpp] lets two threads race against each other accessing + multiple shared variables, verifying that the operations + exhibit the expected ordering behavior. 
+  By nature, this test is
+  necessarily stochastic, and the test attempts to self-calibrate to
+  yield 99% confidence that a positive result indicates absence
+  of an error. This only works on true multi-processor (or multi-core)
+  systems. It does not yield any result on uni-processor systems
+  or emulators (due to there being no observable reordering, even
+  in the order=relaxed case) and will report that fact.
+
+[endsect]
+
+[section:tested_compilers Tested compilers]
+
+[*Boost.Atomic] has been tested on and is known to work on
+the following compilers/platforms:
+
+* gcc 4.x: i386, x86_64, ppc32, ppc64, sparcv9, armv6, alpha
+* Visual Studio Express 2008/Windows XP, x86, x64, ARM
+
+[endsect]
+
+[section:acknowledgements Acknowledgements]
+
+* Adam Wulkiewicz created the logo used on the
+  [@https://github.com/boostorg/atomic GitHub project page]. The logo was
+  taken from his [@https://github.com/awulkiew/boost-logos collection] of
+  Boost logos.
+
+[endsect]
+
+[endsect]
diff --git a/3rdParty/Boost/src/libs/atomic/doc/examples.qbk b/3rdParty/Boost/src/libs/atomic/doc/examples.qbk
new file mode 100644
index 0000000..e34c402
--- /dev/null
+++ b/3rdParty/Boost/src/libs/atomic/doc/examples.qbk
@@ -0,0 +1,398 @@
+[/
+ / Copyright (c) 2009 Helge Bahmann
+ /
+ / Distributed under the Boost Software License, Version 1.0. (See accompanying
+ / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ /]
+
+[section:example_reference_counters Reference counting]
+
+The purpose of a ['reference counter] is to count the number
+of pointers to an object. The object can be destroyed as
+soon as the reference counter reaches zero.
+
+[section Implementation]
+
+[c++]
+
+    #include <boost/intrusive_ptr.hpp>
+    #include <boost/atomic.hpp>
+
+    class X {
+    public:
+        typedef boost::intrusive_ptr<X> pointer;
+        X() : refcount_(0) {}
+
+    private:
+        mutable boost::atomic<int> refcount_;
+        friend void intrusive_ptr_add_ref(const X * x)
+        {
+            x->refcount_.fetch_add(1, boost::memory_order_relaxed);
+        }
+        friend void intrusive_ptr_release(const X * x)
+        {
+            if (x->refcount_.fetch_sub(1, boost::memory_order_release) == 1) {
+                boost::atomic_thread_fence(boost::memory_order_acquire);
+                delete x;
+            }
+        }
+    };
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+    X::pointer x = new X;
+
+[endsect]
+
+[section Discussion]
+
+Increasing the reference counter can always be done with
+[^memory_order_relaxed]: New references to an object can only
+be formed from an existing reference, and passing an existing
+reference from one thread to another must already provide any
+required synchronization.
+
+It is important to ensure that any possible access to the object in
+one thread (through an existing reference) ['happens before]
+deleting the object in a different thread. This is achieved
+by a "release" operation after dropping a reference (any
+access to the object through this reference must obviously
+have happened before), and an "acquire" operation before
+deleting the object.
+
+It would be possible to use [^memory_order_acq_rel] for the
+[^fetch_sub] operation, but this results in unneeded "acquire"
+operations when the reference counter has not yet reached zero
+and may impose a performance penalty.
+
+[endsect]
+
+[endsect]
+
+[section:example_spinlock Spinlock]
+
+The purpose of a ['spin lock] is to prevent multiple threads
+from concurrently accessing a shared data structure. In contrast
+to a mutex, threads will busy-wait and waste CPU cycles instead
+of yielding the CPU to another thread.
+['Do not use spinlocks
+unless you are certain that you understand the consequences.]
+
+[section Implementation]
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    class spinlock {
+    private:
+        typedef enum {Locked, Unlocked} LockState;
+        boost::atomic<LockState> state_;
+
+    public:
+        spinlock() : state_(Unlocked) {}
+
+        void lock()
+        {
+            while (state_.exchange(Locked, boost::memory_order_acquire) == Locked) {
+                /* busy-wait */
+            }
+        }
+        void unlock()
+        {
+            state_.store(Unlocked, boost::memory_order_release);
+        }
+    };
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+    spinlock s;
+
+    s.lock();
+    // access data structure here
+    s.unlock();
+
+[endsect]
+
+[section Discussion]
+
+The purpose of the spinlock is to make sure that one access
+to the shared data structure always strictly "happens before"
+another. The usage of acquire/release in lock/unlock is required
+and sufficient to guarantee this ordering.
+
+It would also be correct to write the "lock" operation in the following
+way:
+
+[c++]
+
+    void lock()
+    {
+        while (state_.exchange(Locked, boost::memory_order_relaxed) == Locked) {
+            /* busy-wait */
+        }
+        atomic_thread_fence(boost::memory_order_acquire);
+    }
+
+This "optimization" is however a) useless and b) may in fact hurt:
+a) Since the thread will be busily spinning on a blocked spinlock,
+it does not matter whether it wastes the CPU cycles with just
+"exchange" operations or with both useless "exchange" and "acquire"
+operations. b) A tight "exchange" loop without any
+memory-synchronizing instruction introduced through an "acquire"
+operation will on some systems monopolize the memory subsystem
+and degrade the performance of other system components.
+
+[endsect]
+
+[endsect]
+
+[section:singleton Singleton with double-checked locking pattern]
+
+The purpose of the ['Singleton with double-checked locking pattern] is to ensure
+that at most one instance of a particular object is created.
+If one instance has been created already, access to the existing
+object should be as light-weight as possible.
+
+[section Implementation]
+
+[c++]
+
+    #include <boost/atomic.hpp>
+    #include <boost/thread/mutex.hpp>
+
+    class X {
+    public:
+        static X * instance()
+        {
+            X * tmp = instance_.load(boost::memory_order_consume);
+            if (!tmp) {
+                boost::mutex::scoped_lock guard(instantiation_mutex);
+                tmp = instance_.load(boost::memory_order_consume);
+                if (!tmp) {
+                    tmp = new X;
+                    instance_.store(tmp, boost::memory_order_release);
+                }
+            }
+            return tmp;
+        }
+    private:
+        static boost::atomic<X *> instance_;
+        static boost::mutex instantiation_mutex;
+    };
+
+    boost::atomic<X *> X::instance_(0);
+    boost::mutex X::instantiation_mutex; // definition needed for the code to link
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+    X * x = X::instance();
+    // dereference x
+
+[endsect]
+
+[section Discussion]
+
+The mutex makes sure that only one instance of the object is
+ever created. The [^instance] method must make sure that any
+dereference of the object strictly "happens after" creating
+the instance in another thread. The use of [^memory_order_release]
+after creating and initializing the object and [^memory_order_consume]
+before dereferencing the object provides this guarantee.
+
+It would be permissible to use [^memory_order_acquire] instead of
+[^memory_order_consume], but this provides a stronger guarantee
+than is required since only operations depending on the value of
+the pointer need to be ordered.
+
+[endsect]
+
+[endsect]
+
+[section:example_ringbuffer Wait-free ring buffer]
+
+A ['wait-free ring buffer] provides a mechanism for relaying objects
+from one single "producer" thread to one single "consumer" thread without
+any locks. The operations on this data structure are "wait-free", which
+means that each operation finishes within a constant number of steps.
+This makes this data structure suitable for use in hard real-time systems
+or for communication with interrupt/signal handlers.
+
+[section Implementation]
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    template<typename T, size_t Size>
+    class ringbuffer {
+    public:
+        ringbuffer() : head_(0), tail_(0) {}
+
+        bool push(const T & value)
+        {
+            size_t head = head_.load(boost::memory_order_relaxed);
+            size_t next_head = next(head);
+            if (next_head == tail_.load(boost::memory_order_acquire))
+                return false;
+            ring_[head] = value;
+            head_.store(next_head, boost::memory_order_release);
+            return true;
+        }
+        bool pop(T & value)
+        {
+            size_t tail = tail_.load(boost::memory_order_relaxed);
+            if (tail == head_.load(boost::memory_order_acquire))
+                return false;
+            value = ring_[tail];
+            tail_.store(next(tail), boost::memory_order_release);
+            return true;
+        }
+    private:
+        size_t next(size_t current)
+        {
+            return (current + 1) % Size;
+        }
+        T ring_[Size];
+        boost::atomic<size_t> head_, tail_;
+    };
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+    ringbuffer<int, 32> r;
+
+    // try to insert an element
+    if (r.push(42)) { /* succeeded */ }
+    else { /* buffer full */ }
+
+    // try to retrieve an element
+    int value;
+    if (r.pop(value)) { /* succeeded */ }
+    else { /* buffer empty */ }
+
+[endsect]
+
+[section Discussion]
+
+The implementation makes sure that the ring indices do
+not "lap around" each other, to ensure that no elements
+are either lost or read twice.
+
+Furthermore it must guarantee that read access to a
+particular object in [^pop] "happens after" it has been
+written in [^push]. This is achieved by writing [^head_]
+with "release" and reading it with "acquire". Conversely
+the implementation also ensures that read access to
+a particular ring element "happens before" rewriting
+this element with a new value, by accessing [^tail_]
+with appropriate ordering constraints.
+
+[endsect]
+
+[endsect]
+
+[section:mp_queue Wait-free multi-producer queue]
+
+The purpose of the ['wait-free multi-producer queue] is to allow
+an arbitrary number of producers to enqueue objects which are
+retrieved and processed in FIFO order by a single consumer.
+
+[section Implementation]
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    template<typename T>
+    class waitfree_queue {
+    public:
+        struct node {
+            T data;
+            node * next;
+        };
+        void push(const T &data)
+        {
+            node * n = new node;
+            n->data = data;
+            node * stale_head = head_.load(boost::memory_order_relaxed);
+            do {
+                n->next = stale_head;
+            } while (!head_.compare_exchange_weak(stale_head, n, boost::memory_order_release));
+        }
+
+        node * pop_all(void)
+        {
+            node * last = pop_all_reverse(), * first = 0;
+            while (last) {
+                node * tmp = last;
+                last = last->next;
+                tmp->next = first;
+                first = tmp;
+            }
+            return first;
+        }
+
+        waitfree_queue() : head_(0) {}
+
+        // alternative interface if ordering is of no importance
+        node * pop_all_reverse(void)
+        {
+            return head_.exchange(0, boost::memory_order_consume);
+        }
+    private:
+        boost::atomic<node *> head_;
+    };
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+    waitfree_queue<int> q;
+
+    // insert elements
+    q.push(42);
+    q.push(2);
+
+    // pop elements
+    waitfree_queue<int>::node * x = q.pop_all();
+    while (x) {
+        waitfree_queue<int>::node * tmp = x;
+        x = x->next;
+        // process tmp->data, probably delete it afterwards
+        delete tmp;
+    }
+
+[endsect]
+
+[section Discussion]
+
+The implementation guarantees that all objects enqueued are
+processed in the order they were enqueued, by building a singly-linked
+list of objects in reverse processing order. The queue is atomically
+emptied by the consumer and brought into correct order.
+
+It must be guaranteed that any access to an object to be enqueued
+by the producer "happens before" any access by the consumer. This
+is assured by inserting objects into the list with ['release] and
+dequeuing them with ['consume] memory order. It is not
+necessary to use ['acquire] memory order in [^waitfree_queue::pop_all]
+because all operations involved depend on the value of
+the atomic pointer through dereferences.
+
+[endsect]
+
+[endsect]
diff --git a/3rdParty/Boost/src/libs/atomic/doc/logo.png b/3rdParty/Boost/src/libs/atomic/doc/logo.png
new file mode 100644
index 0000000..8b12104
--- /dev/null
+++ b/3rdParty/Boost/src/libs/atomic/doc/logo.png
Binary files differ
diff --git a/3rdParty/Boost/src/libs/atomic/doc/logo.svg b/3rdParty/Boost/src/libs/atomic/doc/logo.svg
new file mode 100644
index 0000000..50d078e
--- /dev/null
+++ b/3rdParty/Boost/src/libs/atomic/doc/logo.svg
@@ -0,0 +1,1053 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:xlink="http://www.w3.org/1999/xlink"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="744.09448819"
+   height="1052.3622047"
+   id="svg7933"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="atomic.svg">
+  <defs
+     id="defs7935">
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient4453-7-8-6-7-87"
+       id="linearGradient10687-9"
+       gradientUnits="userSpaceOnUse"
+       x1="753.02301"
+       y1="3132.0801"
+       x2="1146.25"
+       y2="3132.0801" />
+    <linearGradient
+       id="linearGradient4453-7-8-6-7-87">
+      <stop
+         style="stop-color:#aac4dd;stop-opacity:1;"
+         offset="0"
+         id="stop4455-61-8-7-1-8" />
+      <stop
+         style="stop-color:#c2dbe9;stop-opacity:1;"
+         offset="1"
+         id="stop4457-4-1-9-1-4" />
+    </linearGradient>
+    <linearGradient
inkscape:collect="always" + xlink:href="#linearGradient4453-7-8-6-7-87" + id="linearGradient10685-44" + gradientUnits="userSpaceOnUse" + x1="753.02301" + y1="3132.0801" + x2="1146.25" + y2="3132.0801" /> + <linearGradient + id="linearGradient7822"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop7824" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop7826" /> + </linearGradient> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath3387-3-6-4-2-3"> + <path + inkscape:connector-curvature="0" + d="m 862.109,3289.75 -109.086,-190.45 69.122,-124.42 164.511,-0.47 c 0,0 111.044,188.28 116.564,197.63 7.66,0 43.03,0 43.03,0 l -67.03,117.71 -217.111,0 z" + id="path3389-2-0-7-7-58" /> + </clipPath> + <linearGradient + y2="3132.0801" + x2="1146.25" + y1="3132.0801" + x1="753.02301" + gradientUnits="userSpaceOnUse" + id="linearGradient4492-0" + xlink:href="#linearGradient4453-7-8-6-7-87" + inkscape:collect="always" /> + <linearGradient + id="linearGradient7831"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop7833" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop7835" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4453-7-8-6-7-87" + id="linearGradient10691-2" + gradientUnits="userSpaceOnUse" + x1="1026.6899" + y1="2937.73" + x2="1463.14" + y2="2937.73" /> + <linearGradient + id="linearGradient7838"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop7840" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop7842" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4453-7-8-6-7-87" + id="linearGradient10689-9" + gradientUnits="userSpaceOnUse" + x1="1026.6899" + y1="2937.73" + x2="1463.14" + y2="2937.73" /> + <linearGradient + id="linearGradient7845"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop7847" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop7849" /> + </linearGradient> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath3369-1-5-6-1-49"> + <path + inkscape:connector-curvature="0" + d="m 1131.64,3128.5 -104.95,-181.12 116.38,-200.42 208.05,0.94 112.02,191.63 -112.08,188.97 -219.42,0 z" + id="path3371-89-4-1-6-8" /> + </clipPath> + <linearGradient + y2="2937.73" + x2="1463.14" + y1="2937.73" + x1="1026.6899" + gradientUnits="userSpaceOnUse" + id="linearGradient4498-3" + xlink:href="#linearGradient4453-7-8-6-7-87" + inkscape:collect="always" /> + <linearGradient + id="linearGradient7854"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop7856" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop7858" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4459-1-2-8-9-0" + id="linearGradient10695-5" + gradientUnits="userSpaceOnUse" + x1="646.55499" + y1="2736.25" + x2="1088.27" + y2="2736.25" /> + <linearGradient + id="linearGradient4459-1-2-8-9-0"> + <stop + id="stop4461-2-1-5-2-43" + offset="0" + style="stop-color:#839bc2;stop-opacity:1;" /> + <stop + id="stop4463-3-2-8-7-5" + offset="1" + style="stop-color:#9fb6d4;stop-opacity:1;" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4459-1-2-8-9-0" + id="linearGradient10693-2" + gradientUnits="userSpaceOnUse" + x1="646.55499" + y1="2736.25" + x2="1088.27" + y2="2736.25" /> + 
<linearGradient + id="linearGradient7865"> + <stop + id="stop7867" + offset="0" + style="stop-color:#839bc2;stop-opacity:1;" /> + <stop + id="stop7869" + offset="1" + style="stop-color:#9fb6d4;stop-opacity:1;" /> + </linearGradient> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath3351-4-7-3-5-57"> + <path + inkscape:connector-curvature="0" + d="m 757.242,2926.25 -110.687,-189.11 110.656,-190.89 219.437,0 111.622,189.1 -111.59,190.9 -219.438,0 z" + id="path3353-1-9-3-4-77" /> + </clipPath> + <linearGradient + y2="2736.25" + x2="1088.27" + y1="2736.25" + x1="646.55499" + gradientUnits="userSpaceOnUse" + id="linearGradient4510-4" + xlink:href="#linearGradient4459-1-2-8-9-0" + inkscape:collect="always" /> + <linearGradient + id="linearGradient7874"> + <stop + id="stop7876" + offset="0" + style="stop-color:#839bc2;stop-opacity:1;" /> + <stop + id="stop7878" + offset="1" + style="stop-color:#9fb6d4;stop-opacity:1;" /> + </linearGradient> + <radialGradient + inkscape:collect="always" + xlink:href="#radialGradient3327-8-4-8-0-0" + id="radialGradient10699-4" + gradientUnits="userSpaceOnUse" + cx="997.46997" + cy="2896.25" + fx="997.46997" + fy="2896.25" + r="583.73999" /> + <radialGradient + fx="0" + fy="0" + cx="0" + cy="0" + r="1" + gradientUnits="userSpaceOnUse" + gradientTransform="matrix(58.375,0,0,-58.375,99.75,289.625)" + spreadMethod="pad" + id="radialGradient3327-8-4-8-0-0"> + <stop + style="stop-opacity:1;stop-color:#aeaeb3" + offset="0" + id="stop3329-7-0-1-0-4" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="0.949438" + id="stop3331-4-3-0-0-11" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="1" + id="stop3333-2-4-9-7-54" /> + </radialGradient> + <radialGradient + r="583.73999" + fy="2896.25" + fx="997.46997" + cy="2896.25" + cx="997.46997" + gradientUnits="userSpaceOnUse" + id="radialGradient15837" + xlink:href="#radialGradient3327-8-4-8-0-0" + inkscape:collect="always" /> + <radialGradient + fx="0" + fy="0" + cx="0" + cy="0" + r="1" + gradientUnits="userSpaceOnUse" + gradientTransform="matrix(58.375,0,0,-58.375,99.75,289.625)" + spreadMethod="pad" + id="radialGradient7886"> + <stop + style="stop-opacity:1;stop-color:#aeaeb3" + offset="0" + id="stop7888" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="0.949438" + id="stop7890" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="1" + id="stop7892" /> + </radialGradient> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath3323-7-2-4-5-2"> + <path + inkscape:connector-curvature="0" + d="m 997.469,2312.51 c -322.379,0 -583.739,261.36 -583.739,583.74 0,322.38 261.36,583.74 583.739,583.74 322.381,0 583.741,-261.36 583.741,-583.74 0,-322.38 -261.36,-583.74 -583.741,-583.74" + id="path3325-9-2-9-5-8" /> + </clipPath> + <radialGradient + r="583.73999" + fy="2896.25" + fx="997.46997" + cy="2896.25" + cx="997.46997" + gradientUnits="userSpaceOnUse" + id="radialGradient4516-6" + xlink:href="#radialGradient3327-8-4-8-0-0" + inkscape:collect="always" /> + <radialGradient + fx="0" + fy="0" + cx="0" + cy="0" + r="1" + gradientUnits="userSpaceOnUse" + gradientTransform="matrix(58.375,0,0,-58.375,99.75,289.625)" + spreadMethod="pad" + id="radialGradient7897"> + <stop + style="stop-opacity:1;stop-color:#aeaeb3" + offset="0" + id="stop7899" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="0.949438" + id="stop7901" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="1" + id="stop7903" /> + </radialGradient> + <radialGradient + r="583.73999" + 
fy="2896.25" + fx="997.46997" + cy="2896.25" + cx="997.46997" + gradientUnits="userSpaceOnUse" + id="radialGradient7931" + xlink:href="#radialGradient3327-8-4-8-0-0" + inkscape:collect="always" /> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4453-7-8-6-7-87-7" + id="linearGradient10687-9-1" + gradientUnits="userSpaceOnUse" + x1="753.02301" + y1="3132.0801" + x2="1146.25" + y2="3132.0801" /> + <linearGradient + id="linearGradient4453-7-8-6-7-87-7"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop4455-61-8-7-1-8-4" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop4457-4-1-9-1-4-0" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4453-7-8-6-7-87-7" + id="linearGradient10685-44-9" + gradientUnits="userSpaceOnUse" + x1="753.02301" + y1="3132.0801" + x2="1146.25" + y2="3132.0801" /> + <linearGradient + id="linearGradient3083"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop3085" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop3087" /> + </linearGradient> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath3387-3-6-4-2-3-4"> + <path + inkscape:connector-curvature="0" + d="m 862.109,3289.75 -109.086,-190.45 69.122,-124.42 164.511,-0.47 c 0,0 111.044,188.28 116.564,197.63 7.66,0 43.03,0 43.03,0 l -67.03,117.71 -217.111,0 z" + id="path3389-2-0-7-7-58-8" /> + </clipPath> + <linearGradient + y2="3132.0801" + x2="1146.25" + y1="3132.0801" + x1="753.02301" + gradientUnits="userSpaceOnUse" + id="linearGradient4492-0-8" + xlink:href="#linearGradient4453-7-8-6-7-87-7" + inkscape:collect="always" /> + <linearGradient + id="linearGradient3092"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop3094" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop3096" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4453-7-8-6-7-87-7" + id="linearGradient10691-2-2" + gradientUnits="userSpaceOnUse" + x1="1026.6899" + y1="2937.73" + x2="1463.14" + y2="2937.73" /> + <linearGradient + id="linearGradient3099"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop3101" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop3103" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4453-7-8-6-7-87-7" + id="linearGradient10689-9-4" + gradientUnits="userSpaceOnUse" + x1="1026.6899" + y1="2937.73" + x2="1463.14" + y2="2937.73" /> + <linearGradient + id="linearGradient3106"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop3108" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" + id="stop3110" /> + </linearGradient> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath3369-1-5-6-1-49-5"> + <path + inkscape:connector-curvature="0" + d="m 1131.64,3128.5 -104.95,-181.12 116.38,-200.42 208.05,0.94 112.02,191.63 -112.08,188.97 -219.42,0 z" + id="path3371-89-4-1-6-8-5" /> + </clipPath> + <linearGradient + y2="2937.73" + x2="1463.14" + y1="2937.73" + x1="1026.6899" + gradientUnits="userSpaceOnUse" + id="linearGradient4498-3-1" + xlink:href="#linearGradient4453-7-8-6-7-87-7" + inkscape:collect="always" /> + <linearGradient + id="linearGradient3115"> + <stop + style="stop-color:#aac4dd;stop-opacity:1;" + offset="0" + id="stop3117" /> + <stop + style="stop-color:#c2dbe9;stop-opacity:1;" + offset="1" 
+ id="stop3119" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4459-1-2-8-9-0-1" + id="linearGradient10695-5-7" + gradientUnits="userSpaceOnUse" + x1="646.55499" + y1="2736.25" + x2="1088.27" + y2="2736.25" /> + <linearGradient + id="linearGradient4459-1-2-8-9-0-1"> + <stop + id="stop4461-2-1-5-2-43-1" + offset="0" + style="stop-color:#839bc2;stop-opacity:1;" /> + <stop + id="stop4463-3-2-8-7-5-5" + offset="1" + style="stop-color:#9fb6d4;stop-opacity:1;" /> + </linearGradient> + <linearGradient + inkscape:collect="always" + xlink:href="#linearGradient4459-1-2-8-9-0-1" + id="linearGradient10693-2-2" + gradientUnits="userSpaceOnUse" + x1="646.55499" + y1="2736.25" + x2="1088.27" + y2="2736.25" /> + <linearGradient + id="linearGradient3126"> + <stop + id="stop3128" + offset="0" + style="stop-color:#839bc2;stop-opacity:1;" /> + <stop + id="stop3130" + offset="1" + style="stop-color:#9fb6d4;stop-opacity:1;" /> + </linearGradient> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath3351-4-7-3-5-57-7"> + <path + inkscape:connector-curvature="0" + d="m 757.242,2926.25 -110.687,-189.11 110.656,-190.89 219.437,0 111.622,189.1 -111.59,190.9 -219.438,0 z" + id="path3353-1-9-3-4-77-6" /> + </clipPath> + <linearGradient + y2="2736.25" + x2="1088.27" + y1="2736.25" + x1="646.55499" + gradientUnits="userSpaceOnUse" + id="linearGradient4510-4-1" + xlink:href="#linearGradient4459-1-2-8-9-0-1" + inkscape:collect="always" /> + <linearGradient + id="linearGradient3135"> + <stop + id="stop3137" + offset="0" + style="stop-color:#839bc2;stop-opacity:1;" /> + <stop + id="stop3139" + offset="1" + style="stop-color:#9fb6d4;stop-opacity:1;" /> + </linearGradient> + <radialGradient + inkscape:collect="always" + xlink:href="#radialGradient3327-8-4-8-0-0-2" + id="radialGradient10699-4-4" + gradientUnits="userSpaceOnUse" + cx="997.46997" + cy="2896.25" + fx="997.46997" + fy="2896.25" + r="583.73999" /> + <radialGradient + fx="0" + fy="0" + cx="0" + cy="0" + r="1" + gradientUnits="userSpaceOnUse" + gradientTransform="matrix(58.375,0,0,-58.375,99.75,289.625)" + spreadMethod="pad" + id="radialGradient3327-8-4-8-0-0-2"> + <stop + style="stop-opacity:1;stop-color:#aeaeb3" + offset="0" + id="stop3329-7-0-1-0-4-3" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="0.949438" + id="stop3331-4-3-0-0-11-2" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="1" + id="stop3333-2-4-9-7-54-2" /> + </radialGradient> + <radialGradient + r="583.73999" + fy="2896.25" + fx="997.46997" + cy="2896.25" + cx="997.46997" + gradientUnits="userSpaceOnUse" + id="radialGradient7931-1" + xlink:href="#radialGradient3327-8-4-8-0-0-2" + inkscape:collect="always" /> + <radialGradient + fx="0" + fy="0" + cx="0" + cy="0" + r="1" + gradientUnits="userSpaceOnUse" + gradientTransform="matrix(58.375,0,0,-58.375,99.75,289.625)" + spreadMethod="pad" + id="radialGradient3147"> + <stop + style="stop-opacity:1;stop-color:#aeaeb3" + offset="0" + id="stop3149" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="0.949438" + id="stop3151" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="1" + id="stop3153" /> + </radialGradient> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath3323-7-2-4-5-2-6"> + <path + inkscape:connector-curvature="0" + d="m 997.469,2312.51 c -322.379,0 -583.739,261.36 -583.739,583.74 0,322.38 261.36,583.74 583.739,583.74 322.381,0 583.741,-261.36 583.741,-583.74 0,-322.38 -261.36,-583.74 -583.741,-583.74" + 
id="path3325-9-2-9-5-8-8" /> + </clipPath> + <radialGradient + r="583.73999" + fy="2896.25" + fx="997.46997" + cy="2896.25" + cx="997.46997" + gradientUnits="userSpaceOnUse" + id="radialGradient4516-6-5" + xlink:href="#radialGradient3327-8-4-8-0-0-2" + inkscape:collect="always" /> + <radialGradient + fx="0" + fy="0" + cx="0" + cy="0" + r="1" + gradientUnits="userSpaceOnUse" + gradientTransform="matrix(58.375,0,0,-58.375,99.75,289.625)" + spreadMethod="pad" + id="radialGradient3158"> + <stop + style="stop-opacity:1;stop-color:#aeaeb3" + offset="0" + id="stop3160" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="0.949438" + id="stop3162" /> + <stop + style="stop-opacity:1;stop-color:#ffffff" + offset="1" + id="stop3164" /> + </radialGradient> + <radialGradient + r="583.73999" + fy="2896.25" + fx="997.46997" + cy="2896.25" + cx="997.46997" + gradientUnits="userSpaceOnUse" + id="radialGradient3192" + xlink:href="#radialGradient3327-8-4-8-0-0-2" + inkscape:collect="always" /> + </defs> + <sodipodi:namedview + id="base" + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1.0" + inkscape:pageopacity="0.0" + inkscape:pageshadow="2" + inkscape:zoom="0.98994949" + inkscape:cx="194.37763" + inkscape:cy="577.68232" + inkscape:document-units="px" + inkscape:current-layer="layer1" + showgrid="false" + inkscape:window-width="1680" + inkscape:window-height="982" + inkscape:window-x="-8" + inkscape:window-y="-8" + inkscape:window-maximized="1" /> + <metadata + id="metadata7938"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title /> + </cc:Work> + </rdf:RDF> + </metadata> + <g + inkscape:label="Layer 1" + inkscape:groupmode="layer" + id="layer1"> + <g + id="g3319-1-2-5-1-8" + transform="matrix(0.10419818,0,0,-0.10419818,149.49392,819.86044)" + style="fill:url(#radialGradient10699-4);fill-opacity:1" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <g + clip-path="url(#clipPath3323-7-2-4-5-2)" + id="g3321-9-2-7-4-14" + style="fill:url(#radialGradient7931);fill-opacity:1"> + <path + id="path3335-8-7-3-8-7" + style="fill:url(#radialGradient4516-6);fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 997.469,2312.51 c -322.379,0 -583.739,261.36 -583.739,583.74 0,322.38 261.36,583.74 583.739,583.74 322.381,0 583.741,-261.36 583.741,-583.74 0,-322.38 -261.36,-583.74 -583.741,-583.74" + inkscape:connector-curvature="0" /> + </g> + </g> + <g + id="g3347-6-8-3-7-64" + transform="matrix(0.125,0,0,-0.125,124.46742,882.67914)" + style="fill:url(#linearGradient10695-5);fill-opacity:1" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <g + clip-path="url(#clipPath3351-4-7-3-5-57)" + id="g3349-5-5-4-7-98" + style="fill:url(#linearGradient10693-2);fill-opacity:1"> + <path + id="path3361-0-8-2-9-7" + style="fill:url(#linearGradient4510-4);fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 757.242,2926.25 -110.687,-189.11 110.656,-190.89 219.437,0 111.622,189.1 -111.59,190.9 -219.438,0" + inkscape:connector-curvature="0" /> + </g> + </g> + <path + id="path3363-6-9-0-1-3" + style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 206.73253,540.54045 13.106,-22.3925 25.99649,0 13.21487,22.6087 -13.21487,22.39129 -25.99649,0 -13.106,-22.60749 z m 40.53761,-24.8925 -28.86524,0 -0.72412,1.2375 
-13.106,22.3925 -0.73575,1.2563 0.73087,1.26 13.106,22.60739 0.72263,1.2464 28.86374,0 0.72512,-1.2289 13.215,-22.39119 0.74625,-1.265 -0.74,-1.2675 -13.21537,-22.6088 -0.72313,-1.2387" + inkscape:connector-curvature="0" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523" /> + <g + id="g3365-0-4-1-9-8" + transform="matrix(0.125,0,0,-0.125,124.46742,882.67914)" + style="fill:url(#linearGradient10691-2);fill-opacity:1" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <g + clip-path="url(#clipPath3369-1-5-6-1-49)" + id="g3367-2-4-2-8-6" + style="fill:url(#linearGradient10689-9);fill-opacity:1"> + <path + id="path3379-4-6-7-6-2" + style="fill:url(#linearGradient4498-3);fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 1131.64,3128.5 -104.95,-181.12 116.38,-200.42 208.05,0.94 112.02,191.63 -112.08,188.97 -219.42,0" + inkscape:connector-curvature="0" /> + </g> + </g> + <path + id="path3381-6-8-0-0-8" + style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 254.24864,514.25665 12.3925,-21.3899 25.99625,0 13.27125,22.3762 -13.27125,22.7025 -24.56875,0.11 -13.82,-23.7988 z m 39.8125,-23.8899 -28.86125,0 -0.7225,1.2462 -12.3925,21.39 -0.72625,1.255 0.7275,1.2537 13.82,23.7988 0.7275,1.2513 1.44625,-0.01 24.56875,-0.11 1.4275,-0.01 0.72,-1.2325 13.27125,-22.7025 0.7425,-1.27 -0.75125,-1.2663 -13.27125,-22.3762 -0.72625,-1.225" + inkscape:connector-curvature="0" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523" /> + <g + id="g3383-5-5-2-2-0" + transform="matrix(0.125,0,0,-0.125,124.46742,882.67914)" + style="fill:url(#linearGradient10687-9);fill-opacity:1" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <g + clip-path="url(#clipPath3387-3-6-4-2-3)" + id="g3385-0-6-4-8-9" + style="fill:url(#linearGradient10685-44);fill-opacity:1"> + <path + id="path3397-9-6-1-6-8" + style="fill:url(#linearGradient4492-0);fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 862.109,3289.75 -109.086,-190.45 69.122,-124.42 164.511,-0.47 c 0,0 111.044,188.28 116.564,197.63 7.66,0 43.03,0 43.03,0 l -67.03,117.71 -217.111,0" + inkscape:connector-curvature="0" /> + </g> + </g> + <path + id="path3399-6-7-3-2-04" + style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 220.0304,495.27665 12.92424,-22.5662 25.68775,0 6.95625,12.2138 -3.94375,0 -14.56837,24.7025 -19.11525,-0.055 -7.94087,-14.295 z m 40.06574,-25.0662 -28.59012,0 -0.71975,1.2575 -12.92537,22.5663 -0.70112,1.2237 0.68512,1.2325 7.94137,14.295 0.71087,1.2825 1.46638,0 19.11525,0.055 1.43212,0 0.729,-1.2338 13.84375,-23.4724 6.81625,0 -2.12875,-3.7375 -6.95625,-12.2138 -0.71875,-1.2625" + inkscape:connector-curvature="0" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523" /> + <g + transform="matrix(1.25,0,0,-1.25,117.405,895.69964)" + id="g3465-0-9" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <text + id="text3467-9-6" + transform="matrix(1,0,-0.17627963,-1,0,0)" + x="116.98372" + y="-267.77499" + style="font-size:11.81779194px"> + <tspan + id="tspan3469-0-8" + sodipodi:role="line" + 
style="font-size:46.39999771px;font-variant:normal;font-weight:normal;writing-mode:lr-tb;fill:#49608a;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Denmark;-inkscape-font-specification:Denmark" + x="116.98372" + y="-267.77499">ATOMIC</tspan> + </text> + <text + id="text3471-0-2" + transform="matrix(0.99235617,0,-0.17763746,-1.0077027,0,0)" + style="font-size:38.40000153px" + x="112.74373" + y="-306.75479"> + <tspan + id="tspan3473-6-8" + sodipodi:role="line" + style="font-size:38.40000153px;font-variant:normal;font-weight:normal;writing-mode:lr-tb;fill:#49608a;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Denmark;-inkscape-font-specification:Denmark" + x="112.74373" + y="-306.75479">boost</tspan> + </text> + </g> + <path + sodipodi:type="arc" + style="fill:#0000a7;fill-opacity:1;stroke:#ffffff;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + id="path4821-47" + sodipodi:cx="160.35715" + sodipodi:cy="240.93361" + sodipodi:rx="10.114144" + sodipodi:ry="10.114144" + d="m 170.47129,240.93361 a 10.114144,10.114144 0 1 1 -20.22829,0 10.114144,10.114144 0 1 1 20.22829,0 z" + transform="translate(73.021973,299.26676)" /> + <path + transform="matrix(0.74292732,0,0,0.74292732,160.9833,334.95876)" + sodipodi:type="arc" + style="fill:#00b000;fill-opacity:1;stroke:#ffffff;stroke-width:2.69205332;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + id="path4821-4-9" + sodipodi:cx="160.35715" + sodipodi:cy="240.93361" + sodipodi:rx="10.114144" + sodipodi:ry="10.114144" + d="m 170.47129,240.93361 a 10.114144,10.114144 0 1 1 -20.22829,0 10.114144,10.114144 0 1 1 20.22829,0 z" /> + <path + transform="matrix(0.51798873,0,0,0.51798873,159.28596,366.74327)" + sodipodi:type="arc" + style="fill:#cb0000;fill-opacity:1;stroke:#ffffff;stroke-width:3.8610878;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + id="path4821-4-8-4" + sodipodi:cx="160.35715" + sodipodi:cy="240.93361" + sodipodi:rx="10.114144" + sodipodi:ry="10.114144" + d="m 170.47129,240.93361 a 10.114144,10.114144 0 1 1 -20.22829,0 10.114144,10.114144 0 1 1 20.22829,0 z" /> + <path + style="fill:none;stroke:#808080;stroke-width:1.5;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + d="m 250.05288,487.74985 c 12.11436,-0.8226 21.99257,5.106 28.337,15.9431" + id="path16018" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <path + style="fill:none;stroke:#808080;stroke-width:2;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + d="m 279.75705,526.03685 c -5.64547,24.9935 -31.79301,20.1257 -32.06907,20.0828" + id="path16020" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <path + style="fill:none;stroke:#808080;stroke-width:1.70000005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + d="m 233.21476,494.63695 c -16.70632,11.7511 -8.41283,33.4381 -8.77494,33.2356" + id="path16022" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <g + id="g3319-1-2-5-1-8-7" + transform="matrix(0.10419818,0,0,-0.10419818,147.29097,965.23744)" + style="fill:url(#radialGradient10699-4-4);fill-opacity:1" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <g + clip-path="url(#clipPath3323-7-2-4-5-2-6)" + id="g3321-9-2-7-4-14-6" + style="fill:url(#radialGradient3192);fill-opacity:1"> + <path + id="path3335-8-7-3-8-7-1" 
+ style="fill:url(#radialGradient4516-6-5);fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 997.469,2312.51 c -322.379,0 -583.739,261.36 -583.739,583.74 0,322.38 261.36,583.74 583.739,583.74 322.381,0 583.741,-261.36 583.741,-583.74 0,-322.38 -261.36,-583.74 -583.741,-583.74" + inkscape:connector-curvature="0" /> + </g> + </g> + <g + id="g3347-6-8-3-7-64-8" + transform="matrix(0.125,0,0,-0.125,122.26447,1028.0561)" + style="fill:url(#linearGradient10695-5-7);fill-opacity:1" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <g + clip-path="url(#clipPath3351-4-7-3-5-57-7)" + id="g3349-5-5-4-7-98-9" + style="fill:url(#linearGradient10693-2-2);fill-opacity:1"> + <path + id="path3361-0-8-2-9-7-2" + style="fill:url(#linearGradient4510-4-1);fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 757.242,2926.25 -110.687,-189.11 110.656,-190.89 219.437,0 111.622,189.1 -111.59,190.9 -219.438,0" + inkscape:connector-curvature="0" /> + </g> + </g> + <path + id="path3363-6-9-0-1-3-7" + style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 204.52958,685.91745 13.106,-22.3925 25.99649,0 13.21487,22.6087 -13.21487,22.39129 -25.99649,0 -13.106,-22.60749 z m 40.53761,-24.8925 -28.86524,0 -0.72412,1.2375 -13.106,22.3925 -0.73575,1.2563 0.73087,1.26 13.106,22.60739 0.72263,1.2464 28.86374,0 0.72512,-1.2289 13.215,-22.39119 0.74625,-1.265 -0.74,-1.2675 -13.21537,-22.6088 -0.72313,-1.2387" + inkscape:connector-curvature="0" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523" /> + <g + id="g3365-0-4-1-9-8-9" + transform="matrix(0.125,0,0,-0.125,122.26447,1028.0561)" + style="fill:url(#linearGradient10691-2-2);fill-opacity:1" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <g + clip-path="url(#clipPath3369-1-5-6-1-49-5)" + id="g3367-2-4-2-8-6-5" + style="fill:url(#linearGradient10689-9-4);fill-opacity:1"> + <path + id="path3379-4-6-7-6-2-4" + style="fill:url(#linearGradient4498-3-1);fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 1131.64,3128.5 -104.95,-181.12 116.38,-200.42 208.05,0.94 112.02,191.63 -112.08,188.97 -219.42,0" + inkscape:connector-curvature="0" /> + </g> + </g> + <path + id="path3381-6-8-0-0-8-3" + style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 252.04569,659.63365 12.3925,-21.3899 25.99625,0 13.27125,22.3762 -13.27125,22.7025 -24.56875,0.11 -13.82,-23.7988 z m 39.8125,-23.8899 -28.86125,0 -0.7225,1.2462 -12.3925,21.39 -0.72625,1.255 0.7275,1.2537 13.82,23.7988 0.7275,1.2513 1.44625,-0.01 24.56875,-0.11 1.4275,-0.01 0.72,-1.2325 13.27125,-22.7025 0.7425,-1.27 -0.75125,-1.2663 -13.27125,-22.3762 -0.72625,-1.225" + inkscape:connector-curvature="0" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523" /> + <g + id="g3383-5-5-2-2-0-1" + transform="matrix(0.125,0,0,-0.125,122.26447,1028.0561)" + style="fill:url(#linearGradient10687-9-1);fill-opacity:1" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <g + clip-path="url(#clipPath3387-3-6-4-2-3-4)" + id="g3385-0-6-4-8-9-2" + style="fill:url(#linearGradient10685-44-9);fill-opacity:1"> + <path + id="path3397-9-6-1-6-8-3" + 
style="fill:url(#linearGradient4492-0-8);fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 862.109,3289.75 -109.086,-190.45 69.122,-124.42 164.511,-0.47 c 0,0 111.044,188.28 116.564,197.63 7.66,0 43.03,0 43.03,0 l -67.03,117.71 -217.111,0" + inkscape:connector-curvature="0" /> + </g> + </g> + <path + id="path3399-6-7-3-2-04-3" + style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 217.82745,640.65365 12.92424,-22.5662 25.68775,0 6.95625,12.2138 -3.94375,0 -14.56837,24.7025 -19.11525,-0.055 -7.94087,-14.295 z m 40.06574,-25.0662 -28.59012,0 -0.71975,1.2575 -12.92537,22.5663 -0.70112,1.2237 0.68512,1.2325 7.94137,14.295 0.71087,1.2825 1.46638,0 19.11525,0.055 1.43212,0 0.729,-1.2338 13.84375,-23.4724 6.81625,0 -2.12875,-3.7375 -6.95625,-12.2138 -0.71875,-1.2625" + inkscape:connector-curvature="0" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523" /> + <g + transform="matrix(1.25,0,0,-1.25,115.20205,1041.0766)" + id="g3465-0-9-4" + inkscape:export-filename="C:\Users\Adam\Desktop\bg_logo1.png" + inkscape:export-xdpi="63.625523" + inkscape:export-ydpi="63.625523"> + <text + id="text3467-9-6-1" + transform="matrix(1,0,-0.17627963,-1,0,0)" + x="116.98372" + y="-267.77499" + style="font-size:11.81779194px"> + <tspan + id="tspan3469-0-8-1" + sodipodi:role="line" + style="font-size:46.39999771px;font-variant:normal;font-weight:normal;writing-mode:lr-tb;fill:#49608a;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Denmark;-inkscape-font-specification:Denmark" + x="116.98372" + y="-267.77499">Atomic</tspan> + </text> + <text + id="text3471-0-2-3" + transform="matrix(0.99235617,0,-0.17763746,-1.0077027,0,0)" + style="font-size:38.40000153px" + x="112.74373" + y="-306.75479"> + <tspan + id="tspan3473-6-8-8" + sodipodi:role="line" + style="font-size:38.40000153px;font-variant:normal;font-weight:normal;writing-mode:lr-tb;fill:#49608a;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Denmark;-inkscape-font-specification:Denmark" + x="112.74373" + y="-306.75479">Boost</tspan> + </text> + </g> + <path + sodipodi:type="arc" + style="fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + id="path4821-47-7" + sodipodi:cx="160.35715" + sodipodi:cy="240.93361" + sodipodi:rx="10.114144" + sodipodi:ry="10.114144" + d="m 170.47129,240.93361 a 10.114144,10.114144 0 1 1 -20.22829,0 10.114144,10.114144 0 1 1 20.22829,0 z" + transform="matrix(0.53649776,0,0,0.53649776,136.31572,567.15057)" /> + <path + transform="matrix(0.45553914,0,0,0.45553914,171.71781,574.06648)" + sodipodi:type="arc" + style="fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:2.69205331999999990;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + id="path4821-4-9-4" + sodipodi:cx="160.35715" + sodipodi:cy="240.93361" + sodipodi:rx="10.114144" + sodipodi:ry="10.114144" + d="m 170.47129,240.93361 a 10.114144,10.114144 0 1 1 -20.22829,0 10.114144,10.114144 0 1 1 20.22829,0 z" /> + <path + transform="matrix(0.33234221,0,0,0.33234221,173.35638,592.9985)" + sodipodi:type="arc" + style="fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:3.86108780000000000;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + id="path4821-4-8-4-2" + sodipodi:cx="160.35715" + sodipodi:cy="240.93361" + sodipodi:rx="10.114144" + sodipodi:ry="10.114144" + d="m 170.47129,240.93361 a 10.114144,10.114144 0 1 1 -20.22829,0 10.114144,10.114144 0 1 1 20.22829,0 z" /> + 
<path
+      style="fill:none;stroke:#ffffff;stroke-width:1;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+      d="m 231.48167,671.25081 c 5.6438,-0.15431 8.79799,1.45226 12.0778,6.25882"
+      id="path16018-7"
+      inkscape:connector-curvature="0"
+      sodipodi:nodetypes="cc" />
+    <path
+      style="fill:none;stroke:#ffffff;stroke-width:2;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+      d="m 244.5941,690.50066 c -4.06373,10.90468 -15.39442,8.6554 -15.38328,8.74968"
+      id="path16020-7"
+      inkscape:connector-curvature="0"
+      sodipodi:nodetypes="cc" />
+    <path
+      style="fill:none;stroke:#ffffff;stroke-width:1.5;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+      d="m 221.63678,674.9333 c -7.09591,5.63691 -3.2412,15.66119 -3.57792,15.56405"
+      id="path16022-9"
+      inkscape:connector-curvature="0"
+      sodipodi:nodetypes="cc" />
+  </g>
+</svg>
diff --git a/3rdParty/Boost/src/libs/atomic/doc/platform.qbk b/3rdParty/Boost/src/libs/atomic/doc/platform.qbk
new file mode 100644
index 0000000..6b9a9ec
--- /dev/null
+++ b/3rdParty/Boost/src/libs/atomic/doc/platform.qbk
@@ -0,0 +1,312 @@
+[/
+ / Copyright (c) 2009 Helge Bahmann
+ /
+ / Distributed under the Boost Software License, Version 1.0. (See accompanying
+ / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ /]
+
+[section:template_organization Organization of class template layers]
+
+The implementation uses multiple layers of template classes, each of
+which inherits from the next lower layer and refines or adapts the
+underlying class:
+
+* [^boost::atomic<T>] is the topmost layer, providing
+  the external interface. Implementation-wise, it does not add anything
+  (except for hiding the copy constructor and assignment operator).
+
+* [^boost::detail::atomic::internal_atomic<T,S=sizeof(T),I=is_integral_type<T> >]:
+  This layer is mainly responsible for providing the overloaded operators
+  mapping to API member functions (e.g. [^+=] to [^fetch_add]).
+  The defaulted template parameter [^I] makes it possible
+  to expose the correct API functions (via partial template
+  specialization): for non-integral types, it publishes only
+  the various [^exchange] functions
+  as well as [^load] and [^store]; for integral types it
+  additionally exports the arithmetic and logic operations.
+  [br]
+  Depending on whether the given type is integral, it
+  inherits from either [^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
+  or [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>].
+  There is, however, some special-casing: for non-integral types
+  of size 1, 2, 4 or 8, it coerces the datatype into an integer representation
+  and delegates to [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]
+  -- the rationale is that platform implementors then only need to provide
+  integer-type operations.
+
+* [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]
+  must provide the full set of operations for an integral type T
+  (i.e. [^load], [^store], [^exchange],
+  [^compare_exchange_weak], [^compare_exchange_strong],
+  [^fetch_add], [^fetch_sub], [^fetch_and],
+  [^fetch_or], [^fetch_xor], [^is_lock_free]).
+  The default implementation uses locking to emulate atomic operations, so
+  this is the level at which implementors should provide template specializations
+  to add support for platform-specific atomic operations.
+
+  [br]
+  The two separate template parameters allow separate specialization
+  on size and type (which, for a fixed size, cannot
+  express more than signedness/unsignedness). The rationale is that
+  most platform-specific atomic operations usually depend only on the
+  operand size, so that common implementations for signed/unsigned
+  types are possible. Knowing the signedness makes it possible to
+  choose sign-extending instructions for the [^load] operation,
+  avoiding a later conversion. The expectation is that in most
+  implementations this will be a normal assignment in C, possibly
+  accompanied by memory fences, so that the compiler can automatically
+  choose the correct instruction.
+
+* At the lowest level, [^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
+  provides the most basic atomic operations ([^load], [^store],
+  [^exchange], [^compare_exchange_weak],
+  [^compare_exchange_strong]) for arbitrarily generic data types.
+  The default implementation uses locking as a fallback mechanism.
+  Implementors generally do not have to specialize at this level
+  (since it is not used for the common object sizes
+  of 1, 2, 4 and 8 bytes), but they may do so if they wish to
+  provide truly atomic operations for "odd" data type sizes.
+  Some care must be taken, as the "raw" data type
+  passed in from the user through [^boost::atomic<T>]
+  is visible here -- it thus needs to be type-punned or otherwise
+  manipulated byte-by-byte to avoid using overloaded assignment,
+  comparison operators and copy constructors.
+
+[endsect]
+
+
+[section:platform_atomic_implementation Implementing platform-specific atomic operations]
+
+In principle, implementors are responsible for providing the
+full range of named member functions of an atomic object
+(i.e. [^load], [^store], [^exchange],
+[^compare_exchange_weak], [^compare_exchange_strong],
+[^fetch_add], [^fetch_sub], [^fetch_and],
+[^fetch_or], [^fetch_xor], [^is_lock_free]).
+These must be implemented as partial template specializations for
+[^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]:
+
+[c++]
+
+    template<typename T>
+    class platform_atomic_integral<T, 4>
+    {
+    public:
+        explicit platform_atomic_integral(T v) : i(v) {}
+        platform_atomic_integral(void) {}
+
+        T load(memory_order order=memory_order_seq_cst) const volatile
+        {
+            // platform-specific code
+        }
+        void store(T v, memory_order order=memory_order_seq_cst) volatile
+        {
+            // platform-specific code
+        }
+
+    private:
+        volatile T i;
+    };
+
+As noted above, it will usually suffice to specialize on the second
+template argument, indicating the size of the data type in bytes.
+
+[section:automatic_buildup Templates for automatic build-up]
+
+Often only a portion of the required operations can be
+usefully mapped to machine instructions. Several helper template
+classes are provided that can automatically synthesize the missing
+methods to complete an implementation.
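+
+For illustration, the following sketch shows how such a helper can
+synthesize a missing [^exchange] on top of [^compare_exchange_weak]
+(a simplified sketch, not the actual library code):
+
+[c++]
+
+    // Sketch only: exchange() built from compare_exchange_weak().
+    T exchange(T desired, memory_order order=memory_order_seq_cst) volatile
+    {
+        T expected = load(memory_order_relaxed);
+        // On failure, compare_exchange_weak reloads "expected" with the
+        // current value, so the loop simply retries until the swap succeeds.
+        while (!compare_exchange_weak(expected, desired, order, memory_order_relaxed)) {
+        }
+        return expected;
+    }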
+
+At the minimum, an implementor must provide the
+[^load], [^store],
+[^compare_exchange_weak] and
+[^is_lock_free] methods:
+
+[c++]
+
+    template<typename T>
+    class my_atomic_32 {
+    public:
+        my_atomic_32() {}
+        my_atomic_32(T initial_value) : value(initial_value) {}
+
+        T load(memory_order order=memory_order_seq_cst) volatile const
+        {
+            // platform-specific code
+        }
+        void store(T new_value, memory_order order=memory_order_seq_cst) volatile
+        {
+            // platform-specific code
+        }
+        bool compare_exchange_weak(T &expected, T desired,
+            memory_order success_order,
+            memory_order failure_order) volatile
+        {
+            // platform-specific code
+        }
+        bool is_lock_free() const volatile {return true;}
+    protected:
+        // typedef is required for classes inheriting from this
+        typedef T integral_type;
+    private:
+        T value;
+    };
+
+The template [^boost::detail::atomic::build_atomic_from_minimal]
+can then take care of the rest:
+
+[c++]
+
+    template<typename T>
+    class platform_atomic_integral<T, 4>
+        : public boost::detail::atomic::build_atomic_from_minimal<my_atomic_32<T> >
+    {
+    public:
+        typedef build_atomic_from_minimal<my_atomic_32<T> > super;
+
+        explicit platform_atomic_integral(T v) : super(v) {}
+        platform_atomic_integral(void) {}
+    };
+
+There are several helper classes to assist in building "complete"
+atomic implementations from different starting points:
+
+* [^build_atomic_from_minimal] requires
+  * [^load]
+  * [^store]
+  * [^compare_exchange_weak] (4-operand version)
+
+* [^build_atomic_from_exchange] requires
+  * [^load]
+  * [^store]
+  * [^compare_exchange_weak] (4-operand version)
+  * [^compare_exchange_strong] (4-operand version)
+  * [^exchange]
+
+* [^build_atomic_from_add] requires
+  * [^load]
+  * [^store]
+  * [^compare_exchange_weak] (4-operand version)
+  * [^compare_exchange_strong] (4-operand version)
+  * [^exchange]
+  * [^fetch_add]
+
+* [^build_atomic_from_typical] (<I>supported on gcc only</I>) requires
+  * [^load]
+  * [^store]
+  * [^compare_exchange_weak] (4-operand version)
+  * [^compare_exchange_strong] (4-operand version)
+  * [^exchange]
+  * [^fetch_add_var] (protected method)
+  * [^fetch_inc] (protected method)
+  * [^fetch_dec] (protected method)
+
+  This will generate a [^fetch_add] method
+  that calls [^fetch_inc]/[^fetch_dec]
+  when the given parameter is a compile-time constant
+  equal to +1 or -1 respectively, and [^fetch_add_var]
+  in all other cases. This provides a mechanism for
+  optimizing the extremely common case of an atomic
+  variable being used as a counter.
+
+  The prototypes of the methods to be implemented are:
+  [c++]
+
+    template<typename T>
+    class my_atomic {
+    public:
+        T fetch_inc(memory_order order) volatile;
+        T fetch_dec(memory_order order) volatile;
+        T fetch_add_var(T counter, memory_order order) volatile;
+    };
+
+These helper templates are defined in [^boost/atomic/detail/builder.hpp].
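+
+As an illustration of the dispatch described above, the [^fetch_add]
+generated by [^build_atomic_from_typical] might look roughly as follows
+(a sketch assuming gcc's [^__builtin_constant_p]; the generated code
+may differ in detail):
+
+[c++]
+
+    // Sketch only: compile-time constants +1/-1 are routed to the
+    // specialized fetch_inc/fetch_dec, everything else to fetch_add_var.
+    T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile
+    {
+        if (__builtin_constant_p(c) && c == 1)
+            return fetch_inc(order);
+        if (__builtin_constant_p(c) && c == -1)
+            return fetch_dec(order);
+        return fetch_add_var(c, order);
+    }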
+
+[endsect]
+
+[section:automatic_buildup_small Building sub-word-sized atomic data types]
+
+There is one other helper template that can build sub-word-sized
+atomic data types even though the underlying architecture allows
+only word-sized atomic operations:
+
+[c++]
+
+    template<typename T>
+    class platform_atomic_integral<T, 1> :
+        public build_atomic_from_larger_type<my_atomic_32<uint32_t>, T>
+    {
+    public:
+        typedef build_atomic_from_larger_type<my_atomic_32<uint32_t>, T> super;
+
+        explicit platform_atomic_integral(T v) : super(v) {}
+        platform_atomic_integral(void) {}
+    };
+
+The above creates an atomic data type of 1 byte in size and
+uses masking and shifts to map it to 32-bit atomic operations.
+The base type must implement [^load], [^store]
+and [^compare_exchange_weak] for this to work.
+
+[endsect]
+
+[section:other_sizes Atomic data types for unusual object sizes]
+
+In unusual circumstances, an implementor may also opt to specialize
+[^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
+to provide support for atomic objects not fitting an integral size.
+If you do that, keep the following things in mind:
+
+* There is no reason to ever do this for object sizes
+  of 1, 2, 4 and 8 bytes
+* Only the following methods need to be implemented:
+  * [^load]
+  * [^store]
+  * [^compare_exchange_weak] (4-operand version)
+  * [^compare_exchange_strong] (4-operand version)
+  * [^exchange]
+
+The type of the data to be stored in the atomic
+variable (template parameter [^T])
+is exposed to this class, and the type may have
+overloaded assignment and comparison operators --
+using these overloaded operators, however, will result
+in an error. The implementor is responsible for
+accessing the objects in a way that does not
+invoke either of these operators (using e.g.
+[^memcpy] or type-casts).
+
+[endsect]
+
+[endsect]
+
+[section:platform_atomic_fences Fences]
+
+Platform implementors need to provide a function performing
+the action required for [funcref boost::atomic_thread_fence atomic_thread_fence]
+(the fallback implementation will just perform an atomic operation
+on an integer object). This is achieved by specializing the
+[^boost::detail::atomic::platform_atomic_thread_fence] template
+function in the following way:
+
+[c++]
+
+    template<>
+    void platform_atomic_thread_fence(memory_order order)
+    {
+        // platform-specific code here
+    }
+
+[endsect]
+
+[section:platform_atomic_puttogether Putting it all together]
+
+The template specializations should be put into a header file
+in the [^boost/atomic/detail] directory, preferably with a name
+identifying the supported compiler and architecture.
+
+The file [^boost/atomic/detail/platform.hpp] must
+subsequently be modified to conditionally include the new
+header.
+
+[endsect]