// Internal detail header: selects a platform atomic-operations backend.
// NOTE(review): this excerpt is a numbered listing with lines dropped by
// extraction -- several #endif lines and includes are missing below.
33 #ifndef itkAtomicIntDetail_h
34 #define itkAtomicIntDetail_h
// Backend priority: compiler C++11 atomics, then GCC/Clang __sync builtins,
// then Apple OSAtomic, then MSVC on Windows.
41 #if ITK_COMPILER_CXX_ATOMIC
43 #elif defined(ITK_HAVE_SYNC_BUILTINS)
// The __sync builtins cover both 32- and 64-bit operands.
44 # define ITK_GCC_ATOMICS_32
45 # define ITK_GCC_ATOMICS_64
46 #elif defined(__APPLE__)
47 # include <libkern/OSAtomic.h>
48 # define ITK_APPLE_ATOMICS_32
// 64-bit OSAtomic ops are enabled only with 64-bit pointers or on __i386__.
49 # if ITK_SIZEOF_VOID_P == 8 || defined(__i386__)
50 # define ITK_APPLE_ATOMICS_64
52 #elif defined(_WIN32) && defined(_MSC_VER)
53 # define ITK_WINDOWS_ATOMICS_32
// 64-bit atomics only when building 64-bit Windows binaries.
54 # if ITK_SIZEOF_VOID_P == 8
55 # define ITK_WINDOWS_ATOMICS_64
68 #if ITK_COMPILER_CXX_ATOMIC
// C++11 backend: the integer width VSize is mapped to a suitably aligned
// integer type through the BaseType<VSize> trait (specialized further down).
// NOTE(review): the enclosing class declaration and any <atomic> include
// were dropped from this excerpt (original lines 69-74 are missing).
70 template <
size_t VSize>
struct BaseType;
75 typedef typename BaseType<VSize>::Type AtomicType;
76 typedef typename BaseType<VSize>::Type ValueType;
79 #elif defined ITK_HAVE_SYNC_BUILTINS
// GCC/Clang backend: one generic implementation on top of the __sync_*
// builtins, each of which issues a full memory barrier.
// NOTE(review): the enclosing class declaration and the function braces
// were dropped by extraction (e.g. original lines 80, 90, 92, ...).
81 template <
size_t VSize>
struct BaseType;
86 typedef typename BaseType<VSize>::Type AtomicType;
87 typedef typename BaseType<VSize>::Type ValueType;
// Returns the NEW value after adding val.
89 static ValueType AddAndFetch(ValueType *ref, ValueType val)
91 return __sync_add_and_fetch(ref, val);
// Returns the NEW value after subtracting val.
94 static ValueType SubAndFetch(ValueType *ref, ValueType val)
96 return __sync_sub_and_fetch(ref, val);
// ++x semantics: returns the incremented value.
99 static ValueType PreIncrement(ValueType *ref)
101 return __sync_add_and_fetch(ref, 1);
// --x semantics: returns the decremented value.
104 static ValueType PreDecrement(ValueType *ref)
106 return __sync_sub_and_fetch(ref, 1);
// x++ semantics: __sync_fetch_and_add returns the value BEFORE the add.
109 static ValueType PostIncrement(ValueType *ref)
111 return __sync_fetch_and_add(ref, 1);
// x-- semantics: __sync_fetch_and_sub returns the value BEFORE the sub.
114 static ValueType PostDecrement(ValueType *ref)
116 return __sync_fetch_and_sub(ref, 1);
// Load: full barrier, then a volatile read so the compiler cannot reuse a
// previously cached value.
119 static ValueType Load(
const ValueType *ref)
121 __sync_synchronize();
122 return *
static_cast<const volatile ValueType *
>(ref);
// Store: volatile write, then a full barrier to publish the new value.
125 static void Store(ValueType *ref, ValueType val)
127 *
static_cast<volatile ValueType*
>(ref) = val;
128 __sync_synchronize();
132 #endif // defined ITK_HAVE_SYNC_BUILTINS
134 #if defined(ITK_GCC_ATOMICS_64) || ITK_COMPILER_CXX_ATOMIC
// Trait specialization: an 8-byte atomic is an int64_t aligned on an
// 8-byte boundary (alignment enforced via the itkAlignedTypedef macro).
135 template<>
struct BaseType<8>
137 itkAlignedTypedef( 8,
int64_t, Type );
140 #elif defined(ITK_APPLE_ATOMICS_64)
// Apple backend, 64-bit: built on the OSAtomic*64Barrier primitives, whose
// Barrier variants include a memory barrier.
// NOTE(review): member-function signatures and braces were dropped by
// extraction; only the statement bodies remain below.
141 template <>
class AtomicOps<8>
144 itkAlignedTypedef( 8,
int64_t, AtomicType );
// AddAndFetch / SubAndFetch: OSAtomicAdd64Barrier returns the new value.
149 return OSAtomicAdd64Barrier(val, ref);
154 return OSAtomicAdd64Barrier(-val, ref);
// Pre-increment / pre-decrement: the builtin returns the updated value.
159 return OSAtomicIncrement64Barrier(ref);
164 return OSAtomicDecrement64Barrier(ref);
// Post-increment / post-decrement: update first, then presumably return
// val -/+ 1 to recover the prior value (the return lines were dropped).
169 int64_t val = OSAtomicIncrement64Barrier(ref);
175 int64_t val = OSAtomicDecrement64Barrier(ref);
// Load / Store through volatile access (any barrier lines were dropped
// from this excerpt -- confirm against the original header).
182 return *
static_cast<const volatile int64_t*
>(ref);
187 *
static_cast<volatile int64_t*
>(ref) = val;
197 #if defined(ITK_WINDOWS_ATOMICS_64)
// Windows backend, 64-bit: operations are only DECLARED here and exported
// from the ITKCommon library (ITKCommon_EXPORT); the implementation lives
// out of line, presumably using Interlocked* intrinsics -- not visible in
// this excerpt.
198 itkAlignedTypedef( 8,
int64_t, AtomicType );
201 struct ITKCommon_EXPORT AtomicType
214 static int64_t PreIncrement(AtomicType *ref);
215 static int64_t PreDecrement(AtomicType *ref);
216 static int64_t PostIncrement(AtomicType *ref);
217 static int64_t PostDecrement(AtomicType *ref);
218 static int64_t Load(
const AtomicType *ref);
219 static void Store(AtomicType *ref,
int64_t val);
224 #if defined(ITK_GCC_ATOMICS_32) || ITK_COMPILER_CXX_ATOMIC
// Trait specialization: a 4-byte atomic is an int32_t aligned on a
// 4-byte boundary (mirrors the BaseType<8> specialization above).
225 template<>
struct BaseType<4>
227 itkAlignedTypedef( 4,
int32_t, Type );
230 #elif defined(ITK_APPLE_ATOMICS_32)
// Apple backend, 32-bit: identical structure to the 64-bit specialization,
// built on the OSAtomic*32Barrier primitives.
// NOTE(review): member-function signatures and braces were dropped by
// extraction; only the statement bodies remain below.
231 template <>
class AtomicOps<4>
234 itkAlignedTypedef( 4,
int32_t, AtomicType );
// AddAndFetch / SubAndFetch: OSAtomicAdd32Barrier returns the new value.
239 return OSAtomicAdd32Barrier(val, ref);
244 return OSAtomicAdd32Barrier(-val, ref);
// Pre-increment / pre-decrement: the builtin returns the updated value.
249 return OSAtomicIncrement32Barrier(ref);
254 return OSAtomicDecrement32Barrier(ref);
// Post-increment / post-decrement: update first, then presumably return
// val -/+ 1 to recover the prior value (the return lines were dropped).
259 int32_t val = OSAtomicIncrement32Barrier(ref);
265 int32_t val = OSAtomicDecrement32Barrier(ref);
// Load / Store through volatile access (any barrier lines were dropped
// from this excerpt -- confirm against the original header).
272 return *
static_cast<const volatile int32_t*
>(ref);
277 *
static_cast<volatile int32_t*
>(ref) = val;
287 #if defined(ITK_WINDOWS_ATOMICS_32)
// Windows backend, 32-bit: same pattern as the 64-bit case -- declarations
// only, exported from ITKCommon and implemented out of line (presumably
// via Interlocked* intrinsics; implementation not visible in this excerpt).
288 itkAlignedTypedef( 4,
int32_t, AtomicType );
290 struct ITKCommon_EXPORT AtomicType
303 static int32_t PreIncrement(AtomicType *ref);
304 static int32_t PreDecrement(AtomicType *ref);
305 static int32_t PostIncrement(AtomicType *ref);
306 static int32_t PostDecrement(AtomicType *ref);
307 static int32_t Load(
const AtomicType *ref);
308 static void Store(AtomicType *ref,
int32_t val);
313 template <
typename T>
NOTE(review): the remaining lines are Doxygen member-index residue left over
from extraction -- briefs, typedefs, and members of the mutex-based fallback
(SimpleFastMutexLock) and of the IsAtomicSupportedIntegralType concept
checker. They are not compilable source; restore them from the original
header rather than editing them here.
Critical section locking class that can be allocated on the stack.
KWIML_INT_int64_t int64_t
IsAtomicSupportedIntegralType Self
SimpleFastMutexLock * mutex
void IgnoreUnusedVariable(T)
Concept::Detail::UniqueType_bool< sizeof(T)==4||sizeof(T)==8 > SizeT
Concept::Detail::UniqueType_bool< NumericTraits< T >::is_specialized > SpecializedT
Concept::Detail::UniqueType_bool< true > TrueT
KWIML_INT_int32_t int32_t
SimpleFastMutexLock * mutex
Concept::Detail::UniqueType_bool< NumericTraits< T >::is_integer > IntegralT
itkConceptConstraintsMacro()