#pragma once

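/*
  Implementations of the GCC/Clang __sync_*() builtins in terms of the
  Solaris <atomic.h> primitives.

  <atomic.h> declares some of these operations only for kernel code or when
  _INT64_TYPE is defined, hence the temporary _KERNEL define here and the
  matching #if guards around the 64-bit shims below.
*/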
#define _KERNEL
#include <atomic.h>
#undef _KERNEL

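/*
  __sync_fetch_and_add(): atomically add val to *ptr and return the value
  *ptr held immediately before the addition.
*/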
inline bool __sync_fetch_and_add(volatile bool* ptr, bool val)
{
  /* The *_nv primitives return the new value; subtracting val recovers the
     old value atomically. (Reading *ptr separately before the atomic
     operation would race with concurrent updates.) */
  uint8_t nv= val ? atomic_inc_8_nv((volatile uint8_t *)ptr)
                  : atomic_add_8_nv((volatile uint8_t *)ptr, (int8_t)val);
  return (uint8_t)(nv - (uint8_t)val) != 0;
}

inline int8_t __sync_fetch_and_add(volatile int8_t* ptr, int8_t val)
{
  uint8_t nv= (val == 1) ? atomic_inc_8_nv((volatile uint8_t*)ptr)
                         : atomic_add_8_nv((volatile uint8_t*)ptr, val);
  return (int8_t)(nv - (uint8_t)val);
}

inline int16_t __sync_fetch_and_add(volatile int16_t* ptr, int16_t val)
{
  uint16_t nv= (val == 1) ? atomic_inc_16_nv((volatile uint16_t*)ptr)
                          : atomic_add_16_nv((volatile uint16_t*)ptr, val);
  return (int16_t)(nv - (uint16_t)val);
}

inline int32_t __sync_fetch_and_add(volatile int32_t* ptr, int32_t val)
{
  uint32_t nv= (val == 1) ? atomic_inc_32_nv((volatile uint32_t*)ptr)
                          : atomic_add_32_nv((volatile uint32_t*)ptr, val);
  return (int32_t)(nv - (uint32_t)val);
}

inline uint8_t __sync_fetch_and_add(volatile uint8_t* ptr, uint8_t val)
{
  uint8_t nv= (val == 1) ? atomic_inc_8_nv(ptr) : atomic_add_8_nv(ptr, (int8_t)val);
  return (uint8_t)(nv - val);
}

inline uint16_t __sync_fetch_and_add(volatile uint16_t* ptr, uint16_t val)
{
  uint16_t nv= (val == 1) ? atomic_inc_16_nv(ptr) : atomic_add_16_nv(ptr, (int16_t)val);
  return (uint16_t)(nv - val);
}

inline uint32_t __sync_fetch_and_add(volatile uint32_t* ptr, uint32_t val)
{
  uint32_t nv= (val == 1) ? atomic_inc_32_nv(ptr) : atomic_add_32_nv(ptr, (int32_t)val);
  return nv - val;
}

#if defined(_KERNEL) || defined(_INT64_TYPE)
inline uint64_t __sync_fetch_and_add(volatile uint64_t* ptr, uint64_t val)
{
  uint64_t nv= (val == 1) ? atomic_inc_64_nv(ptr) : atomic_add_64_nv(ptr, (int64_t)val);
  return nv - val;
}

inline int64_t __sync_fetch_and_add(volatile int64_t* ptr, int64_t val)
{
  uint64_t nv= (val == 1) ? atomic_inc_64_nv((volatile uint64_t*)ptr)
                          : atomic_add_64_nv((volatile uint64_t*)ptr, val);
  return (int64_t)(nv - (uint64_t)val);
}
#endif

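/*
  __sync_fetch_and_sub(): atomically subtract val from *ptr and return the
  value *ptr held immediately before the subtraction.
*/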
inline uint8_t __sync_fetch_and_sub(volatile uint8_t* ptr, uint8_t val)
{
  /* As above: take the new value from the *_nv primitive and add val back
     to recover the old value without a separate, racy read of *ptr. */
  uint8_t nv= (val == 1) ? atomic_dec_8_nv(ptr)
                         : atomic_add_8_nv(ptr, (int8_t)(0-(int8_t)val));
  return (uint8_t)(nv + val);
}

inline uint16_t __sync_fetch_and_sub(volatile uint16_t* ptr, uint16_t val)
{
  uint16_t nv= (val == 1) ? atomic_dec_16_nv(ptr)
                          : atomic_add_16_nv(ptr, (int16_t)(0-(int16_t)val));
  return (uint16_t)(nv + val);
}

inline uint32_t __sync_fetch_and_sub(volatile uint32_t* ptr, uint32_t val)
{
  uint32_t nv= (val == 1) ? atomic_dec_32_nv(ptr)
                          : atomic_add_32_nv(ptr, (int32_t)(0-(int32_t)val));
  return nv + val;
}

#if defined(_KERNEL) || defined(_INT64_TYPE)
inline uint64_t __sync_fetch_and_sub(volatile uint64_t* ptr, uint64_t val)
{
  uint64_t nv= (val == 1) ? atomic_dec_64_nv(ptr)
                          : atomic_add_64_nv(ptr, 0-(int64_t)val);
  return nv + val;
}

inline int64_t __sync_fetch_and_sub(volatile int64_t* ptr, int64_t val)
{
  uint64_t nv= (val == 1) ? atomic_dec_64_nv((volatile uint64_t *) ptr)
                          : atomic_add_64_nv((volatile uint64_t *) ptr, 0-(int64_t)val);
  return (int64_t)(nv + (uint64_t)val);
}
#endif

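/*
  __sync_add_and_fetch(): atomically add val to *ptr and return the new
  value.  The Solaris *_nv ("new value") primitives already have exactly
  this behaviour, so these shims map onto them directly.
*/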
inline bool __sync_add_and_fetch(volatile bool* ptr, bool val)
{
  uint8_t nv= val ? atomic_inc_8_nv((volatile uint8_t *)ptr)
                  : atomic_add_8_nv((volatile uint8_t *)ptr, (int8_t)val);
  return nv != 0;
}

inline int8_t __sync_add_and_fetch(volatile int8_t* ptr, int8_t val)
{
  return (int8_t)((val == 1) ? atomic_inc_8_nv((volatile uint8_t*)ptr)
                             : atomic_add_8_nv((volatile uint8_t*)ptr, val));
}

inline int16_t __sync_add_and_fetch(volatile int16_t* ptr, int16_t val)
{
  return (int16_t)((val == 1) ? atomic_inc_16_nv((volatile uint16_t*)ptr)
                              : atomic_add_16_nv((volatile uint16_t*)ptr, val));
}

inline int32_t __sync_add_and_fetch(volatile int32_t* ptr, int32_t val)
{
  return (int32_t)((val == 1) ? atomic_inc_32_nv((volatile uint32_t*)ptr)
                              : atomic_add_32_nv((volatile uint32_t*)ptr, val));
}

inline uint8_t __sync_add_and_fetch(volatile uint8_t* ptr, uint8_t val)
{
  return (val == 1) ? atomic_inc_8_nv(ptr) : atomic_add_8_nv(ptr, (int8_t)val);
}

inline uint16_t __sync_add_and_fetch(volatile uint16_t* ptr, uint16_t val)
{
  return (val == 1) ? atomic_inc_16_nv(ptr) : atomic_add_16_nv(ptr, (int16_t)val);
}

inline uint32_t __sync_add_and_fetch(volatile uint32_t* ptr, uint32_t val)
{
  return (val == 1) ? atomic_inc_32_nv(ptr) : atomic_add_32_nv(ptr, (int32_t)val);
}

#if defined(_KERNEL) || defined(_INT64_TYPE)
inline uint64_t __sync_add_and_fetch(volatile uint64_t* ptr, uint64_t val)
{
  return (val == 1) ? atomic_inc_64_nv(ptr) : atomic_add_64_nv(ptr, (int64_t)val);
}

inline int64_t __sync_add_and_fetch(volatile int64_t* ptr, int64_t val)
{
  return (int64_t)((val == 1) ? atomic_inc_64_nv((volatile uint64_t*)ptr)
                              : atomic_add_64_nv((volatile uint64_t*)ptr, val));
}
#endif

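/*
  __sync_sub_and_fetch(): atomically subtract val from *ptr and return the
  new value.
*/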
inline uint8_t __sync_sub_and_fetch(volatile uint8_t* ptr, uint8_t val)
{
  return (val == 1) ? atomic_dec_8_nv(ptr)
                    : atomic_add_8_nv(ptr, (int8_t)(0-(int8_t)val));
}

inline uint16_t __sync_sub_and_fetch(volatile uint16_t* ptr, uint16_t val)
{
  return (val == 1) ? atomic_dec_16_nv(ptr)
                    : atomic_add_16_nv(ptr, (int16_t)(0-(int16_t)val));
}

inline uint32_t __sync_sub_and_fetch(volatile uint32_t* ptr, uint32_t val)
{
  return (val == 1) ? atomic_dec_32_nv(ptr)
                    : atomic_add_32_nv(ptr, (int32_t)(0-(int32_t)val));
}

#if defined(_KERNEL) || defined(_INT64_TYPE)
inline uint64_t __sync_sub_and_fetch(volatile uint64_t* ptr, uint64_t val)
{
  return (val == 1) ? atomic_dec_64_nv(ptr)
                    : atomic_add_64_nv(ptr, 0-(int64_t)val);
}

inline int64_t __sync_sub_and_fetch(volatile int64_t* ptr, int64_t val)
{
  return (val == 1) ? atomic_dec_64_nv((volatile uint64_t *) ptr)
                    : atomic_add_64_nv((volatile uint64_t *) ptr, 0-(int64_t)val);
}
#endif

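/*
  __sync_lock_test_and_set(): atomically store val into *ptr and return the
  value *ptr held immediately before the store.
*/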
inline uint8_t __sync_lock_test_and_set(volatile uint8_t* ptr, uint8_t val)
{
  /* atomic_swap_*() returns the previous value; re-reading *ptr after the
     swap would return the newly stored (or an even later) value. */
  return atomic_swap_8(ptr, val);
}

inline uint16_t __sync_lock_test_and_set(volatile uint16_t* ptr, uint16_t val)
{
  return atomic_swap_16(ptr, val);
}

inline uint32_t __sync_lock_test_and_set(volatile uint32_t* ptr, uint32_t val)
{
  return atomic_swap_32(ptr, val);
}

#if defined(_KERNEL) || defined(_INT64_TYPE)
inline uint64_t __sync_lock_test_and_set(volatile uint64_t* ptr, uint64_t val)
{
  return atomic_swap_64(ptr, val);
}
#endif

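/*
  __sync_val_compare_and_swap(): if *ptr equals old_val, atomically store
  val into *ptr; in all cases return the value *ptr held before the call.
*/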
inline int8_t __sync_val_compare_and_swap(volatile int8_t* ptr,
                                          int8_t old_val, int8_t val)
{
  /* atomic_cas_*() returns the previous value, which is exactly what this
     builtin must return; re-reading *ptr afterwards would race. */
  return (int8_t)atomic_cas_8((volatile uint8_t *)ptr, (uint8_t)old_val,
                              (uint8_t)val);
}

inline uint8_t __sync_val_compare_and_swap(volatile uint8_t* ptr,
                                           uint8_t old_val, uint8_t val)
{
  return atomic_cas_8(ptr, old_val, val);
}

inline uint16_t __sync_val_compare_and_swap(volatile uint16_t* ptr,
                                            uint16_t old_val, uint16_t val)
{
  return atomic_cas_16(ptr, old_val, val);
}

inline uint32_t __sync_val_compare_and_swap(volatile uint32_t* ptr,
                                            uint32_t old_val, uint32_t val)
{
  return atomic_cas_32(ptr, old_val, val);
}

#if defined(_KERNEL) || defined(_INT64_TYPE)
inline uint64_t __sync_val_compare_and_swap(volatile uint64_t* ptr,
                                            uint64_t old_val, uint64_t val)
{
  return atomic_cas_64(ptr, old_val, val);
}
#endif

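/*
  __sync_bool_compare_and_swap(): as above, but return whether the swap
  actually took place.
*/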
inline bool __sync_bool_compare_and_swap(volatile int8_t* ptr,
                                         int8_t old_val, int8_t val)
{
  /* The CAS succeeded iff the previous value it returns equals old_val;
     comparing against a separate read of *ptr would race. */
  return (int8_t)atomic_cas_8((volatile uint8_t *)ptr, (uint8_t)old_val,
                              (uint8_t)val) == old_val;
}

inline bool __sync_bool_compare_and_swap(volatile uint8_t* ptr,
                                         uint8_t old_val, uint8_t val)
{
  return atomic_cas_8(ptr, old_val, val) == old_val;
}

inline bool __sync_bool_compare_and_swap(volatile uint16_t* ptr,
                                         uint16_t old_val, uint16_t val)
{
  return atomic_cas_16(ptr, old_val, val) == old_val;
}

inline bool __sync_bool_compare_and_swap(volatile uint32_t* ptr,
                                         uint32_t old_val, uint32_t val)
{
  return atomic_cas_32(ptr, old_val, val) == old_val;
}

#if defined(_KERNEL) || defined(_INT64_TYPE)
inline bool __sync_bool_compare_and_swap(volatile uint64_t* ptr,
                                         uint64_t old_val, uint64_t val)
{
  return atomic_cas_64(ptr, old_val, val) == old_val;
}
#endif
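/*
  Usage sketch (illustrative only): with these shims in scope, code written
  against the GCC builtins compiles unchanged on Solaris, e.g.

    volatile uint32_t counter= 0;
    uint32_t before= __sync_fetch_and_add(&counter, 1u);  // before == 0
    uint32_t after=  __sync_add_and_fetch(&counter, 1u);  // after  == 2
    if (__sync_bool_compare_and_swap(&counter, 2u, 0u))
    {
      // counter is now 0 again
    }
*/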