diff options
author:    wm4 <wm4@nowhere>  2014-05-28 22:37:37 +0200
committer: wm4 <wm4@nowhere>  2014-05-28 22:44:43 +0200
commit:    3238cd3dac49d3f961314f16cf689863db0e92e2 (patch)
tree:      e18a550ef3561f3dc771315167d6b9d81f045680
parent:    f289060259113c434a8c69a6ef297415fe0d8f58 (diff)
download:  mpv-3238cd3dac49d3f961314f16cf689863db0e92e2.tar.bz2
           mpv-3238cd3dac49d3f961314f16cf689863db0e92e2.tar.xz
atomics: some corrections to __sync builtins usage
We don't need to combine __sync_add_and_fetch with a memory barrier,
since these intrinsics are documented as using a full barrier already.
Use __sync_fetch_and_add instead of __sync_add_and_fetch; this gives
atomic_fetch_add() the correct return value (although we don't use it).
Use __sync_fetch_and_add to emulate atomic_load(). This should enforce
the full barrier semantics better. (This trick is stolen from the
FreeBSD-based stdatomic.h emulation.)
 compat/atomics.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/compat/atomics.h b/compat/atomics.h
index 01ecdc64d7..797992f876 100644
--- a/compat/atomics.h
+++ b/compat/atomics.h
@@ -51,11 +51,11 @@ typedef struct { volatile unsigned long long v; } atomic_ullong;
 #elif HAVE_SYNC_BUILTINS

 #define atomic_load(p) \
-    (__sync_synchronize(), (p)->v)
+    __sync_fetch_and_add(&(p)->v, 0)
 #define atomic_store(p, val) \
-    ((p)->v = (val), __sync_synchronize())
+    (__sync_synchronize(), (p)->v = (val), __sync_synchronize())
 #define atomic_fetch_add(a, b) \
-    (__sync_add_and_fetch(&(a)->v, b), __sync_synchronize())
+    __sync_fetch_and_add(&(a)->v, b)

 #else
 # error "this should have been a configuration error, report a bug please"