mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-08-20 01:02:36 +00:00
This patch introduces wrappers to the built-in atomic operations provided by
gcc.
There are two sets of built-in atomic functions available. Before gcc 4.7.0 only
the __sync built-in functions are available, while since 4.7.0 a new set of
built-in functions with the __atomic prefix is introduced as a
replacement. Since the __sync functions will eventually be deprecated, the
__atomic ones are preferred whenever available.
The interfaces provided are listed below, mostly following the naming of the
underlying built-in functions, which are self-explanatory.
atomic_load
atomic_store
atomic_xchg
atomic_cmpxchg
atomic_add_fetch
atomic_sub_fetch
atomic_and_fetch
atomic_xor_fetch
atomic_or_fetch
atomic_nand_fetch
atomic_fetch_add
atomic_fetch_sub
atomic_fetch_and
atomic_fetch_xor
atomic_fetch_or
atomic_fetch_nand
atomic_test_and_set
atomic_clear
atomic_thread_fence
atomic_signal_fence
Tracked-On: #875
Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Yu Wang <yu1.wang@intel.com>
125 lines
4.0 KiB
C
125 lines
4.0 KiB
C
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* Thin macro wrappers over the compiler's built-in atomic operations.
 * All operations use sequentially-consistent ordering. Each macro takes a
 * pointer to the atomic object as its first argument. */

#ifndef _ATOMIC_H_
#define _ATOMIC_H_

/* Test for GCC >= 4.7.0 */
#if ((__GNUC__ > 4) || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7)))

/* Since GCC 4.7.0, the __atomic builtins are introduced as a replacement of the
 * __sync ones. The original __sync builtins map to their __atomic counterparts
 * using the __ATOMIC_SEQ_CST model and will eventually be deprecated. */

/* Atomically read *ptr and return its value. */
#define atomic_load(ptr) \
	__atomic_load_n(ptr, __ATOMIC_SEQ_CST)

/* Atomically write val to *ptr. */
#define atomic_store(ptr, val) \
	__atomic_store_n(ptr, val, __ATOMIC_SEQ_CST)

/* Atomically write val to *ptr and return the previous value. */
#define atomic_xchg(ptr, val) \
	__atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST)

/* Atomically compare *ptr with *expected; if equal, write desired to *ptr
 * and return true, otherwise return false.
 *
 * Note: expected should also be a pointer.
 *
 * NOTE(review): on failure this builtin also writes the current value of
 * *ptr back into *expected; the __sync fallback below does not. Callers
 * must not rely on *expected being updated. */
#define atomic_cmpxchg(ptr, expected, desired) \
	__atomic_compare_exchange_n(ptr, expected, desired, \
		false, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)

/* Atomic read-modify-write operations returning the NEW value. */

#define atomic_add_fetch(ptr, val) \
	__atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_sub_fetch(ptr, val) \
	__atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_and_fetch(ptr, val) \
	__atomic_and_fetch(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_xor_fetch(ptr, val) \
	__atomic_xor_fetch(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_or_fetch(ptr, val) \
	__atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_nand_fetch(ptr, val) \
	__atomic_nand_fetch(ptr, val, __ATOMIC_SEQ_CST)

/* Atomic read-modify-write operations returning the PREVIOUS value. */

#define atomic_fetch_add(ptr, val) \
	__atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_fetch_sub(ptr, val) \
	__atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_fetch_and(ptr, val) \
	__atomic_fetch_and(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_fetch_xor(ptr, val) \
	__atomic_fetch_xor(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_fetch_or(ptr, val) \
	__atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST)

#define atomic_fetch_nand(ptr, val) \
	__atomic_fetch_nand(ptr, val, __ATOMIC_SEQ_CST)

/* Atomically set *ptr (a bool or char) and return its previous value
 * as a bool. */
#define atomic_test_and_set(ptr) \
	__atomic_test_and_set(ptr, __ATOMIC_SEQ_CST)

/* Atomically clear *ptr (a bool or char), i.e. set it to false/0. */
#define atomic_clear(ptr) \
	__atomic_clear(ptr, __ATOMIC_SEQ_CST)

/* Full memory barrier between threads. */
#define atomic_thread_fence() \
	__atomic_thread_fence(__ATOMIC_SEQ_CST)

/* Compiler barrier between a thread and a signal handler running in it. */
#define atomic_signal_fence() \
	__atomic_signal_fence(__ATOMIC_SEQ_CST)

#else /* not GCC >= 4.7.0 */

/* __sync builtins do not have load/store interfaces. Use add_fetch and xchg to
 * mimic their functionality.
 *
 * Also note that __sync_lock_test_and_set is rather an atomic exchange
 * operation per GCC manual on the __sync builtins.
 */
#define atomic_load(ptr) \
	__sync_add_and_fetch(ptr, 0)

#define atomic_store(ptr, val) \
	(void)(__sync_lock_test_and_set(ptr, val))

#define atomic_xchg(ptr, val) \
	__sync_lock_test_and_set(ptr, val)

/* Note: expected should also be a pointer.
 *
 * NOTE(review): unlike the __atomic branch, *expected is NOT updated with the
 * current value of *ptr when the comparison fails. */
#define atomic_cmpxchg(ptr, expected, desired) \
	__sync_bool_compare_and_swap(ptr, (*(expected)), desired)

/* Atomic read-modify-write operations returning the NEW value. */

#define atomic_add_fetch(ptr, val) \
	__sync_add_and_fetch(ptr, val)

#define atomic_sub_fetch(ptr, val) \
	__sync_sub_and_fetch(ptr, val)

#define atomic_and_fetch(ptr, val) \
	__sync_and_and_fetch(ptr, val)

#define atomic_xor_fetch(ptr, val) \
	__sync_xor_and_fetch(ptr, val)

#define atomic_or_fetch(ptr, val) \
	__sync_or_and_fetch(ptr, val)

#define atomic_nand_fetch(ptr, val) \
	__sync_nand_and_fetch(ptr, val)

/* Atomic read-modify-write operations returning the PREVIOUS value. */

#define atomic_fetch_add(ptr, val) \
	__sync_fetch_and_add(ptr, val)

#define atomic_fetch_sub(ptr, val) \
	__sync_fetch_and_sub(ptr, val)

#define atomic_fetch_and(ptr, val) \
	__sync_fetch_and_and(ptr, val)

#define atomic_fetch_xor(ptr, val) \
	__sync_fetch_and_xor(ptr, val)

#define atomic_fetch_or(ptr, val) \
	__sync_fetch_and_or(ptr, val)

#define atomic_fetch_nand(ptr, val) \
	__sync_fetch_and_nand(ptr, val)

/* Atomically set *ptr and return its previous value as a bool. */
#define atomic_test_and_set(ptr) \
	(bool)(__sync_lock_test_and_set(ptr, 1))

/* Atomically clear *ptr, i.e. set it to 0. */
#define atomic_clear(ptr) \
	__sync_lock_release(ptr)

/* Full memory barrier between threads. */
#define atomic_thread_fence() \
	__sync_synchronize()

/* __sync has no dedicated signal fence; a full barrier is a conservative
 * (stronger than required) substitute. */
#define atomic_signal_fence() \
	__sync_synchronize()

#endif /* GCC >= 4.7.0 */

#endif /* _ATOMIC_H_ */