hv: multi-arch reconstruct atomic library

Extract the common atomic interface into include/lib/atomic.h,
and dispatch to the arch-specific implementation underneath.

Tracked-On: #8803
Signed-off-by: Haoyu Tang <haoyu.tang@intel.com>
Reviewed-by: Yifan Liu  <yifan1.liu@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
This commit is contained in:
Haoyu Tang
2025-09-05 14:16:09 +08:00
committed by acrnsi-robot
parent d22e21f484
commit 286a7557bc
11 changed files with 159 additions and 68 deletions

View File

@@ -1,6 +1,6 @@
/*- /*-
* Copyright (c) 2011 NetApp, Inc. * Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2017-2022 Intel Corporation. * Copyright (c) 2017-2025 Intel Corporation.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
#include <types.h> #include <types.h>
#include <errno.h> #include <errno.h>
#include <asm/lib/bits.h> #include <asm/lib/bits.h>
#include <asm/lib/atomic.h> #include <atomic.h>
#include <per_cpu.h> #include <per_cpu.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/lapic.h> #include <asm/lapic.h>

View File

@@ -1,12 +1,12 @@
/* /*
* Copyright (C) 2019-2022 Intel Corporation. * Copyright (C) 2019-2025 Intel Corporation.
* *
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
#include <types.h> #include <types.h>
#include <errno.h> #include <errno.h>
#include <asm/lib/atomic.h> #include <atomic.h>
#include <io_req.h> #include <io_req.h>
#include <asm/guest/vcpu.h> #include <asm/guest/vcpu.h>
#include <asm/guest/vm.h> #include <asm/guest/vm.h>

View File

@@ -1,6 +1,6 @@
/*- /*-
* Copyright (c) 2011 NetApp, Inc. * Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2017-2024 Intel Corporation. * Copyright (c) 2017-2025 Intel Corporation.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@@ -27,7 +27,7 @@
*/ */
#include <types.h> #include <types.h>
#include <asm/lib/atomic.h> #include <atomic.h>
#include <asm/cpufeatures.h> #include <asm/cpufeatures.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/cpu_caps.h> #include <asm/cpu_caps.h>

View File

@@ -7,7 +7,7 @@
#include <types.h> #include <types.h>
#include <errno.h> #include <errno.h>
#include <asm/lib/bits.h> #include <asm/lib/bits.h>
#include <asm/lib/atomic.h> #include <atomic.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/per_cpu.h> #include <asm/per_cpu.h>

View File

@@ -5,7 +5,7 @@
*/ */
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/lib/atomic.h> #include <atomic.h>
#include <asm/lib/bits.h> #include <asm/lib/bits.h>
#include <per_cpu.h> #include <per_cpu.h>
#include <asm/notify.h> #include <asm/notify.h>

View File

@@ -5,9 +5,8 @@
*/ */
#include <types.h> #include <types.h>
#include <per_cpu.h> #include <per_cpu.h>
#include <asm/lib/atomic.h> #include <atomic.h>
#include <sprintf.h> #include <sprintf.h>
#include <spinlock.h> #include <spinlock.h>
#include <npk_log.h> #include <npk_log.h>

View File

@@ -1,10 +1,10 @@
/* /*
* Copyright (C) 2018-2022 Intel Corporation. * Copyright (C) 2018-2025 Intel Corporation.
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
#include <types.h> #include <types.h>
#include <asm/lib/atomic.h> #include <atomic.h>
#include <acrn_hv_defs.h> #include <acrn_hv_defs.h>
#include <asm/io.h> #include <asm/io.h>
#include <per_cpu.h> #include <per_cpu.h>

View File

@@ -1,6 +1,6 @@
/*- /*-
* Copyright (c) 2011 NetApp, Inc. * Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2018-2022 Intel Corporation. * Copyright (c) 2018-2025 Intel Corporation.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@@ -38,6 +38,7 @@
#include <asm/pci_dev.h> #include <asm/pci_dev.h>
#include <hash.h> #include <hash.h>
#include <board_info.h> #include <board_info.h>
#include <atomic.h>
static int32_t vpci_init_vdevs(struct acrn_vm *vm); static int32_t vpci_init_vdevs(struct acrn_vm *vm);

View File

@@ -1,6 +1,6 @@
/*- /*-
* Copyright (c) 1998 Doug Rabson * Copyright (c) 1998 Doug Rabson
* Copyright (c) 2018-2022 Intel Corporation. * Copyright (c) 2018-2025 Intel Corporation.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@@ -26,10 +26,8 @@
* $FreeBSD$ * $FreeBSD$
*/ */
#ifndef ATOMIC_H #ifndef X86_LIB_ATOMIC_H
#define ATOMIC_H #define X86_LIB_ATOMIC_H
#include <types.h>
#define BUS_LOCK "lock ; " #define BUS_LOCK "lock ; "
#define build_atomic_inc(name, size, type) \ #define build_atomic_inc(name, size, type) \
@@ -39,9 +37,9 @@ static inline void name(type *ptr) \
: "=m" (*ptr) \ : "=m" (*ptr) \
: "m" (*ptr)); \ : "m" (*ptr)); \
} }
build_atomic_inc(atomic_inc16, "w", uint16_t) build_atomic_inc(arch_atomic_inc16, "w", uint16_t)
build_atomic_inc(atomic_inc32, "l", uint32_t) build_atomic_inc(arch_atomic_inc32, "l", uint32_t)
build_atomic_inc(atomic_inc64, "q", uint64_t) build_atomic_inc(arch_atomic_inc64, "q", uint64_t)
#define build_atomic_dec(name, size, type) \ #define build_atomic_dec(name, size, type) \
static inline void name(type *ptr) \ static inline void name(type *ptr) \
@@ -50,9 +48,9 @@ static inline void name(type *ptr) \
: "=m" (*ptr) \ : "=m" (*ptr) \
: "m" (*ptr)); \ : "m" (*ptr)); \
} }
build_atomic_dec(atomic_dec16, "w", uint16_t) build_atomic_dec(arch_atomic_dec16, "w", uint16_t)
build_atomic_dec(atomic_dec32, "l", uint32_t) build_atomic_dec(arch_atomic_dec32, "l", uint32_t)
build_atomic_dec(atomic_dec64, "q", uint64_t) build_atomic_dec(arch_atomic_dec64, "q", uint64_t)
#define build_atomic_swap(name, size, type) \ #define build_atomic_swap(name, size, type) \
static inline type name(type *ptr, type v) \ static inline type name(type *ptr, type v) \
@@ -63,26 +61,8 @@ static inline type name(type *ptr, type v) \
: "cc", "memory"); \ : "cc", "memory"); \
return v; \ return v; \
} }
build_atomic_swap(atomic_swap32, "l", uint32_t) build_atomic_swap(arch_atomic_swap32, "l", uint32_t)
build_atomic_swap(atomic_swap64, "q", uint64_t) build_atomic_swap(arch_atomic_swap64, "q", uint64_t)
/*
* #define atomic_readandclear32(P) \
* (return (*(uint32_t *)(P)); *(uint32_t *)(P) = 0U;)
*/
static inline uint32_t atomic_readandclear32(uint32_t *p)
{
return atomic_swap32(p, 0U);
}
/*
* #define atomic_readandclear64(P) \
* (return (*(uint64_t *)(P)); *(uint64_t *)(P) = 0UL;)
*/
static inline uint64_t atomic_readandclear64(uint64_t *p)
{
return atomic_swap64(p, 0UL);
}
#define build_atomic_cmpxchg(name, size, type) \ #define build_atomic_cmpxchg(name, size, type) \
static inline type name(volatile type *ptr, type old, type new) \ static inline type name(volatile type *ptr, type old, type new) \
@@ -94,8 +74,8 @@ static inline type name(volatile type *ptr, type old, type new) \
: "memory"); \ : "memory"); \
return ret; \ return ret; \
} }
build_atomic_cmpxchg(atomic_cmpxchg32, "l", uint32_t) build_atomic_cmpxchg(arch_atomic_cmpxchg32, "l", uint32_t)
build_atomic_cmpxchg(atomic_cmpxchg64, "q", uint64_t) build_atomic_cmpxchg(arch_atomic_cmpxchg64, "q", uint64_t)
#define build_atomic_xadd(name, size, type) \ #define build_atomic_xadd(name, size, type) \
static inline type name(type *ptr, type v) \ static inline type name(type *ptr, type v) \
@@ -106,48 +86,48 @@ static inline type name(type *ptr, type v) \
: "cc", "memory"); \ : "cc", "memory"); \
return v; \ return v; \
} }
build_atomic_xadd(atomic_xadd16, "w", uint16_t) build_atomic_xadd(arch_atomic_xadd16, "w", uint16_t)
build_atomic_xadd(atomic_xadd32, "l", int32_t) build_atomic_xadd(arch_atomic_xadd32, "l", int32_t)
build_atomic_xadd(atomic_xadd64, "q", int64_t) build_atomic_xadd(arch_atomic_xadd64, "q", int64_t)
static inline int32_t atomic_add_return(int32_t *p, int32_t v) static inline int32_t arch_atomic_add_return(int32_t *p, int32_t v)
{ {
return (atomic_xadd32(p, v) + v); return (arch_atomic_xadd32(p, v) + v);
} }
static inline int32_t atomic_sub_return(int32_t *p, int32_t v) static inline int32_t arch_atomic_sub_return(int32_t *p, int32_t v)
{ {
return (atomic_xadd32(p, -v) - v); return (arch_atomic_xadd32(p, -v) - v);
} }
static inline int32_t atomic_inc_return(int32_t *v) static inline int32_t arch_atomic_inc_return(int32_t *v)
{ {
return atomic_add_return(v, 1); return arch_atomic_add_return(v, 1);
} }
static inline int32_t atomic_dec_return(int32_t *v) static inline int32_t arch_atomic_dec_return(int32_t *v)
{ {
return atomic_sub_return(v, 1); return arch_atomic_sub_return(v, 1);
} }
static inline int64_t atomic_add64_return(int64_t *p, int64_t v) static inline int64_t arch_atomic_add64_return(int64_t *p, int64_t v)
{ {
return (atomic_xadd64(p, v) + v); return (arch_atomic_xadd64(p, v) + v);
} }
static inline int64_t atomic_sub64_return(int64_t *p, int64_t v) static inline int64_t arch_atomic_sub64_return(int64_t *p, int64_t v)
{ {
return (atomic_xadd64(p, -v) - v); return (arch_atomic_xadd64(p, -v) - v);
} }
static inline int64_t atomic_inc64_return(int64_t *v) static inline int64_t arch_atomic_inc64_return(int64_t *v)
{ {
return atomic_add64_return(v, 1); return arch_atomic_add64_return(v, 1);
} }
static inline int64_t atomic_dec64_return(int64_t *v) static inline int64_t arch_atomic_dec64_return(int64_t *v)
{ {
return atomic_sub64_return(v, 1); return arch_atomic_sub64_return(v, 1);
} }
#endif /* ATOMIC_H*/ #endif /* X86_LIB_ATOMIC_H */

View File

@@ -1,6 +1,6 @@
/*- /*-
* Copyright (c) 1998 Doug Rabson * Copyright (c) 1998 Doug Rabson
* Copyright (c) 2017-2022 Intel Corporation. * Copyright (c) 2017-2025 Intel Corporation.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@@ -28,7 +28,7 @@
#ifndef BITS_H #ifndef BITS_H
#define BITS_H #define BITS_H
#include <asm/lib/atomic.h> #include <atomic.h>
/** /**
* *

View File

@@ -0,0 +1,111 @@
/*
* Copyright (C) 2025 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef ATOMIC_H
#define ATOMIC_H
#include <types.h>
#include <asm/lib/atomic.h>
/* The mandatory functions should be implemented by arch atomic library */
/* 32/64-bit atomic increment and decrement. */
static inline void arch_atomic_inc32(uint32_t * ptr);
static inline void arch_atomic_inc64(uint64_t * ptr);
static inline void arch_atomic_dec32(uint32_t * ptr);
static inline void arch_atomic_dec64(uint64_t * ptr);
/* Atomic compare-and-exchange; presumably returns the prior value of *ptr
 * (the x86 implementation returns the cmpxchg result) -- confirm per arch. */
static inline uint32_t arch_atomic_cmpxchg32(volatile uint32_t * ptr, uint32_t old, uint32_t new);
static inline uint64_t arch_atomic_cmpxchg64(volatile uint64_t * ptr, uint64_t old, uint64_t new);
/* Atomic exchange: store v into *p and return the value previously held. */
static inline uint32_t arch_atomic_swap32(uint32_t *p, uint32_t v);
static inline uint64_t arch_atomic_swap64(uint64_t *p, uint64_t v);
/* Atomic arithmetic that returns the value *after* the operation. */
static inline int32_t arch_atomic_add_return(int32_t *p, int32_t v);
static inline int32_t arch_atomic_sub_return(int32_t *p, int32_t v);
static inline int32_t arch_atomic_inc_return(int32_t *v);
static inline int32_t arch_atomic_dec_return(int32_t *v);
static inline int64_t arch_atomic_add64_return(int64_t *p, int64_t v);
static inline int64_t arch_atomic_sub64_return(int64_t *p, int64_t v);
static inline int64_t arch_atomic_inc64_return(int64_t *v);
static inline int64_t arch_atomic_dec64_return(int64_t *v);
/* The common functions map to arch implementation */
/*
 * Atomically increment the 32-bit value at @ptr.
 *
 * Dispatches to the arch implementation. Note: no 'return' of the callee's
 * (void) result -- 'return expr;' in a void function violates C11 6.8.6.4.
 */
static inline void atomic_inc32(uint32_t * ptr)
{
	arch_atomic_inc32(ptr);
}
/*
 * Atomically increment the 64-bit value at @ptr.
 *
 * Dispatches to the arch implementation; a plain call (no 'return') is used
 * because returning a void expression from a void function is a C11
 * constraint violation (6.8.6.4).
 */
static inline void atomic_inc64(uint64_t * ptr)
{
	arch_atomic_inc64(ptr);
}
/*
 * Atomically decrement the 32-bit value at @ptr.
 *
 * Dispatches to the arch implementation; the call result is not 'return'ed
 * since both functions are void (C11 6.8.6.4 forbids 'return expr;' here).
 */
static inline void atomic_dec32(uint32_t * ptr)
{
	arch_atomic_dec32(ptr);
}
/*
 * Atomically decrement the 64-bit value at @ptr.
 *
 * Dispatches to the arch implementation; the call result is not 'return'ed
 * since both functions are void (C11 6.8.6.4 forbids 'return expr;' here).
 */
static inline void atomic_dec64(uint64_t * ptr)
{
	arch_atomic_dec64(ptr);
}
/*
 * Atomic 32-bit compare-and-exchange on *ptr; forwards old/new to the arch
 * implementation and hands back whatever value it reports (the value read
 * from *ptr -- confirm exact semantics against the arch header).
 */
static inline uint32_t atomic_cmpxchg32(volatile uint32_t * ptr, uint32_t old, uint32_t new)
{
	uint32_t observed = arch_atomic_cmpxchg32(ptr, old, new);

	return observed;
}
/*
 * Atomic 64-bit compare-and-exchange on *ptr; forwards old/new to the arch
 * implementation and hands back whatever value it reports (the value read
 * from *ptr -- confirm exact semantics against the arch header).
 */
static inline uint64_t atomic_cmpxchg64(volatile uint64_t * ptr, uint64_t old, uint64_t new)
{
	uint64_t observed = arch_atomic_cmpxchg64(ptr, old, new);

	return observed;
}
/* Atomically replace *p with zero and return the 32-bit value it held. */
static inline uint32_t atomic_readandclear32(uint32_t *p)
{
	uint32_t old_val = arch_atomic_swap32(p, 0U);

	return old_val;
}
/* Atomically replace *p with zero and return the 64-bit value it held. */
static inline uint64_t atomic_readandclear64(uint64_t *p)
{
	uint64_t old_val = arch_atomic_swap64(p, 0UL);

	return old_val;
}
/* Atomically add v to *p; returns the post-add value of *p. */
static inline int32_t atomic_add_return(int32_t *p, int32_t v)
{
	int32_t new_val = arch_atomic_add_return(p, v);

	return new_val;
}
/* Atomically subtract v from *p; returns the post-subtract value of *p. */
static inline int32_t atomic_sub_return(int32_t *p, int32_t v)
{
	int32_t new_val = arch_atomic_sub_return(p, v);

	return new_val;
}
/* Atomically increment *v; returns the incremented value. */
static inline int32_t atomic_inc_return(int32_t *v)
{
	int32_t new_val = arch_atomic_inc_return(v);

	return new_val;
}
/* Atomically decrement *v; returns the decremented value. */
static inline int32_t atomic_dec_return(int32_t *v)
{
	int32_t new_val = arch_atomic_dec_return(v);

	return new_val;
}
/* Atomically add v to the 64-bit *p; returns the post-add value of *p. */
static inline int64_t atomic_add64_return(int64_t *p, int64_t v)
{
	int64_t new_val = arch_atomic_add64_return(p, v);

	return new_val;
}
/* Atomically subtract v from the 64-bit *p; returns the post-subtract value. */
static inline int64_t atomic_sub64_return(int64_t *p, int64_t v)
{
	int64_t new_val = arch_atomic_sub64_return(p, v);

	return new_val;
}
/* Atomically increment the 64-bit *v; returns the incremented value. */
static inline int64_t atomic_inc64_return(int64_t *v)
{
	int64_t new_val = arch_atomic_inc64_return(v);

	return new_val;
}
/* Atomically decrement the 64-bit *v; returns the decremented value. */
static inline int64_t atomic_dec64_return(int64_t *v)
{
	int64_t new_val = arch_atomic_dec64_return(v);

	return new_val;
}
#endif /* ATOMIC_H*/