Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: move several x86-related files for lib
modified:  Makefile
renamed:   lib/memory.c -> arch/x86/lib/memory.c
renamed:   include/lib/atomic.h -> include/arch/x86/lib/atomic.h
renamed:   include/lib/bits.h -> include/arch/x86/lib/bits.h
renamed:   include/lib/spinlock.h -> include/arch/x86/lib/spinlock.h

Tracked-On: #1842
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Committed by: ACRN System Integration
Parent: 350d6a9eb6
Commit: 795d6de0fb
243  hypervisor/include/arch/x86/lib/atomic.h  (new file)
@@ -0,0 +1,243 @@
/*-
 * Copyright (c) 1998 Doug Rabson
 * Copyright (c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef ATOMIC_H
#define ATOMIC_H
#include <types.h>

#define BUS_LOCK    "lock ; "

#define build_atomic_load(name, size, type)          \
static inline type name(const volatile type *ptr)    \
{                                                    \
    type ret;                                        \
    asm volatile("mov" size " %1,%0"                 \
            : "=r" (ret)                             \
            : "m" (*ptr)                             \
            : "cc", "memory");                       \
    return ret;                                      \
}
build_atomic_load(atomic_load16, "w", uint16_t)
build_atomic_load(atomic_load32, "l", uint32_t)
build_atomic_load(atomic_load64, "q", uint64_t)

#define build_atomic_store(name, size, type)         \
static inline void name(volatile type *ptr, type v)  \
{                                                    \
    asm volatile("mov" size " %1,%0"                 \
            : "=m" (*ptr)                            \
            : "r" (v)                                \
            : "cc", "memory");                       \
}
build_atomic_store(atomic_store16, "w", uint16_t)
build_atomic_store(atomic_store32, "l", uint32_t)
build_atomic_store(atomic_store64, "q", uint64_t)
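
For illustration, a minimal usage sketch of the generated load/store helpers (not part of this commit; the variable and function names are hypothetical). Plain aligned loads and stores need no LOCK prefix on x86, which is why these builders omit BUS_LOCK:

    static volatile uint32_t shared_status;    /* hypothetical shared word */

    static void publish_status(uint32_t s)
    {
        /* A single movl store; an aligned 32-bit store is never torn. */
        atomic_store32(&shared_status, s);
    }

    static uint32_t read_status(void)
    {
        /* A single movl load, likewise atomic by alignment alone. */
        return atomic_load32(&shared_status);
    }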

#define build_atomic_inc(name, size, type)           \
static inline void name(type *ptr)                   \
{                                                    \
    asm volatile(BUS_LOCK "inc" size " %0"           \
            : "=m" (*ptr)                            \
            : "m" (*ptr));                           \
}
build_atomic_inc(atomic_inc16, "w", uint16_t)
build_atomic_inc(atomic_inc32, "l", uint32_t)
build_atomic_inc(atomic_inc64, "q", uint64_t)

#define build_atomic_dec(name, size, type)           \
static inline void name(type *ptr)                   \
{                                                    \
    asm volatile(BUS_LOCK "dec" size " %0"           \
            : "=m" (*ptr)                            \
            : "m" (*ptr));                           \
}
build_atomic_dec(atomic_dec16, "w", uint16_t)
build_atomic_dec(atomic_dec32, "l", uint32_t)
build_atomic_dec(atomic_dec64, "q", uint64_t)

/**
 * #define atomic_set32(P, V)    (*(uint32_t *)(P) |= (V))
 *
 * Parameters:
 *  uint32_t *p    A pointer to the memory area that stores the source
 *                 value and the result of the set operation;
 *  uint32_t v     The value to be set.
 */
static inline void atomic_set32(uint32_t *p, uint32_t v)
{
    __asm __volatile(BUS_LOCK "orl %1,%0"
            :  "+m" (*p)
            :  "r" (v)
            :  "cc", "memory");
}

/*
 * #define atomic_clear32(P, V)    (*(uint32_t *)(P) &= ~(V))
 *
 * Parameters:
 *  uint32_t *p    A pointer to the memory area that stores the source
 *                 value and the result of the clear operation;
 *  uint32_t v     The value (bit mask) to be cleared.
 */
static inline void atomic_clear32(uint32_t *p, uint32_t v)
{
    __asm __volatile(BUS_LOCK "andl %1,%0"
            :  "+m" (*p)
            :  "r" (~v)
            :  "cc", "memory");
}

/*
 * #define atomic_set64(P, V)    (*(uint64_t *)(P) |= (V))
 *
 * Parameters:
 *  uint64_t *p    A pointer to the memory area that stores the source
 *                 value and the result of the set operation;
 *  uint64_t v     The value to be set.
 */
static inline void atomic_set64(uint64_t *p, uint64_t v)
{
    __asm __volatile(BUS_LOCK "orq %1,%0"
            :  "+m" (*p)
            :  "r" (v)
            :  "cc", "memory");
}

/*
 * #define atomic_clear64(P, V)    (*(uint64_t *)(P) &= ~(V))
 *
 * Parameters:
 *  uint64_t *p    A pointer to the memory area that stores the source
 *                 value and the result of the clear operation;
 *  uint64_t v     The value (bit mask) to be cleared.
 */
static inline void atomic_clear64(uint64_t *p, uint64_t v)
{
    __asm __volatile(BUS_LOCK "andq %1,%0"
            :  "+m" (*p)
            :  "r" (~v)
            :  "cc", "memory");
}
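
The set/clear pair maps naturally onto a shared flags word; a minimal usage sketch (not part of the diff; the flag names and functions are hypothetical):

    #define FLAG_VCPU_PAUSED    (1UL << 0U)    /* hypothetical flag bits */
    #define FLAG_VCPU_KICKED    (1UL << 1U)

    static uint64_t vcpu_flags;

    static void pause_vcpu(void)
    {
        /* lock orq: other CPUs never observe a torn read-modify-write. */
        atomic_set64(&vcpu_flags, FLAG_VCPU_PAUSED);
    }

    static void resume_vcpu(void)
    {
        /* lock andq with the complemented mask clears only this bit. */
        atomic_clear64(&vcpu_flags, FLAG_VCPU_PAUSED);
    }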
#define build_atomic_swap(name, size, type)           \
static inline type name(type *ptr, type v)            \
{                                                     \
    asm volatile(BUS_LOCK "xchg" size " %1,%0"        \
            :  "+m" (*ptr), "+r" (v)                  \
            :                                         \
            :  "cc", "memory");                       \
    return v;                                         \
}
build_atomic_swap(atomic_swap32, "l", uint32_t)
build_atomic_swap(atomic_swap64, "q", uint64_t)

/*
 * #define atomic_readandclear32(P) \
 *    (return (*(uint32_t *)(P)); *(uint32_t *)(P) = 0U;)
 */
static inline uint32_t atomic_readandclear32(uint32_t *p)
{
    return atomic_swap32(p, 0U);
}

/*
 * #define atomic_readandclear64(P) \
 *    (return (*(uint64_t *)(P)); *(uint64_t *)(P) = 0UL;)
 */
static inline uint64_t atomic_readandclear64(uint64_t *p)
{
    return atomic_swap64(p, 0UL);
}
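
A usage sketch for the read-and-clear pair (not part of this commit; the event mask and dispatch loop are hypothetical). Because xchg fetches the whole word and zeroes it in one atomic step, no bit set concurrently by another CPU can be lost:

    static uint64_t pending_events;    /* hypothetical pending-event bitmask */

    static void drain_pending_events(void)
    {
        /* Grab the entire mask and reset it atomically. */
        uint64_t events = atomic_readandclear64(&pending_events);

        while (events != 0UL) {
            uint16_t ev = ffs64(events);    /* ffs64() comes from bits.h below */
            events &= ~(1UL << ev);
            /* ... dispatch event 'ev' ... */
        }
    }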
#define build_atomic_cmpxchg(name, size, type)                 \
static inline type name(volatile type *ptr, type old, type new) \
{                                                              \
    type ret;                                                  \
    asm volatile(BUS_LOCK "cmpxchg" size " %2,%1"              \
            : "=a" (ret), "+m" (*ptr)                          \
            : "r" (new), "0" (old)                             \
            : "memory");                                       \
    return ret;                                                \
}
build_atomic_cmpxchg(atomic_cmpxchg32, "l", uint32_t)
build_atomic_cmpxchg(atomic_cmpxchg64, "q", uint64_t)
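
cmpxchg returns the value that was actually found in memory, which is exactly what a compare-and-swap retry loop needs; a minimal sketch (the atomic_max32 helper is hypothetical, not part of this commit) that atomically records a running maximum:

    static inline void atomic_max32(volatile uint32_t *p, uint32_t v)
    {
        uint32_t old = *p;

        /* Retry until our value is installed, or another CPU has
         * already stored a value >= v. */
        while (old < v) {
            uint32_t seen = atomic_cmpxchg32(p, old, v);
            if (seen == old) {
                break;        /* CAS succeeded */
            }
            old = seen;       /* lost the race; re-check */
        }
    }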
#define build_atomic_xadd(name, size, type)           \
static inline type name(type *ptr, type v)            \
{                                                     \
    asm volatile(BUS_LOCK "xadd" size " %0,%1"        \
            : "+r" (v), "+m" (*ptr)                   \
            :                                         \
            : "cc", "memory");                        \
    return v;                                         \
}
build_atomic_xadd(atomic_xadd16, "w", uint16_t)
build_atomic_xadd(atomic_xadd32, "l", int32_t)
build_atomic_xadd(atomic_xadd64, "q", int64_t)

static inline int32_t atomic_add_return(int32_t *p, int32_t v)
{
    return (atomic_xadd32(p, v) + v);
}

static inline int32_t atomic_sub_return(int32_t *p, int32_t v)
{
    return (atomic_xadd32(p, -v) - v);
}

static inline int32_t atomic_inc_return(int32_t *v)
{
    return atomic_add_return(v, 1);
}

static inline int32_t atomic_dec_return(int32_t *v)
{
    return atomic_sub_return(v, 1);
}

static inline int64_t atomic_add64_return(int64_t *p, int64_t v)
{
    return (atomic_xadd64(p, v) + v);
}

static inline int64_t atomic_sub64_return(int64_t *p, int64_t v)
{
    return (atomic_xadd64(p, -v) - v);
}

static inline int64_t atomic_inc64_return(int64_t *v)
{
    return atomic_add64_return(v, 1);
}

static inline int64_t atomic_dec64_return(int64_t *v)
{
    return atomic_sub64_return(v, 1);
}

#endif /* ATOMIC_H */
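
The xadd-based *_return helpers give the caller the post-operation value, which is exactly what reference counting needs; a minimal sketch with a hypothetical object type (not part of this commit):

    struct obj {          /* hypothetical refcounted object */
        int32_t refcnt;
    };

    static void obj_get(struct obj *o)
    {
        (void)atomic_inc_return(&o->refcnt);
    }

    static void obj_put(struct obj *o)
    {
        /* xadd returns the old value; adding the delta yields the new
         * count, so exactly one caller observes the drop to zero. */
        if (atomic_dec_return(&o->refcnt) == 0) {
            /* ... last reference gone; release the object ... */
        }
    }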
294  hypervisor/include/arch/x86/lib/bits.h  (new file)
@@ -0,0 +1,294 @@
/*-
 * Copyright (c) 1998 Doug Rabson
 * Copyright (c) 2017 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef BITS_H
#define BITS_H
#include <atomic.h>

/**
 * INVALID_BIT_INDEX is returned when the input parameter is zero:
 * the bit-scan instruction cannot find any bit set, so the function
 * returns this invalid bit index directly.
 **/
#define INVALID_BIT_INDEX  0xffffU

/*
 * fls32 - Find the Last (most significant) bit Set in value and
 * return the bit index of that bit.
 *
 * Bits are numbered starting at 0, the least significant bit.
 * A return value of INVALID_BIT_INDEX indicates that the input
 * argument was zero.
 *
 * Examples:
 *    fls32(0x0)        = INVALID_BIT_INDEX
 *    fls32(0x01)       = 0
 *    fls32(0x80)       = 7
 *    ...
 *    fls32(0x80000001) = 31
 *
 * @param value: 'uint32_t' type value
 *
 * @return value: zero-based bit index; INVALID_BIT_INDEX means that
 * 'value' was zero, so no set bit could be found and the invalid bit
 * index is returned directly.
 */
static inline uint16_t fls32(uint32_t value)
{
    uint32_t ret;
    asm volatile("bsrl %1,%0\n\t"
            "jnz 1f\n\t"
            "mov %2,%0\n"
            "1:" : "=r" (ret)
            : "rm" (value), "i" (INVALID_BIT_INDEX));
    return (uint16_t)ret;
}

static inline uint16_t fls64(uint64_t value)
{
    uint64_t ret = 0UL;
    asm volatile("bsrq %1,%0\n\t"
            "jnz 1f\n\t"
            "mov %2,%0\n"
            "1:" : "=r" (ret)
            : "rm" (value), "i" (INVALID_BIT_INDEX));
    return (uint16_t)ret;
}

/*
 * ffs64 - Find the First (least significant) bit Set in value
 * (long type) and return the index of that bit.
 *
 * Bits are numbered starting at 0, the least significant bit.
 * A return value of INVALID_BIT_INDEX indicates that the input
 * argument was zero.
 *
 * Examples:
 *    ffs64(0x0)                = INVALID_BIT_INDEX
 *    ffs64(0x01)               = 0
 *    ffs64(0xf0)               = 4
 *    ffs64(0xf00)              = 8
 *    ...
 *    ffs64(0x8000000000000001) = 0
 *    ffs64(0xf000000000000000) = 60
 *
 * @param value: 'uint64_t' type value
 *
 * @return value: zero-based bit index; INVALID_BIT_INDEX means that
 * 'value' was zero, so no set bit could be found and the invalid bit
 * index is returned directly.
 */
static inline uint16_t ffs64(uint64_t value)
{
    uint64_t ret;
    asm volatile("bsfq %1,%0\n\t"
            "jnz 1f\n\t"
            "mov %2,%0\n"
            "1:" : "=r" (ret)
            : "rm" (value), "i" (INVALID_BIT_INDEX));
    return (uint16_t)ret;
}

/* Bit scan forward for the least significant bit '0'. */
static inline uint16_t ffz64(uint64_t value)
{
    return ffs64(~value);
}

/*
 * Find the first zero bit in a uint64_t array.
 * @pre: the size must be a multiple of 64.
 */
static inline uint64_t ffz64_ex(const uint64_t *addr, uint64_t size)
{
    uint64_t ret = size;
    uint64_t idx;

    for (idx = 0UL; (idx << 6U) < size; idx++) {
        if (addr[idx] != ~0UL) {
            ret = (idx << 6U) + ffz64(addr[idx]);
            break;
        }
    }

    return ret;
}
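
ffz64_ex is the building block for simple ID allocators: treat the array as a used-bit map and claim the first clear bit. A minimal sketch, not part of this commit, with hypothetical names (bitmap_set_lock is defined further below in this header):

    #define MAX_IDS    128U                       /* hypothetical pool size */
    static uint64_t used_ids[MAX_IDS >> 6U];      /* one bit per ID */

    static uint64_t alloc_id(void)
    {
        /* Returns MAX_IDS when every bit is set, i.e. the pool is full. */
        uint64_t id = ffz64_ex(used_ids, MAX_IDS);

        if (id < MAX_IDS) {
            bitmap_set_lock((uint16_t)(id & 0x3fUL), &used_ids[id >> 6U]);
        }
        return id;
    }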
/*
 * Counts leading zeros.
 *
 * The number of leading zeros is defined as the number of
 * most significant bits which are not '1'. E.g.:
 *    clz(0x80000000) == 0
 *    clz(0x40000000) == 1
 *    ...
 *    clz(0x00000001) == 31
 *    clz(0x00000000) == 32
 *
 * @param value: The 32 bit value to count the number of leading zeros.
 *
 * @return The number of leading zeros in 'value'.
 */
static inline uint16_t clz(uint32_t value)
{
    return ((value != 0U) ? (31U - fls32(value)) : 32U);
}

/*
 * Counts leading zeros (64 bit version).
 *
 * @param value: The 64 bit value to count the number of leading zeros.
 *
 * @return The number of leading zeros in 'value'.
 */
static inline uint16_t clz64(uint64_t value)
{
    return ((value != 0UL) ? (63U - fls64(value)) : 64U);
}

/*
 * (*addr) |= (1UL << nr);
 * Note: input parameter nr shall be less than 64.
 * If nr >= 64, it will be truncated.
 */
#define build_bitmap_set(name, op_len, op_type, lock)            \
static inline void name(uint16_t nr_arg, volatile op_type *addr) \
{                                                                \
    uint16_t nr;                                                 \
    nr = nr_arg & ((8U * sizeof(op_type)) - 1U);                 \
    asm volatile(lock "or" op_len " %1,%0"                       \
            :  "+m" (*addr)                                      \
            :  "r" ((op_type)(1UL<<nr))                          \
            :  "cc", "memory");                                  \
}
build_bitmap_set(bitmap_set_nolock, "q", uint64_t, "")
build_bitmap_set(bitmap_set_lock, "q", uint64_t, BUS_LOCK)
build_bitmap_set(bitmap32_set_nolock, "l", uint32_t, "")
build_bitmap_set(bitmap32_set_lock, "l", uint32_t, BUS_LOCK)
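
The _lock/_nolock split lets callers skip the bus lock when a bitmap is only ever touched by the local CPU; a minimal sketch of the two variants (the masks are hypothetical, not part of this commit):

    static volatile uint64_t cpu_online_mask;    /* hypothetical shared mask */
    static uint64_t local_scratch_mask;          /* per-CPU, never shared */

    static void mark_cpu_online(uint16_t cpu_id)
    {
        /* Shared across CPUs: the LOCK prefix is required. */
        bitmap_set_lock(cpu_id, &cpu_online_mask);
    }

    static void mark_local(uint16_t nr)
    {
        /* Only this CPU writes it: the cheaper non-locked form is safe. */
        bitmap_set_nolock(nr, &local_scratch_mask);
    }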

/*
 * (*addr) &= ~(1UL << nr);
 * Note: input parameter nr shall be less than 64.
 * If nr >= 64, it will be truncated.
 */
#define build_bitmap_clear(name, op_len, op_type, lock)          \
static inline void name(uint16_t nr_arg, volatile op_type *addr) \
{                                                                \
    uint16_t nr;                                                 \
    nr = nr_arg & ((8U * sizeof(op_type)) - 1U);                 \
    asm volatile(lock "and" op_len " %1,%0"                      \
            :  "+m" (*addr)                                      \
            :  "r" ((op_type)(~(1UL<<(nr))))                     \
            :  "cc", "memory");                                  \
}
build_bitmap_clear(bitmap_clear_nolock, "q", uint64_t, "")
build_bitmap_clear(bitmap_clear_lock, "q", uint64_t, BUS_LOCK)
build_bitmap_clear(bitmap32_clear_nolock, "l", uint32_t, "")
build_bitmap_clear(bitmap32_clear_lock, "l", uint32_t, BUS_LOCK)

/*
 * return !!((*addr) & (1UL << nr));
 * Note: input parameter nr shall be less than 64. If nr >= 64, it
 * will be truncated.
 */
static inline bool bitmap_test(uint16_t nr, const volatile uint64_t *addr)
{
    int32_t ret = 0;
    asm volatile("btq %q2,%1\n\tsbbl %0, %0"
            : "=r" (ret)
            : "m" (*addr), "r" ((uint64_t)(nr & 0x3fU))
            : "cc", "memory");
    return (ret != 0);
}

static inline bool bitmap32_test(uint16_t nr, const volatile uint32_t *addr)
{
    int32_t ret = 0;
    asm volatile("btl %2,%1\n\tsbbl %0, %0"
            : "=r" (ret)
            : "m" (*addr), "r" ((uint32_t)(nr & 0x1fU))
            : "cc", "memory");
    return (ret != 0);
}
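
bitmap_test reads without modifying, which makes it the right primitive for polling; a minimal sketch reusing the hypothetical online mask from the sketch above:

    static bool is_cpu_online(uint16_t cpu_id)
    {
        /* bt + sbb turns the carry flag into 0 or -1, hence the bool. */
        return bitmap_test(cpu_id, &cpu_online_mask);
    }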

/*
 * bool ret = (*addr) & (1UL << nr);
 * (*addr) |= (1UL << nr);
 * return ret;
 * Note: input parameter nr shall be less than 64. If nr >= 64, it
 * will be truncated.
 */
#define build_bitmap_testandset(name, op_len, op_type, lock)     \
static inline bool name(uint16_t nr_arg, volatile op_type *addr) \
{                                                                \
    uint16_t nr;                                                 \
    int32_t ret = 0;                                             \
    nr = nr_arg & ((8U * sizeof(op_type)) - 1U);                 \
    asm volatile(lock "bts" op_len " %2,%1\n\tsbbl %0,%0"        \
            : "=r" (ret), "=m" (*addr)                           \
            : "r" ((op_type)nr)                                  \
            : "cc", "memory");                                   \
    return (ret != 0);                                           \
}
build_bitmap_testandset(bitmap_test_and_set_nolock, "q", uint64_t, "")
build_bitmap_testandset(bitmap_test_and_set_lock, "q", uint64_t, BUS_LOCK)
build_bitmap_testandset(bitmap32_test_and_set_nolock, "l", uint32_t, "")
build_bitmap_testandset(bitmap32_test_and_set_lock, "l", uint32_t, BUS_LOCK)

/*
 * bool ret = (*addr) & (1UL << nr);
 * (*addr) &= ~(1UL << nr);
 * return ret;
 * Note: input parameter nr shall be less than 64. If nr >= 64,
 * it will be truncated.
 */
#define build_bitmap_testandclear(name, op_len, op_type, lock)   \
static inline bool name(uint16_t nr_arg, volatile op_type *addr) \
{                                                                \
    uint16_t nr;                                                 \
    int32_t ret = 0;                                             \
    nr = nr_arg & ((8U * sizeof(op_type)) - 1U);                 \
    asm volatile(lock "btr" op_len " %2,%1\n\tsbbl %0,%0"        \
            : "=r" (ret), "=m" (*addr)                           \
            : "r" ((op_type)nr)                                  \
            : "cc", "memory");                                   \
    return (ret != 0);                                           \
}
build_bitmap_testandclear(bitmap_test_and_clear_nolock, "q", uint64_t, "")
build_bitmap_testandclear(bitmap_test_and_clear_lock, "q", uint64_t, BUS_LOCK)
build_bitmap_testandclear(bitmap32_test_and_clear_nolock, "l", uint32_t, "")
build_bitmap_testandclear(bitmap32_test_and_clear_lock, "l", uint32_t, BUS_LOCK)

#endif /* BITS_H */
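
Because the locked test-and-set returns the previous bit value atomically, it can arbitrate one-time work between CPUs; a minimal sketch with a hypothetical guard word (not part of this commit):

    static volatile uint64_t init_done_mask;    /* hypothetical guard bits */

    static void init_device_once(uint16_t dev_bit)
    {
        /* Only the CPU that flips the bit from 0 to 1 runs the init. */
        if (!bitmap_test_and_set_lock(dev_bit, &init_done_mask)) {
            /* ... one-time device initialization ... */
        }
    }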
103  hypervisor/include/arch/x86/lib/spinlock.h  (new file)
@@ -0,0 +1,103 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef SPINLOCK_H
#define SPINLOCK_H

#ifndef ASSEMBLER

#include <types.h>
#include <rtl.h>

/** The architecture dependent spinlock type. */
typedef struct _spinlock {
    uint32_t head;
    uint32_t tail;
} spinlock_t;

/* Function prototypes */
static inline void spinlock_init(spinlock_t *lock)
{
    (void)memset(lock, 0U, sizeof(spinlock_t));
}
static inline void spinlock_obtain(spinlock_t *lock)
{
    /* The lock function atomically increments and exchanges the head
     * counter of the queue. If the old head of the queue is equal to the
     * tail, we have locked the spinlock. Otherwise we have to wait.
     */
    asm volatile ("   movl $0x1,%%eax\n"
                  "   lock xaddl %%eax,%[head]\n"
                  "   cmpl %%eax,%[tail]\n"
                  "   jz 1f\n"
                  "2: pause\n"
                  "   cmpl %%eax,%[tail]\n"
                  "   jnz 2b\n"
                  "1:\n"
                  :
                  :
                  [head] "m"(lock->head),
                  [tail] "m"(lock->tail)
                  : "cc", "memory", "eax");
}

static inline void spinlock_release(spinlock_t *lock)
{
    /* Increment tail of queue */
    asm volatile ("   lock incl %[tail]\n"
                  :
                  : [tail] "m" (lock->tail)
                  : "cc", "memory");
}
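
This is a ticket lock: head hands out tickets, tail shows whose turn it is, so waiters acquire in FIFO order. A minimal usage sketch (the lock and the data it protects are hypothetical, not part of this commit):

    static spinlock_t vm_list_lock;    /* hypothetical lock */
    static uint32_t vm_count;          /* data it protects */

    static void vm_list_add(void)
    {
        spinlock_obtain(&vm_list_lock);
        vm_count++;                    /* critical section */
        spinlock_release(&vm_list_lock);
    }

Note that vm_list_lock would still need a spinlock_init() call during boot, before its first use.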

#else /* ASSEMBLER */

/** The offset of the head element. */
#define SYNC_SPINLOCK_HEAD_OFFSET    0

/** The offset of the tail element. */
#define SYNC_SPINLOCK_TAIL_OFFSET    4

.macro spinlock_obtain lock
    movl $1, %eax
    lea \lock, %rbx
    lock xaddl %eax, SYNC_SPINLOCK_HEAD_OFFSET(%rbx)
    cmpl %eax, SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
    jz 1f
2:
    pause
    cmpl %eax, SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
    jnz 2b
1:
.endm

#define spinlock_obtain(x) spinlock_obtain lock = (x)

.macro spinlock_release lock
    lea \lock, %rbx
    lock incl SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
.endm

#define spinlock_release(x) spinlock_release lock = (x)

#endif /* ASSEMBLER */

#define spinlock_irqsave_obtain(lock, p_rflags)    \
    do {                                           \
        CPU_INT_ALL_DISABLE(p_rflags);             \
        spinlock_obtain(lock);                     \
    } while (0)

#define spinlock_irqrestore_release(lock, rflags)  \
    do {                                           \
        spinlock_release(lock);                    \
        CPU_INT_ALL_RESTORE(rflags);               \
    } while (0)

#endif /* SPINLOCK_H */
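
The irqsave variants pair interrupt masking with the lock, so a lock holder cannot be preempted by an interrupt handler that wants the same lock. A minimal sketch (hypothetical names; it assumes CPU_INT_ALL_DISABLE takes a pointer to the saved RFLAGS and CPU_INT_ALL_RESTORE takes the saved value, as the macro arguments suggest):

    static spinlock_t irq_safe_lock;    /* hypothetical lock */

    static void touch_shared_from_any_context(void)
    {
        uint64_t rflags;

        spinlock_irqsave_obtain(&irq_safe_lock, &rflags);
        /* ... critical section, safe against local interrupts ... */
        spinlock_irqrestore_release(&irq_safe_lock, rflags);
    }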