mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-11-14 10:31:59 +00:00
hv: multi-arch construct barrier library
Extract the common barrier interface into include/lib/barrier.h and invoke the arch-specific implementation.

Tracked-On: #8803
Signed-off-by: Haoyu Tang <haoyu.tang@intel.com>
Reviewed-by: Yifan Liu <yifan1.liu@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
This commit is contained in:
@@ -41,6 +41,7 @@
|
||||
#include <acrn_common.h>
|
||||
#include <asm/msr.h>
|
||||
#include <errno.h>
|
||||
#include <barrier.h>
|
||||
|
||||
/* Define CPU stack alignment */
|
||||
#define CPU_STACK_ALIGN 16UL
|
||||
@@ -565,24 +566,6 @@ static inline void cpu_sp_write(uint64_t *stack_ptr)
|
||||
asm volatile ("movq %0, %%rsp" : : "r"(rsp));
|
||||
}
|
||||
|
||||
/*
 * Synchronizes all write accesses to memory.
 *
 * SFENCE is a store fence: every store issued before it becomes
 * globally visible before any store issued after it.  The "memory"
 * clobber additionally prevents the compiler from reordering memory
 * accesses across this point.
 */
static inline void cpu_write_memory_barrier(void)
{
	asm volatile ("sfence\n" : : : "memory");
}
|
||||
|
||||
/*
 * Synchronizes all read and write accesses to/from memory.
 *
 * MFENCE is a full fence: all loads and stores issued before it are
 * globally observed before any load or store issued after it.  The
 * "memory" clobber also acts as a compiler barrier.
 */
static inline void cpu_memory_barrier(void)
{
	asm volatile ("mfence\n" : : : "memory");
}
|
||||
|
||||
/*
 * Prevents compilers from reordering read/write accesses across this
 * barrier.
 *
 * An empty asm statement with a "memory" clobber emits no CPU
 * instruction; it only stops the compiler from moving memory accesses
 * across it.  It is NOT a hardware fence.
 */
static inline void cpu_compiler_barrier(void)
{
	asm volatile ("" : : : "memory");
}
|
||||
|
||||
static inline void invlpg(unsigned long addr)
|
||||
{
|
||||
asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
|
||||
|
||||
26
hypervisor/include/arch/x86/asm/lib/barrier.h
Normal file
26
hypervisor/include/arch/x86/asm/lib/barrier.h
Normal file
@@ -0,0 +1,26 @@
|
||||
/*
|
||||
* Copyright (C) 2025 Intel Corporation.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef X86_LIB_BARRIER_H
|
||||
#define X86_LIB_BARRIER_H
|
||||
/*
 * Synchronizes all read accesses to memory.
 *
 * LFENCE is a load fence: every load issued before it completes before
 * any load issued after it begins.  The "memory" clobber also acts as
 * a compiler barrier.
 */
static inline void arch_cpu_read_memory_barrier(void)
{
	asm volatile ("lfence\n" : : : "memory");
}
|
||||
|
||||
/*
 * Synchronizes all write accesses to memory.
 *
 * SFENCE is a store fence: every store issued before it becomes
 * globally visible before any store issued after it.  The "memory"
 * clobber also acts as a compiler barrier.
 */
static inline void arch_cpu_write_memory_barrier(void)
{
	asm volatile ("sfence\n" : : : "memory");
}
|
||||
|
||||
/*
 * Synchronizes all read and write accesses to/from memory.
 *
 * MFENCE is a full fence: all loads and stores issued before it are
 * globally observed before any load or store issued after it.  The
 * "memory" clobber also acts as a compiler barrier.
 */
static inline void arch_cpu_memory_barrier(void)
{
	asm volatile ("mfence\n" : : : "memory");
}
|
||||
#endif /* X86_LIB_BARRIER_H */
|
||||
40
hypervisor/include/lib/barrier.h
Normal file
40
hypervisor/include/lib/barrier.h
Normal file
@@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (C) 2025 Intel Corporation.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef BARRIER_H
|
||||
#define BARRIER_H
|
||||
|
||||
#include <types.h>
|
||||
#include <asm/lib/barrier.h>
|
||||
|
||||
/* The mandatory functions should be implemented by arch barrier library */
|
||||
static inline void arch_cpu_read_memory_barrier(void);
|
||||
static inline void arch_cpu_write_memory_barrier(void);
|
||||
static inline void arch_cpu_memory_barrier(void);
|
||||
|
||||
/* The common functions map to arch implementation */
|
||||
/* Synchronizes all write accesses to memory (maps to the arch fence). */
static inline void cpu_write_memory_barrier(void)
{
	/*
	 * Plain statement call instead of "return arch_...();": a return
	 * statement with an expression is a constraint violation in a
	 * function returning void (C11 6.8.6.4).
	 */
	arch_cpu_write_memory_barrier();
}
|
||||
/* Synchronizes all read accesses from memory (maps to the arch fence). */
static inline void cpu_read_memory_barrier(void)
{
	/*
	 * Plain statement call instead of "return arch_...();": a return
	 * statement with an expression is a constraint violation in a
	 * function returning void (C11 6.8.6.4).
	 */
	arch_cpu_read_memory_barrier();
}
|
||||
/* Synchronizes all read and write accesses to/from memory (maps to the
 * arch full fence). */
static inline void cpu_memory_barrier(void)
{
	/*
	 * Plain statement call instead of "return arch_...();": a return
	 * statement with an expression is a constraint violation in a
	 * function returning void (C11 6.8.6.4).
	 */
	arch_cpu_memory_barrier();
}
|
||||
|
||||
/*
 * Prevents compilers from reordering read/write access across this
 * barrier.
 *
 * The empty asm with a "memory" clobber emits no instruction; it is a
 * compiler-only barrier, common to all architectures, so it lives here
 * rather than in the per-arch header.
 */
static inline void cpu_compiler_barrier(void)
{
	asm volatile ("" : : : "memory");
}
|
||||
#endif /* BARRIER_H */
|
||||
Reference in New Issue
Block a user