hv: abstract IRQ-related macros

Convert IRQ-related macros to static inline functions and introduce
wrappers for arch-specific implementations. This follows the style we
defined for multi-arch development.
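
Each of these APIs is now a thin static inline wrapper in the common
cpu.h that dispatches to an arch_*-prefixed implementation supplied by
each architecture's cpu.h, e.g. (a minimal sketch of the pattern; see
the common header diff below):

    /* common cpu.h */
    static inline void local_irq_enable(void)
    {
        arch_local_irq_enable();  /* implemented by each arch's cpu.h */
    }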

This is a follow-up to commit
a7239d126 ("[FIXME] hv: risc-v add denpended implementation in cpu.h").

CPU_IRQ_ENABLE_ON_CONFIG  -> local_irq_enable
CPU_IRQ_DISABLE_ON_CONFIG -> local_irq_disable
CPU_INT_ALL_DISABLE       -> local_irq_save
CPU_INT_ALL_RESTORE       -> local_irq_restore
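
For example, a critical section that previously used the
CPU_INT_ALL_DISABLE/CPU_INT_ALL_RESTORE pair now reads (a sketch
mirroring the converted timer and ptirq call sites below):

    uint64_t rflags;

    local_irq_save(&rflags);    /* save flags, then mask local interrupts */
    /* ... critical section ... */
    local_irq_restore(rflags);  /* restore the saved interrupt state */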

Tracked-On: #8813
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Yifan Liu <yifan1.liu@intel.com>

@@ -46,5 +46,5 @@ void init_interrupt(uint16_t pcpu_id)
 {
 	init_interrupt_arch(pcpu_id);

-	CPU_IRQ_ENABLE_ON_CONFIG();
+	local_irq_enable();
 }

@@ -397,11 +397,11 @@ void cpu_do_idle(void)
 	struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);

 	if ((vcpu != NULL) && !is_lapic_pt_enabled(vcpu)) {
-		CPU_IRQ_ENABLE_ON_CONFIG();
+		local_irq_enable();
 	}
 	asm_pause();
 	if ((vcpu != NULL) && !is_lapic_pt_enabled(vcpu)) {
-		CPU_IRQ_DISABLE_ON_CONFIG();
+		local_irq_disable();
 	}
 }
 #endif

@@ -270,13 +270,13 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)
 	if (basic_exit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) {
 		/* Handling external_interrupt should disable intr */
 		if (!is_lapic_pt_enabled(vcpu)) {
-			CPU_IRQ_DISABLE_ON_CONFIG();
+			local_irq_disable();
 		}

 		ret = dispatch->handler(vcpu);

 		if (!is_lapic_pt_enabled(vcpu)) {
-			CPU_IRQ_ENABLE_ON_CONFIG();
+			local_irq_enable();
 		}
 	} else {
 		ret = dispatch->handler(vcpu);

@@ -204,7 +204,7 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
 	write_trampoline_sym(main_entry, (uint64_t)restore_s3_context);
 	clac();

-	CPU_IRQ_DISABLE_ON_CONFIG();
+	local_irq_disable();
 	vmx_off();

 	suspend_console();
@@ -222,7 +222,7 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
 	init_frequency_policy();

 	vmx_on();
-	CPU_IRQ_ENABLE_ON_CONFIG();
+	local_irq_enable();

 	/* restore the default main entry */
 	stac();

@@ -10,7 +10,7 @@
 #include <asm/cpu_caps.h>
 #include <asm/io.h>
 #include <asm/tsc.h>
-#include <asm/cpu.h>
+#include <cpu.h>
 #include <logmsg.h>
 #include <acpi.h>
@@ -116,11 +116,11 @@ static uint64_t hpet_calibrate_tsc(uint32_t cal_ms_arg)
 	uint64_t delta_tsc, delta_fs;
 	uint64_t rflags, tsc_khz;

-	CPU_INT_ALL_DISABLE(&rflags);
+	local_irq_save(&rflags);
 	tsc1 = tsc_read_hpet(&hpet1);
 	pit_calibrate_tsc(cal_ms_arg);
 	tsc2 = tsc_read_hpet(&hpet2);
-	CPU_INT_ALL_RESTORE(rflags);
+	local_irq_restore(rflags);

 	/* in case counter wrap happened in the low 32 bits */
 	if (hpet2 <= hpet1) {

@@ -23,7 +23,7 @@ void vcpu_thread(struct thread_object *obj)
 	do {
 		if (!is_lapic_pt_enabled(vcpu)) {
-			CPU_IRQ_DISABLE_ON_CONFIG();
+			local_irq_disable();
 		}

 		/* Don't open interrupt window between here and vmentry */
@@ -60,7 +60,7 @@ void vcpu_thread(struct thread_object *obj)
 		profiling_pre_vmexit_handler(vcpu);
 		if (!is_lapic_pt_enabled(vcpu)) {
-			CPU_IRQ_ENABLE_ON_CONFIG();
+			local_irq_enable();
 		}

 		/* Dispatch handler */
 		ret = vmexit_handler(vcpu);

@@ -227,5 +227,5 @@ void init_interrupt(uint16_t pcpu_id)
 		init_softirq();
 	}

-	CPU_IRQ_ENABLE_ON_CONFIG();
+	local_irq_enable();
 }

@@ -99,13 +99,13 @@ static void ptirq_enqueue_softirq(struct ptirq_remapping_info *entry)
 	uint64_t rflags;

 	/* enqueue request in order, SOFTIRQ_PTDEV will pickup */
-	CPU_INT_ALL_DISABLE(&rflags);
+	local_irq_save(&rflags);

 	/* avoid adding recursively */
 	list_del(&entry->softirq_node);
 	/* TODO: assert if entry already in list */
 	list_add_tail(&entry->softirq_node, &get_cpu_var(softirq_dev_entry_list));
-	CPU_INT_ALL_RESTORE(rflags);
+	local_irq_restore(rflags);

 	fire_softirq(SOFTIRQ_PTDEV);
 }
@@ -121,7 +121,7 @@ struct ptirq_remapping_info *ptirq_dequeue_softirq(uint16_t pcpu_id)
 	uint64_t rflags;
 	struct ptirq_remapping_info *entry = NULL;

-	CPU_INT_ALL_DISABLE(&rflags);
+	local_irq_save(&rflags);

 	while (!list_empty(&get_cpu_var(softirq_dev_entry_list))) {
 		entry = get_first_item(&per_cpu(softirq_dev_entry_list, pcpu_id), struct ptirq_remapping_info, softirq_node);
@@ -138,7 +138,7 @@ struct ptirq_remapping_info *ptirq_dequeue_softirq(uint16_t pcpu_id)
 		}
 	}

-	CPU_INT_ALL_RESTORE(rflags);
+	local_irq_restore(rflags);

 	return entry;
 }
@@ -172,10 +172,10 @@ void ptirq_release_entry(struct ptirq_remapping_info *entry)
 {
 	uint64_t rflags;

-	CPU_INT_ALL_DISABLE(&rflags);
+	local_irq_save(&rflags);
 	list_del_init(&entry->softirq_node);
 	del_timer(&entry->intr_delay_timer);
-	CPU_INT_ALL_RESTORE(rflags);
+	local_irq_restore(rflags);

 	bitmap_clear((entry->ptdev_entry_id) & 0x3FU, &ptirq_entry_bitmaps[entry->ptdev_entry_id >> 6U]);

@@ -55,9 +55,9 @@ void do_softirq(void)
 	if (per_cpu(softirq_servicing, cpu_id) == 0U) {
 		per_cpu(softirq_servicing, cpu_id) = 1U;

-		CPU_IRQ_ENABLE_ON_CONFIG();
+		local_irq_enable();
 		do_softirq_internal(cpu_id);
-		CPU_IRQ_DISABLE_ON_CONFIG();
+		local_irq_disable();
 		do_softirq_internal(cpu_id);

 		per_cpu(softirq_servicing, cpu_id) = 0U;

@@ -111,12 +111,12 @@ int32_t add_timer(struct hv_timer *timer)
 		pcpu_id = get_pcpu_id();
 		cpu_timer = &per_cpu(cpu_timers, pcpu_id);

-		CPU_INT_ALL_DISABLE(&rflags);
+		local_irq_save(&rflags);
 		/* update the physical timer if we're on the timer_list head */
 		if (local_add_timer(cpu_timer, timer)) {
 			update_physical_timer(cpu_timer);
 		}

-		CPU_INT_ALL_RESTORE(rflags);
+		local_irq_restore(rflags);
 		TRACE_2L(TRACE_TIMER_ACTION_ADDED, timer->timeout, 0UL);
 	}
@@ -162,11 +162,11 @@ void del_timer(struct hv_timer *timer)
 {
 	uint64_t rflags;

-	CPU_INT_ALL_DISABLE(&rflags);
+	local_irq_save(&rflags);

 	if ((timer != NULL) && !list_empty(&timer->node)) {
 		list_del_init(&timer->node);
 	}

-	CPU_INT_ALL_RESTORE(rflags);
+	local_irq_restore(rflags);
 }

 static void init_percpu_timer(uint16_t pcpu_id)

@@ -122,26 +122,36 @@ static inline void arch_asm_pause(void)
asm volatile (" csrc " STRINGIFY(reg) ", %0 \n\t" :: "r"(mask): "memory"); \
})
/**
* FIXME: to follow multi-arch design, refactor all of them into static inline functions with corresponding
* X86 implementation together.
*/
#define local_irq_disable() asm volatile("csrc sstatus, %0\n" ::"i"(SSTATUS_SIE) : "memory")
#define local_irq_enable() asm volatile("csrs sstatus, %0\n" ::"i"(SSTATUS_SIE) : "memory")
#define local_save_flags(x) ({ asm volatile("csrr %0, sstatus, 0\n" : "=r"(x)::"memory"); })
#define local_irq_restore(x) ({ asm volatile("csrs sstatus, %0\n" ::"rK"(x & SSTATUS_SIE) : "memory"); })
#define local_irq_save(x) \
({ \
uint64_t val = 0UL; \
asm volatile("csrrc %0, sstatus, %1\n" : "=r"(val) : "i"(SSTATUS_SIE) : "memory"); \
*(uint64_t *)(x) = val; \
})
static inline void arch_local_irq_enable(void)
{
asm volatile ("csrs sstatus, %0 \n"
:: "i"(SSTATUS_SIE)
: "memory");
}
#define CPU_INT_ALL_DISABLE(x) local_irq_save(x)
#define CPU_INT_ALL_RESTORE(x) local_irq_restore(x)
static inline void arch_local_irq_disable(void)
{
asm volatile ("csrc sstatus, %0 \n"
:: "i"(SSTATUS_SIE)
: "memory");
}
#define CPU_IRQ_ENABLE_ON_CONFIG local_irq_enable
#define CPU_IRQ_DISABLE_ON_CONFIG local_irq_disable
static inline void arch_local_irq_save(uint64_t *flags_ptr)
{
uint64_t val = 0UL;
/* read and clear the SSTATUS_SIE bit (disable interrupts) */
asm volatile("csrrc %0, sstatus, %1 \n"
: "=r"(val)
: "r"(SSTATUS_SIE)
: "memory");
*flags_ptr = val;
}
static inline void arch_local_irq_restore(uint64_t flags)
{
asm volatile("csrs sstatus, %0 \n" ::"rK"(flags & SSTATUS_SIE) : "memory");
}
void wait_sync_change(volatile const uint64_t *sync, uint64_t wake_sync);
void init_percpu_hart_id(uint32_t bsp_hart_id);

@@ -521,16 +521,6 @@ static inline void asm_safe_hlt(void)
asm volatile ("sti; hlt; cli" : : : "cc");
}
/* Disables interrupts on the current CPU */
#ifdef CONFIG_KEEP_IRQ_DISABLED
#define CPU_IRQ_DISABLE_ON_CONFIG() do { } while (0)
#else
#define CPU_IRQ_DISABLE_ON_CONFIG() \
{ \
asm volatile ("cli\n" : : : "cc"); \
}
#endif
/* Enables interrupts on the current CPU
* If CONFIG_KEEP_IRQ_DISABLED is 'y', all interrupts
* received in root mode will be handled in external interrupt
@@ -538,14 +528,20 @@ static inline void asm_safe_hlt(void)
  * Permanently turning off interrupts in root mode can be useful in
  * many scenarios (e.g., x86_tee).
  */
-#ifdef CONFIG_KEEP_IRQ_DISABLED
-#define CPU_IRQ_ENABLE_ON_CONFIG() do { } while (0)
-#else
-#define CPU_IRQ_ENABLE_ON_CONFIG() \
-	{ \
-		asm volatile ("sti\n" : : : "cc"); \
-	}
-#endif
+static inline void arch_local_irq_enable(void)
+{
+#ifndef CONFIG_KEEP_IRQ_DISABLED
+	asm volatile ("sti\n" : : : "cc");
+#endif
+}
+
+/* Disables interrupts on the current CPU */
+static inline void arch_local_irq_disable(void)
+{
+#ifndef CONFIG_KEEP_IRQ_DISABLED
+	asm volatile ("cli\n" : : : "cc");
+#endif
+}

 /* This macro writes the stack pointer. */
 static inline void cpu_sp_write(uint64_t *stack_ptr)
@@ -591,47 +587,30 @@ cpu_rdtscp_execute(uint64_t *timestamp_ptr, uint32_t *cpu_id_ptr)
 	*timestamp_ptr = ((uint64_t)tsh << 32U) | tsl;
 }

 /* Macro to save rflags register */
-#define CPU_RFLAGS_SAVE(rflags_ptr)		\
-{						\
-	asm volatile (" pushf");		\
-	asm volatile (" pop %0"			\
-		: "=r" (*(rflags_ptr))		\
-		: /* No inputs */);		\
+static inline void cpu_rflags_save(uint64_t *rflags_ptr)
+{
+	asm volatile (" pushf \n\t"
+		" pop %0"
+		: "=r" (*(rflags_ptr))
+		: /* No inputs */);
 }

 /* Macro to restore rflags register */
-#define CPU_RFLAGS_RESTORE(rflags)		\
-{						\
-	asm volatile (" push %0\n\t"		\
-		"popf \n\t": : "r" (rflags)	\
-		:"cc");				\
+static inline void cpu_rflags_restore(uint64_t rflags)
+{
+	asm volatile (" push %0\n\t"
+		"popf \n\t": : "r" (rflags)
+		:"cc");
 }

-/* This macro locks out interrupts and saves the current architecture status
- * register / state register to the specified address. This function does not
- * attempt to mask any bits in the return register value and can be used as a
- * quick method to guard a critical section.
- * NOTE: This macro is used in conjunction with CPU_INT_ALL_RESTORE
- * defined below and CPU_INT_CONTROL_VARS defined above.
- */
-#define CPU_INT_ALL_DISABLE(p_rflags)		\
-{						\
-	CPU_RFLAGS_SAVE(p_rflags);		\
-	CPU_IRQ_DISABLE_ON_CONFIG();		\
+static inline void arch_local_irq_save(uint64_t *flags_ptr)
+{
+	cpu_rflags_save(flags_ptr);
+	arch_local_irq_disable();
 }

-/* This macro restores the architecture status / state register used to lockout
- * interrupts to the value provided. The intent of this function is to be a
- * fast mechanism to restore the interrupt level at the end of a critical
- * section to its original level.
- * NOTE: This macro is used in conjunction with CPU_INT_ALL_DISABLE
- * and CPU_INT_CONTROL_VARS defined above.
- */
-#define CPU_INT_ALL_RESTORE(rflags)		\
-{						\
-	CPU_RFLAGS_RESTORE(rflags);		\
+static inline void arch_local_irq_restore(uint64_t flags)
+{
+	cpu_rflags_restore(flags);
 }

 #define ACRN_PSEUDO_PCPUID_MSR	MSR_IA32_SYSENTER_CS

@@ -68,6 +68,42 @@ static inline void asm_pause(void)
 	arch_asm_pause();
 }

+/* The mandatory functions should be implemented by arch. */
+static inline void arch_local_irq_enable(void);
+static inline void arch_local_irq_disable(void);
+static inline void arch_local_irq_save(uint64_t *flags_ptr);
+static inline void arch_local_irq_restore(uint64_t flags);
+
+static inline void local_irq_enable(void)
+{
+	arch_local_irq_enable();
+}
+
+static inline void local_irq_disable(void)
+{
+	arch_local_irq_disable();
+}
+
+/* This function locks out interrupts and saves the current architecture status
+ * register / state register to the specified address. This function does not
+ * attempt to mask any bits in the return register value and can be used as a
+ * quick method to guard a critical section.
+ * NOTE: This function is used in conjunction with local_irq_restore().
+ */
+static inline void local_irq_save(uint64_t *flags_ptr)
+{
+	arch_local_irq_save(flags_ptr);
+}
+
+/* This function restores the architecture status / state register used to lock out
+ * interrupts to the value provided. The intent of this function is to be a
+ * fast mechanism to restore the interrupt level at the end of a critical
+ * section to its original level.
+ * NOTE: This function is used in conjunction with local_irq_save().
+ */
+static inline void local_irq_restore(uint64_t flags)
+{
+	arch_local_irq_restore(flags);
+}
+
 #endif /* ASSEMBLER */
 #endif /* COMMON_CPU_H */

@@ -10,7 +10,7 @@
 #ifndef ASSEMBLER
 #include <types.h>
 #include <rtl.h>
-#include <asm/cpu.h>
+#include <cpu.h>
 #include <asm/lib/spinlock.h>

 /* The common spinlock type */
@@ -28,14 +28,14 @@ static inline void spinlock_init(spinlock_t *lock)
 static inline void spinlock_irqsave_obtain(spinlock_t *lock, uint64_t *flags)
 {
-	CPU_INT_ALL_DISABLE(flags);
+	local_irq_save(flags);
 	arch_spinlock_obtain(lock);
 }

 static inline void spinlock_irqrestore_release(spinlock_t *lock, uint64_t flags)
 {
 	arch_spinlock_release(lock);
-	CPU_INT_ALL_RESTORE(flags);
+	local_irq_restore(flags);
 }

 static inline void spinlock_obtain(spinlock_t *lock)