Mirror of https://github.com/projectacrn/acrn-hypervisor.git
HV: remove 'spinlock_rflags' declaration
- remove the global declaration of 'cpu_int_value'

Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent 932bc32dcc
commit d030595194
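Every hunk below makes the same mechanical substitution: callers drop the `spinlock_rflags` macro, which silently declared a local `cpu_int_value` to hold the saved RFLAGS, and instead declare an explicit `uint64_t rflags` that is handed to the two-argument lock helpers. A minimal caller-side sketch of the before/after pattern follows; it is not standalone-compilable, and `my_lock` and `do_work()` are hypothetical placeholders rather than code from this commit.

/* Old pattern (removed by this commit): the saved-flags storage is hidden
 * behind the spinlock_rflags macro, which expands to a local declaration.
 */
static void caller_old(void)
{
	spinlock_rflags;			/* expands to: unsigned long cpu_int_value; */

	spinlock_irqsave_obtain(&my_lock);	/* saves RFLAGS into cpu_int_value, then disables interrupts */
	do_work();
	spinlock_irqrestore_release(&my_lock);	/* restores RFLAGS from cpu_int_value */
}

/* New pattern: the caller owns the saved-flags variable explicitly. */
static void caller_new(void)
{
	uint64_t rflags;

	spinlock_irqsave_obtain(&my_lock, &rflags);
	do_work();
	spinlock_irqrestore_release(&my_lock, rflags);
}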
@@ -93,17 +93,16 @@ static inline uint32_t
 ioapic_read_reg32(const void *ioapic_base, const uint32_t offset)
 {
 	uint32_t v;
-
-	spinlock_rflags;
+	uint64_t rflags;
 
-	spinlock_irqsave_obtain(&ioapic_lock);
+	spinlock_irqsave_obtain(&ioapic_lock, &rflags);
 
 	/* Write IOREGSEL */
 	mmio_write32(offset, (void *)ioapic_base + IOAPIC_REGSEL);
 	/* Read IOWIN */
 	v = mmio_read32((void *)ioapic_base + IOAPIC_WINDOW);
 
-	spinlock_irqrestore_release(&ioapic_lock);
+	spinlock_irqrestore_release(&ioapic_lock, rflags);
 	return v;
 }
 
@@ -111,16 +110,16 @@ static inline void
 ioapic_write_reg32(const void *ioapic_base,
 		const uint32_t offset, const uint32_t value)
 {
-	spinlock_rflags;
+	uint64_t rflags;
 
-	spinlock_irqsave_obtain(&ioapic_lock);
+	spinlock_irqsave_obtain(&ioapic_lock, &rflags);
 
 	/* Write IOREGSEL */
 	mmio_write32(offset, (void *)ioapic_base + IOAPIC_REGSEL);
 	/* Write IOWIN */
 	mmio_write32(value, (void *)ioapic_base + IOAPIC_WINDOW);
 
-	spinlock_irqrestore_release(&ioapic_lock);
+	spinlock_irqrestore_release(&ioapic_lock, rflags);
 }
 
 static inline uint64_t

@@ -67,20 +67,19 @@ static uint32_t find_available_vector()
  */
 uint32_t irq_mark_used(uint32_t irq)
 {
+	uint64_t rflags;
 	struct irq_desc *desc;
 
-	spinlock_rflags;
-
 	if (irq >= NR_IRQS) {
 		return IRQ_INVALID;
 	}
 
 	desc = &irq_desc_array[irq];
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 	if (desc->used == IRQ_NOT_ASSIGNED) {
 		desc->used = IRQ_ASSIGNED;
 	}
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 	return irq;
 }
 
@@ -91,19 +90,19 @@ uint32_t irq_mark_used(uint32_t irq)
 static uint32_t alloc_irq(void)
 {
 	uint32_t i;
+	uint64_t rflags;
 	struct irq_desc *desc;
 
-	spinlock_rflags;
-
 	for (i = irq_gsi_num(); i < NR_IRQS; i++) {
 		desc = &irq_desc_array[i];
-		spinlock_irqsave_obtain(&desc->irq_lock);
+		spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 		if (desc->used == IRQ_NOT_ASSIGNED) {
 			desc->used = IRQ_ASSIGNED;
-			spinlock_irqrestore_release(&desc->irq_lock);
+			spinlock_irqrestore_release(&desc->irq_lock, rflags);
 			break;
 		}
-		spinlock_irqrestore_release(&desc->irq_lock);
+		spinlock_irqrestore_release(&desc->irq_lock, rflags);
 	}
 	return (i == NR_IRQS) ? IRQ_INVALID : i;
 }
@@ -121,15 +120,14 @@ static void local_irq_desc_set_vector(uint32_t irq, uint32_t vr)
 /* lock version of set vector */
 static void irq_desc_set_vector(uint32_t irq, uint32_t vr)
 {
+	uint64_t rflags;
 	struct irq_desc *desc;
 
-	spinlock_rflags;
-
 	desc = &irq_desc_array[irq];
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 	vector_to_irq[vr] = irq;
 	desc->vector = vr;
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 }
 
 /* used with holding irq_lock outside */
@@ -173,7 +171,7 @@ int32_t request_irq(uint32_t irq_arg,
 {
 	struct irq_desc *desc;
 	uint32_t irq = irq_arg, vector;
-	spinlock_rflags;
+	uint64_t rflags;
 
 	/* ======================================================
 	 * This is low level ISR handler registering function
@@ -237,7 +235,7 @@ int32_t request_irq(uint32_t irq_arg,
 	}
 
 	if (desc->action == NULL) {
-		spinlock_irqsave_obtain(&desc->irq_lock);
+		spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 		desc->priv_data = priv_data;
 		desc->action = action_fn;
 
@@ -246,7 +244,7 @@ int32_t request_irq(uint32_t irq_arg,
 		 */
 		(void)strcpy_s(desc->name, 32U, name);
 
-		spinlock_irqrestore_release(&desc->irq_lock);
+		spinlock_irqrestore_release(&desc->irq_lock, rflags);
 	} else {
 		pr_err("%s: request irq(%u) vr(%u) for %s failed,\
 			already requested", __func__,
@@ -264,9 +262,9 @@ int32_t request_irq(uint32_t irq_arg,
 uint32_t irq_desc_alloc_vector(uint32_t irq)
 {
 	uint32_t vr = VECTOR_INVALID;
+	uint64_t rflags;
 	struct irq_desc *desc;
 
-	spinlock_rflags;
-
 	/* irq should be always available at this time */
 	if (irq >= NR_IRQS) {
@@ -274,7 +272,7 @@ uint32_t irq_desc_alloc_vector(uint32_t irq)
 	}
 
 	desc = &irq_desc_array[irq];
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 	if (desc->vector != VECTOR_INVALID) {
 		/* already allocated a vector */
 		goto OUT;
@@ -288,28 +286,27 @@ uint32_t irq_desc_alloc_vector(uint32_t irq)
 	}
 	local_irq_desc_set_vector(irq, vr);
 OUT:
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 	return vr;
 }
 
 void irq_desc_try_free_vector(uint32_t irq)
 {
+	uint64_t rflags;
 	struct irq_desc *desc;
 
-	spinlock_rflags;
-
 	/* legacy irq's vector is reserved and should not be freed */
 	if ((irq >= NR_IRQS) || (irq < NR_LEGACY_IRQ)) {
 		return;
 	}
 
 	desc = &irq_desc_array[irq];
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 	if (desc->action == NULL) {
 		_irq_desc_free_vector(irq);
 	}
 
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 
 }
 
@@ -421,8 +418,8 @@ void partition_mode_dispatch_interrupt(struct intr_excp_ctx *ctx)
 int handle_level_interrupt_common(struct irq_desc *desc,
 		__unused void *handler_data)
 {
+	uint64_t rflags;
 	irq_action_t action = desc->action;
-	spinlock_rflags;
 
 	/*
 	 * give other Core a try to return without hold irq_lock
@@ -434,7 +431,7 @@ int handle_level_interrupt_common(struct irq_desc *desc,
 		return 0;
 	}
 
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 	desc->state = IRQ_DESC_IN_PROCESS;
 
 	/* mask iopaic pin */
@@ -454,15 +451,15 @@ int handle_level_interrupt_common(struct irq_desc *desc,
 	}
 
 	desc->state = IRQ_DESC_PENDING;
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 
 	return 0;
 }
 
 int common_handler_edge(struct irq_desc *desc, __unused void *handler_data)
 {
+	uint64_t rflags;
 	irq_action_t action = desc->action;
-	spinlock_rflags;
 
 	/*
 	 * give other Core a try to return without hold irq_lock
@@ -474,7 +471,7 @@ int common_handler_edge(struct irq_desc *desc, __unused void *handler_data)
 		return 0;
 	}
 
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 	desc->state = IRQ_DESC_IN_PROCESS;
 
 	/* Send EOI to LAPIC/IOAPIC IRR */
@@ -485,15 +482,15 @@ int common_handler_edge(struct irq_desc *desc, __unused void *handler_data)
 	}
 
 	desc->state = IRQ_DESC_PENDING;
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 
 	return 0;
 }
 
 int common_dev_handler_level(struct irq_desc *desc, __unused void *handler_data)
 {
+	uint64_t rflags;
 	irq_action_t action = desc->action;
-	spinlock_rflags;
 
 	/*
 	 * give other Core a try to return without hold irq_lock
@@ -505,7 +502,7 @@ int common_dev_handler_level(struct irq_desc *desc, __unused void *handler_data)
 		return 0;
 	}
 
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 	desc->state = IRQ_DESC_IN_PROCESS;
 
 	/* mask iopaic pin */
@@ -521,7 +518,7 @@ int common_dev_handler_level(struct irq_desc *desc, __unused void *handler_data)
 	}
 
 	desc->state = IRQ_DESC_PENDING;
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 
 	/* we did not unmask irq until guest EOI the vector */
 	return 0;
@@ -544,26 +541,24 @@ int quick_handler_nolock(struct irq_desc *desc, __unused void *handler_data)
 
 void update_irq_handler(uint32_t irq, irq_handler_t func)
 {
+	uint64_t rflags;
 	struct irq_desc *desc;
 
-	spinlock_rflags;
-
 	if (irq >= NR_IRQS) {
 		return;
 	}
 
 	desc = &irq_desc_array[irq];
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 	desc->irq_handler = func;
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 }
 
 void free_irq(uint32_t irq)
 {
+	uint64_t rflags;
 	struct irq_desc *desc;
 
-	spinlock_rflags;
-
 	if (irq >= NR_IRQS) {
 		return;
 	}
@@ -572,13 +567,13 @@ void free_irq(uint32_t irq)
 	dev_dbg(ACRN_DBG_IRQ, "[%s] %s irq%d vr:0x%x",
 		__func__, desc->name, irq, irq_to_vector(irq));
 
-	spinlock_irqsave_obtain(&desc->irq_lock);
+	spinlock_irqsave_obtain(&desc->irq_lock, &rflags);
 
 	desc->action = NULL;
 	desc->priv_data = NULL;
 	memset(desc->name, '\0', 32U);
 
-	spinlock_irqrestore_release(&desc->irq_lock);
+	spinlock_irqrestore_release(&desc->irq_lock, rflags);
 	irq_desc_try_free_vector(desc->irq);
 }
 
@@ -30,26 +30,27 @@ static spinlock_t softirq_dev_lock;
 
 static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
 {
-	spinlock_rflags;
+	uint64_t rflags;
 
 	/* enqueue request in order, SOFTIRQ_PTDEV will pickup */
-	spinlock_irqsave_obtain(&softirq_dev_lock);
+	spinlock_irqsave_obtain(&softirq_dev_lock, &rflags);
 
 	/* avoid adding recursively */
 	list_del(&entry->softirq_node);
 	/* TODO: assert if entry already in list */
 	list_add_tail(&entry->softirq_node,
 		&softirq_dev_entry_list);
-	spinlock_irqrestore_release(&softirq_dev_lock);
+	spinlock_irqrestore_release(&softirq_dev_lock, rflags);
 	fire_softirq(SOFTIRQ_PTDEV);
 }
 
 struct ptdev_remapping_info*
 ptdev_dequeue_softirq(void)
 {
+	uint64_t rflags;
 	struct ptdev_remapping_info *entry = NULL;
 
-	spinlock_rflags;
-	spinlock_irqsave_obtain(&softirq_dev_lock);
+	spinlock_irqsave_obtain(&softirq_dev_lock, &rflags);
 
 	if (!list_empty(&softirq_dev_entry_list)) {
 		entry = get_first_item(&softirq_dev_entry_list,
@@ -57,7 +58,7 @@ ptdev_dequeue_softirq(void)
 		list_del_init(&entry->softirq_node);
 	}
 
-	spinlock_irqrestore_release(&softirq_dev_lock);
+	spinlock_irqrestore_release(&softirq_dev_lock, rflags);
 	return entry;
 }
 
@@ -86,7 +87,7 @@ alloc_entry(struct vm *vm, enum ptdev_intr_type type)
 void
 release_entry(struct ptdev_remapping_info *entry)
 {
-	spinlock_rflags;
+	uint64_t rflags;
 
 	/* remove entry from ptdev_list */
 	list_del_init(&entry->entry_node);
@@ -95,9 +96,9 @@ release_entry(struct ptdev_remapping_info *entry)
 	 * remove entry from softirq list.the ptdev_lock
 	 * is required before calling release_entry.
 	 */
-	spinlock_irqsave_obtain(&softirq_dev_lock);
+	spinlock_irqsave_obtain(&softirq_dev_lock, &rflags);
 	list_del_init(&entry->softirq_node);
-	spinlock_irqrestore_release(&softirq_dev_lock);
+	spinlock_irqrestore_release(&softirq_dev_lock, rflags);
 
 	free(entry);
 }
@@ -146,7 +147,7 @@ ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq)
 void
 ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
 {
-	spinlock_rflags;
+	uint64_t rflags;
 
 	atomic_clear32(&entry->active, ACTIVE_FLAG);
 
@@ -154,9 +155,9 @@ ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
 	entry->allocated_pirq = IRQ_INVALID;
 
 	/* remove from softirq list if added */
-	spinlock_irqsave_obtain(&softirq_dev_lock);
+	spinlock_irqsave_obtain(&softirq_dev_lock, &rflags);
 	list_del_init(&entry->softirq_node);
-	spinlock_irqrestore_release(&softirq_dev_lock);
+	spinlock_irqrestore_release(&softirq_dev_lock, rflags);
 }
 
 void ptdev_init(void)

@@ -47,13 +47,13 @@ static int do_copy_earlylog(struct shared_buf *dst_sbuf,
 {
 	uint32_t buf_size, valid_size;
 	uint32_t cur_tail;
-	spinlock_rflags;
+	uint64_t rflags;
 
 	if ((src_sbuf->ele_size != dst_sbuf->ele_size)
 		&& (src_sbuf->ele_num != dst_sbuf->ele_num)) {
-		spinlock_irqsave_obtain(&(logmsg.lock));
+		spinlock_irqsave_obtain(&(logmsg.lock), &rflags);
 		printf("Error to copy early hvlog: size mismatch\n");
-		spinlock_irqrestore_release(&(logmsg.lock));
+		spinlock_irqrestore_release(&(logmsg.lock), rflags);
 		return -EINVAL;
 	}
 
@@ -87,12 +87,11 @@ void init_logmsg(__unused uint32_t mem_size, uint32_t flags)
 void do_logmsg(uint32_t severity, const char *fmt, ...)
 {
 	va_list args;
-	uint64_t timestamp;
+	uint64_t timestamp, rflags;
 	uint16_t pcpu_id;
 	bool do_console_log;
 	bool do_mem_log;
 	char *buffer;
-	spinlock_rflags;
 
 	do_console_log = (((logmsg.flags & LOG_FLAG_STDOUT) != 0U) &&
 		(severity <= console_loglevel));
@@ -129,12 +128,12 @@ void do_logmsg(uint32_t severity, const char *fmt, ...)
 
 	/* Check if flags specify to output to stdout */
 	if (do_console_log) {
-		spinlock_irqsave_obtain(&(logmsg.lock));
+		spinlock_irqsave_obtain(&(logmsg.lock), &rflags);
 
 		/* Send buffer to stdout */
 		printf("%s\n\r", buffer);
 
-		spinlock_irqrestore_release(&(logmsg.lock));
+		spinlock_irqrestore_release(&(logmsg.lock), rflags);
 	}
 
 	/* Check if flags specify to output to memory */
@@ -169,11 +168,11 @@ void do_logmsg(uint32_t severity, const char *fmt, ...)
 
 void print_logmsg_buffer(uint16_t pcpu_id)
 {
-	spinlock_rflags;
 	char buffer[LOG_ENTRY_SIZE + 1];
 	int read_cnt;
 	struct shared_buf **sbuf;
 	int is_earlylog = 0;
+	uint64_t rflags;
 
 	if (pcpu_id >= phys_cpu_num) {
 		return;
@@ -187,13 +186,13 @@ void print_logmsg_buffer(uint16_t pcpu_id)
 			&per_cpu(sbuf, pcpu_id)[ACRN_HVLOG];
 	}
 
-	spinlock_irqsave_obtain(&(logmsg.lock));
+	spinlock_irqsave_obtain(&(logmsg.lock), &rflags);
 	if ((*sbuf) != NULL) {
 		printf("CPU%hu: head: 0x%x, tail: 0x%x %s\n\r",
 			pcpu_id, (*sbuf)->head, (*sbuf)->tail,
 			(is_earlylog != 0) ? "[earlylog]" : "");
 	}
-	spinlock_irqrestore_release(&(logmsg.lock));
+	spinlock_irqrestore_release(&(logmsg.lock), rflags);
 
 	do {
 		uint32_t idx;
@@ -212,8 +211,8 @@ void print_logmsg_buffer(uint16_t pcpu_id)
 		idx = (read_cnt < LOG_ENTRY_SIZE) ? read_cnt : LOG_ENTRY_SIZE;
 		buffer[idx] = '\0';
 
-		spinlock_irqsave_obtain(&(logmsg.lock));
+		spinlock_irqsave_obtain(&(logmsg.lock), &rflags);
 		printf("%s\n\r", buffer);
-		spinlock_irqrestore_release(&(logmsg.lock));
+		spinlock_irqrestore_release(&(logmsg.lock), rflags);
 	} while (read_cnt > 0);
 }
 
@@ -421,13 +421,6 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
 		*timestamp_ptr = ((uint64_t)tsh << 32) | tsl; \
 	}
 
-/* Define variable(s) required to save / restore architecture interrupt state.
- * These variable(s) are used in conjunction with the ESAL_AR_INT_ALL_DISABLE()
- * and ESAL_AR_INT_ALL_RESTORE() macros to hold any data that must be preserved
- * in order to allow these macros to function correctly.
- */
-#define CPU_INT_CONTROL_VARS uint64_t cpu_int_value
-
 /* Macro to save rflags register */
 #define CPU_RFLAGS_SAVE(rflags_ptr) \
 	{ \
@@ -453,9 +446,9 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
  * defined below and CPU_INT_CONTROL_VARS defined above.
  */
 
-#define CPU_INT_ALL_DISABLE() \
+#define CPU_INT_ALL_DISABLE(p_rflags) \
 	{ \
-		CPU_RFLAGS_SAVE(&cpu_int_value); \
+		CPU_RFLAGS_SAVE(p_rflags); \
 		CPU_IRQ_DISABLE(); \
 	}
 
@@ -466,9 +459,9 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
  * NOTE: This macro is used in conjunction with CPU_INT_ALL_DISABLE
  * and CPU_INT_CONTROL_VARS defined above.
  */
-#define CPU_INT_ALL_RESTORE() \
+#define CPU_INT_ALL_RESTORE(rflags) \
 	{ \
-		CPU_RFLAGS_RESTORE(cpu_int_value); \
+		CPU_RFLAGS_RESTORE(rflags); \
 	}
 
 /*

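With CPU_INT_CONTROL_VARS gone, the interrupt save/disable and restore macros take the saved-RFLAGS storage from the caller. A short usage sketch of the reworked macros follows, assuming a hypothetical critical section; only the macro names and parameter shapes come from this diff, the surrounding function is illustrative.

static void with_irqs_off(void)
{
	uint64_t rflags;

	CPU_INT_ALL_DISABLE(&rflags);	/* save RFLAGS into the caller's variable, then disable interrupts */
	/* ... work that must not be interrupted ... */
	CPU_INT_ALL_RESTORE(rflags);	/* write the saved RFLAGS back, re-enabling IF if it was set */
}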
@@ -63,18 +63,15 @@ static inline void spinlock_release(spinlock_t *lock)
 
 #endif /* ASSEMBLER */
 
-#define spinlock_rflags unsigned long cpu_int_value
-
-#define spinlock_irqsave_obtain(l) \
+#define spinlock_irqsave_obtain(lock, p_rflags) \
 	do { \
-		CPU_INT_ALL_DISABLE(); \
-		spinlock_obtain(l); \
+		CPU_INT_ALL_DISABLE(p_rflags); \
+		spinlock_obtain(lock); \
 	} while (0)
 
-#define spinlock_irqrestore_release(l) \
+#define spinlock_irqrestore_release(lock, rflags) \
 	do { \
-		spinlock_release(l); \
-		CPU_INT_ALL_RESTORE(); \
+		spinlock_release(lock); \
+		CPU_INT_ALL_RESTORE(rflags); \
 	} while (0)
 
 #endif /* SPINLOCK_H */
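For readers who want to exercise the new calling convention outside the hypervisor, here is a self-contained mock that mirrors the shape of the reworked spinlock.h and cpu.h macros. Only the macro shapes are taken from this commit; the GCC __sync test-and-set lock, the fake_rflags variable, and the demo in main() are stand-ins for the real assembly primitives and are assumptions for illustration only.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Toy lock and RFLAGS model standing in for the hypervisor's primitives. */
typedef struct { volatile int held; } spinlock_t;

static inline void spinlock_obtain(spinlock_t *lock)
{
	while (__sync_lock_test_and_set(&lock->held, 1) != 0) {
		/* spin until the lock is free */
	}
}

static inline void spinlock_release(spinlock_t *lock)
{
	__sync_lock_release(&lock->held);
}

static uint64_t fake_rflags = 0x202;	/* pretend the IF bit is set */
#define CPU_RFLAGS_SAVE(p_rflags)	(*(p_rflags) = fake_rflags)
#define CPU_RFLAGS_RESTORE(rflags)	(fake_rflags = (rflags))
#define CPU_IRQ_DISABLE()		(fake_rflags &= ~0x200UL)

/* The reworked macros, in the shape introduced by this commit. */
#define CPU_INT_ALL_DISABLE(p_rflags) \
	{ CPU_RFLAGS_SAVE(p_rflags); CPU_IRQ_DISABLE(); }
#define CPU_INT_ALL_RESTORE(rflags) \
	{ CPU_RFLAGS_RESTORE(rflags); }

#define spinlock_irqsave_obtain(lock, p_rflags) \
	do { CPU_INT_ALL_DISABLE(p_rflags); spinlock_obtain(lock); } while (0)
#define spinlock_irqrestore_release(lock, rflags) \
	do { spinlock_release(lock); CPU_INT_ALL_RESTORE(rflags); } while (0)

static spinlock_t demo_lock;

int main(void)
{
	uint64_t rflags;

	spinlock_irqsave_obtain(&demo_lock, &rflags);
	printf("in critical section, saved rflags = 0x%" PRIx64 "\n", rflags);
	spinlock_irqrestore_release(&demo_lock, rflags);
	printf("after release, rflags restored to 0x%" PRIx64 "\n", fake_rflags);
	return 0;
}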