Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-18 19:57:31 +00:00)
hv: softirq: refine softirq
1. Add register_softirq() to register a softirq handler.
2. Rename exec_softirq() to do_softirq() and raise_softirq() to fire_softirq().
3. In do_softirq(), call the registered softirq handlers instead of calling the device softirq handling functions directly.
4. Enable IRQs after VM exit, and disable them again after the first do_softirq() call, before VM entry.
5. Call do_softirq() once more with IRQs disabled, to handle softirqs that would otherwise risk staying unhandled.
6. Rename SOFTIRQ_DEV_ASSIGN to SOFTIRQ_PTDEV.
7. Remove SOFTIRQ_ATOMIC.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
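Taken together, points 1-3 replace a hard-coded dispatch in exec_softirq() with a small handler table. A minimal, self-contained sketch of that flow (single CPU, a plain variable instead of ACRN's per_cpu pending bitmap, and fake_timer_softirq as a stand-in for the real handler, so it is an illustration rather than hypervisor code) could look like this:

#include <stdint.h>
#include <stdio.h>

#define SOFTIRQ_TIMER	0U
#define SOFTIRQ_PTDEV	1U
#define NR_SOFTIRQS	2U

typedef void (*softirq_handler)(uint16_t cpu_id);

static softirq_handler softirq_handlers[NR_SOFTIRQS];
static uint64_t softirq_pending;	/* per-CPU in the real hypervisor */

/* point 1: a subsystem registers its handler once, at init time */
static void register_softirq(uint16_t nr, softirq_handler handler)
{
	softirq_handlers[nr] = handler;
}

/* point 2: interrupt context only marks the softirq as pending */
static void fire_softirq(uint16_t nr)
{
	softirq_pending |= (1UL << nr);
}

/* point 3: drain the pending bits through the registered handlers */
static void do_softirq(void)
{
	uint16_t nr;

	for (nr = 0U; nr < NR_SOFTIRQS; nr++) {
		if ((softirq_pending & (1UL << nr)) != 0UL) {
			softirq_pending &= ~(1UL << nr);
			if (softirq_handlers[nr] != NULL) {
				(*softirq_handlers[nr])(0U);
			}
		}
	}
}

static void fake_timer_softirq(uint16_t cpu_id)
{
	printf("timer softirq ran on cpu %u\n", (unsigned)cpu_id);
}

int main(void)
{
	register_softirq(SOFTIRQ_TIMER, fake_timer_softirq);	/* timer_init() does this       */
	fire_softirq(SOFTIRQ_TIMER);				/* tsc_deadline_handler() does  */
	do_softirq();						/* vcpu_thread() drains the bits */
	return 0;
}

In the hypervisor itself, timer_init() and ptdev_init() do the registration, tsc_deadline_handler() and ptdev_enqueue_softirq() do the firing, and vcpu_thread() drains the pending bitmap, as the hunks below show.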
This commit is contained in:
Parent: 073583cc41
Commit: 457ecd6ef7
@@ -5,6 +5,7 @@
  */
 
 #include <hypervisor.h>
+#include <softirq.h>
 
 static spinlock_t exception_spinlock = { .head = 0U, .tail = 0U, };
 
@@ -27,7 +27,7 @@ static void run_timer(struct hv_timer *timer)
 /* run in interrupt context */
 static int tsc_deadline_handler(__unused int irq, __unused void *data)
 {
-	raise_softirq(SOFTIRQ_TIMER);
+	fire_softirq(SOFTIRQ_TIMER);
 	return 0;
 }
 
@@ -156,33 +156,7 @@ static void init_tsc_deadline_timer(void)
 	msr_write(MSR_IA32_TSC_DEADLINE, 0UL);
 }
 
-void timer_init(void)
-{
-	char name[32] = {0};
-	uint16_t pcpu_id = get_cpu_id();
-
-	snprintf(name, 32, "timer_tick[%hu]", pcpu_id);
-	if (request_timer_irq(pcpu_id, tsc_deadline_handler, NULL, name) < 0) {
-		pr_err("Timer setup failed");
-		return;
-	}
-
-	init_tsc_deadline_timer();
-	init_percpu_timer(pcpu_id);
-}
-
-void timer_cleanup(void)
-{
-	uint16_t pcpu_id = get_cpu_id();
-
-	if (per_cpu(timer_node, pcpu_id) != NULL) {
-		unregister_handler_common(per_cpu(timer_node, pcpu_id));
-	}
-
-	per_cpu(timer_node, pcpu_id) = NULL;
-}
-
-void timer_softirq(uint16_t pcpu_id)
+static void timer_softirq(uint16_t pcpu_id)
 {
 	struct per_cpu_timers *cpu_timer;
 	struct hv_timer *timer;
@@ -222,6 +196,34 @@ void timer_softirq(uint16_t pcpu_id)
 	update_physical_timer(cpu_timer);
 }
 
+void timer_init(void)
+{
+	char name[32] = {0};
+	uint16_t pcpu_id = get_cpu_id();
+
+	init_percpu_timer(pcpu_id);
+	register_softirq(SOFTIRQ_TIMER, timer_softirq);
+
+	snprintf(name, 32, "timer_tick[%hu]", pcpu_id);
+	if (request_timer_irq(pcpu_id, tsc_deadline_handler, NULL, name) < 0) {
+		pr_err("Timer setup failed");
+		return;
+	}
+
+	init_tsc_deadline_timer();
+}
+
+void timer_cleanup(void)
+{
+	uint16_t pcpu_id = get_cpu_id();
+
+	if (per_cpu(timer_node, pcpu_id) != NULL) {
+		unregister_handler_common(per_cpu(timer_node, pcpu_id));
+	}
+
+	per_cpu(timer_node, pcpu_id) = NULL;
+}
+
 void check_tsc(void)
 {
 	uint64_t temp64;
@@ -216,11 +216,11 @@ int vmexit_handler(struct vcpu *vcpu)
 		/* Handling external_interrupt
 		 * should disable intr
 		 */
-		ret = dispatch->handler(vcpu);
-	} else {
-		CPU_IRQ_ENABLE();
-		ret = dispatch->handler(vcpu);
-		CPU_IRQ_DISABLE();
+		CPU_IRQ_DISABLE();
+		ret = dispatch->handler(vcpu);
+		CPU_IRQ_ENABLE();
+	} else {
+		ret = dispatch->handler(vcpu);
 	}
 
 	return ret;
@@ -6,6 +6,7 @@
 
 #include <hypervisor.h>
 #include <schedule.h>
+#include <softirq.h>
 
 bool x2apic_enabled;
 
@@ -33,8 +34,11 @@ void vcpu_thread(struct vcpu *vcpu)
 	run_vcpu_pre_work(vcpu);
 
 	do {
-		/* handling pending softirq */
-		exec_softirq();
+		/* handle pending softirq when irq enable*/
+		do_softirq();
+		CPU_IRQ_DISABLE();
+		/* handle risk softirq when disabling irq*/
+		do_softirq();
 
 		/* Check and process pending requests(including interrupt) */
 		ret = acrn_handle_pending_request(vcpu);
@@ -85,6 +89,7 @@ void vcpu_thread(struct vcpu *vcpu)
 		/* Restore native TSC_AUX */
 		CPU_MSR_WRITE(MSR_IA32_TSC_AUX, tsc_aux_hyp_cpu);
 
+		CPU_IRQ_ENABLE();
 		/* Dispatch handler */
 		ret = vmexit_handler(vcpu);
 		if (ret < 0) {
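Points 4 and 5 of the commit message show up in the vcpu_thread hunk above: do_softirq() runs once while IRQs are still enabled, and once more right after CPU_IRQ_DISABLE(). The second pass exists because an interrupt can land between the first pass and the disable, firing a softirq that would otherwise stay pending across the VM entry. A compilable toy model of that window (single CPU, IRQ macros reduced to a flag, not ACRN code) is sketched below:

#include <stdint.h>
#include <stdio.h>

static int irq_enabled = 1;
static uint64_t softirq_pending;

#define CPU_IRQ_DISABLE()	(irq_enabled = 0)	/* stand-in for the real cli */
#define CPU_IRQ_ENABLE()	(irq_enabled = 1)	/* stand-in for the real sti */

/* an interrupt handler would call fire_softirq(); model it as a bit set */
static void interrupt_fires_softirq(void)
{
	if (irq_enabled) {
		softirq_pending |= 1UL;
	}
}

static void do_softirq(void)
{
	if (softirq_pending != 0UL) {
		printf("softirq handled (irq %s)\n", irq_enabled ? "on" : "off");
		softirq_pending = 0UL;
	}
}

int main(void)
{
	do_softirq();			/* first pass, IRQs still enabled        */
	interrupt_fires_softirq();	/* a late interrupt lands in the window  */
	CPU_IRQ_DISABLE();
	do_softirq();			/* second pass catches it, so nothing is
					 * left pending across the VM entry      */
	/* ... VM entry would happen here with IRQs off and no pending work */
	CPU_IRQ_ENABLE();
	return 0;
}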
@@ -8,7 +8,7 @@
 #include <softirq.h>
 #include <ptdev.h>
 
-/* SOFTIRQ_DEV_ASSIGN list for all CPUs */
+/* SOFTIRQ_PTDEV list for all CPUs */
 struct list_head softirq_dev_entry_list;
 /* passthrough device link */
 struct list_head ptdev_list;
@@ -31,7 +31,7 @@ spinlock_t softirq_dev_lock;
 static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
 {
 	spinlock_rflags;
-	/* enqueue request in order, SOFTIRQ_DEV_ASSIGN will pickup */
+	/* enqueue request in order, SOFTIRQ_PTDEV will pickup */
 	spinlock_irqsave_obtain(&softirq_dev_lock);
 
 	/* avoid adding recursively */
@@ -40,7 +40,7 @@ static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
 	list_add_tail(&entry->softirq_node,
 			&softirq_dev_entry_list);
 	spinlock_irqrestore_release(&softirq_dev_lock);
-	raise_softirq(SOFTIRQ_DEV_ASSIGN);
+	fire_softirq(SOFTIRQ_PTDEV);
 }
 
 struct ptdev_remapping_info*
@@ -169,6 +169,8 @@ void ptdev_init(void)
 	spinlock_init(&ptdev_lock);
 	INIT_LIST_HEAD(&softirq_dev_entry_list);
 	spinlock_init(&softirq_dev_lock);
+
+	register_softirq(SOFTIRQ_PTDEV, ptdev_softirq);
 }
 
 void ptdev_release_all_entries(struct vm *vm)
@@ -7,89 +7,42 @@
 #include <hypervisor.h>
 #include <softirq.h>
 
-void disable_softirq(uint16_t cpu_id)
-{
-	bitmap_clear_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
-}
-
-void enable_softirq(uint16_t cpu_id)
-{
-	bitmap_set_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
-}
+static softirq_handler softirq_handlers[NR_SOFTIRQS];
 
 void init_softirq(void)
 {
 	uint16_t pcpu_id;
 
 	for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
 		per_cpu(softirq_pending, pcpu_id) = 0UL;
-		bitmap_set_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, pcpu_id));
 	}
 }
 
-void raise_softirq(uint16_t softirq_id)
+/*
+ * @pre: nr will not equal or large than NR_SOFTIRQS
+ */
+void register_softirq(uint16_t nr, softirq_handler handler)
 {
-	uint16_t cpu_id = get_cpu_id();
-	uint64_t *bitmap = &per_cpu(softirq_pending, cpu_id);
-
-	if (cpu_id >= phys_cpu_num) {
-		return;
-	}
-
-	bitmap_set_lock(softirq_id, bitmap);
+	softirq_handlers[nr] = handler;
 }
 
-void exec_softirq(void)
+/*
+ * @pre: nr will not equal or large than NR_SOFTIRQS
+ */
+void fire_softirq(uint16_t nr)
 {
-	uint16_t cpu_id = get_cpu_id();
-	volatile uint64_t *bitmap = &per_cpu(softirq_pending, cpu_id);
-
-	uint16_t softirq_id;
-
-	if (cpu_id >= phys_cpu_num) {
-		return;
-	}
-
-	if (((*bitmap) & SOFTIRQ_MASK) == 0UL) {
-		return;
-	}
-
-	/* Disable softirq
-	 * SOFTIRQ_ATOMIC bit = 0 means softirq already in execution
-	 */
-	if (!bitmap_test_and_clear_lock(SOFTIRQ_ATOMIC, bitmap)) {
-		return;
-	}
-
-again:
-	CPU_IRQ_ENABLE();
-
-	while (1) {
-		softirq_id = ffs64(*bitmap);
-		if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX)) {
-			break;
-		}
-
-		bitmap_clear_lock(softirq_id, bitmap);
-
-		switch (softirq_id) {
-		case SOFTIRQ_TIMER:
-			timer_softirq(cpu_id);
-			break;
-		case SOFTIRQ_DEV_ASSIGN:
-			ptdev_softirq(cpu_id);
-			break;
-		default:
-			break;
-
-		}
-	}
-
-	CPU_IRQ_DISABLE();
-
-	if (((*bitmap) & SOFTIRQ_MASK) != 0U) {
-		goto again;
-	}
-
-	enable_softirq(cpu_id);
+	bitmap_set_lock(nr, &per_cpu(softirq_pending, get_cpu_id()));
+}
+
+void do_softirq(void)
+{
+	uint16_t nr;
+	uint16_t cpu_id = get_cpu_id();
+	volatile uint64_t *softirq_pending_bitmap =
+			&per_cpu(softirq_pending, cpu_id);
+
+	while (true) {
+		nr = ffs64(*softirq_pending_bitmap);
+		if (nr >= NR_SOFTIRQS)
+			break;
+
+		bitmap_clear_lock(nr, softirq_pending_bitmap);
+		(*softirq_handlers[nr])(cpu_id);
+	}
 }
@@ -54,7 +54,6 @@ static inline void initialize_timer(struct hv_timer *timer,
 int add_timer(struct hv_timer *timer);
 void del_timer(struct hv_timer *timer);
 
-void timer_softirq(uint16_t pcpu_id);
 void timer_init(void);
 void timer_cleanup(void);
 void check_tsc(void);
@@ -8,16 +8,14 @@
 #define SOFTIRQ_H
 
 #define SOFTIRQ_TIMER 0U
-#define SOFTIRQ_DEV_ASSIGN 1U
-#define SOFTIRQ_MAX 2U
-#define SOFTIRQ_MASK ((1UL<<SOFTIRQ_MAX)-1)
+#define SOFTIRQ_PTDEV 1U
+#define NR_SOFTIRQS 2U
+#define SOFTIRQ_MASK ((1UL << NR_SOFTIRQS) - 1UL)
 
-/* used for atomic value for prevent recursive */
-#define SOFTIRQ_ATOMIC 63U
+typedef void (*softirq_handler)(uint16_t cpu_id);
 
-void enable_softirq(uint16_t cpu_id);
-void disable_softirq(uint16_t cpu_id);
 void init_softirq(void);
-void raise_softirq(uint16_t softirq_id);
-void exec_softirq(void);
+void register_softirq(uint16_t nr, softirq_handler handler);
+void fire_softirq(uint16_t nr);
+void do_softirq(void);
 #endif /* SOFTIRQ_H */