From 4b03c97a5e21050b4e0e1be9f38e2bfba6fd887b Mon Sep 17 00:00:00 2001
From: Jason Chen CJ
Date: Sun, 12 Aug 2018 21:02:27 +0800
Subject: [PATCH] add smp_call_function support

Make use of the VCPU_NOTIFY vector to add smp_call_function support.
A per_cpu field smp_call_info is added; smp_call_function is not
re-entrant, and the caller CPU returns only after all the target CPUs
have completed the call.

v4:
- remove the global lock
- make use of the wait_sync_change function to do the sequence sync

v3:
- remove the per_cpu lock in smp_call_info
- use a global lock to ensure the smp_call_function sequence
- use pcpu_sync_sleep to wait for IPI completion

v2:
- when a new smp call request arrives while an old one still exists,
  ignore the new one instead of overwriting the old one with it

Signed-off-by: Jason Chen CJ
---
 hypervisor/arch/x86/notify.c          | 42 +++++++++++++++++++++++++--
 hypervisor/include/arch/x86/irq.h     |  7 +++++
 hypervisor/include/arch/x86/per_cpu.h |  1 +
 3 files changed, 48 insertions(+), 2 deletions(-)
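Usage sketch for reviewers (not part of the patch): a minimal caller of the
new API, following the semantics described above. smp_call_function() and
smp_call_func_t are introduced by this patch and get_cpu_id() already exists
in the tree; the mark_done() callback, the done[] table and the target mask
are hypothetical, added purely for illustration.

/* assumes the usual ACRN includes (types, per-CPU helpers) are in scope */
static bool done[64];	/* sized for illustration only */

static void mark_done(void *data)
{
	bool *table = (bool *)data;

	/* runs on each target pCPU, inside the notification IPI handler */
	table[get_cpu_id()] = true;
}

static void example_caller(void)
{
	uint64_t mask = (1UL << 1U) | (1UL << 2U);	/* target pCPU 1 and 2 */

	/* returns only after both target pCPUs have run mark_done() */
	smp_call_function(mask, mark_done, done);
}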
diff --git a/hypervisor/arch/x86/notify.c b/hypervisor/arch/x86/notify.c
index 77351ae95..949c91365 100644
--- a/hypervisor/arch/x86/notify.c
+++ b/hypervisor/arch/x86/notify.c
@@ -8,15 +8,53 @@
 
 static uint32_t notification_irq = IRQ_INVALID;
 
+static volatile uint64_t smp_call_mask = 0UL;
+
 /* run in interrupt context */
 static int kick_notification(__unused uint32_t irq, __unused void *data)
 {
-	/* Notification vector does not require handling here, it's just used
-	 * to kick taget cpu out of non-root mode.
+	/* Notification vector is used to kick target cpu out of non-root mode.
+	 * And it also serves for smp call.
 	 */
+	uint16_t pcpu_id = get_cpu_id();
+
+	if (bitmap_test(pcpu_id, &smp_call_mask)) {
+		struct smp_call_info_data *smp_call =
+			&per_cpu(smp_call_info, pcpu_id);
+
+		if (smp_call->func)
+			smp_call->func(smp_call->data);
+		bitmap_clear_nolock(pcpu_id, &smp_call_mask);
+	}
+
 	return 0;
 }
 
+void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
+{
+	uint16_t pcpu_id;
+	struct smp_call_info_data *smp_call;
+
+	/* wait for previous smp call complete, which may run on other cpus */
+	while (atomic_cmpxchg64(&smp_call_mask, 0UL, mask & INVALID_BIT_INDEX));
+	while ((pcpu_id = ffs64(mask)) != INVALID_BIT_INDEX) {
+		bitmap_clear_nolock(pcpu_id, &mask);
+		if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+			smp_call = &per_cpu(smp_call_info, pcpu_id);
+			smp_call->func = func;
+			smp_call->data = data;
+		} else {
+			/* pcpu is not in active, print error */
+			pr_err("pcpu_id %d not in active!", pcpu_id);
+			bitmap_clear_nolock(pcpu_id, &smp_call_mask);
+		}
+	}
+	send_dest_ipi(smp_call_mask, VECTOR_NOTIFY_VCPU,
+		INTR_LAPIC_ICR_LOGICAL);
+	/* wait for current smp call complete */
+	wait_sync_change(&smp_call_mask, 0UL);
+}
+
 static int request_notification_irq(irq_action_t func, void *data,
 				const char *name)
 {
diff --git a/hypervisor/include/arch/x86/irq.h b/hypervisor/include/arch/x86/irq.h
index 60c276c83..5e98877ce 100644
--- a/hypervisor/include/arch/x86/irq.h
+++ b/hypervisor/include/arch/x86/irq.h
@@ -47,6 +47,13 @@ struct intr_excp_ctx {
 	uint64_t ss;
 };
 
+typedef void (*smp_call_func_t)(void *data);
+struct smp_call_info_data {
+	smp_call_func_t func;
+	void *data;
+};
+
+void smp_call_function(uint64_t mask, smp_call_func_t func, void *data);
 int handle_level_interrupt_common(struct irq_desc *desc,
 	__unused void *handler_data);
 int common_handler_edge(struct irq_desc *desc, __unused void *handler_data);
diff --git a/hypervisor/include/arch/x86/per_cpu.h b/hypervisor/include/arch/x86/per_cpu.h
index 758f9c250..5d2655c6a 100644
--- a/hypervisor/include/arch/x86/per_cpu.h
+++ b/hypervisor/include/arch/x86/per_cpu.h
@@ -46,6 +46,7 @@ struct per_cpu_region {
 	uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
 	char logbuf[LOG_MESSAGE_MAX_SIZE];
 	uint8_t lapic_id;
+	struct smp_call_info_data smp_call_info;
 } __aligned(CPU_PAGE_SIZE); //per_cpu_region size aligned with CPU_PAGE_SIZE
 
 extern struct per_cpu_region *per_cpu_data_base_ptr;
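A standalone sketch (illustration only; standard C11 atomics, not ACRN code)
of the handshake this patch relies on: the caller claims smp_call_mask with a
compare-and-swap so that calls are serialized, each target clears its own bit
from the notification IPI handler, and wait_sync_change() is assumed to spin
until the mask drops back to the wanted value. The real helper lives elsewhere
in the tree and may differ in detail.

#include <stdatomic.h>
#include <stdint.h>

/* models the smp_call_mask handshake used by smp_call_function() */
static _Atomic uint64_t call_mask;

/* caller side: claim the mask, notify the targets, wait for all bits to clear */
void sketch_caller(uint64_t targets)
{
	uint64_t expected = 0UL;

	/* serialize with any in-flight call: only one caller may own the mask */
	while (!atomic_compare_exchange_weak(&call_mask, &expected, targets))
		expected = 0UL;

	/* ... the patch sends the notification IPI to the targets here ... */

	/* wait_sync_change(&smp_call_mask, 0UL) is assumed to behave like this */
	while (atomic_load(&call_mask) != 0UL)
		;	/* spin; the real helper likely pauses the cpu */
}

/* target side (IPI handler): run the registered callback, then clear own bit */
void sketch_target(uint16_t pcpu_id)
{
	/* ... func(data) for this pcpu runs here in the patch ... */
	atomic_fetch_and(&call_mask, ~(1UL << pcpu_id));
}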