mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-07-07 12:29:48 +00:00
HV: Use the mwait instead of pause for cpu_idle
Currently the CPU executes the pause instruction while idle, which consumes more power. If mwait is supported, use monitor/mwait instead to enter a deep CPU C-state, which helps save power. Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
This commit is contained in:
parent
21bd1c8bd3
commit
a7706e0c39
@ -367,9 +367,34 @@ void stop_pcpus(void)
|
||||
wait_pcpus_offline(mask);
|
||||
}
|
||||
|
||||
static
|
||||
inline void asm_monitor(const uint64_t *addr, uint64_t ecx, uint64_t edx)
|
||||
{
|
||||
asm volatile("monitor\n" : : "a" (addr), "c" (ecx), "d" (edx));
|
||||
}
|
||||
|
||||
static
|
||||
inline void asm_mwait(uint64_t eax, uint64_t ecx)
|
||||
{
|
||||
asm volatile("mwait\n" : : "a" (eax), "c" (ecx));
|
||||
}
|
||||
|
||||
void cpu_do_idle(void)
|
||||
{
|
||||
asm_pause();
|
||||
uint16_t pcpu_id = get_pcpu_id();
|
||||
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
|
||||
|
||||
if (ctx->mwait_flags) {
|
||||
CPU_IRQ_DISABLE();
|
||||
asm_monitor(&ctx->flags, 0UL, 0UL);
|
||||
if (!bitmap_test(NEED_RESCHEDULE, &ctx->flags))
|
||||
asm_mwait(0x60UL, 1UL);
|
||||
CPU_IRQ_ENABLE();
|
||||
} else {
|
||||
CPU_IRQ_ENABLE();
|
||||
asm_pause();
|
||||
CPU_IRQ_DISABLE();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -415,18 +440,6 @@ static void print_hv_banner(void)
|
||||
printf(boot_msg);
|
||||
}
|
||||
|
||||
static
|
||||
inline void asm_monitor(const uint64_t *addr, uint64_t ecx, uint64_t edx)
|
||||
{
|
||||
asm volatile("monitor\n" : : "a" (addr), "c" (ecx), "d" (edx));
|
||||
}
|
||||
|
||||
static
|
||||
inline void asm_mwait(uint64_t eax, uint64_t ecx)
|
||||
{
|
||||
asm volatile("mwait\n" : : "a" (eax), "c" (ecx));
|
||||
}
|
||||
|
||||
/* wait until *sync == wake_sync */
|
||||
void wait_sync_change(uint64_t *sync, uint64_t wake_sync)
|
||||
{
|
||||
|
@ -92,9 +92,7 @@ void default_idle(__unused struct sched_object *obj)
|
||||
} else if (need_shutdown_vm(pcpu_id)) {
|
||||
shutdown_vm_from_idle(pcpu_id);
|
||||
} else {
|
||||
CPU_IRQ_ENABLE();
|
||||
cpu_do_idle();
|
||||
CPU_IRQ_DISABLE();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -12,6 +12,12 @@
|
||||
#include <lapic.h>
|
||||
#include <schedule.h>
|
||||
#include <sprintf.h>
|
||||
#include <cpuid.h>
|
||||
#include <cpu_caps.h>
|
||||
|
||||
#define CPUID_MWAIT_LEAF 5
|
||||
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
|
||||
#define CPUID5_ECX_INTERRUPT_BREAK 0x2
|
||||
|
||||
static uint64_t pcpu_used_bitmap;
|
||||
|
||||
@ -20,7 +26,19 @@ void init_scheduler(void)
|
||||
struct sched_context *ctx;
|
||||
uint32_t i;
|
||||
uint16_t pcpu_nums = get_pcpu_nums();
|
||||
uint32_t mwait_flag = 0;
|
||||
|
||||
if (has_monitor_cap()) {
|
||||
uint32_t eax, ebx, ecx, edx;
|
||||
|
||||
mwait_flag = 1;
|
||||
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
|
||||
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
|
||||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
|
||||
!edx) {
|
||||
mwait_flag = 0;
|
||||
}
|
||||
}
|
||||
for (i = 0U; i < pcpu_nums; i++) {
|
||||
ctx = &per_cpu(sched_ctx, i);
|
||||
|
||||
@ -29,6 +47,7 @@ void init_scheduler(void)
|
||||
INIT_LIST_HEAD(&ctx->runqueue);
|
||||
ctx->flags = 0UL;
|
||||
ctx->curr_obj = NULL;
|
||||
ctx->mwait_flags = mwait_flag;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -28,12 +28,14 @@ struct sched_object {
|
||||
};
|
||||
|
||||
struct sched_context {
|
||||
uint64_t flags;
|
||||
uint64_t mwait_flags;
|
||||
uint64_t rserved[6];
|
||||
spinlock_t runqueue_lock;
|
||||
struct list_head runqueue;
|
||||
uint64_t flags;
|
||||
struct sched_object *curr_obj;
|
||||
spinlock_t scheduler_lock;
|
||||
};
|
||||
} __aligned(64);
|
||||
|
||||
void init_scheduler(void);
|
||||
void switch_to_idle(run_thread_t idle_thread);
|
||||
|
Loading…
Reference in New Issue
Block a user