From 90eca21d16492e14328a0f7c90bf9bc150b75fc9 Mon Sep 17 00:00:00 2001 From: Yin Fengwei Date: Sat, 29 Sep 2018 21:36:16 +0800 Subject: [PATCH] hv: simplify the function init_guest_state The vcpu state is initialized outside of init_guest_state: - SOS BSP state is initialized in SOS loader - UOS BSP state is initialized in UOS loader - AP state is initialized during SIPI signal emulation We could make init_guest_state only update the vcpu state to VMCS structure. Tracked-On: #1231 Signed-off-by: Yin Fengwei Acked-by: Eddie Dong --- hypervisor/arch/x86/guest/vcpu.c | 12 +++ hypervisor/arch/x86/vmx.c | 105 +---------------------- hypervisor/include/arch/x86/guest/vcpu.h | 1 - 3 files changed, 14 insertions(+), 104 deletions(-) diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c index 2a27d9b73..8e3475a97 100644 --- a/hypervisor/arch/x86/guest/vcpu.c +++ b/hypervisor/arch/x86/guest/vcpu.c @@ -207,6 +207,18 @@ void set_vcpu_regs(struct vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs) ectx->ldtr.selector = vcpu_regs->ldt_sel; ectx->tr.selector = vcpu_regs->tr_sel; + /* NOTE: * This is to set the ldtr and tr to default value. * If the set_vcpu_regs is used not only for vcpu state * initialization, this part of code needs to be revised.
+ */ + ectx->ldtr.base = 0UL; + ectx->tr.base = 0UL; + ectx->ldtr.limit = 0xFFFFU; + ectx->tr.limit = 0xFFFFU; + ectx->ldtr.attr = LDTR_AR; + ectx->tr.attr = TR_AR; + memcpy_s(&(ctx->guest_cpu_regs), sizeof(struct acrn_gp_regs), &(vcpu_regs->gprs), sizeof(struct acrn_gp_regs)); diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c index 864a34a60..dd9e6b19c 100644 --- a/hypervisor/arch/x86/vmx.c +++ b/hypervisor/arch/x86/vmx.c @@ -545,84 +545,6 @@ void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4) cr4, cr4_vmx); } -static void init_guest_context_real(struct vcpu *vcpu) -{ - struct ext_context *ectx = - &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx; - struct segment_sel *seg; - - /* cs, ss, ds, es, fs, gs; cs will be override later. */ - for (seg = &(ectx->cs); seg <= &(ectx->gs); seg++) { - seg->selector = 0U; - seg->base = 0UL; - seg->limit = 0xFFFFU; - seg->attr = REAL_MODE_DATA_SEG_AR; - } - - if (is_vcpu_bsp(vcpu)) { - /* There are two cases that we will start bsp in real - * mode: - * 1. UOS start - * 2. SOS resume from S3 - * - * For 1, DM will set correct entry_addr. - * For 2, SOS resume caller will set entry_addr to - * SOS wakeup vec. According to ACPI FACS spec, - * wakeup vec should be < 1MB. So we use < 1MB - * to detect whether it's resume from S3 and we - * setup CS:IP to - * (wakeup_vec >> 4):(wakeup_vec & 0x000F) - * if it's resume from S3. 
- * - */ - if ((uint64_t)vcpu->entry_addr < 0x100000UL) { - ectx->cs.selector = (uint16_t) - (((uint64_t)vcpu->entry_addr & 0xFFFF0UL) >> 4U); - ectx->cs.base = (uint64_t)ectx->cs.selector << 4U; - vcpu_set_rip(vcpu, (uint64_t)vcpu->entry_addr & 0x0FUL); - } else { - /* BSP is initialized with real mode */ - ectx->cs.selector = REAL_MODE_BSP_INIT_CODE_SEL; - /* For unrestricted guest, it is able - * to set a high base address - */ - ectx->cs.base = (uint64_t)vcpu->entry_addr & 0xFFFF0000UL; - vcpu_set_rip(vcpu, 0x0000FFF0UL); - } - } else { - /* AP is initialized with real mode - * and CS value is left shift 8 bits from sipi vector. - */ - ectx->cs.selector = (uint16_t)(vcpu->arch_vcpu.sipi_vector << 8U); - ectx->cs.base = (uint64_t)ectx->cs.selector << 4U; - } - ectx->cs.attr = REAL_MODE_CODE_SEG_AR; - - ectx->gdtr.base = 0UL; - ectx->gdtr.limit = 0xFFFFU; - ectx->idtr.base = 0UL; - ectx->idtr.limit = 0xFFFFU; -} - -/* only be called for UOS when bsp start from protected mode */ -static void init_guest_context_protect(struct vcpu *vcpu) -{ - struct ext_context *ectx = - &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx; - struct segment_sel *seg; - - ectx->gdtr.base = create_guest_init_gdt(vcpu->vm, &ectx->gdtr.limit); - for (seg = &(ectx->cs); seg <= &(ectx->gs); seg++) { - seg->base = 0UL; - seg->limit = 0xFFFFFFFFU; - seg->attr = PROTECTED_MODE_DATA_SEG_AR; - seg->selector = 0x18U; - } - ectx->cs.attr = PROTECTED_MODE_CODE_SEG_AR; - ectx->cs.selector = 0x10U; /* Linear code segment */ - vcpu_set_rip(vcpu, (uint64_t)vcpu->entry_addr); -} - /* rip, rsp, ia32_efer and rflags are written to VMCS in start_vcpu */ static void init_guest_vmx(struct vcpu *vcpu, uint64_t cr0, uint64_t cr3, uint64_t cr4) @@ -673,32 +595,9 @@ static void init_guest_state(struct vcpu *vcpu) { struct cpu_context *ctx = &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context]; - struct acrn_vcpu_regs* init_ctx = &vm0_boot_context; - enum vm_cpu_mode vcpu_mode = 
get_vcpu_mode(vcpu); - vcpu_set_rflags(vcpu, 0x2UL); /* Bit 1 is a active high reserved bit */ - - /* ldtr */ - ctx->ext_ctx.ldtr.selector = 0U; - ctx->ext_ctx.ldtr.base = 0UL; - ctx->ext_ctx.ldtr.limit = 0xFFFFU; - ctx->ext_ctx.ldtr.attr = LDTR_AR; - /* tr */ - ctx->ext_ctx.tr.selector = 0U; - ctx->ext_ctx.tr.base = 0UL; - ctx->ext_ctx.tr.limit = 0xFFFFU; - ctx->ext_ctx.tr.attr = TR_AR; - - if (vcpu_mode == CPU_MODE_REAL) { - init_guest_context_real(vcpu); - init_guest_vmx(vcpu, CR0_ET | CR0_NE, 0UL, 0UL); - } else if (is_vm0(vcpu->vm) && is_vcpu_bsp(vcpu)) { - init_guest_vmx(vcpu, init_ctx->cr0, init_ctx->cr3, - init_ctx->cr4 & ~CR4_VMXE); - } else { - init_guest_context_protect(vcpu); - init_guest_vmx(vcpu, CR0_ET | CR0_NE | CR0_PE, 0UL, 0UL); - } + init_guest_vmx(vcpu, ctx->run_ctx.cr0, ctx->ext_ctx.cr3, + ctx->run_ctx.cr4 & ~CR4_VMXE); } static void init_host_state(void) diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h index 9312bfaa0..db6f45b17 100644 --- a/hypervisor/include/arch/x86/guest/vcpu.h +++ b/hypervisor/include/arch/x86/guest/vcpu.h @@ -194,7 +194,6 @@ struct vcpu_arch { /* Information related to secondary / AP VCPU start-up */ enum vm_cpu_mode cpu_mode; uint8_t nr_sipi; - uint32_t sipi_vector; /* interrupt injection information */ uint64_t pending_req;