hv: refine guest.h

- move functions related to vmexit from `guest.h` to `vmexit.h`
- move functions related to MSR emulation from `guest.h` to `msr.h`
- move functions related to vm_sw_loader from `guest.h` to `vm.h`
- move function `vmx_vmrun` from `guest.h` to `vcpu.h`
- move macros related to vcpu from `guest.h` to `vcpu.h`
- move macro `E820_MAX_ENTRIES` from `guest.h` to `e820.h`
- move macros related to irq from `guest.h` to `irq.h`
- rename `guest.h` to `guest_memory.h` (a consumer-side include sketch follows this list)
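
For files that used to pull everything from `guest.h`, the visible effect is the include list: the guest memory helpers now come from `guest_memory.h`, and the relocated declarations come from the headers named above. A minimal, hypothetical consumer sketch (the consumer file itself and the comment annotations are illustrative, not part of this change):

/* Hypothetical consumer that previously included only <guest.h>. */
#include <guest_memory.h>  /* gva2gpa(), enum vm_paging_mode */
#include <vcpu.h>          /* ACRN_REQUEST_* flags, foreach_vcpu(), vmx_vmrun() */
#include <vm.h>            /* VM_LAUNCH/VM_RESUME, vm_sw_loader hook */
#include <vmexit.h>        /* rdmsr_vmexit_handler(), wrmsr_vmexit_handler() */
#include <msr.h>           /* init_msr_emulation(), MSR bitmap updates */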

Tracked-On: #2503
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Shiqing Gao authored 2019-02-03 10:18:05 +08:00, committed by Eddie Dong
parent 827d24ccb9
commit b5f4757650
10 changed files with 85 additions and 77 deletions

View File

@@ -793,7 +793,7 @@ WARN_LOGFILE =
# Note: If this tag is empty the current directory is searched.
INPUT = custom-doxygen/mainpage.md \
../hypervisor/include/arch/x86/guest/guest.h \
../hypervisor/include/arch/x86/guest/guest_memory.h \
../hypervisor/include/arch/x86/guest/ept.h \
../hypervisor/include/arch/x86/mmu.h \
../hypervisor/include/arch/x86/pgtable.h \

View File

@@ -6,7 +6,8 @@
#include <vmcs.h>
#include <msr.h>
#include <guest.h>
#include <guest_memory.h>
#include <vm.h>
#include <vcpu.h>
#include <cpu.h>
#include <security.h>

View File

@@ -15,6 +15,8 @@
#define E820_TYPE_ACPI_NVS 4U /* EFI 10 */
#define E820_TYPE_UNUSABLE 5U /* EFI 8 */
#define E820_MAX_ENTRIES 32U
/** Defines a single entry in an E820 memory map. */
struct e820_entry {
/** The base address of the memory range. */

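With `E820_MAX_ENTRIES` now defined next to `struct e820_entry`, the typical consumer is a fixed-size map bounded by that macro. A small illustrative sketch (the array and counter names below are assumptions, not code from this commit):

/* Illustrative fixed-size E820 map sized by the relocated macro. */
static struct e820_entry sketch_e820_map[E820_MAX_ENTRIES];
/* Number of entries of sketch_e820_map that are actually populated. */
static uint32_t sketch_e820_entries_nr = 0U;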
View File

@@ -4,72 +4,17 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
* @file guest.h
* @file guest_memory.h
*
* @brief Data transferring between hypervisor and VM
* @brief ACRN Memory Management
*/
#ifndef GUEST_H
#define GUEST_H
/* Defines for VM Launch and Resume */
#define VM_RESUME 0
#define VM_LAUNCH 1
#define ACRN_DBG_PTIRQ 6U
#define ACRN_DBG_IRQ 6U
#ifndef ASSEMBLER
#include <mmu.h>
#define foreach_vcpu(idx, vm, vcpu) \
for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
(idx) < (vm)->hw.created_vcpus; \
(idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)])) \
if (vcpu->state != VCPU_OFFLINE)
/*
* VCPU related APIs
*/
#define ACRN_REQUEST_EXCP 0U
#define ACRN_REQUEST_EVENT 1U
#define ACRN_REQUEST_EXTINT 2U
#define ACRN_REQUEST_NMI 3U
#define ACRN_REQUEST_EOI_EXIT_UPDATE 4U
#define ACRN_REQUEST_EPT_FLUSH 5U
#define ACRN_REQUEST_TRP_FAULT 6U
#define ACRN_REQUEST_VPID_FLUSH 7U /* flush vpid tlb */
#define E820_MAX_ENTRIES 32U
#define save_segment(seg, SEG_NAME) \
{ \
(seg).selector = exec_vmread16(SEG_NAME##_SEL); \
(seg).base = exec_vmread(SEG_NAME##_BASE); \
(seg).limit = exec_vmread32(SEG_NAME##_LIMIT); \
(seg).attr = exec_vmread32(SEG_NAME##_ATTR); \
}
#define load_segment(seg, SEG_NAME) \
{ \
exec_vmwrite16(SEG_NAME##_SEL, (seg).selector); \
exec_vmwrite(SEG_NAME##_BASE, (seg).base); \
exec_vmwrite32(SEG_NAME##_LIMIT, (seg).limit); \
exec_vmwrite32(SEG_NAME##_ATTR, (seg).attr); \
}
/* Define segments constants for guest */
#define REAL_MODE_BSP_INIT_CODE_SEL (0xf000U)
#define REAL_MODE_DATA_SEG_AR (0x0093U)
#define REAL_MODE_CODE_SEG_AR (0x009fU)
#define PROTECTED_MODE_DATA_SEG_AR (0xc093U)
#define PROTECTED_MODE_CODE_SEG_AR (0xc09bU)
#define REAL_MODE_SEG_LIMIT (0xffffU)
#define PROTECTED_MODE_SEG_LIMIT (0xffffffffU)
#define DR7_INIT_VALUE (0x400UL)
#define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
#define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */
/* Use # of paging level to identify paging mode */
enum vm_paging_mode {
PAGING_MODE_0_LEVEL = 0U, /* Flat */
@@ -86,22 +31,6 @@ int32_t gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa, uint32_t *e
enum vm_paging_mode get_vcpu_paging_mode(struct acrn_vcpu *vcpu);
int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu);
void init_msr_emulation(struct acrn_vcpu *vcpu);
uint32_t vmsr_get_guest_msr_index(uint32_t msr);
void update_msr_bitmap_x2apic_apicv(const struct acrn_vcpu *vcpu);
void update_msr_bitmap_x2apic_passthru(const struct acrn_vcpu *vcpu);
struct run_context;
int32_t vmx_vmrun(struct run_context *context, int32_t ops, int32_t ibrs);
int32_t general_sw_loader(struct acrn_vm *vm);
typedef int32_t (*vm_sw_loader_t)(struct acrn_vm *vm);
extern vm_sw_loader_t vm_sw_loader;
/**
* @brief Data transfering between hypervisor and VM
*

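After the split, `guest_memory.h` keeps only the guest address-space helpers such as `gva2gpa()` and `get_vcpu_paging_mode()`. A hedged usage sketch based on the `gva2gpa()` prototype visible in the hunk header above (a zero return is assumed to mean the page walk succeeded; the helper and its `pr_err()` logging are illustrative only):

/* Illustrative helper: translate a guest-virtual address and report a
 * failed walk; err_code would normally feed the fault-injection path. */
static int32_t sketch_translate_gva(struct acrn_vcpu *vcpu, uint64_t gva)
{
        uint64_t gpa = 0UL;
        uint32_t err_code = 0U;
        int32_t ret;

        ret = gva2gpa(vcpu, gva, &gpa, &err_code);
        if (ret != 0) {
                pr_err("gva 0x%lx not mapped, err_code 0x%x", gva, err_code);
        }
        return ret;
}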
View File

@@ -50,7 +50,7 @@
#ifndef ASSEMBLER
#include <guest.h>
#include <guest_memory.h>
#include <virtual_cr.h>
/**
@@ -60,6 +60,52 @@
* @{
*/
/*
* VCPU related APIs
*/
#define ACRN_REQUEST_EXCP 0U
#define ACRN_REQUEST_EVENT 1U
#define ACRN_REQUEST_EXTINT 2U
#define ACRN_REQUEST_NMI 3U
#define ACRN_REQUEST_EOI_EXIT_UPDATE 4U
#define ACRN_REQUEST_EPT_FLUSH 5U
#define ACRN_REQUEST_TRP_FAULT 6U
#define ACRN_REQUEST_VPID_FLUSH 7U /* flush vpid tlb */
#define save_segment(seg, SEG_NAME) \
{ \
(seg).selector = exec_vmread16(SEG_NAME##_SEL); \
(seg).base = exec_vmread(SEG_NAME##_BASE); \
(seg).limit = exec_vmread32(SEG_NAME##_LIMIT); \
(seg).attr = exec_vmread32(SEG_NAME##_ATTR); \
}
#define load_segment(seg, SEG_NAME) \
{ \
exec_vmwrite16(SEG_NAME##_SEL, (seg).selector); \
exec_vmwrite(SEG_NAME##_BASE, (seg).base); \
exec_vmwrite32(SEG_NAME##_LIMIT, (seg).limit); \
exec_vmwrite32(SEG_NAME##_ATTR, (seg).attr); \
}
/* Define segments constants for guest */
#define REAL_MODE_BSP_INIT_CODE_SEL (0xf000U)
#define REAL_MODE_DATA_SEG_AR (0x0093U)
#define REAL_MODE_CODE_SEG_AR (0x009fU)
#define PROTECTED_MODE_DATA_SEG_AR (0xc093U)
#define PROTECTED_MODE_CODE_SEG_AR (0xc09bU)
#define REAL_MODE_SEG_LIMIT (0xffffU)
#define PROTECTED_MODE_SEG_LIMIT (0xffffffffU)
#define DR7_INIT_VALUE (0x400UL)
#define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
#define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */
#define foreach_vcpu(idx, vm, vcpu) \
for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
(idx) < (vm)->hw.created_vcpus; \
(idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)])) \
if (vcpu->state != VCPU_OFFLINE)
enum vcpu_state {
VCPU_INIT,
VCPU_RUNNING,
@@ -304,6 +350,8 @@ vcpu_vlapic(struct acrn_vcpu *vcpu)
void default_idle(__unused struct sched_object *obj);
void vcpu_thread(struct sched_object *obj);
int32_t vmx_vmrun(struct run_context *context, int32_t ops, int32_t ibrs);
/* External Interfaces */
/**

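With `foreach_vcpu()` relocated next to the rest of the vCPU interface, a typical iteration over a VM's online vCPUs looks like the sketch below (the helper itself is hypothetical; `pr_dbg()` and the `vm_id`/`vcpu_id` fields are assumed from the surrounding code base):

/* Illustrative walk over the created, online vCPUs of a VM using the
 * foreach_vcpu() macro shown above; the trailing if-clause of the macro
 * skips vCPUs whose state is VCPU_OFFLINE. */
static void sketch_dump_online_vcpus(struct acrn_vm *vm)
{
        uint16_t i;
        struct acrn_vcpu *vcpu;

        foreach_vcpu(i, vm, vcpu) {
                pr_dbg("vm%hu: vcpu%hu is online", vm->vm_id, vcpu->vcpu_id);
        }
}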
View File

@@ -6,6 +6,13 @@
#ifndef VM_H_
#define VM_H_
/* Defines for VM Launch and Resume */
#define VM_RESUME 0
#define VM_LAUNCH 1
#ifndef ASSEMBLER
#include <bsp_extern.h>
#include <vpci.h>
#include <page.h>
@@ -282,6 +289,11 @@ uint16_t find_free_vm_id(void);
struct acrn_vm *get_vm_from_vmid(uint16_t vm_id);
struct acrn_vm *get_sos_vm(void);
int32_t general_sw_loader(struct acrn_vm *vm);
typedef int32_t (*vm_sw_loader_t)(struct acrn_vm *vm);
extern vm_sw_loader_t vm_sw_loader;
#ifdef CONFIG_PARTITION_MODE
/*
* Default e820 mem map:
@@ -309,4 +321,6 @@ static inline bool is_lapic_pt(const struct acrn_vm *vm)
return ((vm_configs[vm->vm_id].guest_flags & LAPIC_PASSTHROUGH) != 0U);
}
#endif /* !ASSEMBLER */
#endif /* VM_H_ */

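`vm_sw_loader` is a function-pointer hook with `general_sw_loader()` as the stock implementation, so a boot/init path is expected to install it before the guest image is loaded. A hedged sketch of that wiring (the init function below is hypothetical, not part of this commit):

/* Hypothetical init step: fall back to the default software loader if no
 * platform-specific loader has been installed. */
static void sketch_init_vm_sw_loader(void)
{
        if (vm_sw_loader == NULL) {
                vm_sw_loader = general_sw_loader;
        }
}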
View File

@@ -15,6 +15,9 @@ struct vm_exit_dispatch {
int32_t vmexit_handler(struct acrn_vcpu *vcpu);
int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu);
extern void vm_exit(void);
static inline uint64_t
vm_exit_qualification_bit_mask(uint64_t exit_qual, uint32_t msb, uint32_t lsb)

View File

@@ -41,7 +41,7 @@
#include <assign.h>
#include <vtd.h>
#include <guest.h>
#include <guest_memory.h>
#include <vmexit.h>
#include <cpufeatures.h>

View File

@@ -16,6 +16,9 @@
#include <common/irq.h>
#define ACRN_DBG_PTIRQ 6U
#define ACRN_DBG_IRQ 6U
/* vectors range for dynamic allocation, usually for devices */
#define VECTOR_DYNAMIC_START 0x20U
#define VECTOR_DYNAMIC_END 0xDFU

View File

@@ -592,6 +592,14 @@ static inline bool is_x2apic_write_only_msr(uint32_t msr)
}
return ret;
}
struct acrn_vcpu;
void init_msr_emulation(struct acrn_vcpu *vcpu);
uint32_t vmsr_get_guest_msr_index(uint32_t msr);
void update_msr_bitmap_x2apic_apicv(const struct acrn_vcpu *vcpu);
void update_msr_bitmap_x2apic_passthru(const struct acrn_vcpu *vcpu);
#endif /* ASSEMBLER */
/* 5 high-order bits in every field are reserved */