hv: mod: do not use explicit arch name when including headers

Instead of "#include <x86/foo.h>", use "#include <asm/foo.h>".

In other words, we are adopting the same practice as the Linux kernel.
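For illustration only (cpu.h picked as an arbitrary example; the full list of
touched headers is in the diff below):

    #include <x86/cpu.h>    /* before */
    #include <asm/cpu.h>    /* after  */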

Tracked-On: #5920
Signed-off-by: Liang Yi <yi.liang@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Liang Yi
2021-04-23 15:50:57 +08:00
committed by wenlingz
parent f3305b6373
commit 688a41c290
205 changed files with 666 additions and 660 deletions


@@ -0,0 +1,162 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef ASSIGN_H
#define ASSIGN_H
#include <types.h>
#include <ptdev.h>
/**
* @file assign.h
*
* @brief public APIs for Passthrough Interrupt Remapping
*/
/**
* @brief VT-d
*
* @defgroup acrn_passthrough ACRN Passthrough
* @{
*/
/**
* @brief Acknowledge a virtual interrupt for a passthrough device.
*
* Acknowledge a virtual legacy interrupt for a passthrough device.
*
* @param[in] vm pointer to acrn_vm
* @param[in] virt_gsi virtual GSI number associated with the passthrough device
* @param[in] vgsi_ctlr INTX_CTLR_IOAPIC or INTX_CTLR_PIC
*
* @return None
*
* @pre vm != NULL
*
*/
void ptirq_intx_ack(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ctlr vgsi_ctlr);
/**
* @brief MSI/MSI-X remapping for a passthrough device.
*
* Main entry for PCI device assignment with MSI and MSI-X.
* MSI supports up to 8 vectors and MSI-X supports up to 1024 vectors.
*
* @param[in] vm pointer to acrn_vm
* @param[in] virt_bdf virtual bdf associated with the passthrough device
* @param[in] phys_bdf physical bdf associated with the passthrough device
* @param[in] entry_nr index of the incoming vector; entry_nr = 0 means the first vector
* @param[in] info structure used for MSI/MSI-X remapping
* @param[in] irte_idx caller can pass a valid IRTE index, otherwise, use INVALID_IRTE_ID
*
* @return
* - 0: on success
* - \p -ENODEV:
* - for SOS, the entry is already held by another
* - for UOS, no pre-held mapping was found.
*
* @pre vm != NULL
* @pre info != NULL
*
*/
int32_t ptirq_prepare_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
uint16_t entry_nr, struct msi_info *info, uint16_t irte_idx);
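/*
 * Illustrative call sketch (assumed context and local variable names; not a
 * definitive usage): remap the first MSI vector of a passthrough device when
 * the guest programs its virtual MSI capability.
 *
 *   struct msi_info info;
 *   int32_t ret;
 *
 *   ret = ptirq_prepare_msix_remap(vm, virt_bdf, phys_bdf, 0U, &info, INVALID_IRTE_ID);
 *
 * A non-zero return means the entry is already held by another (SOS) or no
 * pre-held mapping was found (UOS).
 */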
/**
* @brief INTx remapping for a passthrough device.
*
* Set up the remapping of the given virtual pin for the given vm.
* This is the main entry for PCI/Legacy device assignment with INTx, calling from vIOAPIC or vPIC.
*
* @param[in] vm pointer to acrn_vm
* @param[in] virt_gsi virtual GSI number associated with the passthrough device
* @param[in] vgsi_ctlr INTX_CTLR_IOAPIC or INTX_CTLR_PIC
*
* @return
* - 0: on success
* - \p -ENODEV:
* - for SOS, the entry is already held by another
* - for UOS, no pre-held mapping was found.
*
* @pre vm != NULL
*
*/
int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ctlr vgsi_ctlr);
/**
* @brief Add an interrupt remapping entry for INTx as pre-hold mapping.
*
* Except for sos_vm, the Device Model should call this function to pre-hold a ptdev INTx entry.
* The entry is identified by phys_pin: one entry per phys_pin.
* Currently, one phys_pin can only be held by one pin source (vPIC or vIOAPIC).
*
* @param[in] vm pointer to acrn_vm
* @param[in] virt_gsi virtual pin number associated with the passthrough device
* @param[in] phys_gsi physical pin number associated with the passthrough device
* @param[in] pic_pin true for pic, false for ioapic
*
* @return
* - 0: on success
* - \p -EINVAL: invalid virt_gsi value
* - \p -ENODEV: failed to add the remapping entry
*
* @pre vm != NULL
*
*/
int32_t ptirq_add_intx_remapping(struct acrn_vm *vm, uint32_t virt_gsi, uint32_t phys_gsi, bool pic_pin);
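/*
 * Illustrative sketch (assumed variable names; not a definitive usage): a
 * passthrough INTx of a non-SOS VM is first pre-held and later remapped when
 * the guest programs its vIOAPIC.
 *
 *   if (ptirq_add_intx_remapping(vm, virt_gsi, phys_gsi, false) == 0) {
 *       (void)ptirq_intx_pin_remap(vm, virt_gsi, INTX_CTLR_IOAPIC);
 *   }
 */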
/**
* @brief Remove an interrupt remapping entry for INTx.
*
* Deactivate and remove the mapping entry of the given virt_gsi for the given vm.
*
* @param[in] vm pointer to acrn_vm
* @param[in] virt_gsi virtual pin number associated with the passthrough device
* @param[in] pic_pin true for pic, false for ioapic
*
* @return None
*
* @pre vm != NULL
*
*/
void ptirq_remove_intx_remapping(const struct acrn_vm *vm, uint32_t virt_gsi, bool pic_pin);
/**
* @brief Remove interrupt remapping entry/entries for MSI/MSI-x.
*
* Remove the mapping of the given number of vectors of the given physical BDF for the given vm.
*
* @param[in] vm pointer to acrn_vm
* @param[in] phys_bdf physical bdf associated with the passthrough device
* @param[in] vector_count number of vectors
*
* @return None
*
* @pre vm != NULL
*
*/
void ptirq_remove_msix_remapping(const struct acrn_vm *vm, uint16_t phys_bdf, uint32_t vector_count);
/**
* @brief Remove all interrupt remappings for INTx which are defined in VM config.
*
* Deactivate and remove all mapping entries of the virt_gsis defined in the VM config for the given vm.
*
* @param[in] vm pointer to acrn_vm
*
* @return None
*
* @pre vm != NULL
*
*/
void ptirq_remove_configured_intx_remappings(const struct acrn_vm *vm);
/**
* @}
*/
#endif /* ASSIGN_H */


@@ -0,0 +1,169 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef EPT_H
#define EPT_H
#include <types.h>
typedef void (*pge_handler)(uint64_t *pgentry, uint64_t size);
/**
* Invalid HPA is defined for error checking,
* according to SDM vol.3A 4.1.4, the maximum
* host physical address width is 52
*/
#define INVALID_HPA (0x1UL << 52U)
#define INVALID_GPA (0x1UL << 52U)
struct acrn_vm;
/* External Interfaces */
/**
* @brief Check whether the GPA range is a valid guest GPA
*
* @param[in] vm the pointer that points to VM data structure
* @param[in] base The specified start guest physical address of guest
* physical memory region
* @param[in] size The size of guest physical memory region
*
* @retval true if the GPA range is a valid guest GPA, false otherwise.
*/
bool ept_is_valid_mr(struct acrn_vm *vm, uint64_t base, uint64_t size);
/**
* @brief Destroy the EPT page tables
*
* @param[inout] vm the pointer that points to VM data structure
*
* @return None
*/
void destroy_ept(struct acrn_vm *vm);
/**
* @brief Translating from guest-physical address to host-physical address
*
* @param[in] vm the pointer that points to VM data structure
* @param[in] gpa the specified guest-physical address
*
* @retval hpa the host physical address mapping to the \p gpa
* @retval INVALID_HPA the parameter gpa is not mapped
*/
uint64_t gpa2hpa(struct acrn_vm *vm, uint64_t gpa);
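/*
 * Illustrative usage sketch (assumed context; not a definitive usage):
 * translate a guest physical address and check for an unmapped GPA before
 * using the result.
 *
 *   uint64_t hpa = gpa2hpa(vm, gpa);
 *
 * If hpa == INVALID_HPA, the gpa is not mapped in this VM's EPT and the
 * result must not be used as a host address.
 */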
/**
* @brief Translating from guest-physical address to host-physical address
*
* @param[in] vm the pointer that points to VM data structure
* @param[in] gpa the specified guest-physical address
* @param[out] size the pointer that returns the page size of
* the page in which the gpa is
*
* @retval hpa the host physical address mapping to the \p gpa
* @retval INVALID_HPA the parameter gpa is not mapped
*/
uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size);
/**
* @brief Translating from host-physical address to guest-physical address for SOS_VM
*
* @param[in] hpa the specified host-physical address
*
* @pre the gpa and hpa are identity-mapped in the SOS.
*/
uint64_t sos_vm_hpa2gpa(uint64_t hpa);
/**
* @brief Guest-physical memory region mapping
*
* @param[in] vm the pointer that points to VM data structure
* @param[in] pml4_page The physical address of the EPTP
* @param[in] hpa The specified start host physical address of host
* physical memory region that GPA will be mapped
* @param[in] gpa The specified start guest physical address of guest
* physical memory region that needs to be mapped
* @param[in] size The size of guest physical memory region that needs
* to be mapped
* @param[in] prot_orig The specified memory access right and memory type
*
* @return None
*/
void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t hpa,
uint64_t gpa, uint64_t size, uint64_t prot_orig);
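/*
 * Illustrative call sketch (not a definitive usage; EPT_RD/EPT_WR/EPT_EXE/
 * EPT_WB are assumed to be the access-right and memory-type bits provided by
 * the pgtable layer): map one normal-memory region of a guest into its
 * normal-world EPT.
 *
 *   ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, hpa, gpa, size,
 *              EPT_RD | EPT_WR | EPT_EXE | EPT_WB);
 */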
/**
* @brief Guest-physical memory page access right or memory type updating
*
* @param[in] vm the pointer that points to VM data structure
* @param[in] pml4_page The physical address of the EPTP
* @param[in] gpa The specified start guest physical address of guest
* physical memory region whose mapping needs to be updated
* @param[in] size The size of guest physical memory region
* @param[in] prot_set The specified memory access right and memory type
* that will be set
* @param[in] prot_clr The specified memory access right and memory type
* that will be cleared
*
* @return None
*/
void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
uint64_t size, uint64_t prot_set, uint64_t prot_clr);
/**
* @brief Guest-physical memory region unmapping
*
* @param[in] vm the pointer that points to VM data structure
* @param[in] pml4_page The physical address of the EPTP
* @param[in] gpa The specified start guest physical address of guest
* physical memory region whose mapping needs to be deleted
* @param[in] size The size of guest physical memory region
*
* @return None
*
* @pre [gpa,gpa+size) has been mapped into host physical memory region
*/
void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
uint64_t size);
/**
* @brief Flush address space from the page entry
*
* @param[in] pge the pointer that points to the page entry
*
* @param[in] size the size of the page
*
* @return None
*/
void ept_flush_leaf_page(uint64_t *pge, uint64_t size);
/**
* @brief Get EPT pointer of the vm
*
* @param[in] vm the pointer that points to VM data structure
*
* @return If the current context of the vm is SECURE_WORLD, return the EPT pointer of
* the secure world; otherwise return the EPT pointer of the normal world.
*/
void *get_ept_entry(struct acrn_vm *vm);
/**
* @brief Walking through EPT table
*
* @param[in] vm the pointer that points to VM data structure
* @param[in] cb the walk_ept_table callback; it is invoked for each present
* page entry found in the EPT, and it receives the page entry and the
* page size as parameters.
*
* @return None
*/
void walk_ept_table(struct acrn_vm *vm, pge_handler cb);
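/*
 * Illustrative pge_handler sketch (assumed names; not a definitive usage):
 * accumulate the amount of guest memory currently mapped by visiting every
 * present leaf entry.
 *
 *   static uint64_t mapped_bytes;
 *
 *   static void add_mapped_range(uint64_t *pgentry, uint64_t size)
 *   {
 *       (void)pgentry;
 *       mapped_bytes += size;
 *   }
 *
 *   walk_ept_table(vm, add_mapped_range);
 */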
/**
* @brief EPT misconfiguration handling
*
* @param[in] vcpu the pointer that points to vcpu data structure
*
* @retval -EINVAL failed to handle the EPT misconfig
* @retval 0 successfully handled the EPT misconfig
*/
int32_t ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu);
void init_ept_pgtable(struct pgtable *table, uint16_t vm_id);
void reserve_buffer_for_ept_pages(void);
#endif /* EPT_H */


@@ -0,0 +1,118 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
* @file guest_memory.h
*
* @brief ACRN Memory Management
*/
#ifndef GUEST_H
#define GUEST_H
#ifndef ASSEMBLER
#include <types.h>
struct acrn_vcpu;
struct acrn_vm;
/* Use # of paging level to identify paging mode */
enum vm_paging_mode {
PAGING_MODE_0_LEVEL = 0U, /* Flat */
PAGING_MODE_2_LEVEL = 2U, /* 32bit paging, 2-level */
PAGING_MODE_3_LEVEL = 3U, /* PAE paging, 3-level */
PAGING_MODE_4_LEVEL = 4U, /* 64bit paging, 4-level */
PAGING_MODE_NUM,
};
/*
* VM related APIs
*/
int32_t gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa, uint32_t *err_code);
enum vm_paging_mode get_vcpu_paging_mode(struct acrn_vcpu *vcpu);
/* gpa --> hpa -->hva */
void *gpa2hva(struct acrn_vm *vm, uint64_t x);
/**
* @brief Data transfer between the hypervisor and a VM
*
* @defgroup acrn_mem ACRN Memory Management
* @{
*/
/**
* @brief Copy data from VM GPA space to HV address space
*
* @param[in] vm The pointer that points to VM data structure
* @param[out] h_ptr The pointer to the start HV address of the HV memory
* region into which the data will be copied
* @param[in] gpa The start GPA of the guest memory region from which the
* data is copied
* @param[in] size The size (in bytes) of the data to copy
*
* @pre The caller (guest) should make sure the gpa range is contiguous.
* - a gpa passed via a hypercall input that comes from the kernel stack is
* contiguous; kernel stacks allocated from vmap are not supported
* - for other gpa values from hypercall parameters, VHM should make sure
* they are contiguous
* @pre Pointer vm is non-NULL
*/
int32_t copy_from_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
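/*
 * Illustrative usage sketch (the structure and variable names are
 * assumptions; not a definitive usage): read a hypercall parameter structure
 * that the guest placed at param_gpa.
 *
 *   struct some_param param;
 *
 *   if (copy_from_gpa(vm, &param, param_gpa, sizeof(param)) != 0) {
 *       return -EINVAL;
 *   }
 */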
/**
* @brief Copy data from HV address space to VM GPA space
*
* @param[in] vm The pointer that points to VM data structure
* @param[in] h_ptr The pointer to the start HV address of the HV memory
* region from which the data is copied
* @param[in] gpa The start GPA of the guest memory region into which the
* data will be copied
* @param[in] size The size (in bytes) of the data to copy
*
* @pre The caller (guest) should make sure the gpa range is contiguous.
* - a gpa passed via a hypercall input that comes from the kernel stack is
* contiguous; kernel stacks allocated from vmap are not supported
* - for other gpa values from hypercall parameters, VHM should make sure
* they are contiguous
* @pre Pointer vm is non-NULL
*/
int32_t copy_to_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
/**
* @brief Copy data from VM GVA space to HV address space
*
* @param[in] vcpu The pointer that points to vcpu data structure
* @param[out] h_ptr The pointer to the start HV address of the HV buffer
* into which the data will be copied
* @param[in] gva The start GVA address of GVA memory region which data
* is stored in
* @param[in] size The size (bytes) of GVA memory region which data is
* stored in
* @param[out] err_code The page fault flags
* @param[out] fault_addr The GVA address that causes a page fault
*/
int32_t copy_from_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr);
/**
* @brief Copy data to VM GVA space from HV address space
*
* @param[in] vcpu The pointer that points to vcpu data structure
* @param[in] h_ptr The pointer to the start HV address of the HV buffer
* from which the data is copied
* @param[in] gva The start GVA of the guest memory region into which the
* data will be copied
* @param[in] size The size (bytes) of GVA memory region which data is
* stored in
* @param[out] err_code The page fault flags
* @param[out] fault_addr The GVA address that causes a page fault
*/
int32_t copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr);
/**
* @}
*/
#endif /* !ASSEMBLER */
#endif /* GUEST_H*/


@@ -0,0 +1,13 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef GUEST_PM_H
#define GUEST_PM_H
int32_t validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl);
void init_guest_pm(struct acrn_vm *vm);
#endif /* GUEST_PM_H */


@@ -0,0 +1,65 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef HYPERV_H
#define HYPERV_H
#include <asm/guest/vcpuid.h>
/* Hyper-V MSR numbers */
#define HV_X64_MSR_GUEST_OS_ID 0x40000000U
#define HV_X64_MSR_HYPERCALL 0x40000001U
#define HV_X64_MSR_VP_INDEX 0x40000002U
#define HV_X64_MSR_TIME_REF_COUNT 0x40000020U
#define HV_X64_MSR_REFERENCE_TSC 0x40000021U
union hyperv_ref_tsc_page_msr {
uint64_t val64;
struct {
uint64_t enabled:1;
uint64_t rsvdp:11;
uint64_t gpfn:52;
};
};
union hyperv_hypercall_msr {
uint64_t val64;
struct {
uint64_t enabled:1;
uint64_t locked:1;
uint64_t rsvdp:10;
uint64_t gpfn:52;
};
};
union hyperv_guest_os_id_msr {
uint64_t val64;
struct {
uint64_t build_number:16;
uint64_t service_version:8;
uint64_t minor_version:8;
uint64_t major_version:8;
uint64_t os_id:8;
uint64_t vendor_id:15;
uint64_t os_type:1;
};
};
struct acrn_hyperv {
union hyperv_hypercall_msr hypercall_page;
union hyperv_guest_os_id_msr guest_os_id;
union hyperv_ref_tsc_page_msr ref_tsc_page;
uint64_t tsc_scale;
uint64_t tsc_offset;
};
int32_t hyperv_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval);
int32_t hyperv_rdmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *rval);
void hyperv_init_time(struct acrn_vm *vm);
void hyperv_init_vcpuid_entry(uint32_t leaf, uint32_t subleaf, uint32_t flags,
struct vcpuid_entry *entry);
#endif


@@ -0,0 +1,98 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef INSTR_EMUL_H
#define INSTR_EMUL_H
#include <types.h>
#include <asm/cpu.h>
#include <asm/guest/guest_memory.h>
struct acrn_vcpu;
struct instr_emul_vie_op {
uint8_t op_type; /* type of operation (e.g. MOV) */
uint16_t op_flags;
};
#define VIE_PREFIX_SIZE 4U
#define VIE_INST_SIZE 15U
struct instr_emul_vie {
uint8_t inst[VIE_INST_SIZE]; /* instruction bytes */
uint8_t num_valid; /* size of the instruction */
uint8_t num_processed;
uint8_t addrsize:4, opsize:4; /* address and operand sizes */
uint8_t rex_w:1, /* REX prefix */
rex_r:1,
rex_x:1,
rex_b:1,
rex_present:1,
repz_present:1, /* REP/REPE/REPZ prefix */
repnz_present:1, /* REPNE/REPNZ prefix */
opsize_override:1, /* Operand size override */
addrsize_override:1, /* Address size override */
seg_override:1; /* Segment override */
uint8_t mod:2, /* ModRM byte */
reg:4,
rm:4;
uint8_t ss:2, /* SIB byte */
index:4,
base:4;
uint8_t disp_bytes;
uint8_t imm_bytes;
uint8_t scale;
enum cpu_reg_name base_register; /* CPU_REG_xyz */
enum cpu_reg_name index_register; /* CPU_REG_xyz */
enum cpu_reg_name segment_register; /* CPU_REG_xyz */
int64_t displacement; /* optional addr displacement */
int64_t immediate; /* optional immediate operand */
uint8_t decoded; /* set to 1 if successfully decoded */
uint8_t opcode;
struct instr_emul_vie_op op; /* opcode description */
uint64_t dst_gpa; /* saved dst operand gpa. Only for movs */
uint64_t gva; /* saved gva for instruction emulation */
};
struct instr_emul_ctxt {
struct instr_emul_vie vie;
};
int32_t emulate_instruction(struct acrn_vcpu *vcpu);
int32_t decode_instruction(struct acrn_vcpu *vcpu);
bool is_current_opcode_xchg(struct acrn_vcpu *vcpu);
#endif


@@ -0,0 +1,14 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SPLITLOCK_H_
#define SPLITLOCK_H_
void vcpu_kick_splitlock_emulation(struct acrn_vcpu *cur_vcpu);
void vcpu_complete_splitlock_emulation(struct acrn_vcpu *cur_vcpu);
int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception);
#endif /* SPLITLOCK_H_ */


@@ -0,0 +1,130 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef TRUSTY_H_
#define TRUSTY_H_
#include <acrn_hv_defs.h>
#include <asm/seed.h>
#define RPMB_MAX_PARTITION_NUMBER 6U
#define MMC_PROD_NAME_WITH_PSN_LEN 15U
#define TRUSTY_RAM_SIZE (16UL * 1024UL * 1024UL) /* 16 MB for now */
/* Trusty EPT rebase gpa: 511G */
#define TRUSTY_EPT_REBASE_GPA (511UL * 1024UL * 1024UL * 1024UL)
#define NON_TRUSTY_PDPT_ENTRIES 511U
struct acrn_vcpu;
struct acrn_vm;
/* Structure of key info */
struct trusty_key_info {
uint32_t size_of_this_struct;
/* version info:
0: baseline structure
1: add ** new field
*/
uint32_t version;
/* platform:
0: Dummy (fake secret)
1: APL (APL + ABL)
2: ICL (ICL + SBL)
3: ACRN (APL|ICL + SBL + ACRN)
4: Brillo (Android Things)
*/
uint32_t platform;
/* flags info:
Bit 0: manufacturing state (0:manufacturing done;
1:in manufacturing mode)
Bit 1: secure boot state (0:disabled; 1: enabled)
Bit 2: test seeds (ICL only - 0:production seeds; 1: test seeds)
other bits all reserved as 0
*/
uint32_t flags;
/* Keep 64-bit align */
uint32_t pad1;
/* Seed list, include useeds(user seeds) and dseed(device seeds) */
uint32_t num_seeds;
struct seed_info useed_list[BOOTLOADER_SEED_MAX_ENTRIES];
struct seed_info dseed_list[BOOTLOADER_SEED_MAX_ENTRIES];
/* For ICL+ */
/* RPMB keys. Currently HMAC-SHA256 is used in the RPMB spec
* and 256-bit (32 bytes) is enough. Hence only the lower 32 bytes of each
* entry will be used for now, but the higher 32 bytes are kept for future
* extension. Note that RPMB keys are already tied to the storage device
* serial number. If there are multiple RPMB partitions, then we will
* get multiple available RPMB keys. And if rpmb_key[n] is all zeros,
* then the n-th RPMB key is unavailable (either because there is no such
* RPMB partition, or because the OS loader doesn't want to share
* the n-th RPMB key with Trusty)
*/
uint8_t rpmb_key[RPMB_MAX_PARTITION_NUMBER][64];
/* 256-bit AES encryption key to encrypt/decrypt the attestation keybox.
* This key should be derived from a fixed key, which is the RPMB seed.
RPMB key (HMAC key) and this encryption key (AES key) are both
derived from the same RPMB seed.
*/
uint8_t attkb_enc_key[32];
/* For APL only */
/* The RPMB key is derived from dseed together with this serial number;
* for ICL+, CSE directly provides the rpmb_key, which is already
* tied to the serial number. This field is the concatenation of the eMMC
* product name with a string representation of the PSN
*/
char serial[MMC_PROD_NAME_WITH_PSN_LEN];
char pad2;
};
struct secure_world_memory {
/* The original secure world base address allocated by bootloader */
uint64_t base_gpa_in_uos;
/* The secure world base address of HPA */
uint64_t base_hpa;
/* Secure world runtime memory size */
uint64_t length;
};
struct secure_world_control {
/* Flag indicates Secure World's state */
struct {
/* sworld supporting: 0(unsupported), 1(supported) */
uint64_t supported : 1;
/* sworld running status: 0(inactive), 1(active) */
uint64_t active : 1;
/* sworld context saving status: 0(unsaved), 1(saved) */
uint64_t ctx_saved : 1;
uint64_t reserved : 61;
} flag;
/* Secure world memory structure */
struct secure_world_memory sworld_memory;
};
struct trusty_startup_param {
uint32_t size_of_this_struct;
uint32_t mem_size;
uint64_t tsc_per_ms;
uint64_t trusty_mem_base;
uint32_t reserved;
uint8_t padding[4];
};
void switch_world(struct acrn_vcpu *vcpu, int32_t next_world);
bool initialize_trusty(struct acrn_vcpu *vcpu, struct trusty_boot_param *boot_param);
void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem);
void save_sworld_context(struct acrn_vcpu *vcpu);
void restore_sworld_context(struct acrn_vcpu *vcpu);
#endif /* TRUSTY_H_ */


@@ -0,0 +1,26 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef UCODE_H
#define UCODE_H
struct ucode_header {
uint32_t header_ver;
uint32_t update_ver;
uint32_t date;
uint32_t proc_sig;
uint32_t checksum;
uint32_t loader_ver;
uint32_t proc_flags;
uint32_t data_size;
uint32_t total_size;
uint32_t reserved[3];
};
void acrn_update_ucode(struct acrn_vcpu *vcpu, uint64_t v);
uint64_t get_microcode_version(void);
#endif /* UCODE_H */


@@ -0,0 +1,756 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
* @file vcpu.h
*
* @brief public APIs for vcpu operations
*/
#ifndef VCPU_H
#define VCPU_H
#ifndef ASSEMBLER
#include <acrn_common.h>
#include <asm/guest/guest_memory.h>
#include <asm/guest/virtual_cr.h>
#include <asm/guest/vlapic.h>
#include <asm/guest/vmtrr.h>
#include <schedule.h>
#include <event.h>
#include <io_req.h>
#include <asm/msr.h>
#include <asm/cpu.h>
#include <asm/guest/instr_emul.h>
#include <asm/vmx.h>
/**
* @brief vcpu
*
* @defgroup acrn_vcpu ACRN vcpu
* @{
*/
/*
* VCPU related APIs
*/
/**
* @defgroup virt_int_injection Event ID supported for virtual interrupt injection
*
* This is a group that includes Event ID supported for virtual interrupt injection.
*
* @{
*/
/**
* @brief Request for exception injection
*/
#define ACRN_REQUEST_EXCP 0U
/**
* @brief Request for vLAPIC event
*/
#define ACRN_REQUEST_EVENT 1U
/**
* @brief Request for external interrupt from vPIC
*/
#define ACRN_REQUEST_EXTINT 2U
/**
* @brief Request for non-maskable interrupt
*/
#define ACRN_REQUEST_NMI 3U
/**
* @brief Request for EOI exit bitmap update
*/
#define ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE 4U
/**
* @brief Request for EPT flush
*/
#define ACRN_REQUEST_EPT_FLUSH 5U
/**
* @brief Request for triple fault
*/
#define ACRN_REQUEST_TRP_FAULT 6U
/**
* @brief Request for VPID TLB flush
*/
#define ACRN_REQUEST_VPID_FLUSH 7U
/**
* @brief Request for initializing VMCS
*/
#define ACRN_REQUEST_INIT_VMCS 8U
/**
* @brief Request to wait for WBINVD synchronization
*/
#define ACRN_REQUEST_WAIT_WBINVD 9U
/**
* @brief Request for split lock operation
*/
#define ACRN_REQUEST_SPLIT_LOCK 10U
/**
* @}
*/
/* End of virt_int_injection */
#define save_segment(seg, SEG_NAME) \
{ \
(seg).selector = exec_vmread16(SEG_NAME##_SEL); \
(seg).base = exec_vmread(SEG_NAME##_BASE); \
(seg).limit = exec_vmread32(SEG_NAME##_LIMIT); \
(seg).attr = exec_vmread32(SEG_NAME##_ATTR); \
}
#define load_segment(seg, SEG_NAME) \
{ \
exec_vmwrite16(SEG_NAME##_SEL, (seg).selector); \
exec_vmwrite(SEG_NAME##_BASE, (seg).base); \
exec_vmwrite32(SEG_NAME##_LIMIT, (seg).limit); \
exec_vmwrite32(SEG_NAME##_ATTR, (seg).attr); \
}
/* Define segments constants for guest */
#define REAL_MODE_BSP_INIT_CODE_SEL (0xf000U)
#define REAL_MODE_DATA_SEG_AR (0x0093U)
#define REAL_MODE_CODE_SEG_AR (0x009fU)
#define PROTECTED_MODE_DATA_SEG_AR (0xc093U)
#define PROTECTED_MODE_CODE_SEG_AR (0xc09bU)
#define REAL_MODE_SEG_LIMIT (0xffffU)
#define PROTECTED_MODE_SEG_LIMIT (0xffffffffU)
#define DR7_INIT_VALUE (0x400UL)
#define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
#define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */
#define foreach_vcpu(idx, vm, vcpu) \
for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
(idx) < (vm)->hw.created_vcpus; \
(idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)])) \
if (vcpu->state != VCPU_OFFLINE)
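/*
 * Illustrative usage of foreach_vcpu (a sketch, not a definitive usage):
 * iterate over all online vCPUs of a VM and kick each one.
 *
 *   uint16_t i;
 *   struct acrn_vcpu *vcpu;
 *
 *   foreach_vcpu(i, vm, vcpu) {
 *       kick_vcpu(vcpu);
 *   }
 */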
enum vcpu_state {
VCPU_OFFLINE = 0U,
VCPU_INIT,
VCPU_RUNNING,
VCPU_ZOMBIE,
};
enum vm_cpu_mode {
CPU_MODE_REAL,
CPU_MODE_PROTECTED,
CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};
#define VCPU_EVENT_IOREQ 0
#define VCPU_EVENT_VIRTUAL_INTERRUPT 1
#define VCPU_EVENT_SYNC_WBINVD 2
#define VCPU_EVENT_SPLIT_LOCK 3
#define VCPU_EVENT_NUM 4
enum reset_mode;
/* 2 worlds: 0 for Normal World, 1 for Secure World */
#define NR_WORLD 2
#define NORMAL_WORLD 0
#define SECURE_WORLD 1
#define NUM_WORLD_MSRS 2U
#define NUM_COMMON_MSRS 21U
#define NUM_GUEST_MSRS (NUM_WORLD_MSRS + NUM_COMMON_MSRS)
#define EOI_EXIT_BITMAP_SIZE 256U
struct guest_cpu_context {
struct run_context run_ctx;
struct ext_context ext_ctx;
/* per world MSRs, need isolation between secure and normal world */
uint32_t world_msrs[NUM_WORLD_MSRS];
};
/* Intel SDM 24.8.2, the address must be 16-byte aligned */
struct msr_store_entry {
uint32_t msr_index;
uint32_t reserved;
uint64_t value;
} __aligned(16);
enum {
MSR_AREA_TSC_AUX = 0,
MSR_AREA_IA32_PQR_ASSOC,
MSR_AREA_COUNT,
};
struct msr_store_area {
struct msr_store_entry guest[MSR_AREA_COUNT];
struct msr_store_entry host[MSR_AREA_COUNT];
uint32_t count; /* actual count of entries to be loaded/restored during VMEntry/VMExit */
};
struct iwkey {
/* 256-bit encryption key */
uint64_t encryption_key[4];
/* 128-bit integrity key */
uint64_t integrity_key[2];
};
struct acrn_vcpu_arch {
/* vmcs region for this vcpu, MUST be 4KB-aligned */
uint8_t vmcs[PAGE_SIZE];
/* MSR bitmap region for this vcpu, MUST be 4-Kbyte aligned */
uint8_t msr_bitmap[PAGE_SIZE];
/* per vcpu lapic */
struct acrn_vlapic vlapic;
/* pid MUST be 64 bytes aligned */
struct pi_desc pid __aligned(64);
struct acrn_vmtrr vmtrr;
int32_t cur_context;
struct guest_cpu_context contexts[NR_WORLD];
/* common MSRs, world_msrs[] is a subset of it */
uint64_t guest_msrs[NUM_GUEST_MSRS];
uint16_t vpid;
/* Holds the information needed for IRQ/exception handling. */
struct {
/* The number of the exception to raise. */
uint32_t exception;
/* The error number for the exception. */
uint32_t error;
} exception_info;
uint8_t lapic_mask;
bool irq_window_enabled;
bool emulating_lock;
uint32_t nrexits;
/* VCPU context state information */
uint32_t exit_reason;
uint32_t idt_vectoring_info;
uint64_t exit_qualification;
uint32_t proc_vm_exec_ctrls;
uint32_t inst_len;
/* Information related to secondary / AP VCPU start-up */
enum vm_cpu_mode cpu_mode;
uint8_t nr_sipi;
/* interrupt injection information */
uint64_t pending_req;
/* List of MSRS to be stored and loaded on VM exits or VM entries */
struct msr_store_area msr_area;
/* EOI_EXIT_BITMAP buffer, for the bitmap update */
uint64_t eoi_exit_bitmap[EOI_EXIT_BITMAP_SIZE >> 6U];
/* Keylocker */
struct iwkey IWKey;
bool cr4_kl_enabled;
/*
* Keylocker spec 4.4:
* Bit 0 - Status of most recent copy to or from IWKeyBackup.
* Bit 63:1 - Reserved.
*/
uint64_t iwkey_copy_status;
} __aligned(PAGE_SIZE);
struct acrn_vm;
struct acrn_vcpu {
uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
/* Architecture specific definitions for this VCPU */
struct acrn_vcpu_arch arch;
uint16_t vcpu_id; /* virtual identifier for VCPU */
struct acrn_vm *vm; /* Reference to the VM this VCPU belongs to */
volatile enum vcpu_state state; /* State of this VCPU */
struct thread_object thread_obj;
bool launched; /* Whether the vcpu is launched on target pcpu */
struct instr_emul_ctxt inst_ctxt;
struct io_request req; /* used by io/ept emulation */
uint64_t reg_cached;
uint64_t reg_updated;
struct sched_event events[VCPU_EVENT_NUM];
} __aligned(PAGE_SIZE);
struct vcpu_dump {
struct acrn_vcpu *vcpu;
char *str;
uint32_t str_max;
};
struct guest_mem_dump {
struct acrn_vcpu *vcpu;
uint64_t gva;
uint64_t len;
};
static inline bool is_vcpu_bsp(const struct acrn_vcpu *vcpu)
{
return (vcpu->vcpu_id == BSP_CPU_ID);
}
static inline enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu)
{
return vcpu->arch.cpu_mode;
}
/* do not update Guest RIP for next VM Enter */
static inline void vcpu_retain_rip(struct acrn_vcpu *vcpu)
{
(vcpu)->arch.inst_len = 0U;
}
static inline struct acrn_vlapic *vcpu_vlapic(struct acrn_vcpu *vcpu)
{
return &(vcpu->arch.vlapic);
}
/**
* @brief Get pointer to PI description.
*
* @param[in] vcpu Target vCPU
*
* @return pointer to PI description
*
* @pre vcpu != NULL
*/
static inline struct pi_desc *get_pi_desc(struct acrn_vcpu *vcpu)
{
return &(vcpu->arch.pid);
}
uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu);
void default_idle(__unused struct thread_object *obj);
void vcpu_thread(struct thread_object *obj);
int32_t vmx_vmrun(struct run_context *context, int32_t ops, int32_t ibrs);
/* External Interfaces */
/**
* @brief get vcpu register value
*
* Get target vCPU's general purpose registers value in run_context.
*
* @param[in] vcpu pointer to vcpu data structure
* @param[in] reg register of the vcpu
*
* @return the value of the register.
*/
uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg);
/**
* @brief set vcpu register value
*
* Set target vCPU's general purpose registers value in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] reg register of the vcpu
* @param[in] val the value to set the register of the vcpu to
*
* @return None
*/
void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val);
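/*
 * Illustrative usage sketch (CPU_REG_RDI and CPU_REG_RAX are assumed members
 * of enum cpu_reg_name; not a definitive usage): read a guest argument from
 * RDI and return a status in RAX.
 *
 *   uint64_t arg = vcpu_get_gpreg(vcpu, CPU_REG_RDI);
 *   vcpu_set_gpreg(vcpu, CPU_REG_RAX, ret);
 */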
/**
* @brief get vcpu RIP value
*
* Get & cache target vCPU's RIP in run_context.
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return the value of RIP.
*/
uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu RIP value
*
* Update target vCPU's RIP in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set RIP to
*
* @return None
*/
void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu RSP value
*
* Get & cache target vCPU's RSP in run_context.
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return the value of RSP.
*/
uint64_t vcpu_get_rsp(const struct acrn_vcpu *vcpu);
/**
* @brief set vcpu RSP value
*
* Update target vCPU's RSP in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set RSP to
*
* @return None
*/
void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu EFER value
*
* Get & cache target vCPU's EFER in run_context.
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return the value of EFER.
*/
uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu EFER value
*
* Update target vCPU's EFER in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set EFER to
*
* @return None
*/
void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu RFLAG value
*
* Get & cache target vCPU's RFLAGS in run_context.
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return the value of RFLAGS.
*/
uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu RFLAGS value
*
* Update target vCPU's RFLAGS in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set RFLAGS to
*
* @return None
*/
void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get guest emulated MSR
*
* Get the content of emulated guest MSR
*
* @param[in] vcpu pointer to vcpu data structure
* @param[in] msr the guest MSR
*
* @return the value of emulated MSR.
*/
uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr);
/**
* @brief set guest emulated MSR
*
* Update the content of emulated guest MSR
*
* @param[in] vcpu pointer to vcpu data structure
* @param[in] msr the guest MSR
* @param[in] val the value to set the target MSR
*
* @return None
*/
void vcpu_set_guest_msr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val);
/**
* @brief write eoi_exit_bitmap to VMCS fields
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return None
*/
void vcpu_set_vmcs_eoi_exit(const struct acrn_vcpu *vcpu);
/**
* @brief reset all eoi_exit_bitmaps
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return None
*/
void vcpu_reset_eoi_exit_bitmaps(struct acrn_vcpu *vcpu);
/**
* @brief set eoi_exit_bitmap bit
*
* Set corresponding bit of vector in eoi_exit_bitmap
*
* @param[in] vcpu pointer to vcpu data structure
* @param[in] vector
*
* @return None
*/
void vcpu_set_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector);
/**
* @brief clear eoi_exit_bitmap bit
*
* Clear corresponding bit of vector in eoi_exit_bitmap
*
* @param[in] vcpu pointer to vcpu data structure
* @param[in] vector
*
* @return None
*/
void vcpu_clear_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector);
/**
* @brief set all the vcpu registers
*
* Update all of the target vCPU's registers in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] vcpu_regs all the registers' value
*
* @return None
*/
void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs);
/**
* @brief reset all the vcpu registers
*
* Reset all of the target vCPU's registers in run_context to initial values.
*
* @param[inout] vcpu pointer to vcpu data structure
*
* @return None
*/
void reset_vcpu_regs(struct acrn_vcpu *vcpu);
bool sanitize_cr0_cr4_pattern(void);
/**
* @brief Initialize the protect mode vcpu registers
*
* Initialize all of the vCPU's registers in run_context to initial protected mode values.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] vgdt_base_gpa guest physical address of gdt for guest
*
* @return None
*/
void init_vcpu_protect_mode_regs(struct acrn_vcpu *vcpu, uint64_t vgdt_base_gpa);
/**
* @brief set the vCPU startup entry
*
* Set target vCPU's startup entry in run_context.
*
* @param[inout] vcpu pointer to vCPU data structure
* @param[in] entry startup entry for the vCPU
*
* @return None
*/
void set_vcpu_startup_entry(struct acrn_vcpu *vcpu, uint64_t entry);
static inline bool is_long_mode(struct acrn_vcpu *vcpu)
{
return (vcpu_get_efer(vcpu) & MSR_IA32_EFER_LMA_BIT) != 0UL;
}
static inline bool is_paging_enabled(struct acrn_vcpu *vcpu)
{
return (vcpu_get_cr0(vcpu) & CR0_PG) != 0UL;
}
static inline bool is_pae(struct acrn_vcpu *vcpu)
{
return (vcpu_get_cr4(vcpu) & CR4_PAE) != 0UL;
}
struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id);
struct acrn_vcpu *get_ever_run_vcpu(uint16_t pcpu_id);
void save_xsave_area(struct acrn_vcpu *vcpu, struct ext_context *ectx);
void rstore_xsave_area(const struct acrn_vcpu *vcpu, const struct ext_context *ectx);
void load_iwkey(struct acrn_vcpu *vcpu);
/**
* @brief create a vcpu for the target vm
*
* Creates/allocates a vCPU instance, with initialization for its vcpu_id,
* vpid, vmcs, vlapic, etc. It sets the init vCPU state to VCPU_INIT
*
* @param[in] pcpu_id created vcpu will run on this pcpu
* @param[in] vm pointer to vm data structure; this vcpu will be owned by this vm.
* @param[out] rtn_vcpu_handle pointer to the created vcpu
*
* @retval 0 vcpu created successfully, other values failed.
*/
int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle);
/**
* @brief run into non-root mode based on vcpu setting
*
* An interface in vCPU thread to implement VM entry and VM exit.
* A CPU switches between VMX root mode and non-root mode based on it.
*
* @param[inout] vcpu pointer to vcpu data structure
* @pre vcpu != NULL
*
* @retval 0 vcpu run successfully, other values failed.
*/
int32_t run_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief unmap the vcpu from its pcpu and free its vlapic
*
* Unmap the vcpu from its pcpu, free its vlapic, and set the vcpu state to VCPU_OFFLINE.
*
* @param[inout] vcpu pointer to vcpu data structure
* @pre vcpu != NULL
* @pre vcpu->state == VCPU_ZOMBIE
* @return None
*/
void offline_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief reset vcpu state and values
*
* Reset all fields in a vCPU instance, the vCPU state is reset to VCPU_INIT.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] mode the reset mode
* @pre vcpu != NULL
* @pre vcpu->state == VCPU_ZOMBIE
* @return None
*/
void reset_vcpu(struct acrn_vcpu *vcpu, enum reset_mode mode);
/**
* @brief pause the vcpu and set new state
*
* Change a vCPU state to VCPU_ZOMBIE, and make a reschedule request for it.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] new_state the state to set vcpu
*
* @return None
*/
void zombie_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state);
/**
* @brief set the vcpu to running state, then it will be scheduled.
*
* Adds a vCPU into the run queue and make a reschedule request for it. It sets the vCPU state to VCPU_RUNNING.
*
* @param[inout] vcpu pointer to vcpu data structure
* @pre vcpu != NULL
* @pre vcpu->state == VCPU_INIT
* @return None
*/
void launch_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief kick the vcpu and let it handle pending events
*
* Kick a vCPU to handle the pending events.
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return None
*/
void kick_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief create a vcpu for the vm and map it to the pcpu.
*
* Create a vcpu for the vm and map it to the pcpu.
*
* @param[inout] vm pointer to vm data structure
* @param[in] pcpu_id which the vcpu will be mapped
*
* @retval 0 on success
* @retval -EINVAL if the vCPU ID is invalid
*/
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id);
/**
* @brief get physical destination cpu mask
*
* get the corresponding physical destination cpu mask for the vm and virtual destination cpu mask
*
* @param[in] vm pointer to vm data structure
* @param[in] vdmask virtual destination cpu mask
*
* @return The physical destination CPU mask
*/
uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask);
bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu);
/**
* @brief handle posted interrupts
*
* VT-d PI handler, find the corresponding vCPU for this IRQ,
* if the associated PID's bit ON is set, wake it up.
*
* @param[in] vcpu_index a zero based index of where the vCPU is located in the vCPU list for current pCPU
* @pre vcpu_index < CONFIG_MAX_VM_NUM
*
* @return None
*/
void vcpu_handle_pi_notification(uint32_t vcpu_index);
/*
* @brief Update the state of vCPU and state of vlapic
*
* The vlapic state of VM shall be updated for some vCPU
* state update cases, such as from VCPU_INIT to VCPU_RUNNING.
* @pre (vcpu != NULL)
*/
void vcpu_set_state(struct acrn_vcpu *vcpu, enum vcpu_state new_state);
/**
* @}
*/
/* End of acrn_vcpu */
#endif /* ASSEMBLER */
#endif /* VCPU_H */


@@ -0,0 +1,32 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VCPUID_H_
#define VCPUID_H_
#define CPUID_CHECK_SUBLEAF (1U << 0U)
#define MAX_VM_VCPUID_ENTRIES 64U
/* Guest capability flags reported by CPUID */
#define GUEST_CAPS_PRIVILEGE_VM (1U << 0U)
struct vcpuid_entry {
uint32_t eax;
uint32_t ebx;
uint32_t ecx;
uint32_t edx;
uint32_t leaf;
uint32_t subleaf;
uint32_t flags;
uint32_t padding;
};
int32_t set_vcpuid_entries(struct acrn_vm *vm);
void guest_cpuid(struct acrn_vcpu *vcpu,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
#endif /* VCPUID_H_ */


@@ -0,0 +1,122 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef ARCH_X86_GUEST_VIRQ_H
#define ARCH_X86_GUEST_VIRQ_H
struct acrn_vcpu;
struct acrn_vm;
/**
* @brief virtual IRQ
*
* @addtogroup acrn_virq ACRN vIRQ
* @{
*/
/**
* @brief Queue exception to guest.
*
* This exception may be injected immediately or later,
* depending on the exception class.
*
* @param[in] vcpu Pointer to vCPU.
* @param[in] vector_arg Vector of the exception.
* @param[in] err_code_arg Error Code to be injected.
*
* @retval 0 on success
* @retval -EINVAL on error that vector is invalid.
*
* @pre vcpu != NULL
*/
int32_t vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector_arg, uint32_t err_code_arg);
/**
* @brief Inject external interrupt to guest.
*
* @param[in] vcpu Pointer to vCPU.
*
* @return None
*
* @pre vcpu != NULL
*/
void vcpu_inject_extint(struct acrn_vcpu *vcpu);
/**
* @brief Inject NMI to guest.
*
* @param[in] vcpu Pointer to vCPU.
*
* @return None
*
* @pre vcpu != NULL
*/
void vcpu_inject_nmi(struct acrn_vcpu *vcpu);
/**
* @brief Inject a general protection exception (GP) to the guest.
*
* @param[in] vcpu Pointer to vCPU.
* @param[in] err_code Error Code to be injected.
*
* @return None
*
* @pre vcpu != NULL
*/
void vcpu_inject_gp(struct acrn_vcpu *vcpu, uint32_t err_code);
/**
* @brief Inject a page fault exception (PF) to the guest.
*
* @param[in] vcpu Pointer to vCPU.
* @param[in] addr Address that results in the PF.
* @param[in] err_code Error Code to be injected.
*
* @return None
*
* @pre vcpu != NULL
*/
void vcpu_inject_pf(struct acrn_vcpu *vcpu, uint64_t addr, uint32_t err_code);
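/*
 * Illustrative usage sketch (buf, gva and size are assumed locals; not a
 * definitive usage): when a copy from a guest virtual address faults, reflect
 * the page fault back to the guest with the fault address and error code
 * reported by the copy routine.
 *
 *   uint32_t err_code = 0U;
 *   uint64_t fault_addr;
 *
 *   if (copy_from_gva(vcpu, &buf, gva, size, &err_code, &fault_addr) < 0) {
 *       vcpu_inject_pf(vcpu, fault_addr, err_code);
 *   }
 */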
/**
* @brief Inject an invalid opcode exception (UD) to the guest.
*
* @param[in] vcpu Pointer to vCPU.
*
* @return None
*
* @pre vcpu != NULL
*/
void vcpu_inject_ud(struct acrn_vcpu *vcpu);
/**
* @brief Inject a stack fault exception (SS) to the guest.
*
* @param[in] vcpu Pointer to vCPU.
*
* @return None
*
* @pre vcpu != NULL
*/
void vcpu_inject_ss(struct acrn_vcpu *vcpu);
void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid);
/*
* @pre vcpu != NULL
*/
int32_t exception_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t nmi_window_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu);
/**
* @}
*/
/* End of acrn_virq */
#endif /* ARCH_X86_GUEST_VIRQ_H */


@@ -0,0 +1,95 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VCR_H
#define VCR_H
/**
* @file virtual_cr.h
*
* @brief public APIs for vCR operations
*/
uint64_t get_cr4_reserved_bits(void);
void init_cr0_cr4_host_guest_mask(void);
/**
* @brief vCR from vcpu
*
* @defgroup vCR ACRN
* @{
*/
/**
* @brief get vcpu CR0 value
*
* Get & cache target vCPU's CR0 in run_context.
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return the value of CR0.
*/
uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu CR0 value
*
* Update target vCPU's CR0 in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set CR0 to
*/
void vcpu_set_cr0(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu CR2 value
*
* Get & cache target vCPU's CR2 in run_context.
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return the value of CR2.
*/
uint64_t vcpu_get_cr2(const struct acrn_vcpu *vcpu);
/**
* @brief set vcpu CR2 value
*
* Update target vCPU's CR2 in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set CR2 to
*/
void vcpu_set_cr2(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu CR4 value
*
* Get & cache target vCPU's CR4 in run_context.
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return the value of CR4.
*/
uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu CR4 value
*
* Update target vCPU's CR4 in run_context.
*
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set CR4 to
*/
void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @}
*/
/* End of vCR */
int32_t cr_access_vmexit_handler(struct acrn_vcpu *vcpu);
#endif /* VCR_H */


@@ -0,0 +1,201 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef VLAPIC_H
#define VLAPIC_H
#include <asm/page.h>
#include <asm/timer.h>
#include <asm/apicreg.h>
/**
* @file vlapic.h
*
* @brief public APIs for virtual LAPIC
*/
#define VLAPIC_MAXLVT_INDEX APIC_LVT_CMCI
struct vlapic_timer {
struct hv_timer timer;
uint32_t mode;
uint32_t tmicr;
uint32_t divisor_shift;
};
struct acrn_vlapic {
/*
* Please keep 'apic_page' as the first field in
* current structure, as below alignment restrictions are mandatory
* to support APICv features:
* - 'apic_page' MUST be 4KB aligned.
* IRR, TMR and PIR could be accessed by other vCPUs when delivering
* an interrupt to the vLAPIC.
*/
struct lapic_regs apic_page;
uint32_t vapic_id;
uint32_t esr_pending;
int32_t esr_firing;
struct vlapic_timer vtimer;
/*
* isrv: vector number for the highest priority bit that is set in the ISR
*/
uint32_t isrv;
uint64_t msr_apicbase;
const struct acrn_apicv_ops *ops;
/*
* Copies of some registers in the virtual APIC page. We do this for
* a couple of different reasons:
* - to be able to detect what changed (e.g. svr_last)
* - to maintain a coherent snapshot of the register (e.g. lvt_last)
*/
uint32_t svr_last;
uint32_t lvt_last[VLAPIC_MAXLVT_INDEX + 1];
} __aligned(PAGE_SIZE);
struct acrn_vcpu;
struct acrn_apicv_ops {
void (*accept_intr)(struct acrn_vlapic *vlapic, uint32_t vector, bool level);
void (*inject_intr)(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected);
bool (*has_pending_delivery_intr)(struct acrn_vcpu *vcpu);
bool (*has_pending_intr)(struct acrn_vcpu *vcpu);
bool (*apic_read_access_may_valid)(uint32_t offset);
bool (*apic_write_access_may_valid)(uint32_t offset);
bool (*x2apic_read_msr_may_valid)(uint32_t offset);
bool (*x2apic_write_msr_may_valid)(uint32_t offset);
};
enum reset_mode;
extern const struct acrn_apicv_ops *apicv_ops;
void vlapic_set_apicv_ops(void);
/**
* @brief virtual LAPIC
*
* @addtogroup acrn_vlapic ACRN vLAPIC
* @{
*/
void vlapic_inject_intr(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected);
bool vlapic_has_pending_delivery_intr(struct acrn_vcpu *vcpu);
bool vlapic_has_pending_intr(struct acrn_vcpu *vcpu);
uint64_t vlapic_get_tsc_deadline_msr(const struct acrn_vlapic *vlapic);
void vlapic_set_tsc_deadline_msr(struct acrn_vlapic *vlapic, uint64_t val_arg);
uint64_t vlapic_get_apicbase(const struct acrn_vlapic *vlapic);
int32_t vlapic_set_apicbase(struct acrn_vlapic *vlapic, uint64_t new);
int32_t vlapic_x2apic_read(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *val);
int32_t vlapic_x2apic_write(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val);
/*
* Signals to the LAPIC that an interrupt at 'vector' needs to be generated
* to the 'cpu'; the state is recorded in the IRR.
* @pre vcpu != NULL
* @pre vector <= 255U
*/
void vlapic_set_intr(struct acrn_vcpu *vcpu, uint32_t vector, bool level);
#define LAPIC_TRIG_LEVEL true
#define LAPIC_TRIG_EDGE false
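/*
 * Illustrative usage sketch (assumed context; not a definitive usage): assert
 * an edge-triggered interrupt on a target vCPU's vLAPIC, e.g. from a virtual
 * device model inside the hypervisor.
 *
 *   vlapic_set_intr(target_vcpu, vector, LAPIC_TRIG_EDGE);
 */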
/**
* @brief Triggers LAPIC local interrupt(LVT).
*
* @param[in] vm Pointer to VM data structure
* @param[in] vcpu_id_arg ID of vCPU, BROADCAST_CPU_ID means triggering
* interrupt to all vCPUs.
* @param[in] lvt_index The index of the LVT to be fired.
*
* @retval 0 on success.
* @retval -EINVAL on error that vcpu_id_arg or vector of the LVT is invalid.
*
* @pre vm != NULL
*/
int32_t vlapic_set_local_intr(struct acrn_vm *vm, uint16_t vcpu_id_arg, uint32_t lvt_index);
/**
* @brief Inject MSI to target VM.
*
* @param[in] vm Pointer to VM data structure
* @param[in] addr MSI address.
* @param[in] data MSI data.
*
* @retval 0 on success.
* @retval -1 on error that addr is invalid.
*
* @pre vm != NULL
*/
int32_t vlapic_inject_msi(struct acrn_vm *vm, uint64_t addr, uint64_t data);
void vlapic_receive_intr(struct acrn_vm *vm, bool level, uint32_t dest,
bool phys, uint32_t delmode, uint32_t vec, bool rh);
/**
* @pre vlapic != NULL
*/
static inline uint32_t vlapic_get_apicid(const struct acrn_vlapic *vlapic)
{
return vlapic->vapic_id;
}
void vlapic_create(struct acrn_vcpu *vcpu, uint16_t pcpu_id);
/*
* @pre vcpu != NULL
*/
void vlapic_free(struct acrn_vcpu *vcpu);
void vlapic_reset(struct acrn_vlapic *vlapic, const struct acrn_apicv_ops *ops, enum reset_mode mode);
void vlapic_restore(struct acrn_vlapic *vlapic, const struct lapic_regs *regs);
uint64_t vlapic_apicv_get_apic_access_addr(void);
uint64_t vlapic_apicv_get_apic_page_addr(struct acrn_vlapic *vlapic);
int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t apic_write_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t veoi_vmexit_handler(struct acrn_vcpu *vcpu);
void vlapic_update_tpr_threshold(const struct acrn_vlapic *vlapic);
int32_t tpr_below_threshold_vmexit_handler(struct acrn_vcpu *vcpu);
void vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
uint32_t dest, bool phys, bool lowprio);
void vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
uint32_t dest, bool phys);
bool is_x2apic_enabled(const struct acrn_vlapic *vlapic);
bool is_xapic_enabled(const struct acrn_vlapic *vlapic);
/**
* @}
*/
/* End of acrn_vlapic */
#endif /* VLAPIC_H */


@@ -0,0 +1,277 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VM_H_
#define VM_H_
/* Defines for VM Launch and Resume */
#define VM_RESUME 0
#define VM_LAUNCH 1
#ifndef ASSEMBLER
#include <asm/lib/bits.h>
#include <asm/lib/spinlock.h>
#include <asm/pgtable.h>
#include <asm/guest/vcpu.h>
#include <vioapic.h>
#include <vpic.h>
#include <asm/guest/vmx_io.h>
#include <vuart.h>
#include <asm/guest/trusty.h>
#include <asm/guest/vcpuid.h>
#include <vpci.h>
#include <asm/cpu_caps.h>
#include <asm/e820.h>
#include <asm/vm_config.h>
#ifdef CONFIG_HYPERV_ENABLED
#include <asm/guest/hyperv.h>
#endif
enum reset_mode {
POWER_ON_RESET, /* reset by hardware Power-on */
COLD_RESET, /* hardware cold reset */
WARM_RESET, /* behavior differs slightly from cold reset in that some MSRs might be retained. */
INIT_RESET, /* reset by INIT */
SOFTWARE_RESET, /* reset by software disable<->enable */
};
struct vm_hw_info {
/* vcpu array of this VM */
struct acrn_vcpu vcpu_array[MAX_VCPUS_PER_VM];
uint16_t created_vcpus; /* Number of created vcpus */
uint64_t cpu_affinity; /* Actual pCPUs this VM runs on. The set bits represent the pCPU IDs */
} __aligned(PAGE_SIZE);
struct sw_module_info {
/* sw modules like ramdisk, bootargs, firmware, etc. */
void *src_addr; /* HVA */
void *load_addr; /* GPA */
uint32_t size;
};
struct sw_kernel_info {
void *kernel_src_addr; /* HVA */
void *kernel_load_addr; /* GPA */
void *kernel_entry_addr; /* GPA */
uint32_t kernel_size;
};
struct vm_sw_info {
enum os_kernel_type kernel_type; /* Guest kernel type */
/* Kernel information (common for all guest types) */
struct sw_kernel_info kernel_info;
struct sw_module_info bootargs_info;
struct sw_module_info ramdisk_info;
struct sw_module_info acpi_info;
/* HVA to IO shared page */
void *io_shared_page;
/* Whether IO completion polling mode is enabled */
bool is_polling_ioreq;
};
struct vm_pm_info {
uint8_t px_cnt; /* count of all Px states */
struct cpu_px_data px_data[MAX_PSTATE];
uint8_t cx_cnt; /* count of all Cx entries */
struct cpu_cx_data cx_data[MAX_CSTATE];
struct pm_s_state_data *sx_state_data; /* data for S3/S5 implementation */
};
/* Enumerated type for VM states */
enum vm_state {
VM_POWERED_OFF = 0, /* MUST be 0 because vm_state's initialization depends on the cleared BSS section */
VM_CREATED, /* VM created / awaiting start (boot) */
VM_RUNNING, /* VM running */
VM_READY_TO_POWEROFF, /* RTVM only, it is trying to poweroff by itself */
VM_PAUSED, /* VM paused */
};
enum vm_vlapic_mode {
VM_VLAPIC_DISABLED = 0U,
VM_VLAPIC_XAPIC,
VM_VLAPIC_X2APIC,
VM_VLAPIC_TRANSITION
};
struct vm_arch {
/* I/O bitmaps A and B for this VM, MUST be 4-Kbyte aligned */
uint8_t io_bitmap[PAGE_SIZE*2];
/* EPT hierarchy for Normal World */
void *nworld_eptp;
/* EPT hierarchy for Secure World
* Secure world can access Normal World's memory,
* but Normal World can not access Secure World's memory.
*/
void *sworld_eptp;
struct pgtable ept_pgtable;
struct acrn_vioapics vioapics; /* Virtual IOAPIC/s */
struct acrn_vpic vpic; /* Virtual PIC */
#ifdef CONFIG_HYPERV_ENABLED
struct acrn_hyperv hyperv;
#endif
enum vm_vlapic_mode vlapic_mode; /* Represents vLAPIC mode across vCPUs*/
/*
* Keylocker spec 4.5:
* Bit 0 - Backup/restore valid.
* Bit 1 - Reserved.
* Bit 2 - Backup key storage read/write error.
* Bit 3 - IWKeyBackup consumed.
* Bit 63:4 - Reserved.
*/
uint64_t iwkey_backup_status;
spinlock_t iwkey_backup_lock; /* Spin-lock used to protect internal key backup/restore */
struct iwkey iwkey_backup;
/* reference to virtual platform to come here (as needed) */
bool vm_mwait_cap;
} __aligned(PAGE_SIZE);
struct acrn_vm {
struct vm_arch arch_vm; /* Reference to this VM's arch information */
struct vm_hw_info hw; /* Reference to this VM's HW information */
struct vm_sw_info sw; /* Reference to SW associated with this VM */
struct vm_pm_info pm; /* Reference to this VM's power management information */
uint32_t e820_entry_num;
struct e820_entry *e820_entries;
uint16_t vm_id; /* Virtual machine identifier */
enum vm_state state; /* VM state */
struct acrn_vuart vuart[MAX_VUART_NUM_PER_VM]; /* Virtual UART */
enum vpic_wire_mode wire_mode;
struct iommu_domain *iommu; /* iommu domain of this VM */
/* vm_state_lock is used to protect vm/vcpu state transitions;
* its initialization relies on the cleared BSS section
*/
spinlock_t vm_state_lock;
spinlock_t vlapic_mode_lock; /* Spin-lock used to protect vlapic_mode modifications for a VM */
spinlock_t ept_lock; /* Spin-lock used to protect ept add/modify/remove for a VM */
spinlock_t emul_mmio_lock; /* Used to protect emulation mmio_node concurrent access for a VM */
uint16_t nr_emul_mmio_regions; /* the emulated mmio_region number */
struct mem_io_node emul_mmio[CONFIG_MAX_EMULATED_MMIO_REGIONS];
struct vm_io_handler_desc emul_pio[EMUL_PIO_IDX_MAX];
uint8_t uuid[16];
struct secure_world_control sworld_control;
/* Secure World's snapshot
* Currently, Secure World is only running on vcpu[0],
* so the snapshot only stores vcpu0's run_context
* of secure world.
*/
struct guest_cpu_context sworld_snapshot;
uint32_t vcpuid_entry_nr, vcpuid_level, vcpuid_xlevel;
struct vcpuid_entry vcpuid_entries[MAX_VM_VCPUID_ENTRIES];
struct acrn_vpci vpci;
uint8_t vrtc_offset;
uint64_t intr_inject_delay_delta; /* delay of intr injection */
} __aligned(PAGE_SIZE);
/*
* @pre vm != NULL
*/
static inline uint64_t vm_active_cpus(const struct acrn_vm *vm)
{
uint64_t dmask = 0UL;
uint16_t i;
const struct acrn_vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
bitmap_set_nolock(vcpu->vcpu_id, &dmask);
}
return dmask;
}
/*
* @pre vcpu_id < MAX_VCPUS_PER_VM
* @pre vm->hw.vcpu_array[vcpu_id].state != VCPU_OFFLINE
*/
static inline struct acrn_vcpu *vcpu_from_vid(struct acrn_vm *vm, uint16_t vcpu_id)
{
return &(vm->hw.vcpu_array[vcpu_id]);
}
static inline struct acrn_vcpu *vcpu_from_pid(struct acrn_vm *vm, uint16_t pcpu_id)
{
uint16_t i;
struct acrn_vcpu *vcpu, *target_vcpu = NULL;
foreach_vcpu(i, vm, vcpu) {
if (pcpuid_from_vcpu(vcpu) == pcpu_id) {
target_vcpu = vcpu;
break;
}
}
return target_vcpu;
}
/* Convert relative vm id to absolute vm id */
static inline uint16_t rel_vmid_2_vmid(uint16_t sos_vmid, uint16_t rel_vmid)
{
return (sos_vmid + rel_vmid);
}
/* Convert absolute vm id to relative vm id */
static inline uint16_t vmid_2_rel_vmid(uint16_t sos_vmid, uint16_t vmid)
{
return (vmid - sos_vmid);
}
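/*
 * Usage sketch (illustrative only): a caller that knows the Service OS
 * vm id, e.g. via get_sos_vm() declared below, can translate between
 * relative and absolute vm ids. The concrete values are hypothetical.
 */
uint16_t sos_vmid = get_sos_vm()->vm_id;
uint16_t abs_vmid = rel_vmid_2_vmid(sos_vmid, 1U); /* first post-launched VM */
uint16_t rel_vmid = vmid_2_rel_vmid(sos_vmid, abs_vmid); /* back to 1U */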
void make_shutdown_vm_request(uint16_t pcpu_id);
bool need_shutdown_vm(uint16_t pcpu_id);
int32_t shutdown_vm(struct acrn_vm *vm);
void poweroff_if_rt_vm(struct acrn_vm *vm);
void pause_vm(struct acrn_vm *vm);
void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec);
void start_vm(struct acrn_vm *vm);
int32_t reset_vm(struct acrn_vm *vm);
int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm);
void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config);
void launch_vms(uint16_t pcpu_id);
bool is_poweroff_vm(const struct acrn_vm *vm);
bool is_created_vm(const struct acrn_vm *vm);
bool is_paused_vm(const struct acrn_vm *vm);
bool is_sos_vm(const struct acrn_vm *vm);
bool is_postlaunched_vm(const struct acrn_vm *vm);
bool is_prelaunched_vm(const struct acrn_vm *vm);
uint16_t get_vmid_by_uuid(const uint8_t *uuid);
struct acrn_vm *get_vm_from_vmid(uint16_t vm_id);
struct acrn_vm *get_sos_vm(void);
void create_sos_vm_e820(struct acrn_vm *vm);
void create_prelaunched_vm_e820(struct acrn_vm *vm);
int32_t vm_sw_loader(struct acrn_vm *vm);
void vrtc_init(struct acrn_vm *vm);
bool is_lapic_pt_configured(const struct acrn_vm *vm);
bool is_rt_vm(const struct acrn_vm *vm);
bool is_pi_capable(const struct acrn_vm *vm);
bool has_rt_vm(void);
struct acrn_vm *get_highest_severity_vm(bool runtime);
bool vm_hide_mtrr(const struct acrn_vm *vm);
void update_vm_vlapic_state(struct acrn_vm *vm);
enum vm_vlapic_mode check_vm_vlapic_mode(const struct acrn_vm *vm);
/*
* @pre vm != NULL
*/
void get_vm_lock(struct acrn_vm *vm);
/*
* @pre vm != NULL
*/
void put_vm_lock(struct acrn_vm *vm);
void *get_sworld_memory_base(void);
#endif /* !ASSEMBLER */
#endif /* VM_H_ */


@@ -0,0 +1,16 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VM_RESET_H_
#define VM_RESET_H_
#include <acrn_common.h>
void register_reset_port_handler(struct acrn_vm *vm);
void shutdown_vm_from_idle(uint16_t pcpu_id);
void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu);
#endif /* VM_RESET_H_ */


@@ -0,0 +1,49 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VMCS_H_
#define VMCS_H_
#define VM_SUCCESS 0
#define VM_FAIL -1
#ifndef ASSEMBLER
#include <types.h>
#include <asm/guest/vcpu.h>
#define VMX_VMENTRY_FAIL 0x80000000U
#define APIC_ACCESS_OFFSET 0xFFFUL /* 11:0, offset within the APIC page */
#define APIC_ACCESS_TYPE 0xF000UL /* 15:12, access type */
#define TYPE_LINEAR_APIC_INST_READ (0UL << 12U)
#define TYPE_LINEAR_APIC_INST_WRITE (1UL << 12U)
/* VM exit qualifications for APIC-access
* Access type:
* 0 = linear access for a data read during instruction execution
* 1 = linear access for a data write during instruction execution
* 2 = linear access for an instruction fetch
* 3 = linear access (read or write) during event delivery
* 10 = guest-physical access during event delivery
* 15 = guest-physical access for an instruction fetch or during
* instruction execution
*/
static inline uint64_t apic_access_type(uint64_t qual)
{
return (qual & APIC_ACCESS_TYPE);
}
static inline uint64_t apic_access_offset(uint64_t qual)
{
return (qual & APIC_ACCESS_OFFSET);
}
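/*
 * Illustrative sketch, not part of this header: decoding an APIC-access
 * VM exit with the helpers above. Reading the qualification via
 * exec_vmread(VMX_EXIT_QUALIFICATION) is an assumption about the
 * caller's context; the comparison value comes from the access-type
 * encoding documented above.
 */
uint64_t qual = exec_vmread(VMX_EXIT_QUALIFICATION);
if (apic_access_type(qual) == TYPE_LINEAR_APIC_INST_WRITE) {
uint64_t offset = apic_access_offset(qual); /* e.g. 0x300UL would be ICR low */
(void)offset;
}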
void init_vmcs(struct acrn_vcpu *vcpu);
void load_vmcs(const struct acrn_vcpu *vcpu);
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu);
#endif /* ASSEMBLER */
#endif /* VMCS_H_ */


@@ -0,0 +1,89 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VMEXIT_H_
#define VMEXIT_H_
struct vm_exit_dispatch {
int32_t (*handler)(struct acrn_vcpu *);
uint32_t need_exit_qualification;
};
int32_t vmexit_handler(struct acrn_vcpu *vcpu);
int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu);
extern void vm_exit(void);
static inline uint64_t
vm_exit_qualification_bit_mask(uint64_t exit_qual, uint32_t msb, uint32_t lsb)
{
return (exit_qual &
(((1UL << (msb + 1U)) - 1UL) - ((1UL << lsb) - 1UL)));
}
/* access Control-Register Info using exit qualification field */
static inline uint64_t vm_exit_cr_access_cr_num(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 3U, 0U) >> 0U);
}
static inline uint64_t vm_exit_cr_access_type(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 5U, 4U) >> 4U);
}
static inline uint64_t vm_exit_cr_access_lmsw_op(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 6U, 6U) >> 6U);
}
static inline uint64_t vm_exit_cr_access_reg_idx(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 11U, 8U) >> 8U);
}
static inline uint64_t vm_exit_cr_access_lmsw_src_date(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 31U, 16U) >> 16U);
}
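/*
 * Worked example (illustrative value): exit_qual = 0x203UL decodes with
 * the helpers above to a MOV-to-CR access (type 0) of CR3 from GPR
 * index 2 (RDX).
 */
uint64_t exit_qual = 0x203UL;
uint64_t cr_num = vm_exit_cr_access_cr_num(exit_qual); /* 3: CR3 */
uint64_t access_type = vm_exit_cr_access_type(exit_qual); /* 0: MOV to CR */
uint64_t reg_idx = vm_exit_cr_access_reg_idx(exit_qual); /* 2: RDX */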
/* access IO Access Info using exit qualification field */
static inline uint64_t vm_exit_io_instruction_size(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 2U, 0U) >> 0U);
}
static inline uint64_t
vm_exit_io_instruction_access_direction(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 3U, 3U) >> 3U);
}
static inline uint64_t vm_exit_io_instruction_is_string(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 4U, 4U) >> 4U);
}
static inline uint64_t
vm_exit_io_instruction_is_rep_prefixed(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 5U, 5U) >> 5U);
}
static inline uint64_t
vm_exit_io_instruction_is_operand_encoding(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 6U, 6U) >> 6U);
}
static inline uint64_t vm_exit_io_instruction_port_number(uint64_t exit_qual)
{
return (vm_exit_qualification_bit_mask(exit_qual, 31U, 16U) >> 16U);
}
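/*
 * Worked example (illustrative value): exit_qual = 0x0021000BUL decodes
 * with the helpers above to a 4-byte IN from port 0x21; the size field
 * is zero-based, hence the "+ 1UL".
 */
uint64_t exit_qual = 0x0021000BUL;
uint64_t size = vm_exit_io_instruction_size(exit_qual) + 1UL; /* 4 bytes */
uint64_t is_in = vm_exit_io_instruction_access_direction(exit_qual); /* 1: IN */
uint64_t port = vm_exit_io_instruction_port_number(exit_qual); /* 0x21 */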
#endif /* VMEXIT_H_ */


@@ -0,0 +1,87 @@
/*
* Copyright (C) <2018> Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
* @file vmtrr.h
*
* @brief MTRR Virtualization
*/
#ifndef VMTRR_H
#define VMTRR_H
/**
* @brief MTRR Virtualization
*
* @addtogroup acrn_mem ACRN Memory Management
* @{
*/
#define FIXED_RANGE_MTRR_NUM 11U
#define MTRR_SUB_RANGE_NUM 8U
union mtrr_cap_reg {
uint64_t value;
struct {
uint32_t vcnt:8;
uint32_t fix:1;
uint32_t res0:1;
uint32_t wc:1;
uint32_t res1:21;
uint32_t res2:32;
} bits;
};
union mtrr_def_type_reg {
uint64_t value;
struct {
uint32_t type:8;
uint32_t res0:2;
uint32_t fixed_enable:1;
uint32_t enable:1;
uint32_t res1:20;
uint32_t res2:32;
} bits;
};
union mtrr_fixed_range_reg {
uint64_t value;
uint8_t type[MTRR_SUB_RANGE_NUM];
};
struct acrn_vmtrr {
union mtrr_cap_reg cap;
union mtrr_def_type_reg def_type;
union mtrr_fixed_range_reg fixed_range[FIXED_RANGE_MTRR_NUM];
};
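/*
 * Illustrative sketch: the bit-field views above let callers read packed
 * MSR fields directly. The MTRRcap value used here is hypothetical.
 */
union mtrr_cap_reg cap;
cap.value = 0x50aUL; /* vcnt = 10, fixed-range and WC supported */
uint8_t vcnt = (uint8_t)cap.bits.vcnt; /* 10 variable-range MTRR pairs */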
struct acrn_vcpu;
/**
* @brief Virtual MTRR MSR write
*
* @param[inout] vcpu Pointer to the vCPU data structure
* @param[in] msr Virtual MTRR MSR address
* @param[in] value The value to be written into the virtual MTRR MSR
*
* @return None
*/
void write_vmtrr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t value);
/**
* @brief Virtual MTRR MSR read
*
* @param[in] vcpu Pointer to the vCPU data structure
* @param[in] msr Virtual MTRR MSR Address
*
* @return The specified virtual MTRR MSR value
*/
uint64_t read_vmtrr(const struct acrn_vcpu *vcpu, uint32_t msr);
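/*
 * Usage sketch (illustrative only): an MSR-access VM exit handler could
 * route MTRR MSRs to the accessors above. The MSR name
 * MSR_IA32_MTRR_DEF_TYPE is assumed, and vcpu/value come from the
 * handler's context.
 */
write_vmtrr(vcpu, MSR_IA32_MTRR_DEF_TYPE, value); /* guest WRMSR path */
value = read_vmtrr(vcpu, MSR_IA32_MTRR_DEF_TYPE); /* guest RDMSR path */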
/**
* @brief Virtual MTRR initialization
*
* @param[inout] vcpu Pointer to the vCPU data structure
*
* @return None
*/
void init_vmtrr(struct acrn_vcpu *vcpu);
/**
* @}
*/
#endif /* VMTRR_H */


@@ -0,0 +1,91 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef IO_EMUL_H
#define IO_EMUL_H
#include <types.h>
/* Define emulated port IO index */
#define PIC_PRIMARY_PIO_IDX 0U
#define PIC_SECONDARY_PIO_IDX (PIC_PRIMARY_PIO_IDX + 1U)
#define PIC_ELC_PIO_IDX (PIC_SECONDARY_PIO_IDX + 1U)
#define PCI_CFGADDR_PIO_IDX (PIC_ELC_PIO_IDX + 1U)
#define PCI_CFGDATA_PIO_IDX (PCI_CFGADDR_PIO_IDX + 1U)
/* When MAX_VUART_NUM_PER_VM is larger than 2, UART_PIO_IDXn should also be added here */
#define UART_PIO_IDX0 (PCI_CFGDATA_PIO_IDX + 1U)
#define UART_PIO_IDX1 (UART_PIO_IDX0 + 1U)
#define PM1A_EVT_PIO_IDX (UART_PIO_IDX1 + 1U)
#define PM1A_CNT_PIO_IDX (PM1A_EVT_PIO_IDX + 1U)
#define PM1B_EVT_PIO_IDX (PM1A_CNT_PIO_IDX + 1U)
#define PM1B_CNT_PIO_IDX (PM1B_EVT_PIO_IDX + 1U)
#define RTC_PIO_IDX (PM1B_CNT_PIO_IDX + 1U)
#define VIRTUAL_PM1A_CNT_PIO_IDX (RTC_PIO_IDX + 1U)
#define KB_PIO_IDX (VIRTUAL_PM1A_CNT_PIO_IDX + 1U)
#define CF9_PIO_IDX (KB_PIO_IDX + 1U)
#define PIO_RESET_REG_IDX (CF9_PIO_IDX + 1U)
#define SLEEP_CTL_PIO_IDX (PIO_RESET_REG_IDX + 1U)
#define EMUL_PIO_IDX_MAX (SLEEP_CTL_PIO_IDX + 1U)
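/*
 * Usage sketch: each index above selects a fixed slot in the per-VM
 * emul_pio[] handler table. The registration call below reflects the
 * assumed shape of register_pio_emulation_handler(); the range and the
 * vuart_read/vuart_write callbacks are hypothetical.
 */
struct vm_io_range com1_range = {.base = 0x3F8U, .len = 8U};
register_pio_emulation_handler(vm, UART_PIO_IDX0, &com1_range, vuart_read, vuart_write);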
/**
* @brief The handler of VM exits on I/O instructions
*
* @param vcpu The virtual CPU which triggers the VM exit on I/O instruction
*/
int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu);
/**
* @brief EPT violation handling
*
* @param[in] vcpu Pointer to the vCPU data structure
*
* @retval -EINVAL Failed to handle the EPT violation
* @retval 0 The EPT violation was handled successfully
*/
int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu);
/**
* @brief General complete-work for port I/O emulation
*
* @pre io_req->io_type == REQ_PORTIO
*
* @remark This function must be called once \p io_req is completed, i.e. after
* either a previous call to emulate_io() has returned 0 or the corresponding VHM
* request has transitioned to the COMPLETE state.
*/
void emulate_pio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req);
/**
* @brief Allow a VM to access a port I/O range
*
* This API enables direct access from the given \p vm to the port I/O space
* starting from \p port_address to \p port_address + \p nbytes - 1.
*
* @param vm The VM whose port I/O access permissions are to be changed
* @param port_address The start address of the port I/O range
* @param nbytes The size of the range, in bytes
*/
void allow_guest_pio_access(struct acrn_vm *vm, uint16_t port_address, uint32_t nbytes);
/**
* @brief Deny a VM access to a port I/O range
*
* This API disables direct access from the given \p vm to the port I/O space
* starting from \p port_address to \p port_address + \p nbytes - 1.
*
* @param vm The VM whose port I/O access permissions are to be changed
* @param port_address The start address of the port I/O range
* @param nbytes The size of the range, in bytes
*/
void deny_guest_pio_access(struct acrn_vm *vm, uint16_t port_address, uint32_t nbytes);
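/*
 * Usage sketch (illustrative port values): pass the legacy COM1 range
 * through to a guest and later revoke it again.
 */
allow_guest_pio_access(vm, 0x3F8U, 8U);
deny_guest_pio_access(vm, 0x3F8U, 8U);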
/**
* @brief Fire VHM interrupt to SOS
*
* @return None
*/
void arch_fire_vhm_interrupt(void);
#endif /* IO_EMUL_H */