acrn-hypervisor/hypervisor/include/arch/x86/guest/vmcs.h
Li, Fei1 0e046c7a0a hv: vlapic: clear which access type we support for APIC-Access VM Exit
The current implementation doesn't make clear which access types we support for
APIC-Access VM Exits:
1) linear access for an instruction fetch
-- The APIC-access page is mapped as UC, which doesn't support instruction fetch.
2) linear access (read or write) during event delivery
-- This doesn't happen in the normal case unless the guest has gone wrong, e.g. by
placing its IDT in the APIC-access page. We don't need to support that case.
3) guest-physical access during event delivery;
   guest-physical access for an instruction fetch or during instruction execution
-- Do we plan to support enabling the APIC in real mode? I don't think so.

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
2019-06-20 08:53:25 +08:00


/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef VMCS_H_
#define VMCS_H_
#define VM_SUCCESS 0
#define VM_FAIL -1
#ifndef ASSEMBLER
#include <types.h>
#include <vmx.h>
#include <vcpu.h>
#define VMX_VMENTRY_FAIL 0x80000000U
#define APIC_ACCESS_OFFSET 0xFFFUL /* 11:0, offset within the APIC page */
#define APIC_ACCESS_TYPE 0xF000UL /* 15:12, access type */
#define TYPE_LINEAR_APIC_INST_READ (0UL << 12U)
#define TYPE_LINEAR_APIC_INST_WRITE (1UL << 12U)
static inline uint32_t vmx_eoi_exit(uint32_t vector)
{
	return (VMX_EOI_EXIT0_FULL + ((vector >> 6U) * 2U));
}
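
/*
 * A minimal usage sketch (not part of the original header): marking a vector
 * for EOI-induced VM exits.  vmx_eoi_exit() picks which of the four 64-bit
 * EOI-exit bitmap VMCS fields (VMX_EOI_EXIT0_FULL..VMX_EOI_EXIT3_FULL, two
 * field encodings apart) holds the vector; bits 5:0 of the vector then select
 * the bit within that field.  exec_vmread64()/exec_vmwrite64() are assumed to
 * be the VMCS accessors declared in vmx.h; the helper name is illustrative only.
 */
static inline void set_eoi_exit_bit_sketch(uint32_t vector)
{
	uint32_t field = vmx_eoi_exit(vector);

	exec_vmwrite64(field, exec_vmread64(field) | (1UL << (vector & 0x3FU)));
}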
/* VM exit qualifications for APIC-access
 * Access type:
 *  0 = linear access for a data read during instruction execution
 *  1 = linear access for a data write during instruction execution
 *  2 = linear access for an instruction fetch
 *  3 = linear access (read or write) during event delivery
 * 10 = guest-physical access during event delivery
 * 15 = guest-physical access for an instruction fetch or during
 *      instruction execution
 */
static inline uint64_t apic_access_type(uint64_t qual)
{
	return (qual & APIC_ACCESS_TYPE);
}

static inline uint64_t apic_access_offset(uint64_t qual)
{
	return (qual & APIC_ACCESS_OFFSET);
}
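
/*
 * A minimal sketch (not the actual ACRN exit handler) of how the helpers and
 * TYPE_LINEAR_APIC_INST_* macros above might be used, matching the commit
 * message: only linear data reads and writes are emulated, every other access
 * type is rejected.  The handler name and the source of `qual` (e.g. the saved
 * exit qualification) are assumptions for illustration only.
 */
static inline int32_t handle_apic_access_sketch(uint64_t qual)
{
	int32_t ret = VM_SUCCESS;
	uint64_t access_type = apic_access_type(qual);
	uint64_t offset = apic_access_offset(qual);

	if ((access_type == TYPE_LINEAR_APIC_INST_READ) ||
	    (access_type == TYPE_LINEAR_APIC_INST_WRITE)) {
		/* decode the faulting instruction and emulate the vLAPIC
		 * register at `offset` (read or write accordingly) */
		(void)offset;
	} else {
		/* fetches, event-delivery accesses and guest-physical
		 * accesses are intentionally unsupported */
		ret = VM_FAIL;
	}

	return ret;
}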
#define RFLAGS_C (1U<<0U)
#define RFLAGS_Z (1U<<6U)
#define RFLAGS_AC (1U<<18U)
#define VMX_SUPPORT_UNRESTRICTED_GUEST (1U<<5U)
void init_vmcs(struct acrn_vcpu *vcpu);
uint64_t vmx_rdmsr_pat(const struct acrn_vcpu *vcpu);
int32_t vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value);
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu);
static inline enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu)
{
	return vcpu->arch.cpu_mode;
}
#endif /* ASSEMBLER */
#endif /* VMCS_H_ */