mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-07 06:32:08 +00:00)
HV:treewide:Fix type conversion in VMX, timer and MTRR module
A static analysis tool detected a number of integer type conversions in the VMX, timer and MTRR modules. Update the related integer types in these modules and add the 'U'/'UL' suffix to the related constants.

V1-->V2: Resolve a few rebase conflicts.
V2-->V3: Add 'h' for uint16_t arguments in log functions; change the type of the temporary variable 'type' to uint8_t in the MTRR module to reduce type conversions.

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
parent f7efd0fee5
commit 84d320d49c
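The pattern behind most of the hunks below is worth spelling out once. In C, an unsuffixed constant is a plain signed int, so shifting it past bit 31 to build a 64-bit MSR image is undefined behavior unless every use site adds a (uint64_t) cast; giving the constant a 'U'/'UL' suffix bakes the width into the definition instead. A minimal standalone sketch of the MTRR_FIXED_RANGE_ALL_WB idea (illustrative names, LP64 target assumed, not the hypervisor's code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for MTRR_MEM_TYPE_WB (0x06); illustrative only. */
    #define MEM_TYPE_WB_OLD 0x06U   /* 32-bit: needs a (uint64_t) cast per shift */
    #define MEM_TYPE_WB_NEW 0x06UL  /* 64-bit on LP64: shifts up to 56 stay defined */

    int main(void)
    {
        uint64_t all_wb = 0UL;
        uint32_t i;

        /* Replicate the 8-bit type into all eight bytes of the MSR image,
         * as MTRR_FIXED_RANGE_ALL_WB does. With an unsuffixed plain-int 0x06,
         * `0x06 << 32` would already be undefined behavior. */
        for (i = 0U; i < 8U; i++) {
            all_wb |= MEM_TYPE_WB_NEW << (i * 8U);
        }
        printf("0x%016" PRIx64 "\n", all_wb); /* 0x0606060606060606 */
        return 0;
    }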
@@ -7,14 +7,14 @@
 
 #ifdef CONFIG_MTRR_ENABLED
 
-#define MTRR_FIXED_RANGE_ALL_WB ((uint64_t)MTRR_MEM_TYPE_WB \
-        | (((uint64_t)MTRR_MEM_TYPE_WB) << 8) \
-        | (((uint64_t)MTRR_MEM_TYPE_WB) << 16) \
-        | (((uint64_t)MTRR_MEM_TYPE_WB) << 24) \
-        | (((uint64_t)MTRR_MEM_TYPE_WB) << 32) \
-        | (((uint64_t)MTRR_MEM_TYPE_WB) << 40) \
-        | (((uint64_t)MTRR_MEM_TYPE_WB) << 48) \
-        | (((uint64_t)MTRR_MEM_TYPE_WB) << 56))
+#define MTRR_FIXED_RANGE_ALL_WB (MTRR_MEM_TYPE_WB \
+        | (MTRR_MEM_TYPE_WB << 8U) \
+        | (MTRR_MEM_TYPE_WB << 16U) \
+        | (MTRR_MEM_TYPE_WB << 24U) \
+        | (MTRR_MEM_TYPE_WB << 32U) \
+        | (MTRR_MEM_TYPE_WB << 40U) \
+        | (MTRR_MEM_TYPE_WB << 48U) \
+        | (MTRR_MEM_TYPE_WB << 56U))
 
 struct fixed_range_mtrr_maps {
     uint32_t msr;
@@ -22,53 +22,56 @@ struct fixed_range_mtrr_maps {
     uint32_t sub_range_size;
 };
 
-#define MAX_FIXED_RANGE_ADDR 0x100000
+#define MAX_FIXED_RANGE_ADDR 0x100000UL
 static struct fixed_range_mtrr_maps fixed_mtrr_map[FIXED_RANGE_MTRR_NUM] = {
-    { MSR_IA32_MTRR_FIX64K_00000, 0x0, 0x10000 },
-    { MSR_IA32_MTRR_FIX16K_80000, 0x80000, 0x4000 },
-    { MSR_IA32_MTRR_FIX16K_A0000, 0xA0000, 0x4000 },
-    { MSR_IA32_MTRR_FIX4K_C0000, 0xC0000, 0x1000 },
-    { MSR_IA32_MTRR_FIX4K_C8000, 0xC8000, 0x1000 },
-    { MSR_IA32_MTRR_FIX4K_D0000, 0xD0000, 0x1000 },
-    { MSR_IA32_MTRR_FIX4K_D8000, 0xD8000, 0x1000 },
-    { MSR_IA32_MTRR_FIX4K_E0000, 0xE0000, 0x1000 },
-    { MSR_IA32_MTRR_FIX4K_E8000, 0xE8000, 0x1000 },
-    { MSR_IA32_MTRR_FIX4K_F0000, 0xF0000, 0x1000 },
-    { MSR_IA32_MTRR_FIX4K_F8000, 0xF8000, 0x1000 },
+    { MSR_IA32_MTRR_FIX64K_00000, 0x0U, 0x10000U },
+    { MSR_IA32_MTRR_FIX16K_80000, 0x80000U, 0x4000U },
+    { MSR_IA32_MTRR_FIX16K_A0000, 0xA0000U, 0x4000U },
+    { MSR_IA32_MTRR_FIX4K_C0000, 0xC0000U, 0x1000U },
+    { MSR_IA32_MTRR_FIX4K_C8000, 0xC8000U, 0x1000U },
+    { MSR_IA32_MTRR_FIX4K_D0000, 0xD0000U, 0x1000U },
+    { MSR_IA32_MTRR_FIX4K_D8000, 0xD8000U, 0x1000U },
+    { MSR_IA32_MTRR_FIX4K_E0000, 0xE0000U, 0x1000U },
+    { MSR_IA32_MTRR_FIX4K_E8000, 0xE8000U, 0x1000U },
+    { MSR_IA32_MTRR_FIX4K_F0000, 0xF0000U, 0x1000U },
+    { MSR_IA32_MTRR_FIX4K_F8000, 0xF8000U, 0x1000U },
 };
 
-int is_fixed_range_mtrr(uint32_t msr)
+static bool is_fixed_range_mtrr(uint32_t msr)
 {
     return (msr >= fixed_mtrr_map[0].msr)
-        && (msr <= fixed_mtrr_map[FIXED_RANGE_MTRR_NUM - 1].msr);
+        && (msr <= fixed_mtrr_map[FIXED_RANGE_MTRR_NUM - 1U].msr);
 }
 
-static int get_index_of_fixed_mtrr(uint32_t msr)
+static uint32_t get_index_of_fixed_mtrr(uint32_t msr)
 {
-    int i;
+    uint32_t i;
 
-    for (i = 0; i < FIXED_RANGE_MTRR_NUM; i++) {
+    for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
         if (fixed_mtrr_map[i].msr == msr)
             break;
     }
     return i;
 }
 
-int get_subrange_size_of_fixed_mtrr(int subrange_id)
+static uint32_t
+get_subrange_size_of_fixed_mtrr(uint32_t subrange_id)
 {
     return fixed_mtrr_map[subrange_id].sub_range_size;
 }
 
-int get_subrange_start_of_fixed_mtrr(int index, int subrange_id)
+static uint32_t
+get_subrange_start_of_fixed_mtrr(uint32_t index, uint32_t subrange_id)
 {
     return (fixed_mtrr_map[index].start + subrange_id *
         get_subrange_size_of_fixed_mtrr(index));
 }
 
-int get_subrange_end_of_fixed_mtrr(int index, int subrange_id)
+static uint32_t
+get_subrange_end_of_fixed_mtrr(uint32_t index, uint32_t subrange_id)
 {
     return (get_subrange_start_of_fixed_mtrr(index, subrange_id) +
-        get_subrange_size_of_fixed_mtrr(index) - 1);
+        get_subrange_size_of_fixed_mtrr(index) - 1U);
 }
 
 static inline bool is_mtrr_enabled(struct vcpu *vcpu)
@@ -90,23 +93,23 @@ static inline uint8_t get_default_memory_type(struct vcpu *vcpu)
 void init_mtrr(struct vcpu *vcpu)
 {
     union mtrr_cap_reg cap = {0};
-    int i;
+    uint32_t i;
 
     /*
      * We emulate fixed range MTRRs only
      * And expecting the guests won't write variable MTRRs
      * since MTRRCap.vcnt is 0
      */
-    vcpu->mtrr.cap.bits.vcnt = 0;
-    vcpu->mtrr.cap.bits.fix = 1;
-    vcpu->mtrr.def_type.bits.enable = 1;
-    vcpu->mtrr.def_type.bits.fixed_enable = 1;
+    vcpu->mtrr.cap.bits.vcnt = 0U;
+    vcpu->mtrr.cap.bits.fix = 1U;
+    vcpu->mtrr.def_type.bits.enable = 1U;
+    vcpu->mtrr.def_type.bits.fixed_enable = 1U;
     vcpu->mtrr.def_type.bits.type = MTRR_MEM_TYPE_UC;
 
     if (is_vm0(vcpu->vm))
         cap.value = msr_read(MSR_IA32_MTRR_CAP);
 
-    for (i = 0; i < FIXED_RANGE_MTRR_NUM; i++) {
+    for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
         if (cap.bits.fix) {
             /*
              * The system firmware runs in VMX non-root mode on VM0.
@@ -124,18 +127,18 @@ void init_mtrr(struct vcpu *vcpu)
             vcpu->mtrr.fixed_range[i].value = MTRR_FIXED_RANGE_ALL_WB;
         }
 
-        pr_dbg("vm%d vcpu%hu fixed-range MTRR[%d]: %16llx",
+        pr_dbg("vm%d vcpu%hu fixed-range MTRR[%u]: %16llx",
             vcpu->vm->attr.id, vcpu->vcpu_id, i,
             vcpu->mtrr.fixed_range[i].value);
     }
 }
 
 static uint32_t update_ept(struct vm *vm, uint64_t start,
-    uint64_t size, uint32_t type)
+    uint64_t size, uint8_t type)
 {
     uint64_t attr;
 
-    switch (type) {
+    switch ((uint64_t)type) {
     case MTRR_MEM_TYPE_WC:
         attr = IA32E_EPT_WC;
         break;
@@ -159,9 +162,9 @@ static uint32_t update_ept(struct vm *vm, uint64_t start,
 
 static void update_ept_mem_type(struct vcpu *vcpu)
 {
-    uint32_t type;
+    uint8_t type;
     uint64_t start, size;
-    int i, j;
+    uint32_t i, j;
 
     /*
      * Intel SDM, Vol 3, 11.11.2.1 Section "IA32_MTRR_DEF_TYPE MSR":
@@ -169,18 +172,18 @@ static void update_ept_mem_type(struct vcpu *vcpu)
      * - when def_type.FE is clear, MTRRdefType.type is applied
      */
    if (!is_mtrr_enabled(vcpu) || !is_fixed_range_mtrr_enabled(vcpu)) {
-        update_ept(vcpu->vm, 0, MAX_FIXED_RANGE_ADDR,
+        update_ept(vcpu->vm, 0U, MAX_FIXED_RANGE_ADDR,
             get_default_memory_type(vcpu));
         return;
     }
 
     /* Deal with fixed-range MTRRs only */
-    for (i = 0; i < FIXED_RANGE_MTRR_NUM; i++) {
+    for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
         type = vcpu->mtrr.fixed_range[i].type[0];
-        start = get_subrange_start_of_fixed_mtrr(i, 0);
+        start = get_subrange_start_of_fixed_mtrr(i, 0U);
         size = get_subrange_size_of_fixed_mtrr(i);
 
-        for (j = 1; j < MTRR_SUB_RANGE_NUM; j++) {
+        for (j = 1U; j < MTRR_SUB_RANGE_NUM; j++) {
             /* If it's same type, combine the subrange together */
             if (type == vcpu->mtrr.fixed_range[i].type[j]) {
                 size += get_subrange_size_of_fixed_mtrr(i);
@@ -236,7 +239,7 @@ void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value)
 uint64_t mtrr_rdmsr(struct vcpu *vcpu, uint32_t msr)
 {
     struct mtrr_state *mtrr = &vcpu->mtrr;
-    uint64_t ret = 0;
+    uint64_t ret = 0UL;
 
     if (msr == MSR_IA32_MTRR_CAP)
         ret = mtrr->cap.value;
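As a side note on the fixed-range table above: each of the 11 MSRs covers eight subranges of sub_range_size bytes, and together they tile the first megabyte exactly, which is what get_subrange_start/end rely on. A quick standalone check of that arithmetic (a hypothetical test, mirroring the start/size pairs from the hunk):

    #include <assert.h>
    #include <stdint.h>

    #define FIXED_RANGE_MTRR_NUM 11U
    #define MTRR_SUB_RANGE_NUM 8U

    /* start/size pairs copied from fixed_mtrr_map above (MSR ids omitted). */
    static const struct {
        uint32_t start;
        uint32_t sub_range_size;
    } map[FIXED_RANGE_MTRR_NUM] = {
        { 0x0U, 0x10000U },    { 0x80000U, 0x4000U }, { 0xA0000U, 0x4000U },
        { 0xC0000U, 0x1000U }, { 0xC8000U, 0x1000U }, { 0xD0000U, 0x1000U },
        { 0xD8000U, 0x1000U }, { 0xE0000U, 0x1000U }, { 0xE8000U, 0x1000U },
        { 0xF0000U, 0x1000U }, { 0xF8000U, 0x1000U },
    };

    int main(void)
    {
        uint32_t addr = 0U;
        uint32_t i;

        for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
            /* Each MSR begins exactly where the previous one ended... */
            assert(map[i].start == addr);
            /* ...and covers eight subranges of sub_range_size bytes. */
            addr = map[i].start + MTRR_SUB_RANGE_NUM * map[i].sub_range_size;
        }
        assert(addr == 0x100000U); /* == MAX_FIXED_RANGE_ADDR */
        return 0;
    }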
@@ -6,10 +6,10 @@
 
 #include <hypervisor.h>
 
-#define MAX_TIMER_ACTIONS 32
-#define TIMER_IRQ (NR_IRQS - 1)
-#define CAL_MS 10
-#define MIN_TIMER_PERIOD_US 500
+#define MAX_TIMER_ACTIONS 32U
+#define TIMER_IRQ (NR_IRQS - 1U)
+#define CAL_MS 10U
+#define MIN_TIMER_PERIOD_US 500U
 
 uint32_t tsc_khz = 0U;
 
@@ -160,7 +160,7 @@ void timer_init(void)
     char name[32] = {0};
     uint16_t pcpu_id = get_cpu_id();
 
-    snprintf(name, 32, "timer_tick[%d]", pcpu_id);
+    snprintf(name, 32, "timer_tick[%hu]", pcpu_id);
     if (request_timer_irq(pcpu_id, tsc_deadline_handler, NULL, name) < 0) {
         pr_err("Timer setup failed");
         return;
@@ -232,7 +232,7 @@ void check_tsc(void)
 
 static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
 {
-#define PIT_TICK_RATE 1193182UL
+#define PIT_TICK_RATE 1193182U
 #define PIT_TARGET 0x3FFFU
 #define PIT_MAX_COUNT 0xFFFFU
 
@@ -254,9 +254,9 @@ static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
      * Read/Write least significant byte first, mode 0, 16 bits.
      */
 
-    io_write_byte(0x30, 0x43);
-    io_write_byte(initial_pit & 0x00ffU, 0x40); /* Write LSB */
-    io_write_byte(initial_pit >> 8, 0x40); /* Write MSB */
+    io_write_byte(0x30U, 0x43U);
+    io_write_byte(initial_pit & 0x00ffU, 0x40U); /* Write LSB */
+    io_write_byte(initial_pit >> 8U, 0x40U); /* Write MSB */
 
     current_tsc = rdtsc();
 
@@ -264,10 +264,10 @@ static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
         /* Port 0x43 ==> Control word write; 0x00 ==> Select
          * Counter 0, Counter Latch Command, Mode 0; 16 bits
          */
-        io_write_byte(0x00, 0x43);
+        io_write_byte(0x00U, 0x43U);
 
-        current_pit = io_read_byte(0x40); /* Read LSB */
-        current_pit |= io_read_byte(0x40) << 8; /* Read MSB */
+        current_pit = io_read_byte(0x40U); /* Read LSB */
+        current_pit |= io_read_byte(0x40U) << 8U; /* Read MSB */
         /* Let the counter count down to PIT_TARGET */
     } while (current_pit > PIT_TARGET);
 
@@ -284,7 +284,7 @@ static uint64_t native_calibrate_tsc(void)
     if (boot_cpu_data.cpuid_level >= 0x15U) {
         uint32_t eax_denominator, ebx_numerator, ecx_hz, reserved;
 
-        cpuid(0x15, &eax_denominator, &ebx_numerator,
+        cpuid(0x15U, &eax_denominator, &ebx_numerator,
             &ecx_hz, &reserved);
 
         if (eax_denominator != 0U && ebx_numerator != 0U) {
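For the calibration logic above: the PIT counts down at a fixed 1.193182 MHz, so the TSC frequency falls out of the ratio between elapsed TSC cycles and elapsed PIT ticks. A user-space rehearsal of just the arithmetic (hypothetical helper; the real pit_calibrate_tsc() programs counter 0 through ports 0x43/0x40 and polls it down to PIT_TARGET):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PIT_TICK_RATE 1193182U /* PIT input clock, Hz */

    /* elapsed_s = pit_ticks / PIT_TICK_RATE, so
     * freq_hz = delta_tsc * PIT_TICK_RATE / pit_ticks. */
    static uint64_t tsc_khz_from_pit(uint64_t delta_tsc, uint32_t pit_ticks)
    {
        return ((delta_tsc * PIT_TICK_RATE) / pit_ticks) / 1000UL;
    }

    int main(void)
    {
        /* ~10 ms on a 2.4 GHz TSC: 24,000,000 cycles, ~11,932 PIT ticks. */
        printf("tsc_khz = %" PRIu64 "\n",
               tsc_khz_from_pit(24000000UL, 11932U)); /* ~2400000 */
        return 0;
    }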
@@ -146,8 +146,8 @@ int vmexit_handler(struct vcpu *vcpu)
     if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
         uint32_t vector_info = vcpu->arch_vcpu.idt_vectoring_info;
         uint32_t vector = vector_info & 0xffU;
-        uint32_t type = (vector_info & VMX_INT_TYPE_MASK) >> 8;
-        uint32_t err_code = 0;
+        uint32_t type = (vector_info & VMX_INT_TYPE_MASK) >> 8U;
+        uint32_t err_code = 0U;
 
         if (type == VMX_INT_TYPE_HW_EXP) {
             if ((vector_info & VMX_INT_INFO_ERR_CODE_VALID) != 0U)
@@ -229,7 +229,7 @@ int cpuid_vmexit_handler(struct vcpu *vcpu)
         (uint32_t *)&cur_context->guest_cpu_regs.regs.rcx,
         (uint32_t *)&cur_context->guest_cpu_regs.regs.rdx);
 
-    TRACE_2L(TRACE_VMEXIT_CPUID, vcpu->vcpu_id, 0UL);
+    TRACE_2L(TRACE_VMEXIT_CPUID, (uint64_t)vcpu->vcpu_id, 0UL);
 
     return 0;
 }
@@ -307,7 +307,7 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
     struct run_context *ctx_ptr;
 
     val64 = exec_vmread(VMX_GUEST_CR4);
-    if ((val64 & CR4_OSXSAVE) == 0U) {
+    if ((val64 & CR4_OSXSAVE) == 0UL) {
         vcpu_inject_gp(vcpu, 0U);
         return 0;
     }
@@ -326,7 +326,7 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
     }
 
     val64 = ((ctx_ptr->guest_cpu_regs.regs.rax) & 0xffffffffUL) |
-        (ctx_ptr->guest_cpu_regs.regs.rdx << 32UL);
+        (ctx_ptr->guest_cpu_regs.regs.rdx << 32U);
 
     /*bit 0(x87 state) of XCR0 can't be cleared*/
     if ((val64 & 0x01UL) == 0UL) {
@@ -338,7 +338,7 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
      *set to 10b as it is necessary to set both bits
      *to use AVX instructions.
      **/
-    if (((val64 >> 1UL) & 0x3UL) == 0x2UL) {
+    if (((val64 >> 1U) & 0x3UL) == 0x2UL) {
         vcpu_inject_gp(vcpu, 0U);
         return 0;
     }
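The xsetbv hunks encode two XCR0 validity rules from the SDM: bit 0 (x87 state) must stay set, and enabling AVX (bit 2) without SSE (bit 1) is invalid. Restated as a standalone predicate (an illustrative helper, not the hypervisor's API):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool xcr0_is_valid(uint64_t xcr0)
    {
        /* Bit 0 (x87 state) can't be cleared. */
        if ((xcr0 & 0x01UL) == 0UL) {
            return false;
        }
        /* Bits 2:1 select SSE/AVX; AVX without SSE (10b) is invalid. */
        if (((xcr0 >> 1U) & 0x3UL) == 0x2UL) {
            return false;
        }
        return true;
    }

    int main(void)
    {
        assert(xcr0_is_valid(0x7UL));  /* x87 + SSE + AVX */
        assert(!xcr0_is_valid(0x6UL)); /* x87 cleared */
        assert(!xcr0_is_valid(0x5UL)); /* AVX without SSE */
        return 0;
    }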
@@ -10,18 +10,18 @@
 extern struct efi_ctx* efi_ctx;
 #endif
 
-#define REAL_MODE_BSP_INIT_CODE_SEL     (0xf000)
-#define REAL_MODE_DATA_SEG_AR           (0x0093)
-#define REAL_MODE_CODE_SEG_AR           (0x009f)
-#define PROTECTED_MODE_DATA_SEG_AR      (0xc093)
-#define PROTECTED_MODE_CODE_SEG_AR      (0xc09b)
+#define REAL_MODE_BSP_INIT_CODE_SEL     (0xf000U)
+#define REAL_MODE_DATA_SEG_AR           (0x0093U)
+#define REAL_MODE_CODE_SEG_AR           (0x009fU)
+#define PROTECTED_MODE_DATA_SEG_AR      (0xc093U)
+#define PROTECTED_MODE_CODE_SEG_AR      (0xc09bU)
 
-static uint32_t cr0_host_mask;
-static uint32_t cr0_always_on_mask;
-static uint32_t cr0_always_off_mask;
-static uint32_t cr4_host_mask;
-static uint32_t cr4_always_on_mask;
-static uint32_t cr4_always_off_mask;
+static uint64_t cr0_host_mask;
+static uint64_t cr0_always_on_mask;
+static uint64_t cr0_always_off_mask;
+static uint64_t cr4_host_mask;
+static uint64_t cr4_always_on_mask;
+static uint64_t cr4_always_off_mask;
 
 static inline int exec_vmxon(void *addr)
 {
@@ -86,7 +86,7 @@ int exec_vmxon_instr(uint16_t pcpu_id)
     struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
 
     /* Allocate page aligned memory for VMXON region */
-    if (per_cpu(vmxon_region_pa, pcpu_id) == 0) {
+    if (per_cpu(vmxon_region_pa, pcpu_id) == 0UL) {
         vmxon_region_va = alloc_page();
     }
     else {
@@ -97,8 +97,8 @@ int exec_vmxon_instr(uint16_t pcpu_id)
     /* Initialize vmxon page with revision id from IA32 VMX BASIC
      * MSR
      */
-    tmp32 = msr_read(MSR_IA32_VMX_BASIC);
-    (void)memcpy_s((uint32_t *) vmxon_region_va, 4, &tmp32, 4);
+    tmp32 = (uint32_t)msr_read(MSR_IA32_VMX_BASIC);
+    (void)memcpy_s((uint32_t *) vmxon_region_va, 4U, (void *)&tmp32, 4U);
 
     /* Turn on CR0.NE and CR4.VMXE */
     CPU_CR_READ(cr0, &tmp64);
@@ -256,7 +256,7 @@ static uint32_t get_cs_access_rights(void)
 
     asm volatile ("movw %%cs, %%ax" : "=a" (sel_value));
     asm volatile ("lar %%eax, %%eax" : "=a" (usable_ar) : "a"(sel_value));
-    usable_ar = usable_ar >> 8;
+    usable_ar = usable_ar >> 8U;
     usable_ar &= 0xf0ffU; /* clear bits 11:8 */
 
     return usable_ar;
@@ -265,7 +265,7 @@ static uint32_t get_cs_access_rights(void)
 static void init_cr0_cr4_host_mask(__unused struct vcpu *vcpu)
 {
     static bool inited = false;
-    uint32_t fixed0, fixed1;
+    uint64_t fixed0, fixed1;
     if (!inited) {
         /* Read the CR0 fixed0 / fixed1 MSR registers */
         fixed0 = msr_read(MSR_IA32_VMX_CR0_FIXED0);
@@ -295,12 +295,12 @@ static void init_cr0_cr4_host_mask(__unused struct vcpu *vcpu)
 
     exec_vmwrite(VMX_CR0_MASK, cr0_host_mask);
     /* Output CR0 mask value */
-    pr_dbg("CR0 mask value: 0x%x", cr0_host_mask);
+    pr_dbg("CR0 mask value: 0x%016llx", cr0_host_mask);
 
 
     exec_vmwrite(VMX_CR4_MASK, cr4_host_mask);
     /* Output CR4 mask value */
-    pr_dbg("CR4 mask value: 0x%x", cr4_host_mask);
+    pr_dbg("CR4 mask value: 0x%016llx", cr4_host_mask);
 }
 
 uint64_t vmx_rdmsr_pat(struct vcpu *vcpu)
@@ -318,14 +318,15 @@ uint64_t vmx_rdmsr_pat(struct vcpu *vcpu)
 
 int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
 {
-    uint32_t field, i;
+    uint32_t i;
+    uint64_t field;
     struct run_context *context =
         &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
 
     for (i = 0U; i < 8U; i++) {
-        field = (value >> (i * 8U)) & 0xffU;
+        field = (value >> (i * 8U)) & 0xffUL;
         if ((PAT_MEM_TYPE_INVALID(field) ||
-                (PAT_FIELD_RSV_BITS & field) != 0U)) {
+                (PAT_FIELD_RSV_BITS & field) != 0UL)) {
             pr_err("invalid guest IA32_PAT: 0x%016llx", value);
             vcpu_inject_gp(vcpu, 0U);
             return 0;
@@ -373,7 +374,7 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
     uint32_t entry_ctrls;
     bool paging_enabled = !!(context->cr0 & CR0_PG);
 
-    if ((cr0 & (cr0_always_off_mask | CR0_RESERVED_MASK)) != 0U) {
+    if ((cr0 & (cr0_always_off_mask | CR0_RESERVED_MASK)) != 0UL) {
         pr_err("Not allow to set always off / reserved bits for CR0");
         vcpu_inject_gp(vcpu, 0U);
         return 0;
@@ -382,9 +383,9 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
     /* TODO: Check all invalid guest statuses according to the change of
      * CR0, and inject a #GP to guest */
 
-    if (((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0U) &&
-        !paging_enabled && ((cr0 & CR0_PG) != 0U)) {
-        if ((context->cr4 & CR4_PAE) == 0U) {
+    if (((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0UL) &&
+        !paging_enabled && ((cr0 & CR0_PG) != 0UL)) {
+        if ((context->cr4 & CR4_PAE) == 0UL) {
             pr_err("Can't enable long mode when PAE disabled");
             vcpu_inject_gp(vcpu, 0U);
             return 0;
@@ -397,8 +398,8 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 
         context->ia32_efer |= MSR_IA32_EFER_LMA_BIT;
         exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
-    } else if (((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0U) &&
-        paging_enabled && ((cr0 & CR0_PG) == 0U)){
+    } else if (((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0UL) &&
+        paging_enabled && ((cr0 & CR0_PG) == 0UL)){
         /* Disable long mode */
         pr_dbg("VMM: Disable long mode");
         entry_ctrls = exec_vmread32(VMX_ENTRY_CONTROLS);
@@ -410,16 +411,16 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
     }
 
     /* If CR0.CD or CR0.NW get changed */
-    if (((context->cr0 ^ cr0) & (CR0_CD | CR0_NW)) != 0U) {
-        if ((cr0 & CR0_CD) == 0U && ((cr0 & CR0_NW) != 0U)) {
+    if (((context->cr0 ^ cr0) & (CR0_CD | CR0_NW)) != 0UL) {
+        if ((cr0 & CR0_CD) == 0UL && ((cr0 & CR0_NW) != 0UL)) {
             pr_err("not allow to set CR0.NW while clearing CR0.CD");
             vcpu_inject_gp(vcpu, 0U);
             return 0;
         }
 
         /* No action if only CR0.NW is changed */
-        if (((context->cr0 ^ cr0) & CR0_CD) != 0U) {
-            if ((cr0 & CR0_CD) != 0U) {
+        if (((context->cr0 ^ cr0) & CR0_CD) != 0UL) {
+            if ((cr0 & CR0_CD) != 0UL) {
                 /*
                  * When the guest requests to set CR0.CD, we don't allow
                  * guest's CR0.CD to be actually set, instead, we write guest
@@ -447,7 +448,7 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
     exec_vmwrite(VMX_CR0_READ_SHADOW, cr0 & 0xFFFFFFFFUL);
     context->cr0 = cr0;
 
-    pr_dbg("VMM: Try to write %08x, allow to write 0x%08x to CR0",
+    pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0",
         cr0, cr0_vmx);
 
     return 0;
@@ -515,7 +516,7 @@ int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
     }
 
     /* Do NOT support nested guest */
-    if ((cr4 & CR4_VMXE) != 0U) {
+    if ((cr4 & CR4_VMXE) != 0UL) {
         pr_err("Nested guest not supported");
         vcpu_inject_gp(vcpu, 0U);
         return 0;
@@ -527,7 +528,7 @@ int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
     exec_vmwrite(VMX_CR4_READ_SHADOW, cr4 & 0xFFFFFFFFUL);
     context->cr4 = cr4;
 
-    pr_dbg("VMM: Try to write %08x, allow to write 0x%08x to CR4",
+    pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR4",
         cr4, cr4_vmx);
 
     return 0;
@@ -574,12 +575,12 @@ static void init_guest_state(struct vcpu *vcpu)
      * checked.
      */
     if (vcpu_mode == CPU_MODE_REAL) {
-        vmx_write_cr4(vcpu, 0);
-        vmx_write_cr3(vcpu, 0);
+        vmx_write_cr4(vcpu, 0UL);
+        vmx_write_cr3(vcpu, 0UL);
         vmx_write_cr0(vcpu, CR0_ET | CR0_NE);
     } else if (vcpu_mode == CPU_MODE_PROTECTED) {
-        vmx_write_cr4(vcpu, 0);
-        vmx_write_cr3(vcpu, 0);
+        vmx_write_cr4(vcpu, 0UL);
+        vmx_write_cr3(vcpu, 0UL);
         vmx_write_cr0(vcpu, CR0_ET | CR0_NE | CR0_PE);
     } else if (vcpu_mode == CPU_MODE_64BIT) {
         vmx_write_cr4(vcpu, CR4_PSE | CR4_PAE | CR4_MCE);
@@ -616,9 +617,9 @@ static void init_guest_state(struct vcpu *vcpu)
      *
      */
     if ((uint64_t)vcpu->entry_addr < 0x100000UL) {
-        sel =((uint64_t)vcpu->entry_addr & 0xFFFF0UL)
-            >> 4UL;
-        base = sel << 4U;
+        sel = (uint16_t)(((uint64_t)vcpu->entry_addr & 0xFFFF0UL)
+            >> 4U);
+        base = (uint64_t)sel << 4U;
     } else {
         /* BSP is initialized with real mode */
         sel = REAL_MODE_BSP_INIT_CODE_SEL;
@@ -639,20 +640,20 @@ static void init_guest_state(struct vcpu *vcpu)
         access = REAL_MODE_CODE_SEG_AR;
     } else if (vcpu_mode == CPU_MODE_PROTECTED) {
         limit = 0xffffffffU;
-        base = 0U;
+        base = 0UL;
         access = PROTECTED_MODE_CODE_SEG_AR;
         sel = 0x10U; /* Linear CS selector in guest init gdt */
     } else {
         HV_ARCH_VMX_GET_CS(sel);
         access = get_cs_access_rights();
         limit = 0xffffffffU;
-        base = 0U;
+        base = 0UL;
     }
 
     /* Selector */
     field = VMX_GUEST_CS_SEL;
     exec_vmwrite16(field, sel);
-    pr_dbg("VMX_GUEST_CS_SEL: 0x%x ", sel);
+    pr_dbg("VMX_GUEST_CS_SEL: 0x%hx ", sel);
 
     /* Limit */
     field = VMX_GUEST_CS_LIMIT;
@@ -708,14 +709,14 @@ static void init_guest_state(struct vcpu *vcpu)
     /* GDTR - Global Descriptor Table */
     if (vcpu_mode == CPU_MODE_REAL) {
         /* Base */
-        base = 0U;
+        base = 0UL;
 
         /* Limit */
-        limit = 0xFFFF;
+        limit = 0xFFFFU;
     } else if (vcpu_mode == CPU_MODE_PROTECTED) {
         base = create_guest_init_gdt(vcpu->vm, &limit);
     } else if (vcpu_mode == CPU_MODE_64BIT) {
-        descriptor_table gdtb = {0, 0};
+        descriptor_table gdtb = {0U, 0UL};
 
         /* Base *//* TODO: Should guest GDTB point to host GDTB ? */
         /* Obtain the current global descriptor table base */
@@ -723,7 +724,7 @@ static void init_guest_state(struct vcpu *vcpu)
 
         value32 = gdtb.limit;
 
-        if (((gdtb.base >> 47) & 0x1UL) != 0UL) {
+        if (((gdtb.base >> 47U) & 0x1UL) != 0UL) {
             gdtb.base |= 0xffff000000000000UL;
         }
 
@@ -736,7 +737,7 @@ static void init_guest_state(struct vcpu *vcpu)
     /* GDTR Base */
     field = VMX_GUEST_GDTR_BASE;
     exec_vmwrite(field, base);
-    pr_dbg("VMX_GUEST_GDTR_BASE: 0x%x ", base);
+    pr_dbg("VMX_GUEST_GDTR_BASE: 0x%016llx ", base);
 
     /* GDTR Limit */
     field = VMX_GUEST_GDTR_LIMIT;
@@ -747,19 +748,19 @@ static void init_guest_state(struct vcpu *vcpu)
     if ((vcpu_mode == CPU_MODE_REAL) ||
         (vcpu_mode == CPU_MODE_PROTECTED)) {
         /* Base */
-        base = 0U;
+        base = 0UL;
 
         /* Limit */
-        limit = 0xFFFF;
+        limit = 0xFFFFU;
     } else if (vcpu_mode == CPU_MODE_64BIT) {
-        descriptor_table idtb = {0, 0};
+        descriptor_table idtb = {0U, 0UL};
 
         /* TODO: Should guest IDTR point to host IDTR ? */
         asm volatile ("sidt %0":"=m"(idtb)::"memory");
         /* Limit */
         limit = idtb.limit;
 
-        if (((idtb.base >> 47) & 0x1UL) != 0UL) {
+        if (((idtb.base >> 47U) & 0x1UL) != 0UL) {
             idtb.base |= 0xffff000000000000UL;
         }
 
@@ -770,7 +771,7 @@ static void init_guest_state(struct vcpu *vcpu)
     /* IDTR Base */
     field = VMX_GUEST_IDTR_BASE;
     exec_vmwrite(field, base);
-    pr_dbg("VMX_GUEST_IDTR_BASE: 0x%x ", base);
+    pr_dbg("VMX_GUEST_IDTR_BASE: 0x%016llx ", base);
 
     /* IDTR Limit */
     field = VMX_GUEST_IDTR_LIMIT;
@@ -789,22 +790,22 @@ static void init_guest_state(struct vcpu *vcpu)
     /***************************************************/
     /* ES, CS, SS, DS, FS, GS */
     /***************************************************/
-    data32_idx = 0x10;
+    data32_idx = 0x10U;
     if (vcpu_mode == CPU_MODE_REAL) {
         es = data32_idx;
         ss = data32_idx;
         ds = data32_idx;
         fs = data32_idx;
         gs = data32_idx;
-        limit = 0xffff;
+        limit = 0xffffU;
 
     } else if (vcpu_mode == CPU_MODE_PROTECTED) {
         /* Linear data segment in guest init gdt */
-        es = 0x18;
-        ss = 0x18;
-        ds = 0x18;
-        fs = 0x18;
-        gs = 0x18;
+        es = 0x18U;
+        ss = 0x18U;
+        ds = 0x18U;
+        fs = 0x18U;
+        gs = 0x18U;
         limit = 0xffffffffU;
     } else if (vcpu_mode == CPU_MODE_64BIT) {
         asm volatile ("movw %%es, %%ax":"=a" (es));
@@ -818,23 +819,23 @@ static void init_guest_state(struct vcpu *vcpu)
     /* Selector */
     field = VMX_GUEST_ES_SEL;
     exec_vmwrite16(field, es);
-    pr_dbg("VMX_GUEST_ES_SEL: 0x%x ", es);
+    pr_dbg("VMX_GUEST_ES_SEL: 0x%hx ", es);
 
     field = VMX_GUEST_SS_SEL;
     exec_vmwrite16(field, ss);
-    pr_dbg("VMX_GUEST_SS_SEL: 0x%x ", ss);
+    pr_dbg("VMX_GUEST_SS_SEL: 0x%hx ", ss);
 
     field = VMX_GUEST_DS_SEL;
     exec_vmwrite16(field, ds);
-    pr_dbg("VMX_GUEST_DS_SEL: 0x%x ", ds);
+    pr_dbg("VMX_GUEST_DS_SEL: 0x%hx ", ds);
 
     field = VMX_GUEST_FS_SEL;
     exec_vmwrite16(field, fs);
-    pr_dbg("VMX_GUEST_FS_SEL: 0x%x ", fs);
+    pr_dbg("VMX_GUEST_FS_SEL: 0x%hx ", fs);
 
     field = VMX_GUEST_GS_SEL;
     exec_vmwrite16(field, gs);
-    pr_dbg("VMX_GUEST_GS_SEL: 0x%x ", gs);
+    pr_dbg("VMX_GUEST_GS_SEL: 0x%hx ", gs);
 
     /* Limit */
     field = VMX_GUEST_ES_LIMIT;
@@ -1011,8 +1012,8 @@ static void init_host_state(__unused struct vcpu *vcpu)
     uint64_t trbase_lo;
     uint64_t trbase_hi;
     uint64_t realtrbase;
-    descriptor_table gdtb = {0, 0};
-    descriptor_table idtb = {0, 0};
+    descriptor_table gdtb = {0U, 0UL};
+    descriptor_table idtb = {0U, 0UL};
     uint16_t tr_sel;
 
     pr_dbg("*********************");
@@ -1059,7 +1060,7 @@ static void init_host_state(__unused struct vcpu *vcpu)
     field = VMX_HOST_TR_SEL;
     asm volatile ("str %%ax":"=a" (tr_sel));
     exec_vmwrite16(field, tr_sel);
-    pr_dbg("VMX_HOST_TR_SEL: 0x%x ", tr_sel);
+    pr_dbg("VMX_HOST_TR_SEL: 0x%hx ", tr_sel);
 
     /******************************************************
      * 32-bit fields
@@ -1072,7 +1073,7 @@ static void init_host_state(__unused struct vcpu *vcpu)
     asm volatile ("sgdt %0":"=m"(gdtb)::"memory");
     value32 = gdtb.limit;
 
-    if (((gdtb.base >> 47) & 0x1UL) != 0UL) {
+    if (((gdtb.base >> 47U) & 0x1UL) != 0UL) {
         gdtb.base |= 0xffff000000000000UL;
     }
 
@@ -1083,7 +1084,7 @@ static void init_host_state(__unused struct vcpu *vcpu)
 
     /* TODO: Should guest TR point to host TR ? */
     trbase = gdtb.base + tr_sel;
-    if (((trbase >> 47) & 0x1UL) != 0UL) {
+    if (((trbase >> 47U) & 0x1UL) != 0UL) {
         trbase |= 0xffff000000000000UL;
    }
 
@@ -1092,25 +1093,25 @@ static void init_host_state(__unused struct vcpu *vcpu)
         ".byte 0x36\n"
         "movq (%%rax),%%rax\n":"=a" (trbase_lo):"0"(trbase)
         );
-    realtrbase = ((trbase_lo >> 16) & (0x0ffffUL)) |
-        (((trbase_lo >> 32) & 0x000000ffUL) << 16) |
-        (((trbase_lo >> 56) & 0xffUL) << 24);
+    realtrbase = ((trbase_lo >> 16U) & (0x0ffffUL)) |
+        (((trbase_lo >> 32U) & 0x000000ffUL) << 16U) |
+        (((trbase_lo >> 56U) & 0xffUL) << 24U);
 
     /* SS segment override for upper32 bits of base in ia32e mode */
     asm volatile ("mov %0,%%rax\n"
         ".byte 0x36\n"
         "movq 8(%%rax),%%rax\n":"=a" (trbase_hi):"0"(trbase));
-    realtrbase = realtrbase | (trbase_hi << 32);
+    realtrbase = realtrbase | (trbase_hi << 32U);
 
     /* Set up host and guest TR base fields */
     field = VMX_HOST_TR_BASE;
     exec_vmwrite(field, realtrbase);
-    pr_dbg("VMX_HOST_TR_BASE: 0x%x ", realtrbase);
+    pr_dbg("VMX_HOST_TR_BASE: 0x%016llx ", realtrbase);
 
     /* Obtain the current interrupt descriptor table base */
     asm volatile ("sidt %0":"=m"(idtb)::"memory");
     /* base */
-    if (((idtb.base >> 47) & 0x1UL) != 0UL) {
+    if (((idtb.base >> 47U) & 0x1UL) != 0UL) {
         idtb.base |= 0xffff000000000000UL;
     }
 
@@ -1142,21 +1143,18 @@ static void init_host_state(__unused struct vcpu *vcpu)
     pr_dbg("Natural-width********");
     /* Set up host CR0 field */
     CPU_CR_READ(cr0, &value);
-    value = (uint32_t) value;
     field = VMX_HOST_CR0;
     exec_vmwrite(field, value);
     pr_dbg("VMX_HOST_CR0: 0x%016llx ", value);
 
     /* Set up host CR3 field */
     CPU_CR_READ(cr3, &value);
-    value = (uint32_t) value;
     field = VMX_HOST_CR3;
     exec_vmwrite(field, value);
     pr_dbg("VMX_HOST_CR3: 0x%016llx ", value);
 
     /* Set up host CR4 field */
     CPU_CR_READ(cr4, &value);
-    value = (uint32_t) value;
     field = VMX_HOST_CR4;
     exec_vmwrite(field, value);
     pr_dbg("VMX_HOST_CR4: 0x%016llx ", value);
@@ -1414,7 +1412,7 @@ static void init_entry_ctrl(__unused struct vcpu *vcpu)
      * on VM entry processor is in IA32e 64 bitmode * Start guest with host
      * IA32_PAT and IA32_EFER
      */
-    value32 = msr_read(MSR_IA32_VMX_ENTRY_CTLS);
+    value32 = (uint32_t)msr_read(MSR_IA32_VMX_ENTRY_CTLS);
     if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
         value32 |= (VMX_ENTRY_CTLS_IA32E_MODE);
     }
@@ -1496,7 +1494,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
     /* Selector */
     field = VMX_GUEST_CS_SEL;
     exec_vmwrite16(field, efi_ctx->cs_sel);
-    pr_dbg("VMX_GUEST_CS_SEL: 0x%x ", efi_ctx->cs_sel);
+    pr_dbg("VMX_GUEST_CS_SEL: 0x%hx ", efi_ctx->cs_sel);
 
     /* Access */
     field = VMX_GUEST_CS_ATTR;
@@ -1505,23 +1503,23 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
 
     field = VMX_GUEST_ES_SEL;
     exec_vmwrite16(field, efi_ctx->es_sel);
-    pr_dbg("VMX_GUEST_ES_SEL: 0x%x ", efi_ctx->es_sel);
+    pr_dbg("VMX_GUEST_ES_SEL: 0x%hx ", efi_ctx->es_sel);
 
     field = VMX_GUEST_SS_SEL;
     exec_vmwrite16(field, efi_ctx->ss_sel);
-    pr_dbg("VMX_GUEST_SS_SEL: 0x%x ", efi_ctx->ss_sel);
+    pr_dbg("VMX_GUEST_SS_SEL: 0x%hx ", efi_ctx->ss_sel);
 
     field = VMX_GUEST_DS_SEL;
     exec_vmwrite16(field, efi_ctx->ds_sel);
-    pr_dbg("VMX_GUEST_DS_SEL: 0x%x ", efi_ctx->ds_sel);
+    pr_dbg("VMX_GUEST_DS_SEL: 0x%hx ", efi_ctx->ds_sel);
 
     field = VMX_GUEST_FS_SEL;
     exec_vmwrite16(field, efi_ctx->fs_sel);
-    pr_dbg("VMX_GUEST_FS_SEL: 0x%x ", efi_ctx->fs_sel);
+    pr_dbg("VMX_GUEST_FS_SEL: 0x%hx ", efi_ctx->fs_sel);
 
     field = VMX_GUEST_GS_SEL;
     exec_vmwrite16(field, efi_ctx->gs_sel);
-    pr_dbg("VMX_GUEST_GS_SEL: 0x%x ", efi_ctx->gs_sel);
+    pr_dbg("VMX_GUEST_GS_SEL: 0x%hx ", efi_ctx->gs_sel);
 
     /* Base */
     field = VMX_GUEST_ES_BASE;
@@ -1572,7 +1570,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
 
 int init_vmcs(struct vcpu *vcpu)
 {
-    uint32_t vmx_rev_id;
+    uint64_t vmx_rev_id;
     int status = 0;
     uint64_t vmcs_pa;
 
@@ -1586,7 +1584,7 @@ int init_vmcs(struct vcpu *vcpu)
 
     /* Obtain the VM Rev ID from HW and populate VMCS page with it */
     vmx_rev_id = msr_read(MSR_IA32_VMX_BASIC);
-    (void)memcpy_s((void *) vcpu->arch_vcpu.vmcs, 4, &vmx_rev_id, 4);
+    (void)memcpy_s(vcpu->arch_vcpu.vmcs, 4U, (void *)&vmx_rev_id, 4U);
 
     /* Execute VMCLEAR on current VMCS */
     vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
@@ -1606,7 +1604,7 @@ int init_vmcs(struct vcpu *vcpu)
     init_exit_ctrl(vcpu);
 
 #ifdef CONFIG_EFI_STUB
-    if (is_vm0(vcpu->vm) && vcpu->pcpu_id == 0) {
+    if (is_vm0(vcpu->vm) && vcpu->pcpu_id == 0U) {
         override_uefi_vmcs(vcpu);
     }
 #endif
@@ -53,21 +53,21 @@
 #define CPU_STACK_ALIGN 16UL
 
 /* CR0 register definitions */
-#define CR0_PG (1U<<31)  /* paging enable */
-#define CR0_CD (1U<<30)  /* cache disable */
-#define CR0_NW (1U<<29)  /* not write through */
-#define CR0_AM (1U<<18)  /* alignment mask */
-#define CR0_WP (1U<<16)  /* write protect */
-#define CR0_NE (1U<<5)   /* numeric error */
-#define CR0_ET (1U<<4)   /* extension type */
-#define CR0_TS (1U<<3)   /* task switched */
-#define CR0_EM (1U<<2)   /* emulation */
-#define CR0_MP (1U<<1)   /* monitor coprocessor */
-#define CR0_PE (1U<<0)   /* protected mode enabled */
+#define CR0_PG (1UL<<31U)  /* paging enable */
+#define CR0_CD (1UL<<30U)  /* cache disable */
+#define CR0_NW (1UL<<29U)  /* not write through */
+#define CR0_AM (1UL<<18U)  /* alignment mask */
+#define CR0_WP (1UL<<16U)  /* write protect */
+#define CR0_NE (1UL<<5U)   /* numeric error */
+#define CR0_ET (1UL<<4U)   /* extension type */
+#define CR0_TS (1UL<<3U)   /* task switched */
+#define CR0_EM (1UL<<2U)   /* emulation */
+#define CR0_MP (1UL<<1U)   /* monitor coprocessor */
+#define CR0_PE (1UL<<0U)   /* protected mode enabled */
 
 /* CR3 register definitions */
-#define CR3_PWT (1U<<3)  /* page-level write through */
-#define CR3_PCD (1U<<4)  /* page-level cache disable */
+#define CR3_PWT (1UL<<3U)  /* page-level write through */
+#define CR3_PCD (1UL<<4U)  /* page-level cache disable */
 
 /* CR4 register definitions */
 #define CR4_VME (1UL<<0) /* virtual 8086 mode extensions */
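Why the CR0_*/CR3_* macros above move from (1U<<n) to (1UL<<nU): these masks are combined with, and complemented against, uint64_t control-register values, and a 32-bit constant zero-extends before the operation, so its complement silently drops mask bits 63:32. A minimal demonstration (illustrative names, LP64 target assumed):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PG_32 (1U << 31)   /* old style: unsigned int */
    #define PG_64 (1UL << 31U) /* new style: unsigned long, 64-bit on LP64 */

    int main(void)
    {
        uint64_t clear32 = ~PG_32; /* ~ evaluated in 32 bits, then widened */
        uint64_t clear64 = ~PG_64; /* ~ evaluated in 64 bits */

        printf("0x%016" PRIx64 "\n", clear32); /* 0x000000007fffffff: bits 63:32 lost */
        printf("0x%016" PRIx64 "\n", clear64); /* 0xffffffff7fffffff */
        return 0;
    }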
@@ -27,9 +27,9 @@
  * bytes.
  * Task State Segment (TSS) selectors are 16 bytes on x86-64 instead of 8 bytes.
  */
-#define X64_SEG_DESC_SIZE (0x8)  /* In long mode SEG Descriptors are 8 bytes */
-#define X64_LDT_DESC_SIZE (0x10) /* In long mode LDT Descriptors are 16 bytes */
-#define X64_TSS_DESC_SIZE (0x10) /* In long mode TSS Descriptors are 16 bytes */
+#define X64_SEG_DESC_SIZE (0x8U)  /* In long mode SEG Descriptors are 8 bytes */
+#define X64_LDT_DESC_SIZE (0x10U) /* In long mode LDT Descriptors are 16 bytes */
+#define X64_TSS_DESC_SIZE (0x10U) /* In long mode TSS Descriptors are 16 bytes */
 
 /*****************************************************************************
  *
@@ -41,13 +41,13 @@
  *
  *****************************************************************************/
 /* Number of global 8 byte segments descriptor(s) */
-#define HOST_GDT_RING0_SEG_SELECTORS (0x3) /* rsvd, code, data */
+#define HOST_GDT_RING0_SEG_SELECTORS (0x3U) /* rsvd, code, data */
 /* Offsets of global 8 byte segment descriptors */
-#define HOST_GDT_RING0_RSVD_SEL (0x0000)
-#define HOST_GDT_RING0_CODE_SEL (0x0008)
-#define HOST_GDT_RING0_DATA_SEL (0x0010)
+#define HOST_GDT_RING0_RSVD_SEL (0x0000U)
+#define HOST_GDT_RING0_CODE_SEL (0x0008U)
+#define HOST_GDT_RING0_DATA_SEL (0x0010U)
 /* Number of global 16 byte LDT descriptor(s) */
-#define HOST_GDT_RING0_TSS_SELECTORS (0x1)
+#define HOST_GDT_RING0_TSS_SELECTORS (0x1U)
 /* One for each CPU in the hypervisor. */
 
 /*****************************************************************************
@@ -492,10 +492,10 @@
 #define MSR_LNC_BIOS_CACHE_AS_RAM 0x000002E0U /* Configure CAR */
 
 /* EFER bits */
-#define MSR_IA32_EFER_SCE_BIT (1U<<0)
-#define MSR_IA32_EFER_LME_BIT (1U<<8)  /* IA32e mode enable */
-#define MSR_IA32_EFER_LMA_BIT (1U<<10) /* IA32e mode active */
-#define MSR_IA32_EFER_NXE_BIT (1U<<11)
+#define MSR_IA32_EFER_SCE_BIT (1UL<<0U)
+#define MSR_IA32_EFER_LME_BIT (1UL<<8U)  /* IA32e mode enable */
+#define MSR_IA32_EFER_LMA_BIT (1UL<<10U) /* IA32e mode active */
+#define MSR_IA32_EFER_NXE_BIT (1UL<<11U)
 
 /* FEATURE CONTROL bits */
 #define MSR_IA32_FEATURE_CONTROL_LOCK (1U<<0)
@@ -503,12 +503,12 @@
 #define MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX (1U<<2)
 
 /* PAT memory type definitions */
-#define PAT_MEM_TYPE_UC  0x00U  /* uncached */
-#define PAT_MEM_TYPE_WC  0x01U  /* write combining */
-#define PAT_MEM_TYPE_WT  0x04U  /* write through */
-#define PAT_MEM_TYPE_WP  0x05U  /* write protected */
-#define PAT_MEM_TYPE_WB  0x06U  /* writeback */
-#define PAT_MEM_TYPE_UCM 0x07U  /* uncached minus */
+#define PAT_MEM_TYPE_UC  0x00UL /* uncached */
+#define PAT_MEM_TYPE_WC  0x01UL /* write combining */
+#define PAT_MEM_TYPE_WT  0x04UL /* write through */
+#define PAT_MEM_TYPE_WP  0x05UL /* write protected */
+#define PAT_MEM_TYPE_WB  0x06UL /* writeback */
+#define PAT_MEM_TYPE_UCM 0x07UL /* uncached minus */
 #define PAT_MEM_TYPE_INVALID(x) (((x) != PAT_MEM_TYPE_UC) && \
         ((x) != PAT_MEM_TYPE_WC) && \
         ((x) != PAT_MEM_TYPE_WT) && \
@@ -520,29 +520,29 @@
 #define PAT_FIELD_RSV_BITS (0xF8U)
 
 #define PAT_POWER_ON_VALUE (PAT_MEM_TYPE_WB + \
-        ((uint64_t)PAT_MEM_TYPE_WT << 8) + \
-        ((uint64_t)PAT_MEM_TYPE_UCM << 16) + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 24) + \
-        ((uint64_t)PAT_MEM_TYPE_WB << 32) + \
-        ((uint64_t)PAT_MEM_TYPE_WT << 40) + \
-        ((uint64_t)PAT_MEM_TYPE_UCM << 48) + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 56))
+        (PAT_MEM_TYPE_WT << 8U) + \
+        (PAT_MEM_TYPE_UCM << 16U) + \
+        (PAT_MEM_TYPE_UC << 24U) + \
+        (PAT_MEM_TYPE_WB << 32U) + \
+        (PAT_MEM_TYPE_WT << 40U) + \
+        (PAT_MEM_TYPE_UCM << 48U) + \
+        (PAT_MEM_TYPE_UC << 56U))
 
 #define PAT_ALL_UC_VALUE (PAT_MEM_TYPE_UC + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 8) + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 16) + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 24) + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 32) + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 40) + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 48) + \
-        ((uint64_t)PAT_MEM_TYPE_UC << 56))
+        (PAT_MEM_TYPE_UC << 8U) + \
+        (PAT_MEM_TYPE_UC << 16U) + \
+        (PAT_MEM_TYPE_UC << 24U) + \
+        (PAT_MEM_TYPE_UC << 32U) + \
+        (PAT_MEM_TYPE_UC << 40U) + \
+        (PAT_MEM_TYPE_UC << 48U) + \
+        (PAT_MEM_TYPE_UC << 56U))
 
 /* MTRR memory type definitions */
-#define MTRR_MEM_TYPE_UC 0x00U  /* uncached */
-#define MTRR_MEM_TYPE_WC 0x01U  /* write combining */
-#define MTRR_MEM_TYPE_WT 0x04U  /* write through */
-#define MTRR_MEM_TYPE_WP 0x05U  /* write protected */
-#define MTRR_MEM_TYPE_WB 0x06U  /* writeback */
+#define MTRR_MEM_TYPE_UC 0x00UL /* uncached */
+#define MTRR_MEM_TYPE_WC 0x01UL /* write combining */
+#define MTRR_MEM_TYPE_WT 0x04UL /* write through */
+#define MTRR_MEM_TYPE_WP 0x05UL /* write protected */
+#define MTRR_MEM_TYPE_WB 0x06UL /* writeback */
 
 /* misc. MTRR flag definitions */
 #define MTRR_ENABLE 0x800U /* MTRR enable */
@@ -6,8 +6,8 @@
 #ifndef MTRR_H
 #define MTRR_H
 
-#define FIXED_RANGE_MTRR_NUM 11
-#define MTRR_SUB_RANGE_NUM 8
+#define FIXED_RANGE_MTRR_NUM 11U
+#define MTRR_SUB_RANGE_NUM 8U
 
 union mtrr_cap_reg {
     uint64_t value;