Mirror of https://github.com/projectacrn/acrn-hypervisor.git
mmu: refine functions for invept
- rename mmu_invept to invept
- panic if HW doesn't support invept

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
parent a68850e00f
commit 878b5a0466
@@ -289,7 +289,7 @@ int acrn_do_intr_process(struct vcpu *vcpu)
 	uint64_t *pending_intr_bits = &vcpu->arch_vcpu.pending_intr;
 
 	if (bitmap_test_and_clear(ACRN_REQUEST_TLB_FLUSH, pending_intr_bits))
-		mmu_invept(vcpu);
+		invept(vcpu);
 
 	if (bitmap_test_and_clear(ACRN_REQUEST_TMR_UPDATE, pending_intr_bits))
 		vioapic_update_tmr(vcpu);
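The hunk above is the consumer side of an EPT flush request: acrn_do_intr_process() clears ACRN_REQUEST_TLB_FLUSH from the vCPU's pending bitmap and calls the renamed invept(). As a rough sketch of the producer side (bitmap_set() and the vcpu layout are assumptions mirroring the hunk, not ACRN's confirmed API), a remote flush could be posted like this:

/* Illustrative producer side of the request consumed above.
 * bitmap_set() is an assumed helper mirroring the bitmap_test_and_clear()
 * call in the hunk; ACRN_REQUEST_TLB_FLUSH and pending_intr come from it. */
static void request_ept_flush(struct vcpu *vcpu)
{
	/* Mark the request; the interrupt-processing path will clear the
	 * bit and call invept() before the vCPU re-enters the guest. */
	bitmap_set(ACRN_REQUEST_TLB_FLUSH, &vcpu->arch_vcpu.pending_intr);
}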
@@ -118,6 +118,9 @@ static void check_mmu_capability(void)
 	cpuid(CPUID_EXTEND_FUNCTION_1, &eax, &ebx, &ecx, &edx);
 	mm_caps.mmu_1gb_page_supported = (edx & CPUID_EDX_PAGE1GB) ?
 					(true) : (false);
+
+	if (!mm_caps.invept_supported)
+		panic("invept must be supported");
 }
 
 static inline bool check_invept_single_support(void)
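For context on the new panic: invept support is normally derived from the IA32_VMX_EPT_VPID_CAP MSR (0x48C). The sketch below shows one way those bits map to capability flags; the bit positions follow the Intel SDM, while msr_read(), probe_invept_caps() and struct mmu_caps are illustrative names rather than ACRN's exact code.

#include <stdint.h>
#include <stdbool.h>

#define MSR_IA32_VMX_EPT_VPID_CAP	0x48CU
#define VMX_EPT_INVEPT			(1UL << 20)	/* INVEPT instruction supported */
#define VMX_EPT_INVEPT_SINGLE_CONTEXT	(1UL << 25)	/* single-context INVEPT type */
#define VMX_EPT_INVEPT_GLOBAL_CONTEXT	(1UL << 26)	/* all-context INVEPT type */

extern uint64_t msr_read(uint32_t msr);	/* assumed MSR accessor */

struct mmu_caps {
	bool invept_supported;
	bool invept_single_context_supported;
	bool invept_global_context_supported;
};

/* Fill the capability flags from IA32_VMX_EPT_VPID_CAP; a caller would
 * then panic when invept_supported is false, as the hunk above does. */
static void probe_invept_caps(struct mmu_caps *caps)
{
	uint64_t val = msr_read(MSR_IA32_VMX_EPT_VPID_CAP);

	caps->invept_supported = (val & VMX_EPT_INVEPT) != 0U;
	caps->invept_single_context_supported =
		(val & VMX_EPT_INVEPT_SINGLE_CONTEXT) != 0U;
	caps->invept_global_context_supported =
		(val & VMX_EPT_INVEPT_GLOBAL_CONTEXT) != 0U;
}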
@@ -132,7 +135,7 @@ static inline bool check_invept_global_support(void)
 			mm_caps.invept_global_context_supported;
 }
 
-void mmu_invept(struct vcpu *vcpu)
+void invept(struct vcpu *vcpu)
 {
 	struct invept_desc desc = {0};
 
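The renamed invept() ultimately issues the INVEPT instruction with a 128-bit descriptor. A minimal sketch of that core step follows; the type constants and descriptor layout are taken from the Intel SDM, and only struct invept_desc itself appears in the hunk, so the field and helper names here are illustrative.

#include <stdint.h>

/* INVEPT types defined by the Intel SDM */
#define INVEPT_TYPE_SINGLE_CONTEXT	1UL	/* flush mappings for one EPTP */
#define INVEPT_TYPE_ALL_CONTEXTS	2UL	/* flush mappings for all EPTPs */

/* 128-bit INVEPT descriptor: EPTP in the low 64 bits, upper half reserved */
struct invept_desc {
	uint64_t eptp;
	uint64_t reserved;
};

static inline void do_invept(uint64_t type, struct invept_desc desc)
{
	/* AT&T operand order: memory descriptor first, type register second */
	asm volatile ("invept %0, %1"
			:
			: "m" (desc), "r" (type)
			: "memory", "cc");
}

A single-context flush passes the vCPU's EPT pointer in desc.eptp with INVEPT_TYPE_SINGLE_CONTEXT; when only the all-contexts type is supported, type 2 with a zeroed descriptor invalidates every EPT context.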
@@ -174,8 +174,8 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	vm->sworld_control.sworld_memory.base_hpa = hpa;
 	vm->sworld_control.sworld_memory.length = size;
 
-	mmu_invept(vm->current_vcpu);
-	mmu_invept(vm0->current_vcpu);
+	invept(vm->current_vcpu);
+	invept(vm0->current_vcpu);
 
 }
 
@@ -324,7 +324,7 @@ int unmap_mem(struct map_params *map_params, void *paddr, void *vaddr,
 		uint64_t size, uint32_t flags);
 int modify_mem(struct map_params *map_params, void *paddr, void *vaddr,
 		uint64_t size, uint32_t flags);
-void mmu_invept(struct vcpu *vcpu);
+void invept(struct vcpu *vcpu);
 bool check_continuous_hpa(struct vm *vm, uint64_t gpa, uint64_t size);
 int obtain_last_page_table_entry(struct map_params *map_params,
 		struct entry_params *entry, void *addr, bool direct);