diff --git a/hypervisor/arch/x86/guest/ept.c b/hypervisor/arch/x86/guest/ept.c
index 1f57d3096..77e8784da 100644
--- a/hypervisor/arch/x86/guest/ept.c
+++ b/hypervisor/arch/x86/guest/ept.c
@@ -30,21 +30,18 @@ void destroy_ept(struct acrn_vm *vm)
 	}
 }
 
-/* using return value INVALID_HPA as error code */
+/**
+ * @pre vm != NULL
+ */
 uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size)
 {
+	/* using return value INVALID_HPA as error code */
	uint64_t hpa = INVALID_HPA;
 	const uint64_t *pgentry;
 	uint64_t pg_size = 0UL;
 	void *eptp;
-	struct acrn_vcpu *vcpu = vcpu_from_pid(vm, get_pcpu_id());
-
-	if ((vcpu != NULL) && (vcpu->arch.cur_context == SECURE_WORLD)) {
-		eptp = vm->arch_vm.sworld_eptp;
-	} else {
-		eptp = vm->arch_vm.nworld_eptp;
-	}
+	eptp = get_ept_entry(vm);
 
 	pgentry = lookup_address((uint64_t *)eptp, gpa, &pg_size, &vm->arch_vm.ept_mem_ops);
 	if (pgentry != NULL) {
 		hpa = ((*pgentry & (~(pg_size - 1UL)))
@@ -158,3 +155,63 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
 }
+
+/**
+ * @pre vm != NULL
+ */
+void *get_ept_entry(struct acrn_vm *vm)
+{
+	void *eptp;
+	struct acrn_vcpu *vcpu = vcpu_from_pid(vm, get_pcpu_id());
+
+	if ((vcpu != NULL) && (vcpu->arch.cur_context == SECURE_WORLD)) {
+		eptp = vm->arch_vm.sworld_eptp;
+	} else {
+		eptp = vm->arch_vm.nworld_eptp;
+	}
+
+	return eptp;
+}
+
+/**
+ * @pre vm != NULL && cb != NULL
+ */
+void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
+{
+	const struct memory_ops *mem_ops = &vm->arch_vm.ept_mem_ops;
+	uint64_t *pml4e, *pdpte, *pde, *pte;
+	uint64_t i, j, k, m;
+
+	for (i = 0UL; i < PTRS_PER_PML4E; i++) {
+		pml4e = pml4e_offset((uint64_t *)get_ept_entry(vm), i << PML4E_SHIFT);
+		if (mem_ops->pgentry_present(*pml4e) == 0UL) {
+			continue;
+		}
+		for (j = 0UL; j < PTRS_PER_PDPTE; j++) {
+			pdpte = pdpte_offset(pml4e, j << PDPTE_SHIFT);
+			if (mem_ops->pgentry_present(*pdpte) == 0UL) {
+				continue;
+			}
+			if (pdpte_large(*pdpte) != 0UL) {
+				cb(pdpte, PDPTE_SIZE);
+				continue;
+			}
+			for (k = 0UL; k < PTRS_PER_PDE; k++) {
+				pde = pde_offset(pdpte, k << PDE_SHIFT);
+				if (mem_ops->pgentry_present(*pde) == 0UL) {
+					continue;
+				}
+				if (pde_large(*pde) != 0UL) {
+					cb(pde, PDE_SIZE);
+					continue;
+				}
+				for (m = 0UL; m < PTRS_PER_PTE; m++) {
+					pte = pte_offset(pde, m << PTE_SHIFT);
+					if (mem_ops->pgentry_present(*pte) != 0UL) {
+						cb(pte, PTE_SIZE);
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/hypervisor/include/arch/x86/guest/ept.h b/hypervisor/include/arch/x86/guest/ept.h
index 7e239977e..3e2d920da 100644
--- a/hypervisor/include/arch/x86/guest/ept.h
+++ b/hypervisor/include/arch/x86/guest/ept.h
@@ -8,6 +8,8 @@
 #define EPT_H
 #include
 
+typedef void (*pge_handler)(uint64_t *pgentry, uint64_t size);
+
 /**
  * Invalid HPA is defined for error checking,
  * according to SDM vol.3A 4.1.4, the maximum
@@ -104,6 +106,28 @@ void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
 void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
 		uint64_t size);
 
+/**
+ * @brief Get the EPT pointer of the VM
+ *
+ * @param[in] vm pointer to the VM data structure
+ *
+ * @return The secure world EPT pointer if the current context of the VM is
+ *         SECURE_WORLD, otherwise the normal world EPT pointer.
+ */
+void *get_ept_entry(struct acrn_vm *vm);
+
+/**
+ * @brief Walk through the EPT table
+ *
+ * @param[in] vm pointer to the VM data structure
+ * @param[in] cb the callback that will be invoked for each present page entry
+ *               found in the EPT; it receives the page entry and the page size
+ *               as its parameters.
+ *
+ * @return None
+ */
+void walk_ept_table(struct acrn_vm *vm, pge_handler cb);
+
 /**
  * @brief EPT misconfiguration handling
  *
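Usage note (not part of the patch): a minimal sketch of how a caller might use the new walk_ept_table()/pge_handler API, assuming the declarations from ept.h above are in scope. The names count_mapped_page, get_ept_mapped_bytes and mapped_bytes are illustrative assumptions, not code from this change. Because pge_handler takes no caller-supplied context argument, any state the callback needs has to live in file-scope data.

/* Illustrative only: accumulator for the sketch below. */
static uint64_t mapped_bytes;

/* Illustrative pge_handler: tally how much guest memory the EPT maps. */
static void count_mapped_page(uint64_t *pgentry, uint64_t size)
{
	(void)pgentry;		/* the entry contents are not needed here */
	mapped_bytes += size;	/* size is PTE_SIZE, PDE_SIZE or PDPTE_SIZE */
}

/* Illustrative caller: walk_ept_table() invokes the callback once for each
 * present leaf entry in the VM's current EPT. */
static uint64_t get_ept_mapped_bytes(struct acrn_vm *vm)
{
	mapped_bytes = 0UL;
	walk_ept_table(vm, count_mapped_page);
	return mapped_bytes;
}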