hv: l1tf: sanitize mapping for idle EPT

Sanitize the mapping for the idle EPT: instead of zeroing freed or
not-yet-used page-table entries, point them at a dedicated sanitized
page, so that a not-present entry never names an arbitrary host frame
that L1TF speculation could leak.

Tracked-On: #1672
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Author:  Li, Fei1 (committed by lijinxia)
Date:    2018-10-11 18:32:31 +08:00
Commit:  9e39732259 (parent fb68468cfd)

6 changed files with 35 additions and 4 deletions
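
For context: under L1TF, a terminal-fault page-table entry still carries
physical-address bits, and the CPU may speculatively forward L1D data for
that address, so a zeroed (not-present) entry effectively names physical
page 0. The fix below points every idle entry at one dedicated,
hypervisor-owned "sanitized" page instead. What follows is a minimal
user-space model of that mechanism, not ACRN code: hva2hpa() and
set_pgentry() are stubbed with a plain cast and store, and PTRS_PER_PTE
is hard-coded.

/*
 * Minimal user-space model of the sanitize mechanism introduced below.
 * hva2hpa() and set_pgentry() are stubs; none of this is real ACRN code.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_PTE 512UL	/* 4 KiB page / 8-byte entries */

/* one page-aligned page reserved as the target of every idle entry */
static uint8_t sanitized_page[4096] __attribute__((aligned(4096)));

static uint64_t get_sanitized_page(void)
{
	return (uint64_t)(uintptr_t)sanitized_page;	/* identity HVA->HPA stub */
}

static void sanitize_pte_entry(uint64_t *ptep)
{
	*ptep = get_sanitized_page();	/* stand-in for set_pgentry() */
}

static void sanitize_pte(uint64_t *pt_page)
{
	uint64_t i;

	for (i = 0UL; i < PTRS_PER_PTE; i++) {
		sanitize_pte_entry(pt_page + i);
	}
}

int main(void)
{
	/* a freshly allocated page-table page, normally full of stale data */
	static uint64_t pt_page[PTRS_PER_PTE];

	sanitize_pte(pt_page);
	printf("entry 0 frames %#" PRIx64 " (sanitized page at %p)\n",
	       pt_page[0], (void *)sanitized_page);
	return 0;
}

With this in place, whatever a speculative walk reads through an idle
entry is bounded to the contents of sanitized_page, which the hypervisor
controls.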


@@ -93,6 +93,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
 		status = -ENOMEM;
 		goto err;
 	}
+	sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp);

 	/* Only for SOS: Configure VM software information */
 	/* For UOS: This VM software information is configure in DM */


@@ -31,6 +31,7 @@
 #include <reloc.h>

 static void *mmu_pml4_addr;
+static uint8_t sanitized_page[CPU_PAGE_SIZE] __aligned(CPU_PAGE_SIZE);

 static struct vmx_capability {
 	uint32_t ept;
@@ -187,6 +188,24 @@ void invept(const struct vcpu *vcpu)
 	}
 }

+static inline uint64_t get_sanitized_page(void)
+{
+	return hva2hpa(sanitized_page);
+}
+
+void sanitize_pte_entry(uint64_t *ptep)
+{
+	set_pgentry(ptep, get_sanitized_page());
+}
+
+void sanitize_pte(uint64_t *pt_page)
+{
+	uint64_t i;
+	for (i = 0UL; i < PTRS_PER_PTE; i++) {
+		sanitize_pte_entry(pt_page + i);
+	}
+}
+
 uint64_t get_paging_pml4(void)
 {
 	/* Return address to caller */
@@ -255,6 +274,9 @@ void init_paging(void)

 	/* Enable paging */
 	enable_paging(hva2hpa(mmu_pml4_addr));
+
+	/* make each entry in sanitized_page point back to the page itself */
+	sanitize_pte((uint64_t *)sanitized_page);
 }

 void *alloc_paging_struct(void)
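
A note on the init_paging() hunk above: sanitize_pte((uint64_t *)sanitized_page)
makes the sanitized page's own 512 entries point back at the page itself,
so even if a speculative walk treats it as a next-level table, every step
of the walk lands on the same harmless page. A toy, non-ACRN illustration
(flat uint64_t array and identity address mapping are assumptions):

/* Toy illustration of the self-mapped sanitized page; not ACRN code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_PTE 512UL

/* 512 8-byte entries == one 4 KiB page */
static uint64_t sanitized_page[PTRS_PER_PTE] __attribute__((aligned(4096)));

int main(void)
{
	uint64_t self = (uint64_t)(uintptr_t)sanitized_page;
	uint64_t frame;
	int level;

	/* init_paging() equivalent: every entry names the page itself */
	for (uint64_t i = 0UL; i < PTRS_PER_PTE; i++) {
		sanitized_page[i] = self;
	}

	/* simulate a 4-level walk that strays into the sanitized page:
	 * each level follows some entry, and never leaves the page */
	frame = self;
	for (level = 0; level < 4; level++) {
		const uint64_t *table = (const uint64_t *)(uintptr_t)frame;

		frame = table[level];
	}
	printf("walk ends at %#" PRIx64 ", page is at %#" PRIx64 "\n",
	       frame, self);
	return 0;
}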

View File

@@ -65,7 +65,7 @@ static inline void local_modify_or_del_pte(uint64_t *pte,
 		new_pte |= prot_set;
 		set_pgentry(pte, new_pte);
 	} else {
-		set_pgentry(pte, 0);
+		sanitize_pte_entry(pte);
 	}
 }
@@ -80,6 +80,8 @@ static inline int construct_pgentry(enum _page_table_type ptt, uint64_t *pde)
 		return -ENOMEM;
 	}

+	sanitize_pte((uint64_t *)pd_page);
+
 	prot = (ptt == PTT_PRIMARY) ? PAGE_TABLE : EPT_RWX;
 	set_pgentry(pde, hva2hpa(pd_page) | prot);
 	return 0;


@@ -99,6 +99,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	 */
 	pml4_base = alloc_paging_struct();
 	vm->arch_vm.sworld_eptp = pml4_base;
+	sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp);

 	/* The trusty memory is remapped to guest physical address
 	 * of gpa_rebased to gpa_rebased + size
@@ -147,6 +148,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,

 void destroy_secure_world(struct vm *vm, bool need_clr_mem)
 {
+	uint64_t j;
 	void *pdpt_addr;
 	struct vm *vm0 = get_vm_from_vmid(0U);
 	uint64_t hpa = vm->sworld_control.sworld_memory.base_hpa;
@@ -175,9 +177,10 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
 		/* Free trusty ept page-structures */
 		pdpt_addr =
 			(void *)pml4e_page_vaddr(*(uint64_t *)vm->arch_vm.sworld_eptp);
-		/* memset PDPTEs except trusty memory */
-		(void)memset(pdpt_addr, 0UL,
-			NON_TRUSTY_PDPT_ENTRIES * sizeof(uint64_t));
+		/* sanitize PDPTEs except those covering trusty memory */
+		for (j = 0UL; j < NON_TRUSTY_PDPT_ENTRIES; j++) {
+			sanitize_pte_entry((uint64_t *)pdpt_addr + j);
+		}
 		free_ept_mem((uint64_t *)vm->arch_vm.sworld_eptp);
 		vm->arch_vm.sworld_eptp = NULL;
 	}


@@ -69,6 +69,7 @@ static int vdev_pt_init(struct pci_vdev *vdev)
 	if (vm->iommu == NULL) {
 		if (vm->arch_vm.nworld_eptp == 0UL) {
 			vm->arch_vm.nworld_eptp = alloc_paging_struct();
+			sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp);
 		}
 		vm->iommu = create_iommu_domain(vm->vm_id,
 			hva2hpa(vm->arch_vm.nworld_eptp), 48U);


@@ -75,6 +75,8 @@ struct cpu_page {
 	uint8_t contents[CPU_PAGE_SIZE];
 };

+void sanitize_pte_entry(uint64_t *ptep);
+void sanitize_pte(uint64_t *pt_page);
 uint64_t get_paging_pml4(void);
 void *alloc_paging_struct(void);
 void free_paging_struct(void *ptr);
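
Taken together, the sites patched above (create_vm(), construct_pgentry(),
create_secure_world_ept(), vdev_pt_init()) all follow one pattern:
allocate a paging structure, then sanitize it before any entry can be
consumed. A self-contained sketch of that call pattern, with
alloc_paging_struct() stubbed via aligned_alloc() rather than the
hypervisor's allocator:

/* Sketch of the allocate-then-sanitize pattern; allocator is a stub. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define CPU_PAGE_SIZE 4096UL
#define PTRS_PER_PTE  512UL

static uint8_t sanitized_page[CPU_PAGE_SIZE] __attribute__((aligned(4096)));

static void sanitize_pte(uint64_t *pt_page)
{
	uint64_t i;

	for (i = 0UL; i < PTRS_PER_PTE; i++) {
		pt_page[i] = (uint64_t)(uintptr_t)sanitized_page;
	}
}

static void *alloc_paging_struct(void)	/* stub: the real hv allocator differs */
{
	void *page = aligned_alloc(CPU_PAGE_SIZE, CPU_PAGE_SIZE);

	if (page != NULL) {
		(void)memset(page, 0, CPU_PAGE_SIZE);
	}
	return page;
}

int main(void)
{
	/* mirrors the patched sites: allocate, then immediately sanitize,
	 * so no entry is ever left zeroed (naming frame 0) or stale */
	void *eptp = alloc_paging_struct();

	if (eptp != NULL) {
		sanitize_pte((uint64_t *)eptp);
	}
	free(eptp);
	return 0;
}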