HV: enable SMEP in hypervisor

- this patch enables SMEP in the hypervisor. SMEP protects
   guests' memory from supervisor-mode instruction fetches;
   in other words, the hypervisor, which operates in supervisor
   mode, cannot fetch instructions from linear addresses
   (in guests' memory) that are accessible in user mode.

Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
This commit is contained in:
Yonghua Huang 2018-06-14 18:22:51 +08:00 committed by Jack Ren
parent b2b49a64a9
commit 098c2e6788
4 changed files with 40 additions and 7 deletions

View File

@ -495,6 +495,8 @@ void bsp_boot_init(void)
pr_fatal("Please apply the latest CPU uCode patch!");
}
enable_smep();
/* Initialize the shell */
shell_init();
@ -546,6 +548,9 @@ void cpu_secondary_init(void)
* primary/boot CPU
*/
enable_paging(get_paging_pml4());
enable_smep();
early_init_lapic();
/* Find the logical ID of this CPU given the LAPIC ID

View File

@ -491,7 +491,8 @@ static int get_table_entry(void *addr, void *table_base,
}
static void *walk_paging_struct(void *addr, void *table_base,
uint32_t table_level, struct map_params *map_params)
uint32_t table_level, struct map_params *map_params,
uint64_t attr)
{
uint32_t table_offset;
uint64_t table_entry;
@ -523,7 +524,7 @@ static void *walk_paging_struct(void *addr, void *table_base,
IA32E_EPT_X_BIT);
} else {
/* Set table preset bits to P bit or r/w bit */
entry_present = (IA32E_COMM_P_BIT | IA32E_COMM_RW_BIT);
entry_present = IA32E_COMM_P_BIT;
}
/* Determine if a valid entry exists */
@ -544,6 +545,9 @@ static void *walk_paging_struct(void *addr, void *table_base,
/* Write entry to current table to reference the new
* sub-table
*/
if (map_params->page_table_type == PTT_HOST)
entry_present |= attr;
MEM_WRITE64(table_base + table_offset,
HVA2HPA(sub_table_addr) | entry_present);
} else {
@ -573,6 +577,16 @@ void enable_paging(uint64_t pml4_base_addr)
CPU_CR_WRITE(cr3, pml4_base_addr);
}
void enable_smep(void)
{
	uint64_t cr4_value = 0UL;

	/* Turn on Supervisor Mode Execution Prevention: with CR4.SMEP
	 * set, the CPU faults on supervisor-mode instruction fetches
	 * from user-accessible (U/S = 1) pages, so the hypervisor
	 * cannot execute code out of guest-owned memory.
	 */
	CPU_CR_READ(cr4, &cr4_value);
	cr4_value |= CR4_SMEP;
	CPU_CR_WRITE(cr4, cr4_value);
}
void init_paging(void)
{
struct map_params map_params;
@ -581,10 +595,12 @@ void init_paging(void)
int attr_wb = (MMU_MEM_ATTR_READ |
MMU_MEM_ATTR_WRITE |
MMU_MEM_ATTR_EXECUTE |
MMU_MEM_ATTR_USER |
MMU_MEM_ATTR_WB_CACHE);
int attr_uc = (MMU_MEM_ATTR_READ |
MMU_MEM_ATTR_WRITE |
MMU_MEM_ATTR_EXECUTE |
MMU_MEM_ATTR_USER |
MMU_MEM_ATTR_UNCACHED);
pr_dbg("HV MMU Initialization");
@ -616,6 +632,13 @@ void init_paging(void)
}
}
/* set the paging-structure entries' U/S flag
 * to supervisor-mode for hypervisor-owned memory.
 */
modify_mem(&map_params, (void *)CONFIG_RAM_START,
(void *)CONFIG_RAM_START,
CONFIG_RAM_SIZE, attr_wb & (~MMU_MEM_ATTR_USER));
pr_dbg("Enabling MMU ");
/* Enable paging */
@ -689,6 +712,9 @@ uint64_t config_page_table_attr(struct map_params *map_params, uint32_t flags)
? IA32E_EPT_X_BIT : 0);
}
if ((table_type == PTT_HOST) && (flags & MMU_MEM_ATTR_USER))
attr |= MMU_MEM_ATTR_BIT_USER_ACCESSIBLE;
/* EPT & VT-d share the same page tables, set SNP bit
* to force snooping of PCIe devices if the page
* is cachable
@ -857,7 +883,7 @@ static uint64_t update_page_table_entry(struct map_params *map_params,
/* Walk from the PML4 table to the PDPT table */
table_addr = walk_paging_struct(vaddr, table_addr, IA32E_PML4,
map_params);
map_params, attr);
if (table_addr == NULL)
return 0;
@ -874,7 +900,7 @@ static uint64_t update_page_table_entry(struct map_params *map_params,
&& (MEM_ALIGNED_CHECK(paddr, MEM_2M))) {
/* Walk from the PDPT table to the PD table */
table_addr = walk_paging_struct(vaddr, table_addr,
IA32E_PDPT, map_params);
IA32E_PDPT, map_params, attr);
if (table_addr == NULL)
return 0;
/* Map this 2 MByte memory region */
@ -884,12 +910,12 @@ static uint64_t update_page_table_entry(struct map_params *map_params,
} else {
/* Walk from the PDPT table to the PD table */
table_addr = walk_paging_struct(vaddr,
table_addr, IA32E_PDPT, map_params);
table_addr, IA32E_PDPT, map_params, attr);
if (table_addr == NULL)
return 0;
/* Walk from the PD table to the page table */
table_addr = walk_paging_struct(vaddr,
table_addr, IA32E_PD, map_params);
table_addr, IA32E_PD, map_params, attr);
if (table_addr == NULL)
return 0;
/* Map this 4 KByte memory region */
@ -1166,4 +1192,3 @@ int modify_mem_mt(struct map_params *map_params, void *paddr, void *vaddr,
}
return ret;
}

View File

@ -84,6 +84,8 @@
#define CR4_SMXE (1<<14) /* SMX enable */
#define CR4_PCIDE (1<<17) /* PCID enable */
#define CR4_OSXSAVE (1<<18)
#define CR4_SMEP (1<<20)
#define CR4_SMAP (1<<21)
/* XSAVE and Processor Extended States enable bit */

View File

@ -299,6 +299,7 @@ bool check_mmu_1gb_support(int page_table_type);
void *alloc_paging_struct(void);
void free_paging_struct(void *ptr);
void enable_paging(uint64_t pml4_base_addr);
void enable_smep(void);
void init_paging(void);
int map_mem(struct map_params *map_params, void *paddr, void *vaddr,
uint64_t size, uint32_t flags);