hv: enable NX in hypervisor

- enable NX feature in hypervisor (a minimal C sketch of the sequence
  follows this message):
  1. Set the 'XD' bit for all pages, including pages for guests,
     when initializing MMU tables in the hypervisor.
  2. Remove the 'XD' bit for pages that contain hypervisor instructions.
  3. Enable MSR IA32_EFER.NXE, which enables page access restriction by
     preventing instruction fetches from pages with the XD bit set.

- remove "-Wl -z noexecstack" GCC flag option in hypervisor
  Makefile as it would not affect stack attribute in hyervisor,
  which setup stack itself, instead of by loader.
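
For orientation, the steps above reduce to the following sequence. This is a
minimal sketch, not the committed code: it reuses names that appear in the
diffs below (msr_read, msr_write, MSR_IA32_EFER, MSR_IA32_EFER_NXE_BIT,
PAGE_NX, mmu_modify_or_del, ppt_mmu_pml4_addr, ppt_mem_ops, MR_MODIFY), while
hv_text_start and hv_text_size are hypothetical placeholders for the
hypervisor code range.

    /* Step 1: default page attributes carry PAGE_NX (PTE bit 63, 'XD'). */
    uint64_t attr = PAGE_PRESENT | PAGE_RW | PAGE_NX;

    /* Step 2: strip PAGE_NX from the range holding hypervisor code
     * (hv_text_start/hv_text_size are placeholders, see above). */
    mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, hv_text_start,
            hv_text_size, 0UL, PAGE_NX, &ppt_mem_ops, MR_MODIFY);

    /* Step 3: make the XD bits take effect via IA32_EFER.NXE
     * (read-modify-write of the MSR). */
    uint64_t efer = msr_read(MSR_IA32_EFER);
    msr_write(MSR_IA32_EFER, efer | MSR_IA32_EFER_NXE_BIT);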

Tracked-On: #1122
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
Yonghua Huang, 2018-12-04 21:13:43 +08:00 (committed by wenlingz)
parent 405d1335c6
commit 4d13ad9d08
5 changed files with 35 additions and 4 deletions


@@ -76,7 +76,6 @@ ASFLAGS += -m64 -nostdinc -nostdlib
 LDFLAGS += -Wl,--gc-sections -nostartfiles -nostdlib
 LDFLAGS += -Wl,-n,-z,max-page-size=0x1000
-LDFLAGS += -Wl,-z,noexecstack
 
 ifeq (y, $(CONFIG_RELOC))
 # on X86_64, when build with "-pie", GCC fails on linking R_X86_64_32


@@ -107,6 +107,13 @@ trampoline_fixup_target:
     orl     $0x00000100, %eax
     wrmsr
 
+    /* 0xc0000080 = MSR_IA32_EFER */
+    movl    $0xc0000080, %ecx
+    rdmsr
+    /* 0x00000800 = MSR_IA32_EFER_NXE_BIT */
+    orl     $0x00000800, %eax
+    wrmsr
+
     /* Enable paging, protection, numeric error and co-processor
        monitoring in CR0 to enter long mode */
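
The magic numbers in this hunk are architectural constants (Intel SDM Vol. 4);
the earlier "orl $0x00000100, %eax" in the same routine sets EFER.LME. Restated
in C for reference:

    /* IA32_EFER lives at MSR address 0xC0000080. */
    #define MSR_IA32_EFER          0xC0000080U
    #define MSR_IA32_EFER_LME_BIT  (1U << 8)    /* 0x100: long mode enable */
    #define MSR_IA32_EFER_NXE_BIT  (1U << 11)   /* 0x800: no-execute enable */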


@@ -209,11 +209,20 @@ void enable_paging(void)
 {
 	uint64_t tmp64 = 0UL;
 
+	/*
+	 * Enable the MSR IA32_EFER.NXE bit, to prevent
+	 * instruction fetching from pages with the XD bit set.
+	 */
+	tmp64 = msr_read(MSR_IA32_EFER);
+	tmp64 |= MSR_IA32_EFER_NXE_BIT;
+	msr_write(MSR_IA32_EFER, tmp64);
+
 	/* Enable Write Protect, inhibiting writing to read-only pages */
 	CPU_CR_READ(cr0, &tmp64);
 	CPU_CR_WRITE(cr0, tmp64 | CR0_WP);
 
 	CPU_CR_WRITE(cr3, hva2hpa(ppt_mmu_pml4_addr));
 }
 
 void enable_smep(void)
@@ -225,14 +234,13 @@ void enable_smep(void)
 	CPU_CR_WRITE(cr4, val64 | CR4_SMEP);
 }
 
 void init_paging(void)
 {
-	uint64_t hv_hpa;
+	uint64_t hv_hpa, text_end, size;
 	uint32_t i;
 	uint64_t low32_max_ram = 0UL;
 	uint64_t high64_max_ram;
-	uint64_t attr_uc = (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_CACHE_UC);
+	uint64_t attr_uc = (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_CACHE_UC | PAGE_NX);
 
 	const struct e820_entry *entry;
 	uint32_t entries_count = get_e820_entries_count();
@@ -282,6 +290,17 @@ void init_paging(void)
 		CONFIG_HV_RAM_SIZE + (((hv_hpa & (PDE_SIZE - 1UL)) != 0UL) ? PDE_SIZE : 0UL),
 		PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER, &ppt_mem_ops, MR_MODIFY);
 
+	size = ((uint64_t)&ld_text_end - CONFIG_HV_RAM_START);
+	text_end = hv_hpa + size;
+	/* Round 'text_end' up to 2 MB alignment. */
+	text_end = (text_end + PDE_SIZE - 1UL) & PDE_MASK;
+	/*
+	 * Remove the 'NX' bit for pages that contain hv code sections, as by
+	 * default the XD bit is set for all pages, including pages for guests.
+	 */
+	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, hv_hpa & PDE_MASK,
+			text_end - (hv_hpa & PDE_MASK), 0UL, PAGE_NX, &ppt_mem_ops, MR_MODIFY);
+
 	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (uint64_t)get_reserve_sworld_memory_base(),
 			TRUSTY_RAM_SIZE * (CONFIG_MAX_VM_NUM - 1U), PAGE_USER, 0UL, &ppt_mem_ops, MR_MODIFY);
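
The round-up snaps text_end to the next 2 MB large-page boundary so that the
NX-clearing pass operates on whole PDEs. A standalone sketch of the arithmetic,
assuming PDE_SIZE is 0x200000 and PDE_MASK is ~(PDE_SIZE - 1) (consistent with
the ALIGN(0x200000) in the linker-script hunk below); the example addresses are
made up:

    #include <assert.h>
    #include <stdint.h>

    #define PDE_SIZE 0x200000UL             /* assumed: one 2 MB large page */
    #define PDE_MASK (~(PDE_SIZE - 1UL))    /* assumed: 2 MB alignment mask */

    int main(void)
    {
        uint64_t hv_hpa = 0x10000000UL;     /* example load address */
        uint64_t size = 0x234000UL;         /* example text size */
        uint64_t text_end = hv_hpa + size;  /* 0x10234000 */

        /* Round up to the next 2 MB boundary, as the hunk above does. */
        text_end = (text_end + PDE_SIZE - 1UL) & PDE_MASK;
        assert(text_end == 0x10400000UL);

        /* The cleared range then starts and ends on PDE boundaries. */
        assert(((hv_hpa & PDE_MASK) % PDE_SIZE) == 0UL);
        return 0;
    }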


@@ -31,6 +31,10 @@ SECTIONS
 		*(.retpoline_thunk)
 	} > ram
 
+	/* Align the text top boundary to 2 MB. */
+	. = ALIGN(0x200000);
+	ld_text_end = . ;
+
 	.rodata :
 	{
 		*(.rodata*) ;


@@ -53,6 +53,8 @@
 #define IA32E_REF_MASK \
 	(boot_cpu_data.physical_address_mask)
 
+extern uint8_t ld_text_end;
+
 static inline uint64_t round_page_up(uint64_t addr)
 {
 	return (((addr + (uint64_t)PAGE_SIZE) - 1UL) & PAGE_MASK);
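
The extern declaration above is the usual idiom for importing a linker-script
symbol into C: only the symbol's address is meaningful, never its value, so it
is declared as a one-byte object and used via '&'. A small illustration
(hv_text_end_va is a hypothetical helper, not part of the commit):

    #include <stdint.h>

    extern uint8_t ld_text_end;  /* defined in the linker script: ld_text_end = . ; */

    /* Taking the symbol's address yields the link-time end of .text;
     * dereferencing it would just read whatever byte lives there. */
    static inline uint64_t hv_text_end_va(void)
    {
        return (uint64_t)&ld_text_end;
    }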