hv: fixup addresses in the c code for relocation
- Trampoline code doesn't share the relocation delta of the HV image, so
  trampoline symbols referenced from HV code need to be patched manually.
- Replace all references to CONFIG_RAM_START with the actual HV load address.

Signed-off-by: Zheng Gen <gen.zheng@intel.com>
Signed-off-by: Zide Chen <zide.chen@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Commit 92cd2612fc (parent bc8b3a40f8)
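For orientation before the hunks below: the fixups rely on a small set of helpers (trampoline_relo_addr(), get_hv_image_base(), get_hv_image_delta()) declared in reloc.h. A minimal sketch of how they relate, inferred from the way they are used in this commit rather than copied from the tree, looks like this:

/*
 * Sketch only -- inferred from how the helpers are used in this commit,
 * not the actual reloc.c implementation.  CONFIG_RAM_START is the
 * link-time base address from the build config.
 */
extern uint64_t get_hv_image_delta(void);   /* runtime load addr - link addr */

/* Actual physical address the HV image was loaded at. */
uint64_t get_hv_image_base(void)
{
	return get_hv_image_delta() + CONFIG_RAM_START;
}

/*
 * The address of a trampoline symbol, as seen by relocated HV code,
 * includes the HV relocation delta.  The trampoline blob copied to
 * dest_pa does not, so strip the delta to recover the symbol's offset
 * within the trampoline; update_trampoline_code_refs() then adds
 * dest_pa to that offset.
 */
uint64_t trampoline_relo_addr(void *addr)
{
	return (uint64_t)addr - get_hv_image_delta();
}

With these, the pattern HPA2HVA(dest_pa + trampoline_relo_addr(sym)) used throughout the first hunk resolves a trampoline symbol to its copy at the trampoline's runtime location.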
@@ -7,6 +7,7 @@
 #include <hypervisor.h>
 #include <schedule.h>
 #include <version.h>
+#include <reloc.h>
 
 #ifdef CONFIG_EFI_STUB
 #include <acrn_efi.h>
@@ -662,39 +663,47 @@ static void update_trampoline_code_refs(uint64_t dest_pa)
 	 * trampoline code starts in real mode,
 	 * so the target addres is HPA
 	 */
-	val = dest_pa + (uint64_t)trampoline_fixup_target;
+	val = dest_pa + trampoline_relo_addr(trampoline_fixup_target);
 
-	ptr = HPA2HVA(dest_pa + (uint64_t)trampoline_fixup_cs);
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(trampoline_fixup_cs));
 	*(uint16_t *)(ptr) = (uint16_t)((val >> 4) & 0xFFFFU);
 
-	ptr = HPA2HVA(dest_pa + (uint64_t)trampoline_fixup_ip);
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(trampoline_fixup_ip));
 	*(uint16_t *)(ptr) = (uint16_t)(val & 0xfU);
 
 	/* Update temporary page tables */
-	ptr = HPA2HVA(dest_pa + (uint64_t)CPU_Boot_Page_Tables_ptr);
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(CPU_Boot_Page_Tables_ptr));
 	*(uint32_t *)(ptr) += dest_pa;
 
-	ptr = HPA2HVA(dest_pa + (uint64_t)CPU_Boot_Page_Tables_Start);
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(CPU_Boot_Page_Tables_Start));
 	*(uint64_t *)(ptr) += dest_pa;
 
-	ptr = HPA2HVA(dest_pa + (uint64_t)trampoline_pdpt_addr);
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(trampoline_pdpt_addr));
 	for (i = 0; i < 4; i++)
 		*(uint64_t *)(ptr + sizeof(uint64_t) * i) += dest_pa;
 
 	/* update the gdt base pointer with relocated offset */
-	ptr = HPA2HVA(dest_pa + (uint64_t)trampoline_gdt_ptr);
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(trampoline_gdt_ptr));
 	*(uint64_t *)(ptr + 2) += dest_pa;
 
 	/* update trampoline jump pointer with relocated offset */
-	ptr = HPA2HVA(dest_pa + (uint64_t)trampoline_start64_fixup);
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(trampoline_start64_fixup));
 	*(uint32_t *)ptr += (uint32_t)dest_pa;
+
+	/* update trampoline's main entry pointer */
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(main_entry));
+	*(uint64_t *)ptr += get_hv_image_delta();
+
+	/* update trampoline's spinlock pointer */
+	ptr = HPA2HVA(dest_pa + trampoline_relo_addr(trampoline_spinlock_ptr));
+	*(uint64_t *)ptr += get_hv_image_delta();
 }
 
 static uint64_t prepare_trampoline(void)
 {
 	uint64_t size, dest_pa;
 
-	size = (uint64_t)_ld_trampoline_end - (uint64_t)trampoline_start16;
+	size = (uint64_t)_ld_trampoline_end - (uint64_t)_ld_trampoline_start;
 #ifndef CONFIG_EFI_STUB
 	dest_pa = e820_alloc_low_memory(CONFIG_LOW_RAM_SIZE);
 #else
@@ -7,6 +7,7 @@
 #include <hypervisor.h>
 #include <bsp_extern.h>
 #include <multiboot.h>
+#include <reloc.h>
 
 #define ACRN_DBG_GUEST 6
 
@@ -496,7 +497,7 @@ static void rebuild_vm0_e820(void)
 	uint32_t i;
 	uint64_t entry_start;
 	uint64_t entry_end;
-	uint64_t hv_start = CONFIG_RAM_START;
+	uint64_t hv_start = get_hv_image_base();
 	uint64_t hv_end = hv_start + CONFIG_RAM_SIZE;
 	struct e820_entry *entry, new_entry = {0};
 
@@ -577,6 +578,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 			IA32E_EPT_X_BIT |
 			IA32E_EPT_UNCACHED);
 	struct e820_entry *entry;
+	uint64_t hv_hpa;
 
 	rebuild_vm0_e820();
 	dev_dbg(ACRN_DBG_GUEST,
@@ -610,8 +612,8 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 	/* unmap hypervisor itself for safety
 	 * will cause EPT violation if sos accesses hv memory
 	 */
-	ept_mmap(vm, CONFIG_RAM_START, CONFIG_RAM_START,
-			CONFIG_RAM_SIZE, MAP_UNMAP, 0);
+	hv_hpa = get_hv_image_base();
+	ept_mmap(vm, hv_hpa, hv_hpa, CONFIG_RAM_SIZE, MAP_UNMAP, 0);
 	return 0;
 }
 
@@ -28,6 +28,7 @@
  */
 
 #include <hypervisor.h>
+#include <reloc.h>
 
 static void *mmu_pml4_addr;
 
@@ -593,6 +594,7 @@ void init_paging(void)
 {
 	struct map_params map_params;
 	struct e820_entry *entry;
+	uint64_t hv_hpa;
 	uint32_t i;
 	int attr_wb = (MMU_MEM_ATTR_BIT_READ_WRITE |
 			MMU_MEM_ATTR_BIT_USER_ACCESSIBLE |
@@ -632,8 +634,8 @@ void init_paging(void)
 	/* set the paging-structure entries' U/S flag
 	 * to supervisor-mode for hypervisor owned memroy.
 	 */
-	modify_mem(&map_params, (void *)CONFIG_RAM_START,
-			(void *)CONFIG_RAM_START,
+	hv_hpa = get_hv_image_base();
+	modify_mem(&map_params, (void *)hv_hpa, (void *)hv_hpa,
 			CONFIG_RAM_SIZE, attr_wb & (~MMU_MEM_ATTR_BIT_USER_ACCESSIBLE));
 
 	pr_dbg("Enabling MMU ");
@@ -3,6 +3,7 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <hypervisor.h>
+#include <reloc.h>
 
 struct run_context cpu_ctx;
 
@@ -78,7 +79,6 @@ int enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
 	uint32_t pcpu_id;
 	uint64_t pmain_entry_saved;
 	uint32_t guest_wakeup_vec32;
-	uint64_t *pmain_entry;
 
 	/* We assume enter s3 success by default */
 	host_enter_s3_success = 1;
@@ -104,20 +104,14 @@ int enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
 	/* offline all APs */
 	stop_cpus();
-	/* Trampoline code is relocatable now. We have to calculate
-	 * main_entry address with relocation base address
-	 */
-	pmain_entry =
-		(uint64_t *)(HPA2HVA(trampoline_start16_paddr) +
-			(uint64_t) main_entry);
 
 	/* Save default main entry and we will restore it after
 	 * back from S3. So the AP online could jmp to correct
 	 * main entry.
 	 */
-	pmain_entry_saved = *pmain_entry;
+	pmain_entry_saved = read_trampoline_sym(main_entry);
 
 	/* Set the main entry for resume from S3 state */
-	*pmain_entry = (uint64_t)restore_s3_context;
+	write_trampoline_sym(main_entry, (uint64_t)restore_s3_context);
 
 	CPU_IRQ_DISABLE();
 	vmx_off(pcpu_id);
@@ -141,7 +135,7 @@ int enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
 	CPU_IRQ_ENABLE();
 
 	/* restore the default main entry */
-	*pmain_entry = pmain_entry_saved;
+	write_trampoline_sym(main_entry, pmain_entry_saved);
 
 	/* online all APs again */
 	start_cpus();
@@ -10,6 +10,8 @@ extern void _relocate(void);
 extern uint64_t get_hv_image_delta(void);
 extern uint64_t get_hv_image_base(void);
 extern uint64_t trampoline_relo_addr(void *addr);
+extern uint64_t read_trampoline_sym(void *sym);
+extern void write_trampoline_sym(void *sym, uint64_t val);
 
 /* external symbols that are helpful for relocation */
 extern uint8_t _DYNAMIC[];
@@ -131,3 +131,19 @@ void _relocate(void)
 		start = (struct Elf64_Rel *)((char *)start + size);
 	}
 }
+
+uint64_t read_trampoline_sym(void *sym)
+{
+	uint64_t *hva;
+
+	hva = HPA2HVA(trampoline_start16_paddr) + trampoline_relo_addr(sym);
+	return *hva;
+}
+
+void write_trampoline_sym(void *sym, uint64_t val)
+{
+	uint64_t *hva;
+
+	hva = HPA2HVA(trampoline_start16_paddr) + trampoline_relo_addr(sym);
+	*hva = val;
+}
@@ -8,6 +8,7 @@
 #include <schedule.h>
 #include <hypercall.h>
 #include <version.h>
+#include <reloc.h>
 
 #define ACRN_DBG_HYCALL 6
 
@@ -393,7 +394,7 @@ int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)
 int64_t _set_vm_memmap(struct vm *vm, struct vm *target_vm,
 	struct vm_set_memmap *memmap)
 {
-	uint64_t hpa;
+	uint64_t hpa, base_paddr;
 	uint32_t attr, prot;
 
 	if ((memmap->length & 0xFFFUL) != 0UL) {
@@ -406,10 +407,11 @@ int64_t _set_vm_memmap(struct vm *vm, struct vm *target_vm,
 	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x size=0x%x",
 			target_vm->attr.id, memmap->remote_gpa, hpa, memmap->length);
 
-	if (((hpa <= CONFIG_RAM_START) &&
-		(hpa + memmap->length > CONFIG_RAM_START)) ||
-		((hpa >= CONFIG_RAM_START) &&
-		(hpa < CONFIG_RAM_START + CONFIG_RAM_SIZE))) {
+	base_paddr = get_hv_image_base();
+	if (((hpa <= base_paddr) &&
+		(hpa + memmap->length > base_paddr)) ||
+		((hpa >= base_paddr) &&
+		(hpa < base_paddr + CONFIG_RAM_SIZE))) {
 		pr_err("%s: ERROR! overlap the HV memory region.", __func__);
 		return -1;
 	}