hv: pgtable: fix 'Use of function like macro'
Convert HPA2HVA, HVA2HPA, GPA2HVA and HVA2GPA to inline functions.

v1 -> v2:
* Modify the following statement:
  rsdp = biosacpi_search_rsdp((char *)hpa2hva((uint64_t)(*addr << 4)), 0x400);
  Instead of "(uint64_t)(*addr << 4)", "(uint64_t)(*addr) << 4U" would be clearer.

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
commit 97aeb7f4ff (parent 6ee9321bd8)
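The heart of the change is in the pgtable header (see the hunk near the end of the diff below): the 1:1 HPA/HVA conversion macros become static inline functions, so the compiler now type-checks the argument and evaluates it exactly once. For reference, the before/after shape of that conversion, taken from that hunk, is:

/* before: function-like macros, no type checking on x */
#define HPA2HVA(x) ((void *)(x))
#define HVA2HPA(x) ((uint64_t)(x))

/* after: static inline equivalents introduced by this commit */
static inline void *hpa2hva(uint64_t x)
{
        return (void *)x;
}

static inline uint64_t hva2hpa(void *x)
{
        return (uint64_t)x;
}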
@@ -92,7 +92,7 @@ static int local_gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_inf
i--;

addr = addr & IA32E_REF_MASK;
- base = GPA2HVA(vcpu->vm, addr);
+ base = gpa2hva(vcpu->vm, addr);
if (base == NULL) {
ret = -EFAULT;
goto out;
@@ -166,7 +166,7 @@ static int local_gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
int ret;

addr = pw_info->top_entry & 0xFFFFFFF0U;
- base = GPA2HVA(vcpu->vm, addr);
+ base = gpa2hva(vcpu->vm, addr);
if (base == NULL) {
ret = -EFAULT;
goto out;
@@ -283,7 +283,7 @@ static inline uint32_t local_copy_gpa(const struct vm *vm, void *h_ptr, uint64_t
len = (size > (pg_size - offset_in_pg)) ?
(pg_size - offset_in_pg) : size;

- g_ptr = HPA2HVA(hpa);
+ g_ptr = hpa2hva(hpa);

if (cp_from_vm) {
(void)memcpy_s(h_ptr, len, g_ptr, len);
@@ -399,13 +399,13 @@ void init_e820(void)

if (boot_regs[0] == MULTIBOOT_INFO_MAGIC) {
struct multiboot_info *mbi = (struct multiboot_info *)
- (HPA2HVA((uint64_t)boot_regs[1]));
+ (hpa2hva((uint64_t)boot_regs[1]));

pr_info("Multiboot info detected\n");
if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_MMAP) != 0U) {
struct multiboot_mmap *mmap =
(struct multiboot_mmap *)
- HPA2HVA((uint64_t)mbi->mi_mmap_addr);
+ hpa2hva((uint64_t)mbi->mi_mmap_addr);
e820_entries = mbi->mi_mmap_length/
sizeof(struct multiboot_mmap);
if (e820_entries > E820_MAX_ENTRIES) {
@@ -671,7 +671,7 @@ static const uint64_t guest_init_gdt[] = {

uint64_t create_guest_init_gdt(struct vm *vm, uint32_t *limit)
{
- void *gtd_addr = GPA2HVA(vm, GUEST_INIT_GDT_START);
+ void *gtd_addr = gpa2hva(vm, GUEST_INIT_GDT_START);

*limit = sizeof(guest_init_gdt) - 1U;
(void)memcpy_s(gtd_addr, 64U, guest_init_gdt, sizeof(guest_init_gdt));
@@ -301,7 +301,7 @@ int mptable_build(struct vm *vm)
struct mpfps *mpfp;
size_t mptable_length, table_length;

- startaddr = (char *)GPA2HVA(vm, MPTABLE_BASE);
+ startaddr = (char *)gpa2hva(vm, MPTABLE_BASE);

table_length = vm->vm_desc->mptable->mpch.base_table_length;
mptable_length = sizeof(struct mpfps) + table_length;
@@ -2096,7 +2096,7 @@ vlapic_apicv_get_apic_access_addr(__unused struct vm *vm)

(void)memset((void *)apicv_apic_access_addr, 0U, CPU_PAGE_SIZE);
}
- return HVA2HPA(apicv_apic_access_addr);
+ return hva2hpa(apicv_apic_access_addr);
}

/**
@@ -2105,7 +2105,7 @@ vlapic_apicv_get_apic_access_addr(__unused struct vm *vm)
uint64_t
vlapic_apicv_get_apic_page_addr(struct acrn_vlapic *vlapic)
{
- return HVA2HPA(&(vlapic->apic_page));
+ return hva2hpa(&(vlapic->apic_page));
}

/*
@@ -102,7 +102,7 @@ void init_msr_emulation(struct vcpu *vcpu)
}

/* Set up MSR bitmap - pg 2904 24.6.9 */
- value64 = HVA2HPA(vcpu->vm->arch_vm.msr_bitmap);
+ value64 = hva2hpa(vcpu->vm->arch_vm.msr_bitmap);
exec_vmwrite64(VMX_MSR_BITMAP_FULL, value64);
pr_dbg("VMX_MSR_BITMAP: 0x%016llx ", value64);
}
@@ -86,7 +86,7 @@ static void *map_ioapic(uint64_t ioapic_paddr)
/* At some point we may need to translate this paddr to a vaddr.
* 1:1 mapping for now.
*/
- return HPA2HVA(ioapic_paddr);
+ return hpa2hva(ioapic_paddr);
}

static inline uint32_t
@@ -177,7 +177,7 @@ static void map_lapic(void)
/* At some point we may need to translate this paddr to a vaddr. 1:1
* mapping for now.
*/
- lapic_info.xapic.vaddr = HPA2HVA(lapic_info.xapic.paddr);
+ lapic_info.xapic.vaddr = hpa2hva(lapic_info.xapic.paddr);
}

void early_init_lapic(void)
@@ -172,11 +172,11 @@ void invept(struct vcpu *vcpu)
struct invept_desc desc = {0};

if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
- desc.eptp = HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) |
+ desc.eptp = hva2hpa(vcpu->vm->arch_vm.nworld_eptp) |
(3UL << 3U) | 6UL;
local_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
if (vcpu->vm->sworld_control.flag.active != 0UL) {
- desc.eptp = HVA2HPA(vcpu->vm->arch_vm.sworld_eptp)
+ desc.eptp = hva2hpa(vcpu->vm->arch_vm.sworld_eptp)
| (3UL << 3U) | 6UL;
local_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
}
@@ -190,7 +190,7 @@ void invept(struct vcpu *vcpu)
uint64_t get_paging_pml4(void)
{
/* Return address to caller */
- return HVA2HPA(mmu_pml4_addr);
+ return hva2hpa(mmu_pml4_addr);
}

void enable_paging(uint64_t pml4_base_addr)
@@ -254,7 +254,7 @@ void init_paging(void)
PTT_PRIMARY, MR_MODIFY);

/* Enable paging */
- enable_paging(HVA2HPA(mmu_pml4_addr));
+ enable_paging(hva2hpa(mmu_pml4_addr));
}

void *alloc_paging_struct(void)
@@ -49,7 +49,7 @@ static int split_large_page(uint64_t *pte,
}

ref_prot = (ptt == PTT_PRIMARY) ? PAGE_TABLE : EPT_RWX;
- set_pgentry(pte, HVA2HPA((void *)pbase) | ref_prot);
+ set_pgentry(pte, hva2hpa((void *)pbase) | ref_prot);

/* TODO: flush the TLB */

@@ -81,7 +81,7 @@ static inline int construct_pgentry(enum _page_table_type ptt, uint64_t *pde)
}

prot = (ptt == PTT_PRIMARY) ? PAGE_TABLE: EPT_RWX;
- set_pgentry(pde, HVA2HPA(pd_page) | prot);
+ set_pgentry(pde, hva2hpa(pd_page) | prot);
return 0;
}

@@ -24,7 +24,7 @@ static void acpi_gas_write(struct acpi_generic_address *gas, uint32_t val)
uint16_t val16 = (uint16_t)val;

if (gas->space_id == SPACE_SYSTEM_MEMORY)
- mmio_write16(val16, HPA2HVA(gas->address));
+ mmio_write16(val16, hpa2hva(gas->address));
else
pio_write16(val16, (uint16_t)gas->address);
}
@@ -34,7 +34,7 @@ static uint32_t acpi_gas_read(struct acpi_generic_address *gas)
uint32_t ret = 0U;

if (gas->space_id == SPACE_SYSTEM_MEMORY)
- ret = mmio_read16(HPA2HVA(gas->address));
+ ret = mmio_read16(hpa2hva(gas->address));
else
ret = pio_read16((uint16_t)gas->address);

@@ -105,7 +105,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
* of gpa_rebased to gpa_rebased + size
*/
sub_table_addr = alloc_paging_struct();
- sworld_pml4e = HVA2HPA(sub_table_addr) | table_present;
+ sworld_pml4e = hva2hpa(sub_table_addr) | table_present;
set_pgentry((uint64_t *)pml4_base, sworld_pml4e);

nworld_pml4e = get_pgentry((uint64_t *)vm->arch_vm.nworld_eptp);
@@ -160,7 +160,7 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
}
if (need_clr_mem) {
/* clear trusty memory space */
- (void)memset(HPA2HVA(hpa), 0U, size);
+ (void)memset(hpa2hva(hpa), 0U, size);
}

/* restore memory to SOS ept mapping */
@@ -316,12 +316,12 @@ void switch_world(struct vcpu *vcpu, int next_world)
/* load EPTP for next world */
if (next_world == NORMAL_WORLD) {
exec_vmwrite64(VMX_EPT_POINTER_FULL,
- HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) |
- (3UL<<3) | 6UL);
+ hva2hpa(vcpu->vm->arch_vm.nworld_eptp) |
+ (3UL << 3) | 6UL);
} else {
exec_vmwrite64(VMX_EPT_POINTER_FULL,
- HVA2HPA(vcpu->vm->arch_vm.sworld_eptp) |
- (3UL<<3) | 6UL);
+ hva2hpa(vcpu->vm->arch_vm.sworld_eptp) |
+ (3UL << 3) | 6UL);
}

/* Update world index */
@@ -338,7 +338,7 @@ static bool setup_trusty_info(struct vcpu *vcpu,
struct trusty_mem *mem;
struct trusty_key_info *key_info;

- mem = (struct trusty_mem *)(HPA2HVA(mem_base_hpa));
+ mem = (struct trusty_mem *)(hpa2hva(mem_base_hpa));

/* copy key_info to the first page of trusty memory */
(void)memcpy_s(&mem->first_page.data.key_info, sizeof(g_key_info),
@@ -442,7 +442,7 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
trusty_base_hpa = vm->sworld_control.sworld_memory.base_hpa;

exec_vmwrite64(VMX_EPT_POINTER_FULL,
- HVA2HPA(vm->arch_vm.sworld_eptp) | (3UL<<3) | 6UL);
+ hva2hpa(vm->arch_vm.sworld_eptp) | (3UL << 3) | 6UL);

/* save Normal World context */
save_world_ctx(vcpu, &vcpu->arch_vcpu.contexts[NORMAL_WORLD].ext_ctx);
@@ -181,11 +181,12 @@ void dump_lapic(void)
{
dev_dbg(ACRN_DBG_INTR,
"LAPIC: TIME %08x, init=0x%x cur=0x%x ISR=0x%x IRR=0x%x",
- mmio_read32(HPA2HVA(LAPIC_BASE + LAPIC_LVT_TIMER_REGISTER)),
- mmio_read32(HPA2HVA(LAPIC_BASE + LAPIC_INITIAL_COUNT_REGISTER)),
- mmio_read32(HPA2HVA(LAPIC_BASE + LAPIC_CURRENT_COUNT_REGISTER)),
- mmio_read32(HPA2HVA(LAPIC_BASE + LAPIC_IN_SERVICE_REGISTER_7)),
- mmio_read32(HPA2HVA(LAPIC_BASE + LAPIC_INT_REQUEST_REGISTER_7)));
+ mmio_read32(hpa2hva(LAPIC_BASE + LAPIC_LVT_TIMER_REGISTER)),
+ mmio_read32(hpa2hva(LAPIC_BASE + LAPIC_INITIAL_COUNT_REGISTER)),
+ mmio_read32(hpa2hva(LAPIC_BASE + LAPIC_CURRENT_COUNT_REGISTER)),
+ mmio_read32(hpa2hva(LAPIC_BASE + LAPIC_IN_SERVICE_REGISTER_7)),
+ mmio_read32(hpa2hva(LAPIC_BASE + LAPIC_INT_REQUEST_REGISTER_7))
+ );
}

/* SDM Vol3 -6.15, Table 6-4 - interrupt and exception classes */
@@ -96,11 +96,11 @@ void exec_vmxon_instr(uint16_t pcpu_id)
CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);

/* Turn ON VMX */
- vmxon_region_pa = HVA2HPA(vmxon_region_va);
+ vmxon_region_pa = hva2hpa(vmxon_region_va);
exec_vmxon(&vmxon_region_pa);

if (vcpu != NULL) {
- vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
+ vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
exec_vmptrld(&vmcs_pa);
}
}
@@ -112,7 +112,7 @@ void vmx_off(uint16_t pcpu_id)
uint64_t vmcs_pa;

if (vcpu != NULL) {
- vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
+ vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
exec_vmclear((void *)&vmcs_pa);
}

@@ -991,7 +991,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
* TODO: introduce API to make this data driven based
* on VMX_EPT_VPID_CAP
*/
- value64 = HVA2HPA(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL;
+ value64 = hva2hpa(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL;
exec_vmwrite64(VMX_EPT_POINTER_FULL, value64);
pr_dbg("VMX_EPT_POINTER: 0x%016llx ", value64);

@@ -1022,10 +1022,10 @@ static void init_exec_ctrl(struct vcpu *vcpu)
exec_vmwrite32(VMX_CR3_TARGET_COUNT, 0U);

/* Set up IO bitmap register A and B - pg 2902 24.6.4 */
- value64 = HVA2HPA(vm->arch_vm.iobitmap[0]);
+ value64 = hva2hpa(vm->arch_vm.iobitmap[0]);
exec_vmwrite64(VMX_IO_BITMAP_A_FULL, value64);
pr_dbg("VMX_IO_BITMAP_A: 0x%016llx ", value64);
- value64 = HVA2HPA(vm->arch_vm.iobitmap[1]);
+ value64 = hva2hpa(vm->arch_vm.iobitmap[1]);
exec_vmwrite64(VMX_IO_BITMAP_B_FULL, value64);
pr_dbg("VMX_IO_BITMAP_B: 0x%016llx ", value64);

@@ -1149,7 +1149,7 @@ void init_vmcs(struct vcpu *vcpu)
(void)memcpy_s(vcpu->arch_vcpu.vmcs, 4U, (void *)&vmx_rev_id, 4U);

/* Execute VMCLEAR on current VMCS */
- vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
+ vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
exec_vmclear((void *)&vmcs_pa);

/* Load VMCS pointer */
@@ -178,16 +178,17 @@ static void register_hrhd_units(void)

static uint32_t iommu_read32(struct dmar_drhd_rt *dmar_uint, uint32_t offset)
{
- return mmio_read32(HPA2HVA(dmar_uint->drhd->reg_base_addr + offset));
+ return mmio_read32(hpa2hva(dmar_uint->drhd->reg_base_addr + offset));
}

static uint64_t iommu_read64(struct dmar_drhd_rt *dmar_uint, uint32_t offset)
{
uint64_t value;

- value = mmio_read32(HPA2HVA(dmar_uint->drhd->reg_base_addr + offset + 4U));
+ value = mmio_read32(hpa2hva(dmar_uint->drhd->reg_base_addr + offset +
+ 4U));
value = value << 32U;
- value = value | mmio_read32(HPA2HVA(dmar_uint->drhd->reg_base_addr +
+ value = value | mmio_read32(hpa2hva(dmar_uint->drhd->reg_base_addr +
offset));

return value;
@@ -196,7 +197,7 @@ static uint64_t iommu_read64(struct dmar_drhd_rt *dmar_uint, uint32_t offset)
static void iommu_write32(struct dmar_drhd_rt *dmar_uint, uint32_t offset,
uint32_t value)
{
- mmio_write32(value, HPA2HVA(dmar_uint->drhd->reg_base_addr + offset));
+ mmio_write32(value, hpa2hva(dmar_uint->drhd->reg_base_addr + offset));
}

static void iommu_write64(struct dmar_drhd_rt *dmar_uint, uint32_t offset,
@@ -205,10 +206,11 @@ static void iommu_write64(struct dmar_drhd_rt *dmar_uint, uint32_t offset,
uint32_t temp;

temp = (uint32_t)value;
- mmio_write32(temp, HPA2HVA(dmar_uint->drhd->reg_base_addr + offset));
+ mmio_write32(temp, hpa2hva(dmar_uint->drhd->reg_base_addr + offset));

temp = (uint32_t)(value >> 32U);
- mmio_write32(temp, HPA2HVA(dmar_uint->drhd->reg_base_addr + offset + 4U));
+ mmio_write32(temp,
+ hpa2hva(dmar_uint->drhd->reg_base_addr + offset + 4U));
}

static inline void
@@ -976,14 +978,15 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
void *root_table_vaddr = alloc_paging_struct();

if (root_table_vaddr != NULL) {
- dmar_uint->root_table_addr = HVA2HPA(root_table_vaddr);
+ dmar_uint->root_table_addr = hva2hpa(root_table_vaddr);
} else {
ASSERT(false, "failed to allocate root table!");
return 1;
}
}

- root_table = (struct dmar_root_entry *)HPA2HVA(dmar_uint->root_table_addr);
+ root_table =
+ (struct dmar_root_entry *)hpa2hva(dmar_uint->root_table_addr);

root_entry = root_table + bus;

@@ -994,7 +997,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,

if (vaddr != NULL) {
/* create context table for the bus if not present */
- context_table_addr = HVA2HPA(vaddr);
+ context_table_addr = hva2hpa(vaddr);

context_table_addr = context_table_addr >> 12;

@@ -1022,7 +1025,8 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,

context_table_addr = context_table_addr << 12;

- context_table = (struct dmar_context_entry *)HPA2HVA(context_table_addr);
+ context_table =
+ (struct dmar_context_entry *)hpa2hva(context_table_addr);
context_entry = context_table + devfun;

/* the context entry should not be present */
@@ -1114,14 +1118,16 @@ remove_iommu_device(struct iommu_domain *domain, uint16_t segment,
return 1;
}

- root_table = (struct dmar_root_entry *)HPA2HVA(dmar_uint->root_table_addr);
+ root_table =
+ (struct dmar_root_entry *)hpa2hva(dmar_uint->root_table_addr);
root_entry = root_table + bus;

context_table_addr = dmar_get_bitslice(root_entry->lower,
ROOT_ENTRY_LOWER_CTP_MASK,
ROOT_ENTRY_LOWER_CTP_POS);
context_table_addr = context_table_addr << 12;
- context_table = (struct dmar_context_entry *)HPA2HVA(context_table_addr);
+ context_table =
+ (struct dmar_context_entry *)hpa2hva(context_table_addr);

context_entry = context_table + devfun;

@@ -1303,7 +1309,7 @@ void init_iommu_vm0_domain(struct vm *vm0)
uint16_t devfun;

vm0->iommu = create_iommu_domain(vm0->vm_id,
- HVA2HPA(vm0->arch_vm.nworld_eptp), 48U);
+ hva2hpa(vm0->arch_vm.nworld_eptp), 48U);

vm0_domain = (struct iommu_domain *) vm0->iommu;

@@ -144,15 +144,16 @@ static void *get_rsdp(void)
#endif

/* EBDA is addressed by the 16 bit pointer at 0x40E */
- addr = (uint16_t *)HPA2HVA(0x40E);
+ addr = (uint16_t *)hpa2hva(0x40eUL);

- rsdp = biosacpi_search_rsdp((char *)HPA2HVA((uint64_t)(*addr << 4)), 0x400);
+ rsdp = biosacpi_search_rsdp((char *)hpa2hva((uint64_t)(*addr) << 4U),
+ 0x400);
if (rsdp != NULL) {
return rsdp;
}

/* Check the upper memory BIOS space, 0xe0000 - 0xfffff. */
- rsdp = biosacpi_search_rsdp((char *)HPA2HVA(0xe0000), 0x20000);
+ rsdp = biosacpi_search_rsdp((char *)hpa2hva(0xe0000UL), 0x20000);
if (rsdp != NULL) {
return rsdp;
}
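The v1 -> v2 note in the commit message refers to the rsdp line in the hunk above. Both forms compute the same EBDA base (the 16-bit segment value shifted left by 4), but casting to uint64_t before the shift makes the intended width of the arithmetic explicit. A small stand-alone sketch of the two forms; the helper names here are illustrative and not part of the patch:

#include <stdint.h>

/* Illustrative only: both expressions yield the same EBDA base for a
 * 16-bit segment value; the v2 form widens to 64 bits before shifting. */
static uint64_t ebda_base_v1(const uint16_t *addr)
{
        return (uint64_t)(*addr << 4);    /* shift in int, then cast */
}

static uint64_t ebda_base_v2(const uint16_t *addr)
{
        return (uint64_t)(*addr) << 4U;   /* cast first, then shift */
}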
@@ -163,7 +164,7 @@ static void *get_rsdp(void)
static int
probe_table(uint64_t address, const char *sig)
{
- void *va = HPA2HVA(address);
+ void *va = hpa2hva(address);
struct acpi_table_header *table = (struct acpi_table_header *)va;

if (strncmp(table->signature, sig, ACPI_NAME_SIZE) != 0) {
@@ -189,7 +190,8 @@ static void *get_acpi_tbl(const char *sig)
* the version 1.0 portion of the RSDP. Version 2.0 has
* an additional checksum that we verify first.
*/
- xsdt = (struct acpi_table_xsdt *)HPA2HVA(rsdp->xsdt_physical_address);
+ xsdt = (struct acpi_table_xsdt *)
+ hpa2hva(rsdp->xsdt_physical_address);
count = (xsdt->header.length -
sizeof(struct acpi_table_header)) /
sizeof(uint64_t);
@@ -203,7 +205,7 @@ static void *get_acpi_tbl(const char *sig)
} else {
/* Root table is an RSDT (32-bit physical addresses) */
rsdt = (struct acpi_table_rsdt *)
- HPA2HVA((uint64_t)rsdp->rsdt_physical_address);
+ hpa2hva((uint64_t)rsdp->rsdt_physical_address);
count = (rsdt->header.length -
sizeof(struct acpi_table_header)) /
sizeof(uint32_t);
@@ -216,7 +218,7 @@ static void *get_acpi_tbl(const char *sig)
}
}

- return HPA2HVA(addr);
+ return hpa2hva(addr);
}

static uint16_t local_parse_madt(void *madt, uint8_t lapic_id_array[MAX_PCPU_NUM])
@@ -141,7 +141,7 @@ uint64_t read_trampoline_sym(void *sym)
{
uint64_t *hva;

- hva = HPA2HVA(trampoline_start16_paddr) + trampoline_relo_addr(sym);
+ hva = hpa2hva(trampoline_start16_paddr) + trampoline_relo_addr(sym);
return *hva;
}

@@ -149,7 +149,7 @@ void write_trampoline_sym(void *sym, uint64_t val)
{
uint64_t *hva;

- hva = HPA2HVA(trampoline_start16_paddr) + trampoline_relo_addr(sym);
+ hva = hpa2hva(trampoline_start16_paddr) + trampoline_relo_addr(sym);
*hva = val;
}

@@ -168,41 +168,41 @@ static void update_trampoline_code_refs(uint64_t dest_pa)
*/
val = dest_pa + trampoline_relo_addr(&trampoline_fixup_target);

- ptr = HPA2HVA(dest_pa + trampoline_relo_addr(&trampoline_fixup_cs));
+ ptr = hpa2hva(dest_pa + trampoline_relo_addr(&trampoline_fixup_cs));
*(uint16_t *)(ptr) = (uint16_t)((val >> 4) & 0xFFFFU);

- ptr = HPA2HVA(dest_pa + trampoline_relo_addr(&trampoline_fixup_ip));
+ ptr = hpa2hva(dest_pa + trampoline_relo_addr(&trampoline_fixup_ip));
*(uint16_t *)(ptr) = (uint16_t)(val & 0xfU);

/* Update temporary page tables */
- ptr = HPA2HVA(dest_pa +
+ ptr = hpa2hva(dest_pa +
trampoline_relo_addr(&CPU_Boot_Page_Tables_ptr));
*(uint32_t *)(ptr) += (uint32_t)dest_pa;

- ptr = HPA2HVA(dest_pa +
+ ptr = hpa2hva(dest_pa +
trampoline_relo_addr(&CPU_Boot_Page_Tables_Start));
*(uint64_t *)(ptr) += dest_pa;

- ptr = HPA2HVA(dest_pa + trampoline_relo_addr(&trampoline_pdpt_addr));
+ ptr = hpa2hva(dest_pa + trampoline_relo_addr(&trampoline_pdpt_addr));
for (i = 0; i < 4; i++) {
*(uint64_t *)(ptr + sizeof(uint64_t) * i) += dest_pa;
}

/* update the gdt base pointer with relocated offset */
- ptr = HPA2HVA(dest_pa + trampoline_relo_addr(&trampoline_gdt_ptr));
+ ptr = hpa2hva(dest_pa + trampoline_relo_addr(&trampoline_gdt_ptr));
*(uint64_t *)(ptr + 2) += dest_pa;

/* update trampoline jump pointer with relocated offset */
- ptr = HPA2HVA(dest_pa +
+ ptr = hpa2hva(dest_pa +
trampoline_relo_addr(&trampoline_start64_fixup));
*(uint32_t *)ptr += dest_pa;

/* update trampoline's main entry pointer */
- ptr = HPA2HVA(dest_pa + trampoline_relo_addr(main_entry));
+ ptr = hpa2hva(dest_pa + trampoline_relo_addr(main_entry));
*(uint64_t *)ptr += get_hv_image_delta();

/* update trampoline's spinlock pointer */
- ptr = HPA2HVA(dest_pa + trampoline_relo_addr(&trampoline_spinlock_ptr));
+ ptr = hpa2hva(dest_pa + trampoline_relo_addr(&trampoline_spinlock_ptr));
*(uint64_t *)ptr += get_hv_image_delta();
}

@@ -220,7 +220,7 @@ uint64_t prepare_trampoline(void)
pr_dbg("trampoline code: %llx size %x", dest_pa, size);

/* Copy segment for AP initialization code below 1MB */
- (void)memcpy_s(HPA2HVA(dest_pa), (size_t)size, &_ld_trampoline_load,
+ (void)memcpy_s(hpa2hva(dest_pa), (size_t)size, &_ld_trampoline_load,
(size_t)size);
update_trampoline_code_refs(dest_pa);
trampoline_start16_paddr = dest_pa;
@@ -40,9 +40,9 @@ static void parse_other_modules(struct vm *vm,

for (i = 0U; i < mods_count; i++) {
uint32_t type_len;
- const char *start = HPA2HVA((uint64_t)mods[i].mm_string);
+ const char *start = hpa2hva((uint64_t)mods[i].mm_string);
const char *end;
- void *mod_addr = HPA2HVA((uint64_t)mods[i].mm_mod_start);
+ void *mod_addr = hpa2hva((uint64_t)mods[i].mm_mod_start);
uint32_t mod_size = mods[i].mm_mod_end - mods[i].mm_mod_start;

dev_dbg(ACRN_DBG_BOOT, "other mod-%d start=0x%x, end=0x%x",
@@ -62,7 +62,7 @@ static void parse_other_modules(struct vm *vm,
type_len = end - start;
if (strncmp("FIRMWARE", start, type_len) == 0) {
char dyn_bootargs[100] = {'\0'};
- void *load_addr = GPA2HVA(vm,
+ void *load_addr = gpa2hva(vm,
(uint64_t)vm->sw.linux_info.bootargs_load_addr);
uint32_t args_size = vm->sw.linux_info.bootargs_size;
static int copy_once = 1;
@@ -154,12 +154,12 @@ static void *parse_image_boot_params(struct vm *vm, char *cmdline)
}

param = arg + len;
- boot_params = (struct image_boot_params *)HPA2HVA(strtoul_hex(param));
+ boot_params = (struct image_boot_params *)hpa2hva(strtoul_hex(param));
if (boot_params == NULL) {
goto fail;
}

- parse_seed_list((struct seed_list_hob *)HPA2HVA(
+ parse_seed_list((struct seed_list_hob *)hpa2hva(
boot_params->p_seed_list));

/*
@@ -198,7 +198,7 @@ int init_vm_boot_info(struct vm *vm)
return -EINVAL;
}

- mbi = HPA2HVA((uint64_t)boot_regs[1]);
+ mbi = hpa2hva((uint64_t)boot_regs[1]);

dev_dbg(ACRN_DBG_BOOT, "Multiboot detected, flag=0x%x", mbi->mi_flags);
if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_MODS) == 0U) {
@@ -218,7 +218,7 @@ int init_vm_boot_info(struct vm *vm)

vm->sw.kernel_type = VM_LINUX_GUEST;
vm->sw.kernel_info.kernel_src_addr =
- HPA2HVA((uint64_t)mods[0].mm_mod_start);
+ hpa2hva((uint64_t)mods[0].mm_mod_start);
vm->sw.kernel_info.kernel_size =
mods[0].mm_mod_end - mods[0].mm_mod_start;

@@ -253,7 +253,7 @@ int init_vm_boot_info(struct vm *vm)
return -EINVAL;
}

- mbi = HPA2HVA((uint64_t)boot_regs[1]);
+ mbi = hpa2hva((uint64_t)boot_regs[1]);

dev_dbg(ACRN_DBG_BOOT, "Multiboot detected, flag=0x%x", mbi->mi_flags);
if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_MODS) == 0U) {
@@ -264,7 +264,7 @@ int init_vm_boot_info(struct vm *vm)
dev_dbg(ACRN_DBG_BOOT, "mod counts=%d\n", mbi->mi_mods_count);

/* mod[0] is for kernel&cmdline, other mod for ramdisk/firmware info*/
- mods = (struct multiboot_module *)HPA2HVA((uint64_t)mbi->mi_mods_addr);
+ mods = (struct multiboot_module *)hpa2hva((uint64_t)mbi->mi_mods_addr);

dev_dbg(ACRN_DBG_BOOT, "mod0 start=0x%x, end=0x%x",
mods[0].mm_mod_start, mods[0].mm_mod_end);
@@ -273,10 +273,10 @@ int init_vm_boot_info(struct vm *vm)

vm->sw.kernel_type = VM_LINUX_GUEST;
vm->sw.kernel_info.kernel_src_addr =
- HPA2HVA((uint64_t)mods[0].mm_mod_start);
+ hpa2hva((uint64_t)mods[0].mm_mod_start);
vm->sw.kernel_info.kernel_size =
mods[0].mm_mod_end - mods[0].mm_mod_start;
- vm->sw.kernel_info.kernel_load_addr = (void *)HVA2GPA(vm,
+ vm->sw.kernel_info.kernel_load_addr = (void *)hva2gpa(vm,
get_kernel_load_addr(vm->sw.kernel_info.kernel_src_addr));

/*
@@ -290,7 +290,7 @@ int init_vm_boot_info(struct vm *vm)
char buf[MAX_BOOT_PARAMS_LEN];

cmd_dst = kernel_cmdline;
- cmd_src = HPA2HVA((uint64_t)mbi->mi_cmdline);
+ cmd_src = hpa2hva((uint64_t)mbi->mi_cmdline);

boot_params_addr = parse_image_boot_params(vm, cmd_src);
/*
@@ -300,8 +300,7 @@ int init_vm_boot_info(struct vm *vm)
if (boot_params_addr != NULL) {
(void)memset(buf, 0U, sizeof(buf));
snprintf(buf, MAX_BOOT_PARAMS_LEN, "%s0x%X ",
- boot_params_arg,
- HVA2GPA(vm, (uint64_t)boot_params_addr));
+ boot_params_arg, hva2gpa(vm, boot_params_addr));
(void)strncpy_s(cmd_dst, MEM_2K, buf,
MAX_BOOT_PARAMS_LEN);
off = strnlen_s(cmd_dst, MEM_2K);
@@ -315,7 +314,7 @@ int init_vm_boot_info(struct vm *vm)
off += 1U;

cmd_dst += off;
- cmd_src = HPA2HVA((uint64_t)mods[0].mm_string);
+ cmd_src = hpa2hva((uint64_t)mods[0].mm_string);
(void)strncpy_s(cmd_dst, MEM_2K - off, cmd_src,
strnlen_s(cmd_src, MEM_2K - off));

@@ -324,9 +323,9 @@ int init_vm_boot_info(struct vm *vm)
strnlen_s(kernel_cmdline, MEM_2K);
} else {
vm->sw.linux_info.bootargs_src_addr =
- HPA2HVA((uint64_t)mods[0].mm_string);
+ hpa2hva((uint64_t)mods[0].mm_string);
vm->sw.linux_info.bootargs_size =
- strnlen_s(HPA2HVA((uint64_t)mods[0].mm_string),
+ strnlen_s(hpa2hva((uint64_t)mods[0].mm_string),
MEM_2K);
}

@@ -73,7 +73,7 @@ int parse_hv_cmdline(void)
return -EINVAL;
}

- mbi = (struct multiboot_info *)(HPA2HVA((uint64_t)boot_regs[1]));
+ mbi = (struct multiboot_info *)(hpa2hva((uint64_t)boot_regs[1]));
dev_dbg(ACRN_DBG_PARSE, "Multiboot detected, flag=0x%x", mbi->mi_flags);

if (!(mbi->mi_flags & MULTIBOOT_INFO_HAS_CMDLINE)) {
@@ -81,7 +81,7 @@ int parse_hv_cmdline(void)
return -EINVAL;
}

- start = (char *)HPA2HVA((uint64_t)mbi->mi_cmdline);
+ start = (char *)hpa2hva((uint64_t)mbi->mi_cmdline);
dev_dbg(ACRN_DBG_PARSE, "hv cmdline: %s", start);

do {
@@ -79,7 +79,7 @@ void *get_rsdp_from_uefi(void)
if (!efi_initialized)
efi_init();

- return HPA2HVA(efi_ctx->rsdp);
+ return hpa2hva(efi_ctx->rsdp);
}

void *get_ap_trampoline_buf(void)
@@ -94,12 +94,13 @@ static void efi_init(void)
if (boot_regs[0] != MULTIBOOT_INFO_MAGIC)
ASSERT(0, "no multiboot info found");

- mbi = (struct multiboot_info *)HPA2HVA(((uint64_t)(uint32_t)boot_regs[1]));
+ mbi = (struct multiboot_info *)
+ hpa2hva(((uint64_t)(uint32_t)boot_regs[1]));

if (!(mbi->mi_flags & MULTIBOOT_INFO_HAS_DRIVES))
ASSERT(0, "no multiboot drivers for uefi found");

- efi_ctx = (struct boot_ctx *)HPA2HVA((uint64_t)mbi->mi_drives_addr);
+ efi_ctx = (struct boot_ctx *)hpa2hva((uint64_t)mbi->mi_drives_addr);
ASSERT(efi_ctx != NULL, "no uefi context found");

vm_sw_loader = uefi_sw_loader;
@@ -381,7 +381,7 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
return -EINVAL;
}

- target_vm->sw.io_shared_page = HPA2HVA(hpa);
+ target_vm->sw.io_shared_page = hpa2hva(hpa);

req_buf = target_vm->sw.io_shared_page;
for (i = 0U; i < VHM_REQUEST_MAX; i++) {
@@ -534,7 +534,7 @@ int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param)

idx = 0U;
/*TODO: use copy_from_gpa for this buffer page */
- regions = GPA2HVA(vm, set_regions.regions_gpa);
+ regions = gpa2hva(vm, set_regions.regions_gpa);
while (idx < set_regions.mr_num) {
/* the force pointer change below is for back compatible
* to struct vm_memory_region, it will be removed in the future
@@ -693,7 +693,7 @@ int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
}
/* TODO: how to get vm's address width? */
target_vm->iommu = create_iommu_domain(vmid,
- HVA2HPA(target_vm->arch_vm.nworld_eptp), 48U);
+ hva2hpa(target_vm->arch_vm.nworld_eptp), 48U);
if (target_vm->iommu == NULL) {
return -ENODEV;
}
@@ -807,7 +807,7 @@ int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
}

if (ssp.gpa != 0U) {
- hva = (uint64_t *)GPA2HVA(vm, ssp.gpa);
+ hva = (uint64_t *)gpa2hva(vm, ssp.gpa);
} else {
hva = (uint64_t *)NULL;
}
@@ -47,14 +47,14 @@ static uint64_t create_zero_page(struct vm *vm)
uint64_t gpa, addr;

/* Set zeropage in Linux Guest RAM region just past boot args */
- hva = GPA2HVA(vm, (uint64_t)sw_linux->bootargs_load_addr);
+ hva = gpa2hva(vm, (uint64_t)sw_linux->bootargs_load_addr);
zeropage = (struct zero_page *)((char *)hva + MEM_4K);

/* clear the zeropage */
(void)memset(zeropage, 0U, MEM_2K);

/* copy part of the header into the zero page */
- hva = GPA2HVA(vm, (uint64_t)sw_kernel->kernel_load_addr);
+ hva = gpa2hva(vm, (uint64_t)sw_kernel->kernel_load_addr);
(void)memcpy_s(&(zeropage->hdr), sizeof(zeropage->hdr),
&(hva->hdr), sizeof(hva->hdr));

@@ -79,7 +79,7 @@ static uint64_t create_zero_page(struct vm *vm)
zeropage->e820_nentries = (uint8_t)create_e820_table(zeropage->e820);

/* Get the host physical address of the zeropage */
- gpa = hpa2gpa(vm, HVA2HPA((uint64_t)zeropage));
+ gpa = hpa2gpa(vm, hva2hpa((void *)zeropage));

/* Return Physical Base Address of zeropage */
return gpa;
@@ -92,7 +92,7 @@ int load_guest(struct vm *vm, struct vcpu *vcpu)
void *hva;
uint64_t lowmem_gpa_top;

- hva = GPA2HVA(vm, GUEST_CFG_OFFSET);
+ hva = gpa2hva(vm, GUEST_CFG_OFFSET);
lowmem_gpa_top = *(uint64_t *)hva;

/* hardcode vcpu entry addr(kernel entry) & rsi (zeropage)*/
@@ -100,7 +100,7 @@ int load_guest(struct vm *vm, struct vcpu *vcpu)
vcpu_set_gpreg(vcpu, i, 0UL);
}

- hva = GPA2HVA(vm, lowmem_gpa_top -
+ hva = gpa2hva(vm, lowmem_gpa_top -
MEM_4K - MEM_2K);
vcpu->entry_addr = (void *)(*((uint64_t *)hva));
vcpu_set_gpreg(vcpu, CPU_REG_RSI, lowmem_gpa_top - MEM_4K);
@@ -156,7 +156,7 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
}

/* Calculate the host-physical address where the guest will be loaded */
- hva = GPA2HVA(vm, (uint64_t)sw_kernel->kernel_load_addr);
+ hva = gpa2hva(vm, (uint64_t)sw_kernel->kernel_load_addr);

/* Copy the guest kernel image to its run-time location */
(void)memcpy_s((void *)hva, sw_kernel->kernel_size,
@@ -175,7 +175,7 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
}

/* Get host-physical address for guest bootargs */
- hva = GPA2HVA(vm,
+ hva = gpa2hva(vm,
(uint64_t)sw_linux->bootargs_load_addr);

/* Copy Guest OS bootargs to its load location */
@@ -221,7 +221,7 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
/* Check if a RAM disk is present with Linux guest */
if (sw_linux->ramdisk_src_addr != NULL) {
/* Get host-physical address for guest RAM disk */
- hva = GPA2HVA(vm,
+ hva = gpa2hva(vm,
(uint64_t)sw_linux->ramdisk_load_addr);

/* Copy RAM disk to its load location */
@@ -37,7 +37,8 @@ static inline uint32_t uart16550_read_reg(uint64_t base, uint16_t reg_idx)
if (serial_port_mapped) {
return pio_read8((uint16_t)base + reg_idx);
} else {
- return mmio_read32((void*)((uint32_t*)HPA2HVA(base) + reg_idx));
+ return mmio_read32((void *)((uint32_t *)hpa2hva(base) +
+ reg_idx));
}
}

@@ -50,7 +51,8 @@ static inline void uart16550_write_reg(uint64_t base,
if (serial_port_mapped) {
pio_write8((uint8_t)val, (uint16_t)base + reg_idx);
} else {
- mmio_write32(val, (void*)((uint32_t*)HPA2HVA(base) + reg_idx));
+ mmio_write32(val, (void *)((uint32_t *)hpa2hva(base) +
+ reg_idx));
}
}

@@ -145,7 +145,7 @@ static int vdev_pt_init(struct pci_vdev *vdev)
vm->arch_vm.nworld_eptp = alloc_paging_struct();
}
vm->iommu = create_iommu_domain(vm->vm_id,
- HVA2HPA(vm->arch_vm.nworld_eptp), 48U);
+ hva2hpa(vm->arch_vm.nworld_eptp), 48U);
}

ret = assign_iommu_device(vm->iommu, vdev->pdev.bdf.bits.b,
@@ -10,8 +10,15 @@
#include <pgtable_types.h>

/* hpa <--> hva, now it is 1:1 mapping */
- #define HPA2HVA(x) ((void *)(x))
- #define HVA2HPA(x) ((uint64_t)(x))
+ static inline void *hpa2hva(uint64_t x)
+ {
+ return (void *)x;
+ }
+
+ static inline uint64_t hva2hpa(void *x)
+ {
+ return (uint64_t)x;
+ }

static inline uint64_t pml4e_index(uint64_t address)
{
@@ -35,17 +42,17 @@ static inline uint64_t pte_index(uint64_t address)

static inline uint64_t *pml4e_page_vaddr(uint64_t pml4e)
{
- return HPA2HVA(pml4e & PML4E_PFN_MASK);
+ return hpa2hva(pml4e & PML4E_PFN_MASK);
}

static inline uint64_t *pdpte_page_vaddr(uint64_t pdpte)
{
- return HPA2HVA(pdpte & PDPTE_PFN_MASK);
+ return hpa2hva(pdpte & PDPTE_PFN_MASK);
}

static inline uint64_t *pde_page_vaddr(uint64_t pde)
{
- return HPA2HVA(pde & PDE_PFN_MASK);
+ return hpa2hva(pde & PDE_PFN_MASK);
}

static inline uint64_t *pml4e_offset(uint64_t *pml4_page, uint64_t addr)
@@ -30,8 +30,16 @@

#ifndef ASSEMBLER
/* gpa --> hpa -->hva */
- #define GPA2HVA(vm, x) HPA2HVA(gpa2hpa(vm, x))
- #define HVA2GPA(vm, x) hpa2gpa(vm, HVA2HPA(x))
+ static inline void *gpa2hva(const struct vm *vm, uint64_t x)
+ {
+ return hpa2hva(gpa2hpa(vm, x));
+ }
+
+ static inline uint64_t hva2gpa(const struct vm *vm, void *x)
+ {
+ return hpa2gpa(vm, hva2hpa(x));
+ }

#endif /* !ASSEMBLER */

#endif /* HYPERVISOR_H */
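The gpa2hva()/hva2gpa() helpers in the last hunk simply compose the gpa<->hpa translation with the 1:1 hpa<->hva mapping. A minimal usage sketch follows, with simplified stand-ins for the ACRN vm type and the gpa2hpa()/hpa2gpa() translations (the real ones live elsewhere in the hypervisor); it illustrates how the inline form now produces compiler diagnostics for argument-type mistakes that the old macros cast away silently:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for illustration only; not the real ACRN definitions. */
struct vm { uint64_t gpa_offset; };

static inline uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa)
{
        return gpa + vm->gpa_offset;   /* placeholder translation */
}

static inline uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa)
{
        return hpa - vm->gpa_offset;   /* placeholder translation */
}

/* Same shape as the inline helpers added by this commit (1:1 hpa<->hva). */
static inline void *hpa2hva(uint64_t x) { return (void *)x; }
static inline uint64_t hva2hpa(void *x) { return (uint64_t)x; }

static inline void *gpa2hva(const struct vm *vm, uint64_t x)
{
        return hpa2hva(gpa2hpa(vm, x));
}

static inline uint64_t hva2gpa(const struct vm *vm, void *x)
{
        return hpa2gpa(vm, hva2hpa(x));
}

int main(void)
{
        struct vm vm = { .gpa_offset = 0x1000UL };
        void *hva = gpa2hva(&vm, 0x2000UL);      /* gpa -> hpa -> hva */
        uint64_t gpa = hva2gpa(&vm, hva);        /* and back: 0x2000 */

        printf("hva=%p gpa=0x%" PRIx64 "\n", hva, gpa);
        /* hva2hpa(0x5000UL) would now draw a compiler diagnostic
         * (integer passed where a pointer is expected); the old
         * HVA2HPA() macro would have cast it silently. */
        return 0;
}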