Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: refine 'uint64_t' string print format in x86 module
Use "0x%lx" string to format 'uint64_t' type value, instead of "0x%llx". Tracked-On: #4020 Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
parent fb29d1f99f
commit e51386fe04
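Why the change is safe, in brief: the hypervisor targets x86_64, an LP64 environment in which 'uint64_t' is typedef'd to 'unsigned long', so "%lx" is the exactly matching conversion, while "%llx" pairs with 'unsigned long long'. Below is a minimal standalone sketch (not part of this commit, and using hosted-C headers that the hypervisor's own print helpers do not) showing the matching specifier and the portable PRIx64 alternative from <inttypes.h>:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t gpa = 0x12345678UL;

    /* On LP64 targets (e.g. x86_64), uint64_t is unsigned long,
     * so the 'l' length modifier matches exactly. */
    printf("gpa: 0x%016lx\n", (unsigned long)gpa);

    /* Portable spelling: PRIx64 expands to the right modifier
     * for uint64_t on any platform. */
    printf("gpa: 0x%016" PRIx64 "\n", gpa);

    return 0;
}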
@@ -151,7 +151,7 @@ void init_e820(void)
         hv_e820[i].type = mmap[i].type;

         dev_dbg(ACRN_DBG_E820, "mmap table: %d type: 0x%x\n", i, mmap[i].type);
-        dev_dbg(ACRN_DBG_E820, "Base: 0x%016llx length: 0x%016llx",
+        dev_dbg(ACRN_DBG_E820, "Base: 0x%016lx length: 0x%016lx",
             mmap[i].baseaddr, mmap[i].length);
     }
 }
@@ -160,7 +160,7 @@ static void ptirq_build_physical_msi(struct acrn_vm *vm, struct ptirq_msi_info *
         info->pmsi_addr.bits.rh = MSI_ADDR_RH;
         info->pmsi_addr.bits.dest_mode = MSI_ADDR_DESTMODE_LOGICAL;
     }
-    dev_dbg(ACRN_DBG_IRQ, "MSI %s addr:data = 0x%llx:%x(V) -> 0x%llx:%x(P)",
+    dev_dbg(ACRN_DBG_IRQ, "MSI %s addr:data = 0x%lx:%x(V) -> 0x%lx:%x(P)",
         (info->pmsi_addr.ir_bits.intr_format != 0U) ? " Remappable Format" : "Compatibility Format",
         info->vmsi_addr.full, info->vmsi_data.full,
         info->pmsi_addr.full, info->pmsi_data.full);
@@ -548,7 +548,7 @@ void ptirq_softirq(uint16_t pcpu_id)
                 entry->allocated_pirq,
                 msi->vmsi_data.bits.vector,
                 irq_to_vector(entry->allocated_pirq));
-            dev_dbg(ACRN_DBG_PTIRQ, " vmsi_addr: 0x%llx vmsi_data: 0x%x",
+            dev_dbg(ACRN_DBG_PTIRQ, " vmsi_addr: 0x%lx vmsi_data: 0x%x",
                 msi->vmsi_addr.full,
                 msi->vmsi_data.full);
         }
@@ -93,10 +93,10 @@ int32_t ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu)
     status = -EINVAL;

     /* TODO - EPT Violation handler */
-    pr_fatal("%s, Guest linear address: 0x%016llx ",
+    pr_fatal("%s, Guest linear address: 0x%016lx ",
         __func__, exec_vmread(VMX_GUEST_LINEAR_ADDR));

-    pr_fatal("%s, Guest physical address: 0x%016llx ",
+    pr_fatal("%s, Guest physical address: 0x%016lx ",
         __func__, exec_vmread64(VMX_GUEST_PHYSICAL_ADDR_FULL));

     ASSERT(status == 0, "EPT Misconfiguration is not handled.\n");
@@ -113,7 +113,7 @@ void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page,
     struct acrn_vcpu *vcpu;
     uint64_t prot = prot_orig;

-    dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx size: 0x%016llx prot: 0x%016x\n",
+    dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016lx gpa: 0x%016lx size: 0x%016lx prot: 0x%016x\n",
         __func__, vm->vm_id, hpa, gpa, size, prot);

     /* EPT & VT-d share the same page tables, set SNP bit
@@ -139,7 +139,7 @@ void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page,
     uint16_t i;
     uint64_t local_prot = prot_set;

-    dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);
+    dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%lx size 0x%lx\n", __func__, vm->vm_id, gpa, size);

     if (((local_prot & EPT_MT_MASK) != EPT_UNCACHED) && iommu_snoop_supported(vm->iommu)) {
         local_prot |= EPT_SNOOP_CTRL;
@@ -159,7 +159,7 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t
     struct acrn_vcpu *vcpu;
     uint16_t i;

-    dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);
+    dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%lx size 0x%lx\n", __func__, vm->vm_id, gpa, size);

     mmu_modify_or_del(pml4_page, gpa, size, 0UL, 0UL, &vm->arch_vm.ept_mem_ops, MR_DEL);

@@ -315,7 +315,7 @@ static inline uint32_t local_copy_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t

     hpa = local_gpa2hpa(vm, gpa, &pg_size);
     if (hpa == INVALID_HPA) {
-        pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping",
+        pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping",
             __func__, vm->vm_id, gpa);
         len = 0U;
     } else {
@@ -188,7 +188,7 @@ hyperv_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval)
         break;
     }

-    dev_dbg(ACRN_DBG_HYPERV, "hv: %s: MSR=0x%x wval=0x%llx vcpuid=%d vmid=%d",
+    dev_dbg(ACRN_DBG_HYPERV, "hv: %s: MSR=0x%x wval=0x%lx vcpuid=%d vmid=%d",
         __func__, msr, wval, vcpu->vcpu_id, vcpu->vm->vm_id);

     return ret;
@@ -221,7 +221,7 @@ hyperv_rdmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *rval)
         break;
     }

-    dev_dbg(ACRN_DBG_HYPERV, "hv: %s: MSR=0x%x rval=0x%llx vcpuid=%d vmid=%d",
+    dev_dbg(ACRN_DBG_HYPERV, "hv: %s: MSR=0x%x rval=0x%lx vcpuid=%d vmid=%d",
         __func__, msr, *rval, vcpu->vcpu_id, vcpu->vm->vm_id);

     return ret;
@@ -2355,7 +2355,7 @@ int32_t decode_instruction(struct acrn_vcpu *vcpu)
     retval = vie_init(&emul_ctxt->vie, vcpu);
     if (retval < 0) {
         if (retval != -EFAULT) {
-            pr_err("init vie failed @ 0x%016llx:", vcpu_get_rip(vcpu));
+            pr_err("init vie failed @ 0x%016lx:", vcpu_get_rip(vcpu));
         }
     } else {

@@ -2365,7 +2365,7 @@ int32_t decode_instruction(struct acrn_vcpu *vcpu)
         retval = local_decode_instruction(cpu_mode, seg_desc_def32(csar), &emul_ctxt->vie);

         if (retval != 0) {
-            pr_err("decode instruction failed @ 0x%016llx:", vcpu_get_rip(vcpu));
+            pr_err("decode instruction failed @ 0x%016lx:", vcpu_get_rip(vcpu));
             vcpu_inject_ud(vcpu);
             retval = -EFAULT;
         } else {
@@ -235,7 +235,7 @@ static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
             /* clear read cache, next time read should from VMCS */
             bitmap_clear_lock(CPU_REG_CR0, &vcpu->reg_cached);

-            pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0", cr0_mask, cr0_vmx);
+            pr_dbg("VMM: Try to write %016lx, allow to write 0x%016lx to CR0", cr0_mask, cr0_vmx);
         }
     }
 }
@@ -337,7 +337,7 @@ static void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
             /* clear read cache, next time read should from VMCS */
             bitmap_clear_lock(CPU_REG_CR4, &vcpu->reg_cached);

-            pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR4", cr4, cr4_vmx);
+            pr_dbg("VMM: Try to write %016lx, allow to write 0x%016lx to CR4", cr4, cr4_vmx);
         }
     }
 }
@@ -388,12 +388,12 @@ void init_cr0_cr4_host_mask(void)

     exec_vmwrite(VMX_CR0_GUEST_HOST_MASK, cr0_host_owned_bits);
     /* Output CR0 mask value */
-    pr_dbg("CR0 guest-host mask value: 0x%016llx", cr0_host_owned_bits);
+    pr_dbg("CR0 guest-host mask value: 0x%016lx", cr0_host_owned_bits);


     exec_vmwrite(VMX_CR4_GUEST_HOST_MASK, cr4_host_owned_bits);
     /* Output CR4 mask value */
-    pr_dbg("CR4 guest-host mask value: 0x%016llx", cr4_host_owned_bits);
+    pr_dbg("CR4 guest-host mask value: 0x%016lx", cr4_host_owned_bits);
 }

 uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
@@ -1130,7 +1130,7 @@ vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast
             }
             bitmap_set_nolock(vcpu_id, dmask);
         }
-        dev_dbg(ACRN_DBG_LAPICPT, "%s: logical destmod, dmask: 0x%016llx", __func__, *dmask);
+        dev_dbg(ACRN_DBG_LAPICPT, "%s: logical destmod, dmask: 0x%016lx", __func__, *dmask);
     }
 }

@@ -1527,7 +1527,7 @@ static int32_t vlapic_read(struct acrn_vlapic *vlapic, uint32_t offset_arg, uint
         }
     }

-    dev_dbg(ACRN_DBG_LAPIC, "vlapic read offset %#x, data %#llx", offset, *data);
+    dev_dbg(ACRN_DBG_LAPIC, "vlapic read offset %x, data %lx", offset, *data);
     return ret;
 }

@@ -334,7 +334,7 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
     const struct e820_entry *p_e820 = vm->e820_entries;
     const struct mem_range *p_mem_range_info = get_mem_range_info();

-    pr_dbg("sos_vm: bottom memory - 0x%llx, top memory - 0x%llx\n",
+    pr_dbg("sos_vm: bottom memory - 0x%lx, top memory - 0x%lx\n",
         p_mem_range_info->mem_bottom, p_mem_range_info->mem_top);

     if (p_mem_range_info->mem_top > EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE)) {
@@ -357,7 +357,7 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
     for (i = 0U; i < entries_count; i++) {
         entry = p_e820 + i;
         pr_dbg("e820 table: %d type: 0x%x", i, entry->type);
-        pr_dbg("BaseAddress: 0x%016llx length: 0x%016llx\n", entry->baseaddr, entry->length);
+        pr_dbg("BaseAddress: 0x%016lx length: 0x%016lx\n", entry->baseaddr, entry->length);
     }

     /* Unmap all platform EPC resource from SOS.
@@ -609,7 +609,7 @@ int32_t shutdown_vm(struct acrn_vm *vm)
     wait_pcpus_offline(mask);

     if ((mask != 0UL) && (!start_pcpus(mask))) {
-        pr_fatal("Failed to start all cpus in mask(0x%llx)", mask);
+        pr_fatal("Failed to start all cpus in mask(0x%lx)", mask);
         ret = -ETIMEDOUT;
     }

@@ -690,7 +690,7 @@ int32_t reset_vm(struct acrn_vm *vm)
     wait_pcpus_offline(mask);

     if ((mask != 0UL) && (!start_pcpus(mask))) {
-        pr_fatal("Failed to start all cpus in mask(0x%llx)", mask);
+        pr_fatal("Failed to start all cpus in mask(0x%lx)", mask);
         ret = -ETIMEDOUT;
     }

@@ -227,7 +227,7 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
     uint64_t hypcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8);

     if (!is_hypercall_from_ring0()) {
-        pr_err("hypercall 0x%llx is only allowed from RING-0!\n", hypcall_id);
+        pr_err("hypercall 0x%lx is only allowed from RING-0!\n", hypcall_id);
         vcpu_inject_gp(vcpu, 0U);
         ret = -EACCES;
     } else if (hypcall_id == HC_WORLD_SWITCH) {
@@ -243,7 +243,7 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
         /* Dispatch the hypercall handler */
         ret = dispatch_sos_hypercall(vcpu);
     } else {
-        pr_err("hypercall 0x%llx is only allowed from SOS_VM!\n", hypcall_id);
+        pr_err("hypercall 0x%lx is only allowed from SOS_VM!\n", hypcall_id);
         vcpu_inject_ud(vcpu);
         ret = -ENODEV;
     }
@@ -31,14 +31,14 @@ static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,
     exec_vmwrite(VMX_GUEST_CR3, cr3);

     exec_vmwrite(VMX_GUEST_GDTR_BASE, ectx->gdtr.base);
-    pr_dbg("VMX_GUEST_GDTR_BASE: 0x%016llx", ectx->gdtr.base);
+    pr_dbg("VMX_GUEST_GDTR_BASE: 0x%016lx", ectx->gdtr.base);
     exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, ectx->gdtr.limit);
-    pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%016llx", ectx->gdtr.limit);
+    pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%016lx", ectx->gdtr.limit);

     exec_vmwrite(VMX_GUEST_IDTR_BASE, ectx->idtr.base);
-    pr_dbg("VMX_GUEST_IDTR_BASE: 0x%016llx", ectx->idtr.base);
+    pr_dbg("VMX_GUEST_IDTR_BASE: 0x%016lx", ectx->idtr.base);
     exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, ectx->idtr.limit);
-    pr_dbg("VMX_GUEST_IDTR_LIMIT: 0x%016llx", ectx->idtr.limit);
+    pr_dbg("VMX_GUEST_IDTR_LIMIT: 0x%016lx", ectx->idtr.limit);

     /* init segment selectors: es, cs, ss, ds, fs, gs, ldtr, tr */
     load_segment(ectx->cs, VMX_GUEST_CS);
@@ -143,7 +143,7 @@ static void init_host_state(void)
     tss_addr = hva2hpa((void *)&get_cpu_var(tss));
     /* Set up host TR base fields */
     exec_vmwrite(VMX_HOST_TR_BASE, tss_addr);
-    pr_dbg("VMX_HOST_TR_BASE: 0x%016llx ", tss_addr);
+    pr_dbg("VMX_HOST_TR_BASE: 0x%016lx ", tss_addr);

     /* Obtain the current interrupt descriptor table base */
     idt_base = sidt();
@@ -161,11 +161,11 @@ static void init_host_state(void)

     value64 = msr_read(MSR_IA32_PAT);
     exec_vmwrite64(VMX_HOST_IA32_PAT_FULL, value64);
-    pr_dbg("VMX_HOST_IA32_PAT: 0x%016llx ", value64);
+    pr_dbg("VMX_HOST_IA32_PAT: 0x%016lx ", value64);

     value64 = msr_read(MSR_IA32_EFER);
     exec_vmwrite64(VMX_HOST_IA32_EFER_FULL, value64);
-    pr_dbg("VMX_HOST_IA32_EFER: 0x%016llx ",
+    pr_dbg("VMX_HOST_IA32_EFER: 0x%016lx ",
         value64);

     /**************************************************/
@@ -174,31 +174,31 @@ static void init_host_state(void)
     /* Set up host CR0 field */
     CPU_CR_READ(cr0, &value);
     exec_vmwrite(VMX_HOST_CR0, value);
-    pr_dbg("VMX_HOST_CR0: 0x%016llx ", value);
+    pr_dbg("VMX_HOST_CR0: 0x%016lx ", value);

     /* Set up host CR3 field */
     CPU_CR_READ(cr3, &value);
     exec_vmwrite(VMX_HOST_CR3, value);
-    pr_dbg("VMX_HOST_CR3: 0x%016llx ", value);
+    pr_dbg("VMX_HOST_CR3: 0x%016lx ", value);

     /* Set up host CR4 field */
     CPU_CR_READ(cr4, &value);
     exec_vmwrite(VMX_HOST_CR4, value);
-    pr_dbg("VMX_HOST_CR4: 0x%016llx ", value);
+    pr_dbg("VMX_HOST_CR4: 0x%016lx ", value);

     /* Set up host and guest FS base address */
     value = msr_read(MSR_IA32_FS_BASE);
     exec_vmwrite(VMX_HOST_FS_BASE, value);
-    pr_dbg("VMX_HOST_FS_BASE: 0x%016llx ", value);
+    pr_dbg("VMX_HOST_FS_BASE: 0x%016lx ", value);
     value = msr_read(MSR_IA32_GS_BASE);
     exec_vmwrite(VMX_HOST_GS_BASE, value);
-    pr_dbg("VMX_HOST_GS_BASE: 0x%016llx ", value);
+    pr_dbg("VMX_HOST_GS_BASE: 0x%016lx ", value);

     /* Set up host instruction pointer on VM Exit */
     value64 = (uint64_t)&vm_exit;
-    pr_dbg("HOST RIP on VMExit %016llx ", value64);
+    pr_dbg("HOST RIP on VMExit %016lx ", value64);
     exec_vmwrite(VMX_HOST_RIP, value64);
-    pr_dbg("vm exit return address = %016llx ", value64);
+    pr_dbg("vm exit return address = %016lx ", value64);

     /* As a type I hypervisor, just init sysenter fields to 0 */
     exec_vmwrite32(VMX_HOST_IA32_SYSENTER_CS, 0U);
@@ -353,7 +353,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
      */
     value64 = hva2hpa(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL;
     exec_vmwrite64(VMX_EPT_POINTER_FULL, value64);
-    pr_dbg("VMX_EPT_POINTER: 0x%016llx ", value64);
+    pr_dbg("VMX_EPT_POINTER: 0x%016lx ", value64);

     /* Set up guest exception mask bitmap setting a bit * causes a VM exit
      * on corresponding guest * exception - pg 2902 24.6.3
@@ -384,10 +384,10 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
     /* Set up IO bitmap register A and B - pg 2902 24.6.4 */
     value64 = hva2hpa(vm->arch_vm.io_bitmap);
     exec_vmwrite64(VMX_IO_BITMAP_A_FULL, value64);
-    pr_dbg("VMX_IO_BITMAP_A: 0x%016llx ", value64);
+    pr_dbg("VMX_IO_BITMAP_A: 0x%016lx ", value64);
     value64 = hva2hpa((void *)&(vm->arch_vm.io_bitmap[PAGE_SIZE]));
     exec_vmwrite64(VMX_IO_BITMAP_B_FULL, value64);
-    pr_dbg("VMX_IO_BITMAP_B: 0x%016llx ", value64);
+    pr_dbg("VMX_IO_BITMAP_B: 0x%016lx ", value64);

     init_msr_emulation(vcpu);

@@ -205,11 +205,11 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)
     basic_exit_reason = (uint16_t)(vcpu->arch.exit_reason & 0xFFFFU);

     /* Log details for exit */
-    pr_dbg("Exit Reason: 0x%016llx ", vcpu->arch.exit_reason);
+    pr_dbg("Exit Reason: 0x%016lx ", vcpu->arch.exit_reason);

     /* Ensure exit reason is within dispatch table */
     if (basic_exit_reason >= ARRAY_SIZE(dispatch_table)) {
-        pr_err("Invalid Exit Reason: 0x%016llx ", vcpu->arch.exit_reason);
+        pr_err("Invalid Exit Reason: 0x%016lx ", vcpu->arch.exit_reason);
         ret = -EINVAL;
     } else {
         /* Calculate dispatch table entry */
@@ -244,12 +244,12 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)

 static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-    pr_fatal("Error: Unhandled VM exit condition from guest at 0x%016llx ",
+    pr_fatal("Error: Unhandled VM exit condition from guest at 0x%016lx ",
         exec_vmread(VMX_GUEST_RIP));

-    pr_fatal("Exit Reason: 0x%016llx ", vcpu->arch.exit_reason);
+    pr_fatal("Exit Reason: 0x%016lx ", vcpu->arch.exit_reason);

-    pr_err("Exit qualification: 0x%016llx ",
+    pr_err("Exit qualification: 0x%016lx ",
         exec_vmread(VMX_EXIT_QUALIFICATION));

     TRACE_2L(TRACE_VMEXIT_UNHANDLED, vcpu->arch.exit_reason, 0UL);
@@ -259,7 +259,7 @@ static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu)

 static int32_t triple_fault_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-    pr_fatal("VM%d: triple fault @ guest RIP 0x%016llx, exit qualification: 0x%016llx",
+    pr_fatal("VM%d: triple fault @ guest RIP 0x%016lx, exit qualification: 0x%016lx",
         vcpu->vm->vm_id, exec_vmread(VMX_GUEST_RIP), exec_vmread(VMX_EXIT_QUALIFICATION));
     triple_fault_shutdown_vm(vcpu);

@@ -347,7 +347,7 @@ void init_msr_emulation(struct acrn_vcpu *vcpu)
     /* Setup MSR bitmap - Intel SDM Vol3 24.6.9 */
     value64 = hva2hpa(vcpu->arch.msr_bitmap);
     exec_vmwrite64(VMX_MSR_BITMAP_FULL, value64);
-    pr_dbg("VMX_MSR_BITMAP: 0x%016llx ", value64);
+    pr_dbg("VMX_MSR_BITMAP: 0x%016lx ", value64);

     /* Initialize the MSR save/store area */
     init_msr_area(vcpu);
@@ -362,7 +362,7 @@ static int32_t write_pat_msr(struct acrn_vcpu *vcpu, uint64_t value)
     for (i = 0U; i < 8U; i++) {
         field = (value >> (i * 8U)) & 0xffUL;
         if (pat_mem_type_invalid(field) || ((PAT_FIELD_RSV_BITS & field) != 0UL)) {
-            pr_err("invalid guest IA32_PAT: 0x%016llx", value);
+            pr_err("invalid guest IA32_PAT: 0x%016lx", value);
             ret = -EINVAL;
             break;
         }
@@ -127,7 +127,7 @@ void init_vmtrr(struct acrn_vcpu *vcpu)
             vmtrr->fixed_range[i].value = MTRR_FIXED_RANGE_ALL_WB;
         }

-        pr_dbg("vm%d vcpu%hu fixed-range MTRR[%u]: %16llx",
+        pr_dbg("vm%d vcpu%hu fixed-range MTRR[%u]: %16lx",
             vcpu->vm->vm_id, vcpu->vcpu_id, i,
             vmtrr->fixed_range[i].value);
     }
@@ -164,8 +164,8 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
     }

     if (ret <= 0) {
-        pr_acrnlog("Guest Linear Address: 0x%016llx", exec_vmread(VMX_GUEST_LINEAR_ADDR));
-        pr_acrnlog("Guest Physical Address address: 0x%016llx", gpa);
+        pr_acrnlog("Guest Linear Address: 0x%016lx", exec_vmread(VMX_GUEST_LINEAR_ADDR));
+        pr_acrnlog("Guest Physical Address address: 0x%016lx", gpa);
     }
     return status;
 }
@@ -106,7 +106,7 @@ static inline int32_t asm_invept(uint64_t type, struct invept_desc desc)
 static inline void local_invept(uint64_t type, struct invept_desc desc)
 {
     if (asm_invept(type, desc) != 0) {
-        pr_dbg("%s, failed. type = %llu, eptp = 0x%llx", __func__, type, desc.eptp);
+        pr_dbg("%s, failed. type = %llu, eptp = 0x%lx", __func__, type, desc.eptp);
     }
 }

@@ -228,7 +228,7 @@ void init_paging(void)
     high64_max_ram = round_pde_up(p_mem_range_info->mem_top);
     if ((high64_max_ram > (CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)) ||
         (high64_max_ram < (1UL << 32U))) {
-        printf("ERROR!!! high64_max_ram: 0x%llx, top address space: 0x%llx\n",
+        printf("ERROR!!! high64_max_ram: 0x%lx, top address space: 0x%lx\n",
             high64_max_ram, CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE);
         panic("Please configure HV_ADDRESS_SPACE correctly!\n");
     }
@@ -41,7 +41,7 @@ static void split_large_page(uint64_t *pte, enum _page_table_level level,
         break;
     }

-    dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%llx, pbase: 0x%llx\n", __func__, ref_paddr, pbase);
+    dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%lx, pbase: 0x%lx\n", __func__, ref_paddr, pbase);

     paddr = ref_paddr;
     for (i = 0UL; i < PTRS_PER_PTE; i++) {
@@ -92,7 +92,7 @@ static void modify_or_del_pte(const uint64_t *pde, uint64_t vaddr_start, uint64_
     uint64_t vaddr = vaddr_start;
     uint64_t index = pte_index(vaddr);

-    dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n", __func__, vaddr, vaddr_end);
+    dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
     for (; index < PTRS_PER_PTE; index++) {
         uint64_t *pte = pt_page + index;

@@ -102,7 +102,7 @@ static void modify_or_del_pte(const uint64_t *pde, uint64_t vaddr_start, uint64_
              * is present or not.
              */
             if ((type == MR_MODIFY) && (vaddr >= MEM_1M)) {
                 pr_warn("%s, vaddr: 0x%llx pte is not present.\n", __func__, vaddr);
             }
         } else {
             local_modify_or_del_pte(pte, prot_set, prot_clr, type, mem_ops);
@@ -129,14 +129,14 @@ static void modify_or_del_pde(const uint64_t *pdpte, uint64_t vaddr_start, uint6
     uint64_t vaddr = vaddr_start;
     uint64_t index = pde_index(vaddr);

-    dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n", __func__, vaddr, vaddr_end);
+    dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
     for (; index < PTRS_PER_PDE; index++) {
         uint64_t *pde = pd_page + index;
         uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;

         if (mem_ops->pgentry_present(*pde) == 0UL) {
             if (type == MR_MODIFY) {
-                pr_warn("%s, addr: 0x%llx pde is not present.\n", __func__, vaddr);
+                pr_warn("%s, addr: 0x%lx pde is not present.\n", __func__, vaddr);
             }
         } else {
             if (pde_large(*pde) != 0UL) {
@@ -174,14 +174,14 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
     uint64_t vaddr = vaddr_start;
     uint64_t index = pdpte_index(vaddr);

-    dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n", __func__, vaddr, vaddr_end);
+    dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
     for (; index < PTRS_PER_PDPTE; index++) {
         uint64_t *pdpte = pdpt_page + index;
         uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;

         if (mem_ops->pgentry_present(*pdpte) == 0UL) {
             if (type == MR_MODIFY) {
-                pr_warn("%s, vaddr: 0x%llx pdpte is not present.\n", __func__, vaddr);
+                pr_warn("%s, vaddr: 0x%lx pdpte is not present.\n", __func__, vaddr);
             }
         } else {
             if (pdpte_large(*pdpte) != 0UL) {
@@ -227,7 +227,7 @@ void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
     uint64_t *pml4e;

     vaddr_end = vaddr + round_page_down(size);
-    dev_dbg(ACRN_DBG_MMU, "%s, vaddr: 0x%llx, size: 0x%llx\n",
+    dev_dbg(ACRN_DBG_MMU, "%s, vaddr: 0x%lx, size: 0x%lx\n",
         __func__, vaddr, size);

     while (vaddr < vaddr_end) {
@@ -254,13 +254,13 @@ static void add_pte(const uint64_t *pde, uint64_t paddr_start, uint64_t vaddr_st
     uint64_t paddr = paddr_start;
     uint64_t index = pte_index(vaddr);

-    dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%llx, vaddr: [0x%llx - 0x%llx]\n",
+    dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n",
         __func__, paddr, vaddr_start, vaddr_end);
     for (; index < PTRS_PER_PTE; index++) {
         uint64_t *pte = pt_page + index;

         if (mem_ops->pgentry_present(*pte) != 0UL) {
-            pr_fatal("%s, pte 0x%llx is already present!\n", __func__, vaddr);
+            pr_fatal("%s, pte 0x%lx is already present!\n", __func__, vaddr);
         } else {
             set_pgentry(pte, paddr | prot, mem_ops);
             paddr += PTE_SIZE;
@@ -285,14 +285,14 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
     uint64_t paddr = paddr_start;
     uint64_t index = pde_index(vaddr);

-    dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%llx, vaddr: [0x%llx - 0x%llx]\n",
+    dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n",
         __func__, paddr, vaddr, vaddr_end);
     for (; index < PTRS_PER_PDE; index++) {
         uint64_t *pde = pd_page + index;
         uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;

         if (pde_large(*pde) != 0UL) {
-            pr_fatal("%s, pde 0x%llx is already present!\n", __func__, vaddr);
+            pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr);
         } else {
             if (mem_ops->pgentry_present(*pde) == 0UL) {
                 if (mem_aligned_check(paddr, PDE_SIZE) &&
@@ -332,13 +332,13 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
     uint64_t paddr = paddr_start;
     uint64_t index = pdpte_index(vaddr);

-    dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%llx, vaddr: [0x%llx - 0x%llx]\n", __func__, paddr, vaddr, vaddr_end);
+    dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n", __func__, paddr, vaddr, vaddr_end);
     for (; index < PTRS_PER_PDPTE; index++) {
         uint64_t *pdpte = pdpt_page + index;
         uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;

         if (pdpte_large(*pdpte) != 0UL) {
-            pr_fatal("%s, pdpte 0x%llx is already present!\n", __func__, vaddr);
+            pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr);
         } else {
             if (mem_ops->pgentry_present(*pdpte) == 0UL) {
                 if (mem_aligned_check(paddr, PDPTE_SIZE) &&
@@ -378,7 +378,7 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base, uint
     uint64_t paddr;
     uint64_t *pml4e;

-    dev_dbg(ACRN_DBG_MMU, "%s, paddr 0x%llx, vaddr 0x%llx, size 0x%llx\n", __func__, paddr_base, vaddr_base, size);
+    dev_dbg(ACRN_DBG_MMU, "%s, paddr 0x%lx, vaddr 0x%lx, size 0x%lx\n", __func__, paddr_base, vaddr_base, size);

     /* align address to page size*/
     vaddr = round_page_up(vaddr_base);
@@ -109,7 +109,7 @@ uint64_t prepare_trampoline(void)
     size = (uint64_t)(&ld_trampoline_end - &ld_trampoline_start);
     dest_pa = get_ap_trampoline_buf();

-    pr_dbg("trampoline code: %llx size %x", dest_pa, size);
+    pr_dbg("trampoline code: %lx size %x", dest_pa, size);

     /* Copy segment for AP initialization code below 1MB */
     (void)memcpy_s(hpa2hva(dest_pa), (size_t)size, &ld_trampoline_load,
@@ -464,7 +464,7 @@ static int32_t dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit)
 {
     int32_t ret = 0;

-    dev_dbg(ACRN_DBG_IOMMU, "Register dmar uint [%d] @0x%llx", dmar_unit->index, dmar_unit->drhd->reg_base_addr);
+    dev_dbg(ACRN_DBG_IOMMU, "Register dmar uint [%d] @0x%lx", dmar_unit->index, dmar_unit->drhd->reg_base_addr);

     spinlock_init(&dmar_unit->lock);

@@ -479,7 +479,7 @@ static int32_t dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit)
     dmar_unit->ecap_iotlb_offset = iommu_ecap_iro(dmar_unit->ecap) * 16U;

 #if DBG_IOMMU
-    pr_info("version:0x%x, cap:0x%llx, ecap:0x%llx",
+    pr_info("version:0x%x, cap:0x%lx, ecap:0x%lx",
         iommu_read32(dmar_unit, DMAR_VER_REG), dmar_unit->cap, dmar_unit->ecap);
     pr_info("sagaw:0x%x, msagaw:0x%x, iotlb offset 0x%x",
         iommu_cap_sagaw(dmar_unit->cap), dmar_unit->cap_msagaw, dmar_unit->ecap_iotlb_offset);
@@ -875,7 +875,7 @@ static void fault_record_analysis(__unused uint64_t low, uint64_t high)
     if (!dma_frcd_up_f(high)) {
         dmar_bdf.value = dma_frcd_up_sid(high);
         /* currently skip PASID related parsing */
-        pr_info("%s, Reason: 0x%x, SID: %x.%x.%x @0x%llx",
+        pr_info("%s, Reason: 0x%x, SID: %x.%x.%x @0x%lx",
             (dma_frcd_up_t(high) != 0U) ? "Read/Atomic" : "Write", dma_frcd_up_fr(high),
             dmar_bdf.bits.b, dmar_bdf.bits.d, dmar_bdf.bits.f, low);
 #if DBG_IOMMU
@@ -916,7 +916,7 @@ static void dmar_fault_handler(uint32_t irq, void *data)
         fault_record.lo_64 = iommu_read64(dmar_unit, record_reg_offset);
         fault_record.hi_64 = iommu_read64(dmar_unit, record_reg_offset + 8U);

-        dev_dbg(ACRN_DBG_IOMMU, "%s: record[%d] @0x%x: 0x%llx, 0x%llx",
+        dev_dbg(ACRN_DBG_IOMMU, "%s: record[%d] @0x%x: 0x%lx, 0x%lx",
             __func__, index, record_reg_offset, fault_record.lo_64, fault_record.hi_64);

         fault_record_analysis(fault_record.lo_64, fault_record.hi_64);
@@ -1117,7 +1117,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
         ret = -EINVAL;
     } else if (dmar_get_bitslice(context_entry->lo_64, CTX_ENTRY_LOWER_P_MASK, CTX_ENTRY_LOWER_P_POS) != 0UL) {
         /* the context entry should not be present */
-        pr_err("%s: context entry@0x%llx (Lower:%x) ", __func__, context_entry, context_entry->lo_64);
+        pr_err("%s: context entry@0x%lx (Lower:%x) ", __func__, context_entry, context_entry->lo_64);
         pr_err("already present for %x:%x.%x", bus, sid.bits.d, sid.bits.f);
         ret = -EBUSY;
     } else {
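A closing note on catching such mismatches going forward (a general C technique, not something this commit adds): when logging helpers are declared with GCC/Clang's printf-style format attribute, -Wformat flags any "%llx"-vs-uint64_t mismatch at compile time on LP64 targets. A minimal sketch, with a hypothetical pr_dbg() standing in for the hypervisor's actual logging helper and prototype:

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* The format attribute tells the compiler to type-check callers'
 * format strings against their arguments (GCC/Clang extension). */
static void pr_dbg(const char *fmt, ...) __attribute__((format(printf, 1, 2)));

static void pr_dbg(const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    (void)vprintf(fmt, args);
    va_end(args);
}

int main(void)
{
    uint64_t value = 0xdeadbeefUL;

    pr_dbg("value: 0x%lx\n", value);    /* OK on LP64: uint64_t == unsigned long */
    /* pr_dbg("value: 0x%llx\n", value);   would trigger a -Wformat warning */
    return 0;
}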