Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: refine 'uint64_t' string print format in comm module
Use "0x%lx" string to format 'uint64_t' type value, instead of "0x%llx". Tracked-On: #4020 Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
This commit is contained in:
parent e51386fe04
commit 0eb427f122
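Background for the change (not part of the commit itself): the hypervisor is built for x86_64, an LP64 target where 'uint64_t' is 'unsigned long', so the matching printf-style length modifier is 'l' ("%lx"/"%lu"); "%llx" expects 'unsigned long long' and draws format warnings from -Wformat on such targets. A minimal, self-contained C sketch of the idea, using the standard library rather than the hypervisor's own pr_*/snprintf helpers:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t gpa = 0x12345678UL;

            /* On an LP64 target, uint64_t is 'unsigned long', so "%lx" matches.
             * The cast keeps this line warning-free on other data models too. */
            printf("gpa = 0x%lx\n", (unsigned long)gpa);

            /* Portable alternative when the underlying type may differ. */
            printf("gpa = 0x%" PRIx64 "\n", gpa);

            return 0;
    }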
@@ -195,7 +195,7 @@ void init_pcpu_post(uint16_t pcpu_id)
         /* Calibrate TSC Frequency */
         calibrate_tsc();

-        pr_acrnlog("HV version %s-%s-%s %s (daily tag:%s) build by %s%s, start time %lluus",
+        pr_acrnlog("HV version %s-%s-%s %s (daily tag:%s) build by %s%s, start time %luus",
                 HV_FULL_VERSION,
                 HV_BUILD_TIME, HV_BUILD_VERSION, HV_BUILD_TYPE,
                 HV_DAILY_TAG,
@@ -85,7 +85,7 @@ static inline void local_invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
         const struct invvpid_operand operand = { vpid, 0U, 0U, gva };

         if (asm_invvpid(operand, type) != 0) {
-                pr_dbg("%s, failed. type = %llu, vpid = %u", __func__, type, vpid);
+                pr_dbg("%s, failed. type = %lu, vpid = %u", __func__, type, vpid);
         }
 }

@@ -106,7 +106,7 @@ static inline int32_t asm_invept(uint64_t type, struct invept_desc desc)
 static inline void local_invept(uint64_t type, struct invept_desc desc)
 {
         if (asm_invept(type, desc) != 0) {
-                pr_dbg("%s, failed. type = %llu, eptp = 0x%lx", __func__, type, desc.eptp);
+                pr_dbg("%s, failed. type = %lu, eptp = 0x%lx", __func__, type, desc.eptp);
         }
 }

@@ -54,7 +54,7 @@ int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid)
         uint16_t i;
         int32_t ret = 0;

-        pr_info("sos offline cpu with lapicid %lld", lapicid);
+        pr_info("sos offline cpu with lapicid %ld", lapicid);

         foreach_vcpu(i, vm, vcpu) {
                 if (vlapic_get_apicid(vcpu_vlapic(vcpu)) == lapicid) {
@@ -164,7 +164,7 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
         /* GUEST_FLAG_RT must be set if we have GUEST_FLAG_LAPIC_PASSTHROUGH set in guest_flags */
         if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0U)
                         && ((vm_config->guest_flags & GUEST_FLAG_RT) == 0U)) {
-                pr_err("Wrong guest flags 0x%llx\n", vm_config->guest_flags);
+                pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags);
                 ret = -1;
         } else {
                 ret = create_vm(vm_id, vm_config, &target_vm);
@@ -390,7 +390,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
         vmsi_addr.full = vmsi->msi_addr;
         vmsi_data.full = (uint32_t)vmsi->msi_data;

-        dev_dbg(ACRN_DBG_LAPICPT, "%s: msi_addr 0x%016llx, msi_data 0x%016llx",
+        dev_dbg(ACRN_DBG_LAPICPT, "%s: msi_addr 0x%016lx, msi_data 0x%016lx",
                 __func__, vmsi->msi_addr, vmsi->msi_data);

         if (vmsi_addr.bits.addr_base == MSI_ADDR_BASE) {
@@ -402,7 +402,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
          * and handled by hardware.
          */
         vlapic_calc_dest_lapic_pt(vm, &vdmask, false, vdest, phys);
-        dev_dbg(ACRN_DBG_LAPICPT, "%s: vcpu destination mask 0x%016llx", __func__, vdmask);
+        dev_dbg(ACRN_DBG_LAPICPT, "%s: vcpu destination mask 0x%016lx", __func__, vdmask);

         vcpu_id = ffs64(vdmask);
         while (vcpu_id != INVALID_BIT_INDEX) {
@@ -419,7 +419,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
                 icr.bits.destination_mode = MSI_ADDR_DESTMODE_LOGICAL;

                 msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
-                dev_dbg(ACRN_DBG_LAPICPT, "%s: icr.value 0x%016llx", __func__, icr.value);
+                dev_dbg(ACRN_DBG_LAPICPT, "%s: icr.value 0x%016lx", __func__, icr.value);
         }
 }

@@ -510,7 +510,7 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param

         hpa = gpa2hpa(vm, iobuf.req_buf);
         if (hpa == INVALID_HPA) {
-                pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+                pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
                         __func__, vm->vm_id, iobuf.req_buf);
                 target_vm->sw.io_shared_page = NULL;
         } else {
@@ -577,7 +577,7 @@ static int32_t add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_v

         hpa = gpa2hpa(vm, region->sos_vm_gpa);
         if (hpa == INVALID_HPA) {
-                pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+                pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
                         __func__, vm->vm_id, region->sos_vm_gpa);
                 ret = -EINVAL;
         } else {
@@ -632,13 +632,13 @@ static int32_t set_vm_memory_region(struct acrn_vm *vm,
         if ((region->size & (PAGE_SIZE - 1UL)) != 0UL) {
                 pr_err("%s: [vm%d] map size 0x%x is not page aligned",
                         __func__, target_vm->vm_id, region->size);
-                ret = -EINVAL;
+                ret = -EINVAL;
         } else {
                 if (!ept_is_mr_valid(target_vm, region->gpa, region->size)) {
-                        pr_err("%s, invalid gpa: 0x%llx, size: 0x%llx, top_address_space: 0x%llx", __func__,
-                                region->gpa, region->size,
-                                target_vm->arch_vm.ept_mem_ops.info->ept.top_address_space);
-                        ret = 0;
+                        pr_err("%s, invalid gpa: 0x%lx, size: 0x%lx, top_address_space: 0x%lx", __func__,
+                                region->gpa, region->size,
+                                target_vm->arch_vm.ept_mem_ops.info->ept.top_address_space);
+                        ret = 0;
                 } else {
                         dev_dbg(ACRN_DBG_HYCALL,
                                 "[vm%d] type=%d gpa=0x%x sos_vm_gpa=0x%x size=0x%x",
@@ -718,7 +718,7 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)

         hpa = gpa2hpa(vm, wp->gpa);
         if (hpa == INVALID_HPA) {
-                pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+                pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
                         __func__, vm->vm_id, wp->gpa);
                 ret = -EINVAL;
         } else {
@@ -799,7 +799,7 @@ int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
                         && (copy_from_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) == 0)) {
                 v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
                 if (v_gpa2hpa.hpa == INVALID_HPA) {
-                        pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+                        pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
                                 __func__, target_vm->vm_id, v_gpa2hpa.gpa);
                 } else if (copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
                         pr_err("%s: Unable copy param to vm\n", __func__);
@@ -145,7 +145,7 @@ static void prepare_loading_bzimage(struct acrn_vm *vm, struct acrn_vcpu *vcpu)

         reserving_1g_pages = (vm_config->memory.size >> 30U) - NUM_REMAIN_1G_PAGES;
         if (reserving_1g_pages > 0) {
-                snprintf(dyn_bootargs, 100U, " hugepagesz=1G hugepages=%lld", reserving_1g_pages);
+                snprintf(dyn_bootargs, 100U, " hugepagesz=1G hugepages=%ld", reserving_1g_pages);
                 (void)copy_to_gpa(vm, dyn_bootargs, ((uint64_t)bootargs_info->load_addr
                         + bootargs_info->size), (strnlen_s(dyn_bootargs, 99U) + 1U));
         }
@@ -224,7 +224,7 @@ int32_t direct_boot_sw_loader(struct acrn_vm *vm)
         if (ret == 0) {
                 /* Set VCPU entry point to kernel entry */
                 vcpu_set_rip(vcpu, (uint64_t)sw_kernel->kernel_entry_addr);
-                pr_info("%s, VM %hu VCPU %hu Entry: 0x%016llx ", __func__, vm->vm_id, vcpu->vcpu_id,
+                pr_info("%s, VM %hu VCPU %hu Entry: 0x%016lx ", __func__, vm->vm_id, vcpu->vcpu_id,
                         sw_kernel->kernel_entry_addr);
         }

@@ -58,7 +58,7 @@ static int32_t hcall_profiling_ops(struct acrn_vm *vm, uint64_t cmd, uint64_t pa
                 ret = profiling_get_status_info(vm, param);
                 break;
         default:
-                pr_err("%s: invalid profiling command %llu\n", __func__, cmd);
+                pr_err("%s: invalid profiling command %lu\n", __func__, cmd);
                 ret = -1;
                 break;
         }
@@ -61,7 +61,7 @@ void do_logmsg(uint32_t severity, const char *fmt, ...)

         (void)memset(buffer, 0U, LOG_MESSAGE_MAX_SIZE);
         /* Put time-stamp, CPU ID and severity into buffer */
-        snprintf(buffer, LOG_MESSAGE_MAX_SIZE, "[%lluus][cpu=%hu][sev=%u][seq=%u]:",
+        snprintf(buffer, LOG_MESSAGE_MAX_SIZE, "[%luus][cpu=%hu][sev=%u][seq=%u]:",
                 timestamp, pcpu_id, severity, atomic_inc_return(&logmsg_ctl.seq));

         /* Put message into remaining portion of local buffer */
@@ -996,7 +996,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
                 sep_collection_switch = prof_control.switches;

                 dev_dbg(ACRN_DBG_PROFILING,
-                        " old_switch: %llu sep_collection_switch: %llu!",
+                        " old_switch: %lu sep_collection_switch: %lu!",
                         old_switch, sep_collection_switch);

                 for (i = 0U; i < (uint16_t)MAX_SEP_FEATURE_ID; i++) {
@@ -1030,7 +1030,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
                 socwatch_collection_switch = prof_control.switches;

                 dev_dbg(ACRN_DBG_PROFILING,
-                        "socwatch_collection_switch: %llu!",
+                        "socwatch_collection_switch: %lu!",
                         socwatch_collection_switch);

                 if (socwatch_collection_switch != 0UL) {
@@ -500,7 +500,7 @@ hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
                         read_write = mmio_handler->read_write;
                         handler_private_data = mmio_handler->handler_private_data;
                 } else {
-                        pr_fatal("Err MMIO, address:0x%llx, size:%x", address, size);
+                        pr_fatal("Err MMIO, address:0x%lx, size:%x", address, size);
                         status = -EIO;
                 }
                 break;
@@ -578,8 +578,7 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
                  */
                 struct pio_request *pio_req = &io_req->reqs.pio;

-                pr_fatal("%s Err: access dir %d, io_type %d, "
-                        "addr = 0x%llx, size=%lu", __func__,
+                pr_fatal("%s Err: access dir %d, io_type %d, addr = 0x%lx, size=%lu", __func__,
                         pio_req->direction, io_req->io_type,
                         pio_req->address, pio_req->size);
         }
@@ -636,7 +635,7 @@ static inline struct mem_io_node *find_match_mmio_node(struct acrn_vm *vm,
         }

         if (!found) {
-                pr_fatal("%s, vm[%d] no match mmio region [0x%llx, 0x%llx] is found",
+                pr_fatal("%s, vm[%d] no match mmio region [0x%lx, 0x%lx] is found",
                         __func__, vm->vm_id, start, end);
                 mmio_node = NULL;
         }
@@ -185,7 +185,7 @@ static void vdev_pt_map_mem_vbar(struct pci_vdev *vdev, uint32_t idx)
                 /* Remember the previously mapped MMIO vbar */
                 vdev->bar_base_mapped[idx] = vbar_base;
         } else {
-                pr_fatal("%s, %x:%x.%x set invalid bar[%d] address: 0x%llx\n", __func__,
+                pr_fatal("%s, %x:%x.%x set invalid bar[%d] address: 0x%lx\n", __func__,
                         vdev->bdf.bits.b, vdev->bdf.bits.d, vdev->bdf.bits.f, idx, vbar_base);
         }
 }
@@ -271,7 +271,7 @@ static void vmsix_table_rw(const struct pci_vdev *vdev, struct mmio_request *mmi

                 }
         } else {
-                pr_err("%s, invalid arguments %llx - %llx", __func__, mmio->value, mmio->address);
+                pr_err("%s, invalid arguments %lx - %lx", __func__, mmio->value, mmio->address);
         }

 }