Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-08-10 20:43:48 +00:00)

hv: rename the ACRN_DBG_XXX

Rename the 'ACRN_DBG_XXX' log-level macros to 'DBG_LEVEL_XXX'.

Tracked-On: #4348
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>

This commit is contained in:
parent 03f5c639a0
commit b90862921e
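
Every hunk that follows applies the same mechanical substitution: a per-module ACRN_DBG_XXX macro, used as the log-level argument of dev_dbg(), becomes DBG_LEVEL_XXX with its numeric value unchanged. As a reminder of how such a level constant gates a debug message, here is a minimal, self-contained C sketch; it is an illustration only, not ACRN's actual dev_dbg()/logmsg implementation, and log_level_threshold and dev_dbg_sketch() are hypothetical names introduced for this example.

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Per-module debug level, named as after this commit's rename. */
#define DBG_LEVEL_E820 6U

/* Hypothetical global threshold: messages with a level above it are dropped. */
static uint32_t log_level_threshold = 6U;

/* Hypothetical stand-in for dev_dbg(): emit the message only when its
 * level is within the configured threshold. */
static void dev_dbg_sketch(uint32_t level, const char *fmt, ...)
{
	if (level <= log_level_threshold) {
		va_list args;

		va_start(args, fmt);
		(void)vprintf(fmt, args);
		va_end(args);
	}
}

int main(void)
{
	/* After the rename, call sites pass DBG_LEVEL_* instead of ACRN_DBG_*. */
	dev_dbg_sketch(DBG_LEVEL_E820, "mmap length 0x%x addr 0x%x entries %d\n",
		0x100U, 0x9000U, 4);
	return 0;
}

With that model in mind, the diff itself is purely cosmetic: each old line and the matching new line that follows it differ only in the macro name.
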
@@ -23,7 +23,7 @@ static struct e820_entry hv_e820[E820_MAX_ENTRIES];
 /* Describe the top/bottom/size of the physical memory the hypervisor manages */
 static struct mem_range hv_mem_range;

-#define ACRN_DBG_E820 6U
+#define DBG_LEVEL_E820 6U

 static void obtain_mem_range_info(void)
 {
@@ -134,7 +134,7 @@ void init_e820(void)
 hv_e820_entries_nr = E820_MAX_ENTRIES;
 }

-dev_dbg(ACRN_DBG_E820, "mmap length 0x%x addr 0x%x entries %d\n",
+dev_dbg(DBG_LEVEL_E820, "mmap length 0x%x addr 0x%x entries %d\n",
 mbi->mi_mmap_length, mbi->mi_mmap_addr, hv_e820_entries_nr);

 for (i = 0U; i < hv_e820_entries_nr; i++) {
@@ -150,8 +150,8 @@ void init_e820(void)
 hv_e820[i].length = mmap[i].length;
 hv_e820[i].type = mmap[i].type;

-dev_dbg(ACRN_DBG_E820, "mmap table: %d type: 0x%x\n", i, mmap[i].type);
-dev_dbg(ACRN_DBG_E820, "Base: 0x%016lx length: 0x%016lx",
+dev_dbg(DBG_LEVEL_E820, "mmap table: %d type: 0x%x\n", i, mmap[i].type);
+dev_dbg(DBG_LEVEL_E820, "Base: 0x%016lx length: 0x%016lx",
 mmap[i].baseaddr, mmap[i].length);
 }
 } else {

@ -160,7 +160,7 @@ static void ptirq_build_physical_msi(struct acrn_vm *vm, struct ptirq_msi_info *
|
||||
info->pmsi_addr.bits.rh = MSI_ADDR_RH;
|
||||
info->pmsi_addr.bits.dest_mode = MSI_ADDR_DESTMODE_LOGICAL;
|
||||
}
|
||||
dev_dbg(ACRN_DBG_IRQ, "MSI %s addr:data = 0x%lx:%x(V) -> 0x%lx:%x(P)",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "MSI %s addr:data = 0x%lx:%x(V) -> 0x%lx:%x(P)",
|
||||
(info->pmsi_addr.ir_bits.intr_format != 0U) ? " Remappable Format" : "Compatibility Format",
|
||||
info->vmsi_addr.full, info->vmsi_data.full,
|
||||
info->pmsi_addr.full, info->pmsi_data.full);
|
||||
@ -242,7 +242,7 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct ptirq_remapping_info *entry)
|
||||
rte.bits.dest_field = dest_mask;
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_IRQ, "IOAPIC RTE %s = 0x%x:%x(V) -> 0x%x:%x(P)",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "IOAPIC RTE %s = 0x%x:%x(V) -> 0x%x:%x(P)",
|
||||
(rte.ir_bits.intr_format != 0U) ? "Remappable Format" : "Compatibility Format",
|
||||
virt_rte.u.hi_32, virt_rte.u.lo_32,
|
||||
rte.u.hi_32, rte.u.lo_32);
|
||||
@ -259,7 +259,7 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct ptirq_remapping_info *entry)
|
||||
rte.bits.trigger_mode = IOAPIC_RTE_TRGRMODE_LEVEL;
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_IRQ, "IOAPIC RTE %s = 0x%x:%x(P) -> 0x%x:%x(P)",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "IOAPIC RTE %s = 0x%x:%x(P) -> 0x%x:%x(P)",
|
||||
(rte.ir_bits.intr_format != 0U) ? "Remappable Format" : "Compatibility Format",
|
||||
phys_rte.u.hi_32, phys_rte.u.lo_32,
|
||||
rte.u.hi_32, rte.u.lo_32);
|
||||
@ -315,7 +315,7 @@ static struct ptirq_remapping_info *add_msix_remapping(struct acrn_vm *vm,
|
||||
* required. */
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_IRQ, "VM%d MSIX add vector mapping vbdf%x:pbdf%x idx=%d",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "VM%d MSIX add vector mapping vbdf%x:pbdf%x idx=%d",
|
||||
vm->vm_id, virt_bdf, phys_bdf, entry_nr);
|
||||
|
||||
return entry;
|
||||
@ -340,7 +340,7 @@ remove_msix_remapping(const struct acrn_vm *vm, uint16_t virt_bdf, uint32_t entr
|
||||
intr_src.src.msi.value = entry->phys_sid.msi_id.bdf;
|
||||
dmar_free_irte(intr_src, (uint16_t)entry->allocated_pirq);
|
||||
|
||||
dev_dbg(ACRN_DBG_IRQ,
|
||||
dev_dbg(DBG_LEVEL_IRQ,
|
||||
"VM%d MSIX remove vector mapping vbdf-pbdf:0x%x-0x%x idx=%d",
|
||||
entry->vm->vm_id, virt_bdf,
|
||||
entry->phys_sid.msi_id.bdf, entry_nr);
|
||||
@ -410,7 +410,7 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
|
||||
} else {
|
||||
vm->arch_vm.vioapic.vpin_to_pt_entry[virt_pin] = entry;
|
||||
}
|
||||
dev_dbg(ACRN_DBG_IRQ, "VM%d INTX add pin mapping vpin%d:ppin%d",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "VM%d INTX add pin mapping vpin%d:ppin%d",
|
||||
entry->vm->vm_id, virt_pin, phys_pin);
|
||||
}
|
||||
}
|
||||
@ -440,11 +440,11 @@ static void remove_intx_remapping(struct acrn_vm *vm, uint32_t virt_pin, bool pi
|
||||
intr_src.src.ioapic_id = ioapic_irq_to_ioapic_id(phys_irq);
|
||||
|
||||
dmar_free_irte(intr_src, (uint16_t)phys_irq);
|
||||
dev_dbg(ACRN_DBG_IRQ,
|
||||
dev_dbg(DBG_LEVEL_IRQ,
|
||||
"deactive %s intx entry:ppin=%d, pirq=%d ",
|
||||
pic_pin ? "vPIC" : "vIOAPIC",
|
||||
entry->phys_sid.intx_id.pin, phys_irq);
|
||||
dev_dbg(ACRN_DBG_IRQ, "from vm%d vpin=%d\n",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "from vm%d vpin=%d\n",
|
||||
entry->vm->vm_id, virt_pin);
|
||||
}
|
||||
|
||||
@ -489,7 +489,7 @@ static void ptirq_handle_intx(struct acrn_vm *vm,
|
||||
}
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_PTIRQ,
|
||||
dev_dbg(DBG_LEVEL_PTIRQ,
|
||||
"dev-assign: irq=0x%x assert vr: 0x%x vRTE=0x%lx",
|
||||
entry->allocated_pirq,
|
||||
irq_to_vector(entry->allocated_pirq),
|
||||
@ -544,11 +544,11 @@ void ptirq_softirq(uint16_t pcpu_id)
|
||||
if (msi != NULL) {
|
||||
/* TODO: msi destmode check required */
|
||||
(void)vlapic_intr_msi(entry->vm, msi->vmsi_addr.full, msi->vmsi_data.full);
|
||||
dev_dbg(ACRN_DBG_PTIRQ, "dev-assign: irq=0x%x MSI VR: 0x%x-0x%x",
|
||||
dev_dbg(DBG_LEVEL_PTIRQ, "dev-assign: irq=0x%x MSI VR: 0x%x-0x%x",
|
||||
entry->allocated_pirq,
|
||||
msi->vmsi_data.bits.vector,
|
||||
irq_to_vector(entry->allocated_pirq));
|
||||
dev_dbg(ACRN_DBG_PTIRQ, " vmsi_addr: 0x%lx vmsi_data: 0x%x",
|
||||
dev_dbg(DBG_LEVEL_PTIRQ, " vmsi_addr: 0x%lx vmsi_data: 0x%x",
|
||||
msi->vmsi_addr.full,
|
||||
msi->vmsi_data.full);
|
||||
}
|
||||
@ -589,7 +589,7 @@ void ptirq_intx_ack(struct acrn_vm *vm, uint32_t virt_pin, uint32_t vpin_src)
|
||||
break;
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_PTIRQ, "dev-assign: irq=0x%x acked vr: 0x%x",
|
||||
dev_dbg(DBG_LEVEL_PTIRQ, "dev-assign: irq=0x%x acked vr: 0x%x",
|
||||
phys_irq, irq_to_vector(phys_irq));
|
||||
ioapic_gsi_unmask_irq(phys_irq);
|
||||
}
|
||||
@ -671,7 +671,7 @@ int32_t ptirq_prepare_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t
|
||||
if (ret == 0) {
|
||||
entry->msi = *info;
|
||||
vbdf.value = virt_bdf;
|
||||
dev_dbg(ACRN_DBG_IRQ, "PCI %x:%x.%x MSI VR[%d] 0x%x->0x%x assigned to vm%d",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "PCI %x:%x.%x MSI VR[%d] 0x%x->0x%x assigned to vm%d",
|
||||
vbdf.bits.b, vbdf.bits.d, vbdf.bits.f, entry_nr, info->vmsi_data.bits.vector,
|
||||
irq_to_vector(entry->allocated_pirq), entry->vm->vm_id);
|
||||
}
|
||||
@ -792,7 +792,7 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_pin, uint32_t vpi
|
||||
spinlock_obtain(&ptdev_lock);
|
||||
/* if vpin source need switch */
|
||||
if ((need_switch_vpin_src) && (entry != NULL)) {
|
||||
dev_dbg(ACRN_DBG_IRQ,
|
||||
dev_dbg(DBG_LEVEL_IRQ,
|
||||
"IOAPIC pin=%hhu pirq=%u vpin=%d switch from %s to %s vpin=%d for vm%d",
|
||||
entry->phys_sid.intx_id.pin,
|
||||
entry->allocated_pirq, entry->virt_sid.intx_id.pin,
|
||||
|
@@ -16,7 +16,7 @@
 #include <logmsg.h>
 #include <trace.h>

-#define ACRN_DBG_EPT 6U
+#define DBG_LEVEL_EPT 6U

 bool ept_is_mr_valid(const struct acrn_vm *vm, uint64_t base, uint64_t size)
 {
@ -113,7 +113,7 @@ void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page,
|
||||
struct acrn_vcpu *vcpu;
|
||||
uint64_t prot = prot_orig;
|
||||
|
||||
dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016lx gpa: 0x%016lx size: 0x%016lx prot: 0x%016x\n",
|
||||
dev_dbg(DBG_LEVEL_EPT, "%s, vm[%d] hpa: 0x%016lx gpa: 0x%016lx size: 0x%016lx prot: 0x%016x\n",
|
||||
__func__, vm->vm_id, hpa, gpa, size, prot);
|
||||
|
||||
/* EPT & VT-d share the same page tables, set SNP bit
|
||||
@ -139,7 +139,7 @@ void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page,
|
||||
uint16_t i;
|
||||
uint64_t local_prot = prot_set;
|
||||
|
||||
dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%lx size 0x%lx\n", __func__, vm->vm_id, gpa, size);
|
||||
dev_dbg(DBG_LEVEL_EPT, "%s,vm[%d] gpa 0x%lx size 0x%lx\n", __func__, vm->vm_id, gpa, size);
|
||||
|
||||
if (((local_prot & EPT_MT_MASK) != EPT_UNCACHED) && iommu_snoop_supported(vm->iommu)) {
|
||||
local_prot |= EPT_SNOOP_CTRL;
|
||||
@ -159,7 +159,7 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t
|
||||
struct acrn_vcpu *vcpu;
|
||||
uint16_t i;
|
||||
|
||||
dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%lx size 0x%lx\n", __func__, vm->vm_id, gpa, size);
|
||||
dev_dbg(DBG_LEVEL_EPT, "%s,vm[%d] gpa 0x%lx size 0x%lx\n", __func__, vm->vm_id, gpa, size);
|
||||
|
||||
mmu_modify_or_del(pml4_page, gpa, size, 0UL, 0UL, &vm->arch_vm.ept_mem_ops, MR_DEL);
|
||||
|
||||
|
@@ -15,8 +15,6 @@
 #include <ept.h>
 #include <logmsg.h>

-#define ACRN_DBG_GUEST 6U
-
 struct page_walk_info {
 uint64_t top_entry; /* Top level paging structure entry */
 uint32_t level;

@@ -13,7 +13,7 @@
 #include <vmx.h>
 #include <hyperv.h>

-#define ACRN_DBG_HYPERV 6U
+#define DBG_LEVEL_HYPERV 6U

 /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */
 #define CPUID3A_TIME_REF_COUNT_MSR (1U << 1U)
@ -75,7 +75,7 @@ hyperv_get_tsc_scale_offset(struct acrn_vm *vm, uint64_t *scale, uint64_t *offse
|
||||
/* ret = (10000U << 64U) / get_tsc_khz() */
|
||||
ret = u64_shl64_div_u64(10000U, khz);
|
||||
|
||||
dev_dbg(ACRN_DBG_HYPERV, "%s, ret = 0x%lx", __func__, ret);
|
||||
dev_dbg(DBG_LEVEL_HYPERV, "%s, ret = 0x%lx", __func__, ret);
|
||||
|
||||
vm->arch_vm.hyperv.tsc_scale = ret;
|
||||
vm->arch_vm.hyperv.tsc_offset = 0UL;
|
||||
@ -188,7 +188,7 @@ hyperv_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval)
|
||||
break;
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_HYPERV, "hv: %s: MSR=0x%x wval=0x%lx vcpuid=%d vmid=%d",
|
||||
dev_dbg(DBG_LEVEL_HYPERV, "hv: %s: MSR=0x%x wval=0x%lx vcpuid=%d vmid=%d",
|
||||
__func__, msr, wval, vcpu->vcpu_id, vcpu->vm->vm_id);
|
||||
|
||||
return ret;
|
||||
@ -221,7 +221,7 @@ hyperv_rdmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *rval)
|
||||
break;
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_HYPERV, "hv: %s: MSR=0x%x rval=0x%lx vcpuid=%d vmid=%d",
|
||||
dev_dbg(DBG_LEVEL_HYPERV, "hv: %s: MSR=0x%x rval=0x%lx vcpuid=%d vmid=%d",
|
||||
__func__, msr, *rval, vcpu->vcpu_id, vcpu->vm->vm_id);
|
||||
|
||||
return ret;
|
||||
@ -278,6 +278,6 @@ hyperv_init_vcpuid_entry(uint32_t leaf, uint32_t subleaf, uint32_t flags,
|
||||
break;
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_HYPERV, "hv: %s: leaf=%x subleaf=%x flags=%x eax=%x ebx=%x ecx=%x edx=%x",
|
||||
dev_dbg(DBG_LEVEL_HYPERV, "hv: %s: leaf=%x subleaf=%x flags=%x eax=%x ebx=%x ecx=%x edx=%x",
|
||||
__func__, leaf, subleaf, flags, entry->eax, entry->ebx, entry->ecx, entry->edx);
|
||||
}
|
||||
|
@@ -19,7 +19,7 @@

 #define EXCEPTION_ERROR_CODE_VALID 8U

-#define ACRN_DBG_INTR 6U
+#define DBG_LEVEL_INTR 6U

 #define EXCEPTION_CLASS_BENIGN 1
 #define EXCEPTION_CLASS_CONT 2
@ -131,7 +131,7 @@ static bool vcpu_do_pending_extint(const struct acrn_vcpu *vcpu)
|
||||
|
||||
vpic_pending_intr(vm_pic(vcpu->vm), &vector);
|
||||
if (vector <= NR_MAX_VECTOR) {
|
||||
dev_dbg(ACRN_DBG_INTR, "VPIC: to inject PIC vector %d\n",
|
||||
dev_dbg(DBG_LEVEL_INTR, "VPIC: to inject PIC vector %d\n",
|
||||
vector & 0xFFU);
|
||||
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
|
||||
VMX_INT_INFO_VALID |
|
||||
|
@@ -62,7 +62,7 @@ static inline uint32_t prio(uint32_t x)
 #define LOGICAL_ID_MASK 0xFU
 #define CLUSTER_ID_MASK 0xFFFF0U

-#define ACRN_DBG_LAPIC 6U
+#define DBG_LEVEL_VLAPIC 6U

 #if VLAPIC_VERBOS
 static inline void vlapic_dump_irr(const struct acrn_vlapic *vlapic, const char *msg)
@ -70,7 +70,7 @@ static inline void vlapic_dump_irr(const struct acrn_vlapic *vlapic, const char
|
||||
const struct lapic_reg *irrptr = &(vlapic->apic_page.irr[0]);
|
||||
|
||||
for (uint8_t i = 0U; i < 8U; i++) {
|
||||
dev_dbg(ACRN_DBG_LAPIC, "%s irr%u 0x%08x", msg, i, irrptr[i].v);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "%s irr%u 0x%08x", msg, i, irrptr[i].v);
|
||||
}
|
||||
}
|
||||
|
||||
@ -79,7 +79,7 @@ static inline void vlapic_dump_isr(const struct acrn_vlapic *vlapic, const char
|
||||
const struct lapic_reg *isrptr = &(vlapic->apic_page.isr[0]);
|
||||
|
||||
for (uint8_t i = 0U; i < 8U; i++) {
|
||||
dev_dbg(ACRN_DBG_LAPIC, "%s isr%u 0x%08x", msg, i, isrptr[i].v);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "%s isr%u 0x%08x", msg, i, isrptr[i].v);
|
||||
}
|
||||
}
|
||||
#else
|
||||
@ -185,7 +185,7 @@ vlapic_build_id(const struct acrn_vlapic *vlapic)
|
||||
lapic_regs_id = vlapic_id << APIC_ID_SHIFT;
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic APIC PAGE ID : 0x%08x", lapic_regs_id);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic APIC PAGE ID : 0x%08x", lapic_regs_id);
|
||||
|
||||
return lapic_regs_id;
|
||||
}
|
||||
@ -233,12 +233,12 @@ vlapic_write_dfr(struct acrn_vlapic *vlapic)
|
||||
lapic->dfr.v |= APIC_DFR_RESERVED;
|
||||
|
||||
if ((lapic->dfr.v & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_FLAT) {
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic DFR in Flat Model");
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic DFR in Flat Model");
|
||||
} else if ((lapic->dfr.v & APIC_DFR_MODEL_MASK)
|
||||
== APIC_DFR_MODEL_CLUSTER) {
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic DFR in Cluster Model");
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic DFR in Cluster Model");
|
||||
} else {
|
||||
dev_dbg(ACRN_DBG_LAPIC, "DFR in Unknown Model %#x", lapic->dfr);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "DFR in Unknown Model %#x", lapic->dfr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -249,7 +249,7 @@ vlapic_write_ldr(struct acrn_vlapic *vlapic)
|
||||
|
||||
lapic = &(vlapic->apic_page);
|
||||
lapic->ldr.v &= ~APIC_LDR_RESERVED;
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic LDR set to %#x", lapic->ldr);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic LDR set to %#x", lapic->ldr);
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
@ -525,7 +525,7 @@ vlapic_reset_tmr(struct acrn_vlapic *vlapic)
|
||||
int16_t i;
|
||||
struct lapic_regs *lapic;
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"vlapic resetting all vectors to edge-triggered");
|
||||
|
||||
lapic = &(vlapic->apic_page);
|
||||
@ -588,7 +588,7 @@ static void vlapic_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector, bool
|
||||
|
||||
lapic = &(vlapic->apic_page);
|
||||
if ((lapic->svr.v & APIC_SVR_ENABLE) == 0U) {
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic is software disabled, ignoring interrupt %u", vector);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic is software disabled, ignoring interrupt %u", vector);
|
||||
} else {
|
||||
signal_event(&vlapic->vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
|
||||
vlapic->ops->accept_intr(vlapic, vector, level);
|
||||
@ -764,7 +764,7 @@ vlapic_write_lvt(struct acrn_vlapic *vlapic, uint32_t offset)
|
||||
if ((vlapic->vm->wire_mode == VPIC_WIRE_INTR) ||
|
||||
(vlapic->vm->wire_mode == VPIC_WIRE_NULL)) {
|
||||
vlapic->vm->wire_mode = VPIC_WIRE_LAPIC;
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"vpic wire mode -> LAPIC");
|
||||
} else {
|
||||
pr_err("WARNING:invalid vpic wire mode change");
|
||||
@ -774,7 +774,7 @@ vlapic_write_lvt(struct acrn_vlapic *vlapic, uint32_t offset)
|
||||
} else if (((last & APIC_LVT_M) == 0U) && ((val & APIC_LVT_M) != 0U)) {
|
||||
if (vlapic->vm->wire_mode == VPIC_WIRE_LAPIC) {
|
||||
vlapic->vm->wire_mode = VPIC_WIRE_NULL;
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"vpic wire mode -> NULL");
|
||||
}
|
||||
} else {
|
||||
@ -872,7 +872,7 @@ vlapic_update_ppr(struct acrn_vlapic *vlapic)
|
||||
}
|
||||
|
||||
vlapic->apic_page.ppr.v = ppr;
|
||||
dev_dbg(ACRN_DBG_LAPIC, "%s 0x%02x", __func__, ppr);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "%s 0x%02x", __func__, ppr);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -891,7 +891,7 @@ vlapic_process_eoi(struct acrn_vlapic *vlapic)
|
||||
bitpos = (vector & 0x1fU);
|
||||
bitmap32_clear_nolock((uint16_t)bitpos, &isrptr[i].v);
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC, "EOI vector %u", vector);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "EOI vector %u", vector);
|
||||
vlapic_dump_isr(vlapic, "vlapic_process_eoi");
|
||||
|
||||
vlapic->isrv = vlapic_find_isrv(vlapic);
|
||||
@ -905,7 +905,7 @@ vlapic_process_eoi(struct acrn_vlapic *vlapic)
|
||||
vcpu_make_request(vlapic->vcpu, ACRN_REQUEST_EVENT);
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC, "Gratuitous EOI");
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "Gratuitous EOI");
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1054,7 +1054,7 @@ static inline bool is_dest_field_matched(const struct acrn_vlapic *vlapic, uint3
|
||||
}
|
||||
} else {
|
||||
/* Guest has configured a bad logical model for this vcpu. */
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic has bad logical model %x", dfr);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic has bad logical model %x", dfr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1148,7 +1148,7 @@ vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast
|
||||
}
|
||||
bitmap_set_nolock(vcpu_id, dmask);
|
||||
}
|
||||
dev_dbg(ACRN_DBG_LAPICPT, "%s: logical destmod, dmask: 0x%016lx", __func__, *dmask);
|
||||
dev_dbg(DBG_LEVEL_LAPICPT, "%s: logical destmod, dmask: 0x%016lx", __func__, *dmask);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1158,7 +1158,7 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode, uint32_t
|
||||
if (mode == APIC_DELMODE_INIT) {
|
||||
if ((icr_low & APIC_LEVEL_MASK) != APIC_LEVEL_DEASSERT) {
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"Sending INIT to %hu",
|
||||
target_vcpu->vcpu_id);
|
||||
|
||||
@ -1178,7 +1178,7 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode, uint32_t
|
||||
if ((target_vcpu->state == VCPU_INIT) &&
|
||||
(target_vcpu->arch.nr_sipi != 0U)) {
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"Sending SIPI to %hu with vector %u",
|
||||
target_vcpu->vcpu_id,
|
||||
(icr_low & APIC_VECTOR_MASK));
|
||||
@ -1229,14 +1229,14 @@ static void vlapic_write_icrlo(struct acrn_vlapic *vlapic)
|
||||
|
||||
if ((mode == APIC_DELMODE_FIXED) && (vec < 16U)) {
|
||||
vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR);
|
||||
dev_dbg(ACRN_DBG_LAPIC, "Ignoring invalid IPI %u", vec);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "Ignoring invalid IPI %u", vec);
|
||||
} else if (((shorthand == APIC_DEST_SELF) || (shorthand == APIC_DEST_ALLISELF))
|
||||
&& ((mode == APIC_DELMODE_NMI) || (mode == APIC_DELMODE_INIT)
|
||||
|| (mode == APIC_DELMODE_STARTUP))) {
|
||||
dev_dbg(ACRN_DBG_LAPIC, "Invalid ICR value");
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "Invalid ICR value");
|
||||
} else {
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"icrlo 0x%08x icrhi 0x%08x triggered ipi %u",
|
||||
icr_low, icr_high, vec);
|
||||
|
||||
@ -1268,12 +1268,12 @@ static void vlapic_write_icrlo(struct acrn_vlapic *vlapic)
|
||||
|
||||
if (mode == APIC_DELMODE_FIXED) {
|
||||
vlapic_set_intr(target_vcpu, vec, LAPIC_TRIG_EDGE);
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"vlapic sending ipi %u to vcpu_id %hu",
|
||||
vec, vcpu_id);
|
||||
} else if (mode == APIC_DELMODE_NMI) {
|
||||
vcpu_inject_nmi(target_vcpu);
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"vlapic send ipi nmi to vcpu_id %hu", vcpu_id);
|
||||
} else if (mode == APIC_DELMODE_INIT) {
|
||||
vlapic_process_init_sipi(target_vcpu, mode, icr_low);
|
||||
@ -1404,14 +1404,14 @@ vlapic_write_svr(struct acrn_vlapic *vlapic)
|
||||
* The apic is now disabled so stop the apic timer
|
||||
* and mask all the LVT entries.
|
||||
*/
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic is software-disabled");
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic is software-disabled");
|
||||
del_timer(&vlapic->vtimer.timer);
|
||||
|
||||
vlapic_mask_lvts(vlapic);
|
||||
/* the only one enabled LINT0-ExtINT vlapic disabled */
|
||||
if (vlapic->vm->wire_mode == VPIC_WIRE_NULL) {
|
||||
vlapic->vm->wire_mode = VPIC_WIRE_INTR;
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"vpic wire mode -> INTR");
|
||||
}
|
||||
} else {
|
||||
@ -1419,7 +1419,7 @@ vlapic_write_svr(struct acrn_vlapic *vlapic)
|
||||
* The apic is now enabled so restart the apic timer
|
||||
* if it is configured in periodic mode.
|
||||
*/
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic is software-enabled");
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic is software-enabled");
|
||||
if (vlapic_lvtt_period(vlapic)) {
|
||||
if (set_expiration(vlapic)) {
|
||||
/* vlapic_init_timer has been called,
|
||||
@ -1546,7 +1546,7 @@ static int32_t vlapic_read(struct acrn_vlapic *vlapic, uint32_t offset_arg, uint
|
||||
}
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic read offset %x, data %lx", offset, *data);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic read offset %x, data %lx", offset, *data);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1560,7 +1560,7 @@ static int32_t vlapic_write(struct acrn_vlapic *vlapic, uint32_t offset, uint64_
|
||||
ASSERT(((offset & 0xfU) == 0U) && (offset < PAGE_SIZE),
|
||||
"%s: invalid offset %#x", __func__, offset);
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC, "vlapic write offset %#x, data %#lx", offset, data);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "vlapic write offset %#x, data %#lx", offset, data);
|
||||
|
||||
if (offset <= sizeof(*lapic)) {
|
||||
switch (offset) {
|
||||
@ -1837,7 +1837,7 @@ vlapic_receive_intr(struct acrn_vm *vm, bool level, uint32_t dest, bool phys,
|
||||
if ((delmode != IOAPIC_RTE_DELMODE_FIXED) &&
|
||||
(delmode != IOAPIC_RTE_DELMODE_LOPRI) &&
|
||||
(delmode != IOAPIC_RTE_DELMODE_EXINT)) {
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"vlapic intr invalid delmode %#x", delmode);
|
||||
} else {
|
||||
lowprio = (delmode == IOAPIC_RTE_DELMODE_LOPRI) || rh;
|
||||
@ -1880,7 +1880,7 @@ vlapic_set_intr(struct acrn_vcpu *vcpu, uint32_t vector, bool level)
|
||||
vlapic = vcpu_vlapic(vcpu);
|
||||
if (vector < 16U) {
|
||||
vlapic_set_error(vlapic, APIC_ESR_RECEIVE_ILLEGAL_VECTOR);
|
||||
dev_dbg(ACRN_DBG_LAPIC,
|
||||
dev_dbg(DBG_LEVEL_VLAPIC,
|
||||
"vlapic ignoring interrupt to vector %u", vector);
|
||||
} else {
|
||||
vlapic_accept_intr(vlapic, vector, level);
|
||||
@ -1955,7 +1955,7 @@ vlapic_intr_msi(struct acrn_vm *vm, uint64_t addr, uint64_t msg)
|
||||
|
||||
address.full = addr;
|
||||
data.full = (uint32_t) msg;
|
||||
dev_dbg(ACRN_DBG_LAPIC, "lapic MSI addr: %#lx msg: %#lx", address.full, data.full);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "lapic MSI addr: %#lx msg: %#lx", address.full, data.full);
|
||||
|
||||
if (address.bits.addr_base == MSI_ADDR_BASE) {
|
||||
/*
|
||||
@ -1976,13 +1976,13 @@ vlapic_intr_msi(struct acrn_vm *vm, uint64_t addr, uint64_t msg)
|
||||
delmode = data.bits.delivery_mode;
|
||||
vec = data.bits.vector;
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPIC, "lapic MSI %s dest %#x, vec %u",
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "lapic MSI %s dest %#x, vec %u",
|
||||
phys ? "physical" : "logical", dest, vec);
|
||||
|
||||
vlapic_receive_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec, rh);
|
||||
ret = 0;
|
||||
} else {
|
||||
dev_dbg(ACRN_DBG_LAPIC, "lapic MSI invalid addr %#lx", address.full);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "lapic MSI invalid addr %#lx", address.full);
|
||||
ret = -1;
|
||||
}
|
||||
|
||||
@ -2084,7 +2084,7 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
|
||||
/* convert the dest from virtual apic_id to physical apic_id */
|
||||
if (is_x2apic_enabled(vcpu_vlapic(target_vcpu))) {
|
||||
papic_id = per_cpu(lapic_id, pcpuid_from_vcpu(target_vcpu));
|
||||
dev_dbg(ACRN_DBG_LAPICPT,
|
||||
dev_dbg(DBG_LEVEL_LAPICPT,
|
||||
"%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx",
|
||||
__func__, vapic_id, papic_id, icr_low);
|
||||
msr_write(MSR_IA32_EXT_APIC_ICR, (((uint64_t)papic_id) << 32U) | icr_low);
|
||||
@ -2532,7 +2532,7 @@ static void vlapic_x2apic_self_ipi_handler(struct acrn_vlapic *vlapic)
|
||||
target_vcpu = vlapic->vcpu;
|
||||
if (vector < 16U) {
|
||||
vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR);
|
||||
dev_dbg(ACRN_DBG_LAPIC, "Ignoring invalid IPI %u", vector);
|
||||
dev_dbg(DBG_LEVEL_VLAPIC, "Ignoring invalid IPI %u", vector);
|
||||
} else {
|
||||
vlapic_set_intr(target_vcpu, vector, LAPIC_TRIG_EDGE);
|
||||
}
|
||||
|
@ -564,7 +564,7 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
uint32_t value32;
|
||||
if (is_lapic_pt_configured(vcpu->vm)) {
|
||||
dev_dbg(ACRN_DBG_LAPICPT, "%s: switching to x2apic and passthru", __func__);
|
||||
dev_dbg(DBG_LEVEL_LAPICPT, "%s: switching to x2apic and passthru", __func__);
|
||||
/*
|
||||
* Disable external interrupt exiting and irq ack
|
||||
* Disable posted interrupt processing
|
||||
|
@ -246,7 +246,7 @@ static void ioapic_set_routing(uint32_t gsi, uint32_t vr)
|
||||
set_irq_trigger_mode(gsi, false);
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_IRQ, "GSI: irq:%d pin:%hhu rte:%lx",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "GSI: irq:%d pin:%hhu rte:%lx",
|
||||
gsi, gsi_table_data[gsi].pin,
|
||||
rte.full);
|
||||
}
|
||||
@ -272,7 +272,7 @@ void ioapic_set_rte(uint32_t irq, union ioapic_rte rte)
|
||||
addr = gsi_table_data[irq].addr;
|
||||
ioapic_set_rte_entry(addr, gsi_table_data[irq].pin, rte);
|
||||
|
||||
dev_dbg(ACRN_DBG_IRQ, "GSI: irq:%d pin:%hhu rte:%lx",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "GSI: irq:%d pin:%hhu rte:%lx",
|
||||
irq, gsi_table_data[irq].pin,
|
||||
rte.full);
|
||||
}
|
||||
@ -334,10 +334,10 @@ ioapic_irq_gsi_mask_unmask(uint32_t irq, bool mask)
|
||||
rte.bits.intr_mask = IOAPIC_RTE_MASK_CLR;
|
||||
}
|
||||
ioapic_set_rte_entry(addr, pin, rte);
|
||||
dev_dbg(ACRN_DBG_PTIRQ, "update: irq:%d pin:%hhu rte:%lx",
|
||||
dev_dbg(DBG_LEVEL_PTIRQ, "update: irq:%d pin:%hhu rte:%lx",
|
||||
irq, pin, rte.full);
|
||||
} else {
|
||||
dev_dbg(ACRN_DBG_PTIRQ, "NULL Address returned from gsi_table_data");
|
||||
dev_dbg(DBG_LEVEL_PTIRQ, "NULL Address returned from gsi_table_data");
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -359,7 +359,7 @@ ioapic_nr_pins(void *ioapic_base)
|
||||
uint32_t nr_pins;
|
||||
|
||||
version = ioapic_read_reg32(ioapic_base, IOAPIC_VER);
|
||||
dev_dbg(ACRN_DBG_IRQ, "IOAPIC version: %x", version);
|
||||
dev_dbg(DBG_LEVEL_IRQ, "IOAPIC version: %x", version);
|
||||
|
||||
/* The 23:16 bits in the version register is the highest entry in the
|
||||
* I/O redirection table, which is 1 smaller than the number of
|
||||
|
@ -220,7 +220,7 @@ int32_t request_irq(uint32_t req_irq, irq_action_t action_fn, void *priv_data,
|
||||
spinlock_irqrestore_release(&desc->lock, rflags);
|
||||
|
||||
ret = (int32_t)irq;
|
||||
dev_dbg(ACRN_DBG_IRQ, "[%s] irq%d vr:0x%x", __func__, irq, desc->vector);
|
||||
dev_dbg(DBG_LEVEL_IRQ, "[%s] irq%d vr:0x%x", __func__, irq, desc->vector);
|
||||
} else {
|
||||
spinlock_irqrestore_release(&desc->lock, rflags);
|
||||
|
||||
@ -241,7 +241,7 @@ void free_irq(uint32_t irq)
|
||||
|
||||
if (irq < NR_IRQS) {
|
||||
desc = &irq_desc_array[irq];
|
||||
dev_dbg(ACRN_DBG_IRQ, "[%s] irq%d vr:0x%x",
|
||||
dev_dbg(DBG_LEVEL_IRQ, "[%s] irq%d vr:0x%x",
|
||||
__func__, irq, irq_to_vector(irq));
|
||||
|
||||
free_irq_vector(irq);
|
||||
|
@ -94,7 +94,7 @@ void setup_notification(void)
|
||||
pr_err("Failed to setup notification");
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_PTIRQ, "NOTIFY: irq[%d] setup vector %x",
|
||||
dev_dbg(DBG_LEVEL_PTIRQ, "NOTIFY: irq[%d] setup vector %x",
|
||||
notification_irq, irq_to_vector(notification_irq));
|
||||
}
|
||||
|
||||
|
@@ -11,7 +11,7 @@
 #include <mmu.h>
 #include <logmsg.h>

-#define ACRN_DBG_MMU 6U
+#define DBG_LEVEL_MMU 6U

 /*
 * Split a large page table into next level page table.
@ -42,7 +42,7 @@ static void split_large_page(uint64_t *pte, enum _page_table_level level,
|
||||
break;
|
||||
}
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%lx, pbase: 0x%lx\n", __func__, ref_paddr, pbase);
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, paddr: 0x%lx, pbase: 0x%lx\n", __func__, ref_paddr, pbase);
|
||||
|
||||
paddr = ref_paddr;
|
||||
for (i = 0UL; i < PTRS_PER_PTE; i++) {
|
||||
@ -93,7 +93,7 @@ static void modify_or_del_pte(const uint64_t *pde, uint64_t vaddr_start, uint64_
|
||||
uint64_t vaddr = vaddr_start;
|
||||
uint64_t index = pte_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
|
||||
for (; index < PTRS_PER_PTE; index++) {
|
||||
uint64_t *pte = pt_page + index;
|
||||
|
||||
@ -130,7 +130,7 @@ static void modify_or_del_pde(const uint64_t *pdpte, uint64_t vaddr_start, uint6
|
||||
uint64_t vaddr = vaddr_start;
|
||||
uint64_t index = pde_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
|
||||
for (; index < PTRS_PER_PDE; index++) {
|
||||
uint64_t *pde = pd_page + index;
|
||||
uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;
|
||||
@ -175,7 +175,7 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
|
||||
uint64_t vaddr = vaddr_start;
|
||||
uint64_t index = pdpte_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
|
||||
for (; index < PTRS_PER_PDPTE; index++) {
|
||||
uint64_t *pdpte = pdpt_page + index;
|
||||
uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;
|
||||
@ -228,7 +228,7 @@ void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
|
||||
uint64_t *pml4e;
|
||||
|
||||
vaddr_end = vaddr + round_page_down(size);
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, vaddr: 0x%lx, size: 0x%lx\n",
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, vaddr: 0x%lx, size: 0x%lx\n",
|
||||
__func__, vaddr, size);
|
||||
|
||||
while (vaddr < vaddr_end) {
|
||||
@ -255,7 +255,7 @@ static void add_pte(const uint64_t *pde, uint64_t paddr_start, uint64_t vaddr_st
|
||||
uint64_t paddr = paddr_start;
|
||||
uint64_t index = pte_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n",
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n",
|
||||
__func__, paddr, vaddr_start, vaddr_end);
|
||||
for (; index < PTRS_PER_PTE; index++) {
|
||||
uint64_t *pte = pt_page + index;
|
||||
@ -286,7 +286,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
|
||||
uint64_t paddr = paddr_start;
|
||||
uint64_t index = pde_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n",
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n",
|
||||
__func__, paddr, vaddr, vaddr_end);
|
||||
for (; index < PTRS_PER_PDE; index++) {
|
||||
uint64_t *pde = pd_page + index;
|
||||
@ -335,7 +335,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
|
||||
uint64_t paddr = paddr_start;
|
||||
uint64_t index = pdpte_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n", __func__, paddr, vaddr, vaddr_end);
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n", __func__, paddr, vaddr, vaddr_end);
|
||||
for (; index < PTRS_PER_PDPTE; index++) {
|
||||
uint64_t *pdpte = pdpt_page + index;
|
||||
uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;
|
||||
@ -383,7 +383,7 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base, uint
|
||||
uint64_t paddr;
|
||||
uint64_t *pml4e;
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr 0x%lx, vaddr 0x%lx, size 0x%lx\n", __func__, paddr_base, vaddr_base, size);
|
||||
dev_dbg(DBG_LEVEL_MMU, "%s, paddr 0x%lx, vaddr 0x%lx, size 0x%lx\n", __func__, paddr_base, vaddr_base, size);
|
||||
|
||||
/* align address to page size*/
|
||||
vaddr = round_page_up(vaddr_base);
|
||||
|
@@ -26,10 +26,10 @@
 #define DBG_IOMMU 0

 #if DBG_IOMMU
-#define ACRN_DBG_IOMMU LOG_INFO
+#define DBG_LEVEL_IOMMU LOG_INFO
 #define DMAR_FAULT_LOOP_MAX 10
 #else
-#define ACRN_DBG_IOMMU 6U
+#define DBG_LEVEL_IOMMU 6U
 #endif
 #define LEVEL_WIDTH 9U

@ -408,7 +408,7 @@ static void dmar_enable_intr_remapping(struct dmar_drhd_rt *dmar_unit)
|
||||
}
|
||||
|
||||
spinlock_release(&(dmar_unit->lock));
|
||||
dev_dbg(ACRN_DBG_IOMMU, "%s: gsr:0x%x", __func__, status);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "%s: gsr:0x%x", __func__, status);
|
||||
}
|
||||
|
||||
static void dmar_enable_translation(struct dmar_drhd_rt *dmar_unit)
|
||||
@ -428,7 +428,7 @@ static void dmar_enable_translation(struct dmar_drhd_rt *dmar_unit)
|
||||
|
||||
spinlock_release(&(dmar_unit->lock));
|
||||
|
||||
dev_dbg(ACRN_DBG_IOMMU, "%s: gsr:0x%x", __func__, status);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "%s: gsr:0x%x", __func__, status);
|
||||
}
|
||||
|
||||
static void dmar_disable_intr_remapping(struct dmar_drhd_rt *dmar_unit)
|
||||
@ -465,7 +465,7 @@ static int32_t dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit)
|
||||
{
|
||||
int32_t ret = 0;
|
||||
|
||||
dev_dbg(ACRN_DBG_IOMMU, "Register dmar uint [%d] @0x%lx", dmar_unit->index, dmar_unit->drhd->reg_base_addr);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "Register dmar uint [%d] @0x%lx", dmar_unit->index, dmar_unit->drhd->reg_base_addr);
|
||||
|
||||
spinlock_init(&dmar_unit->lock);
|
||||
|
||||
@ -524,7 +524,7 @@ static int32_t dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit)
|
||||
* translation paging structures?
|
||||
*/
|
||||
if (iommu_ecap_sc(dmar_unit->ecap) == 0U) {
|
||||
dev_dbg(ACRN_DBG_IOMMU, "dmar uint doesn't support snoop control!");
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "dmar uint doesn't support snoop control!");
|
||||
}
|
||||
|
||||
dmar_disable_translation(dmar_unit);
|
||||
@ -892,7 +892,7 @@ static void dmar_fault_handler(uint32_t irq, void *data)
|
||||
struct dmar_entry fault_record;
|
||||
int32_t loop = 0;
|
||||
|
||||
dev_dbg(ACRN_DBG_IOMMU, "%s: irq = %d", __func__, irq);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "%s: irq = %d", __func__, irq);
|
||||
|
||||
fsr = iommu_read32(dmar_unit, DMAR_FSTS_REG);
|
||||
|
||||
@ -905,7 +905,7 @@ static void dmar_fault_handler(uint32_t irq, void *data)
|
||||
index = dma_fsts_fri(fsr);
|
||||
record_reg_offset = (uint32_t)dmar_unit->cap_fault_reg_offset + (index * 16U);
|
||||
if (index >= dmar_unit->cap_num_fault_regs) {
|
||||
dev_dbg(ACRN_DBG_IOMMU, "%s: invalid FR Index", __func__);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "%s: invalid FR Index", __func__);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -913,7 +913,7 @@ static void dmar_fault_handler(uint32_t irq, void *data)
|
||||
fault_record.lo_64 = iommu_read64(dmar_unit, record_reg_offset);
|
||||
fault_record.hi_64 = iommu_read64(dmar_unit, record_reg_offset + 8U);
|
||||
|
||||
dev_dbg(ACRN_DBG_IOMMU, "%s: record[%d] @0x%x: 0x%lx, 0x%lx",
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "%s: record[%d] @0x%x: 0x%lx, 0x%lx",
|
||||
__func__, index, record_reg_offset, fault_record.lo_64, fault_record.hi_64);
|
||||
|
||||
fault_record_analysis(fault_record.lo_64, fault_record.hi_64);
|
||||
@ -924,7 +924,7 @@ static void dmar_fault_handler(uint32_t irq, void *data)
|
||||
|
||||
#ifdef DMAR_FAULT_LOOP_MAX
|
||||
if (loop > DMAR_FAULT_LOOP_MAX) {
|
||||
dev_dbg(ACRN_DBG_IOMMU, "%s: loop more than %d times", __func__, DMAR_FAULT_LOOP_MAX);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "%s: loop more than %d times", __func__, DMAR_FAULT_LOOP_MAX);
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
@ -950,7 +950,7 @@ static void dmar_setup_interrupt(struct dmar_drhd_rt *dmar_unit)
|
||||
}
|
||||
|
||||
vector = irq_to_vector(dmar_unit->dmar_irq);
|
||||
dev_dbg(ACRN_DBG_IOMMU, "irq#%d vector#%d for dmar_unit", dmar_unit->dmar_irq, vector);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "irq#%d vector#%d for dmar_unit", dmar_unit->dmar_irq, vector);
|
||||
|
||||
dmar_fault_msi_write(dmar_unit, vector);
|
||||
dmar_fault_event_unmask(dmar_unit);
|
||||
@ -993,7 +993,7 @@ static void dmar_disable_qi(struct dmar_drhd_rt *dmar_unit)
|
||||
|
||||
static void dmar_prepare(struct dmar_drhd_rt *dmar_unit)
|
||||
{
|
||||
dev_dbg(ACRN_DBG_IOMMU, "enable dmar uint [0x%x]", dmar_unit->drhd->reg_base_addr);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "enable dmar uint [0x%x]", dmar_unit->drhd->reg_base_addr);
|
||||
dmar_setup_interrupt(dmar_unit);
|
||||
dmar_set_root_table(dmar_unit);
|
||||
dmar_enable_qi(dmar_unit);
|
||||
@ -1002,7 +1002,7 @@ static void dmar_prepare(struct dmar_drhd_rt *dmar_unit)
|
||||
|
||||
static void dmar_enable(struct dmar_drhd_rt *dmar_unit)
|
||||
{
|
||||
dev_dbg(ACRN_DBG_IOMMU, "enable dmar uint [0x%x]", dmar_unit->drhd->reg_base_addr);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "enable dmar uint [0x%x]", dmar_unit->drhd->reg_base_addr);
|
||||
dmar_invalid_context_cache_global(dmar_unit);
|
||||
dmar_invalid_iotlb_global(dmar_unit);
|
||||
dmar_invalid_iec_global(dmar_unit);
|
||||
@ -1068,7 +1068,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint8_t bus, uint8_
|
||||
pr_err("no dmar unit found for device: %x:%x.%x", bus, sid.bits.d, sid.bits.f);
|
||||
ret = -EINVAL;
|
||||
} else if (dmar_unit->drhd->ignore) {
|
||||
dev_dbg(ACRN_DBG_IOMMU, "device is ignored :0x%x:%x.%x", bus, sid.bits.d, sid.bits.f);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "device is ignored :0x%x:%x.%x", bus, sid.bits.d, sid.bits.f);
|
||||
} else if ((!dmar_unit_support_aw(dmar_unit, domain->addr_width)) || (dmar_unit->root_table_addr == 0UL)) {
|
||||
pr_err("invalid dmar unit");
|
||||
ret = -EINVAL;
|
||||
@ -1076,7 +1076,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint8_t bus, uint8_
|
||||
if (iommu_ecap_sc(dmar_unit->ecap) == 0U) {
|
||||
/* TODO: remove iommu_snoop from iommu_domain */
|
||||
domain->iommu_snoop = false;
|
||||
dev_dbg(ACRN_DBG_IOMMU, "vm=%d add %x:%x no snoop control!", domain->vm_id, bus, devfun);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "vm=%d add %x:%x no snoop control!", domain->vm_id, bus, devfun);
|
||||
}
|
||||
|
||||
root_table = (struct dmar_entry *)hpa2hva(dmar_unit->root_table_addr);
|
||||
@ -1182,7 +1182,7 @@ static int32_t remove_iommu_device(const struct iommu_domain *domain, uint8_t bu
|
||||
pr_err("no dmar unit found for device: %x:%x.%x", bus, sid.bits.d, sid.bits.f);
|
||||
ret = -EINVAL;
|
||||
} else if (dmar_unit->drhd->ignore) {
|
||||
dev_dbg(ACRN_DBG_IOMMU, "device is ignored :0x%x:%x.%x", bus, sid.bits.d, sid.bits.f);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "device is ignored :0x%x:%x.%x", bus, sid.bits.d, sid.bits.f);
|
||||
} else {
|
||||
root_table = (struct dmar_entry *)hpa2hva(dmar_unit->root_table_addr);
|
||||
root_entry = root_table + bus;
|
||||
@ -1235,7 +1235,7 @@ static void do_action_for_iommus(void (*action)(struct dmar_drhd_rt *))
|
||||
if (!dmar_unit->drhd->ignore) {
|
||||
action(dmar_unit);
|
||||
} else {
|
||||
dev_dbg(ACRN_DBG_IOMMU, "ignore dmar_unit @0x%x", dmar_unit->drhd->reg_base_addr);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "ignore dmar_unit @0x%x", dmar_unit->drhd->reg_base_addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1265,7 +1265,7 @@ struct iommu_domain *create_iommu_domain(uint16_t vm_id, uint64_t translation_ta
|
||||
domain->is_tt_ept = true;
|
||||
domain->iommu_snoop = true;
|
||||
|
||||
dev_dbg(ACRN_DBG_IOMMU, "create domain [%d]: vm_id = %hu, ept@0x%x",
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "create domain [%d]: vm_id = %hu, ept@0x%x",
|
||||
vmid_to_domainid(domain->vm_id), domain->vm_id, domain->trans_table_ptr);
|
||||
}
|
||||
|
||||
@ -1382,7 +1382,7 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
|
||||
pr_err("no dmar unit found for device: %x:%x.%x", sid.bits.b, sid.bits.d, sid.bits.f);
|
||||
ret = -EINVAL;
|
||||
} else if (dmar_unit->drhd->ignore) {
|
||||
dev_dbg(ACRN_DBG_IOMMU, "device is ignored :0x%x:%x.%x", sid.bits.b, sid.bits.d, sid.bits.f);
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "device is ignored :0x%x:%x.%x", sid.bits.b, sid.bits.d, sid.bits.f);
|
||||
ret = -EINVAL;
|
||||
} else if (dmar_unit->ir_table_addr == 0UL) {
|
||||
pr_err("IR table is not set for dmar unit");
|
||||
@ -1423,7 +1423,7 @@ void dmar_free_irte(struct intr_source intr_src, uint16_t index)
|
||||
pr_err("no dmar unit found for device: %x:%x.%x", intr_src.src.msi.bits.b,
|
||||
intr_src.src.msi.bits.d, intr_src.src.msi.bits.f);
|
||||
} else if (dmar_unit->drhd->ignore) {
|
||||
dev_dbg(ACRN_DBG_IOMMU, "device is ignored :0x%x:%x.%x", intr_src.src.msi.bits.b,
|
||||
dev_dbg(DBG_LEVEL_IOMMU, "device is ignored :0x%x:%x.%x", intr_src.src.msi.bits.b,
|
||||
intr_src.src.msi.bits.d, intr_src.src.msi.bits.f);
|
||||
} else if (dmar_unit->ir_table_addr == 0UL) {
|
||||
pr_err("IR table is not set for dmar unit");
|
||||
|
@@ -12,7 +12,7 @@
 #include <logmsg.h>
 #include <vboot.h>

-#define ACRN_DBG_PARSE 6
+#define DBG_LEVEL_PARSE 6

 int32_t parse_hv_cmdline(void)
 {
@ -25,15 +25,15 @@ int32_t parse_hv_cmdline(void)
|
||||
}
|
||||
|
||||
mbi = (struct multiboot_info *)(hpa2hva_early((uint64_t)boot_regs[1]));
|
||||
dev_dbg(ACRN_DBG_PARSE, "Multiboot detected, flag=0x%x", mbi->mi_flags);
|
||||
dev_dbg(DBG_LEVEL_PARSE, "Multiboot detected, flag=0x%x", mbi->mi_flags);
|
||||
|
||||
if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_CMDLINE) == 0U) {
|
||||
dev_dbg(ACRN_DBG_PARSE, "no hv cmdline!");
|
||||
dev_dbg(DBG_LEVEL_PARSE, "no hv cmdline!");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
start = (char *)hpa2hva_early((uint64_t)mbi->mi_cmdline);
|
||||
dev_dbg(ACRN_DBG_PARSE, "hv cmdline: %s", start);
|
||||
dev_dbg(DBG_LEVEL_PARSE, "hv cmdline: %s", start);
|
||||
|
||||
while ((start != NULL) && ((*start) != '\0')) {
|
||||
while ((*start) == ' ')
|
||||
|
@@ -20,7 +20,7 @@
 #include <deprivilege_boot.h>
 #include <vboot_info.h>

-#define ACRN_DBG_BOOT 6U
+#define DBG_LEVEL_BOOT 6U

 #define MAX_BOOT_PARAMS_LEN 64U
 #define INVALID_MOD_IDX 0xFFFFU
@ -135,7 +135,7 @@ static int32_t init_vm_kernel_info(struct acrn_vm *vm, const struct multiboot_mo
|
||||
{
|
||||
struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
|
||||
|
||||
dev_dbg(ACRN_DBG_BOOT, "kernel mod start=0x%x, end=0x%x",
|
||||
dev_dbg(DBG_LEVEL_BOOT, "kernel mod start=0x%x, end=0x%x",
|
||||
mod->mm_mod_start, mod->mm_mod_end);
|
||||
|
||||
vm->sw.kernel_type = vm_config->os_config.kernel_type;
|
||||
@ -217,7 +217,7 @@ static int32_t init_vm_sw_load(struct acrn_vm *vm, const struct multiboot_info *
|
||||
uint32_t mod_idx;
|
||||
int32_t ret = -EINVAL;
|
||||
|
||||
dev_dbg(ACRN_DBG_BOOT, "mod counts=%d\n", mbi->mi_mods_count);
|
||||
dev_dbg(DBG_LEVEL_BOOT, "mod counts=%d\n", mbi->mi_mods_count);
|
||||
|
||||
if (mods != NULL) {
|
||||
mod_idx = get_mod_idx_by_tag(mods, mbi->mi_mods_count, vm_config->os_config.kernel_mod_tag);
|
||||
@ -255,7 +255,7 @@ static int32_t init_general_vm_boot_info(struct acrn_vm *vm)
|
||||
|
||||
if (mbi != NULL) {
|
||||
stac();
|
||||
dev_dbg(ACRN_DBG_BOOT, "Multiboot detected, flag=0x%x", mbi->mi_flags);
|
||||
dev_dbg(DBG_LEVEL_BOOT, "Multiboot detected, flag=0x%x", mbi->mi_flags);
|
||||
if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_MODS) == 0U) {
|
||||
panic("no multiboot module info found");
|
||||
} else {
|
||||
|
@@ -19,7 +19,7 @@
 #include <errno.h>
 #include <logmsg.h>

-#define ACRN_DBG_HYCALL 6U
+#define DBG_LEVEL_HYCALL 6U

 bool is_hypercall_from_ring0(void)
 {
@ -168,7 +168,7 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
|
||||
} else {
|
||||
ret = create_vm(vm_id, vm_config, &target_vm);
|
||||
if (ret != 0) {
|
||||
dev_dbg(ACRN_DBG_HYCALL, "HCALL: Create VM failed");
|
||||
dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed");
|
||||
cv.vmid = ACRN_INVALID_VMID;
|
||||
ret = -1;
|
||||
} else {
|
||||
@ -389,7 +389,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
|
||||
vmsi_addr.full = vmsi->msi_addr;
|
||||
vmsi_data.full = (uint32_t)vmsi->msi_data;
|
||||
|
||||
dev_dbg(ACRN_DBG_LAPICPT, "%s: msi_addr 0x%016lx, msi_data 0x%016lx",
|
||||
dev_dbg(DBG_LEVEL_LAPICPT, "%s: msi_addr 0x%016lx, msi_data 0x%016lx",
|
||||
__func__, vmsi->msi_addr, vmsi->msi_data);
|
||||
|
||||
if (vmsi_addr.bits.addr_base == MSI_ADDR_BASE) {
|
||||
@ -401,7 +401,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
|
||||
* and handled by hardware.
|
||||
*/
|
||||
vlapic_calc_dest_lapic_pt(vm, &vdmask, false, vdest, phys);
|
||||
dev_dbg(ACRN_DBG_LAPICPT, "%s: vcpu destination mask 0x%016lx", __func__, vdmask);
|
||||
dev_dbg(DBG_LEVEL_LAPICPT, "%s: vcpu destination mask 0x%016lx", __func__, vdmask);
|
||||
|
||||
vcpu_id = ffs64(vdmask);
|
||||
while (vcpu_id != INVALID_BIT_INDEX) {
|
||||
@ -418,7 +418,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
|
||||
icr.bits.destination_mode = MSI_ADDR_DESTMODE_LOGICAL;
|
||||
|
||||
msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
|
||||
dev_dbg(ACRN_DBG_LAPICPT, "%s: icr.value 0x%016lx", __func__, icr.value);
|
||||
dev_dbg(DBG_LEVEL_LAPICPT, "%s: icr.value 0x%016lx", __func__, icr.value);
|
||||
}
|
||||
}
|
||||
|
||||
@ -504,7 +504,7 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
|
||||
if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) != 0) {
|
||||
pr_err("%p %s: Unable copy param to vm\n", target_vm, __func__);
|
||||
} else {
|
||||
dev_dbg(ACRN_DBG_HYCALL, "[%d] SET BUFFER=0x%p",
|
||||
dev_dbg(DBG_LEVEL_HYCALL, "[%d] SET BUFFER=0x%p",
|
||||
vmid, iobuf.req_buf);
|
||||
|
||||
hpa = gpa2hpa(vm, iobuf.req_buf);
|
||||
@ -544,7 +544,7 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
|
||||
|
||||
/* make sure we have set req_buf */
|
||||
if ((!is_poweroff_vm(target_vm)) && (is_postlaunched_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
|
||||
dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
|
||||
dev_dbg(DBG_LEVEL_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
|
||||
vmid, vcpu_id);
|
||||
|
||||
if (vcpu_id >= target_vm->hw.created_vcpus) {
|
||||
@ -637,7 +637,7 @@ static int32_t set_vm_memory_region(struct acrn_vm *vm,
|
||||
target_vm->arch_vm.ept_mem_ops.info->ept.top_address_space);
|
||||
ret = 0;
|
||||
} else {
|
||||
dev_dbg(ACRN_DBG_HYCALL,
|
||||
dev_dbg(DBG_LEVEL_HYCALL,
|
||||
"[vm%d] type=%d gpa=0x%x sos_vm_gpa=0x%x size=0x%x",
|
||||
target_vm->vm_id, region->type, region->gpa,
|
||||
region->sos_vm_gpa, region->size);
|
||||
@ -723,7 +723,7 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
|
||||
pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
|
||||
__func__, vm->vm_id, wp->gpa);
|
||||
} else {
|
||||
dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
|
||||
dev_dbg(DBG_LEVEL_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
|
||||
vm->vm_id, wp->gpa, hpa);
|
||||
|
||||
base_paddr = hva2hpa((void *)(get_hv_image_base()));
|
||||
|
@@ -9,7 +9,6 @@
 #include <errno.h>
 #include <logmsg.h>

-#define ACRN_DBG_TRUSTY_HYCALL 6U

 /**
 * @brief Switch vCPU state between Normal/Secure World.

@@ -16,8 +16,8 @@
 #include <sprintf.h>
 #include <logmsg.h>

-#define ACRN_DBG_PROFILING 5U
-#define ACRN_ERR_PROFILING 3U
+#define DBG_LEVEL_PROFILING 5U
+#define DBG_LEVEL_ERR_PROFILING 3U

 #define MAJOR_VERSION 1
 #define MINOR_VERSION 0
@ -38,10 +38,10 @@ extern struct irq_desc irq_desc_array[NR_IRQS];
|
||||
|
||||
static void profiling_initialize_vmsw(void)
|
||||
{
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
}
|
||||
|
||||
@ -57,11 +57,11 @@ static void profiling_initialize_pmi(void)
|
||||
struct profiling_msr_op *msrop = NULL;
|
||||
struct sep_state *ss = &get_cpu_var(profiling_info.s_state);
|
||||
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
|
||||
if (ss == NULL) {
|
||||
dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
|
||||
dev_dbg(DBG_LEVEL_ERR_PROFILING, "%s: exiting cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
return;
|
||||
}
|
||||
@ -78,7 +78,7 @@ static void profiling_initialize_pmi(void)
|
||||
}
|
||||
if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
|
||||
msr_write(msrop->msr_id, msrop->value);
|
||||
dev_dbg(ACRN_DBG_PROFILING,
|
||||
dev_dbg(DBG_LEVEL_PROFILING,
|
||||
"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%lx",
|
||||
__func__, get_pcpu_id(), msrop->msr_id, msrop->value);
|
||||
}
|
||||
@ -87,7 +87,7 @@ static void profiling_initialize_pmi(void)
|
||||
|
||||
ss->pmu_state = PMU_SETUP;
|
||||
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
}
|
||||
|
||||
@ -103,22 +103,22 @@ static void profiling_enable_pmu(void)
|
||||
struct profiling_msr_op *msrop = NULL;
|
||||
struct sep_state *ss = &get_cpu_var(profiling_info.s_state);
|
||||
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
|
||||
if (ss == NULL) {
|
||||
dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
|
||||
dev_dbg(DBG_LEVEL_ERR_PROFILING, "%s: exiting cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
return;
|
||||
}
|
||||
|
||||
/* Unmask LAPIC LVT entry for PMC register */
|
||||
lvt_perf_ctr = (uint32_t) msr_read(MSR_IA32_EXT_APIC_LVT_PMI);
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: 0x%x, 0x%lx",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: 0x%x, 0x%lx",
|
||||
__func__, MSR_IA32_EXT_APIC_LVT_PMI, lvt_perf_ctr);
|
||||
lvt_perf_ctr &= LVT_PERFCTR_BIT_UNMASK;
|
||||
msr_write(MSR_IA32_EXT_APIC_LVT_PMI, lvt_perf_ctr);
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: 0x%x, 0x%lx",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: 0x%x, 0x%lx",
|
||||
__func__, MSR_IA32_EXT_APIC_LVT_PMI, lvt_perf_ctr);
|
||||
|
||||
if (ss->guest_debugctl_value != 0U) {
|
||||
@ -154,7 +154,7 @@ static void profiling_enable_pmu(void)
|
||||
}
|
||||
if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
|
||||
msr_write(msrop->msr_id, msrop->value);
|
||||
dev_dbg(ACRN_DBG_PROFILING,
|
||||
dev_dbg(DBG_LEVEL_PROFILING,
|
||||
"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%lx",
|
||||
__func__, get_pcpu_id(), msrop->msr_id, msrop->value);
|
||||
}
|
||||
@ -163,7 +163,7 @@ static void profiling_enable_pmu(void)
|
||||
|
||||
ss->pmu_state = PMU_RUNNING;
|
||||
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
}
|
||||
|
||||
@ -178,7 +178,7 @@ static void profiling_disable_pmu(void)
|
||||
struct profiling_msr_op *msrop = NULL;
|
||||
struct sep_state *ss = &get_cpu_var(profiling_info.s_state);
|
||||
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
|
||||
if (ss != NULL) {
|
||||
@ -201,7 +201,7 @@ static void profiling_disable_pmu(void)
|
||||
}
|
||||
if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
|
||||
msr_write(msrop->msr_id, msrop->value);
|
||||
dev_dbg(ACRN_DBG_PROFILING,
|
||||
dev_dbg(DBG_LEVEL_PROFILING,
|
||||
"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%lx",
|
||||
__func__, get_pcpu_id(), msrop->msr_id, msrop->value);
|
||||
}
|
||||
@ -216,10 +216,10 @@ static void profiling_disable_pmu(void)
|
||||
|
||||
ss->pmu_state = PMU_SETUP;
|
||||
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
} else {
|
||||
dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
|
||||
dev_dbg(DBG_LEVEL_ERR_PROFILING, "%s: exiting cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
}
|
||||
}
|
||||
@ -315,7 +315,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
|
||||
uint64_t rflags;
|
||||
spinlock_t *sw_lock = NULL;
|
||||
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d",
|
||||
__func__, get_pcpu_id());
|
||||
|
||||
if (collector == COLLECT_PROFILE_DATA) {
|
||||
@ -323,7 +323,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
|
||||
|
||||
if (sbuf == NULL) {
|
||||
ss->samples_dropped++;
|
||||
dev_dbg(ACRN_DBG_PROFILING, "%s: sbuf is NULL exiting cpu%d",
|
||||
dev_dbg(DBG_LEVEL_PROFILING, "%s: sbuf is NULL exiting cpu%d",
|
||||
__func__, get_pcpu_id());
return 0;
}
@ -372,7 +372,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)

if ((uint64_t)remaining_space < (DATA_HEADER_SIZE + payload_size)) {
ss->samples_dropped++;
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: not enough space left in sbuf[%d: %d] exiting cpu%d",
__func__, remaining_space,
DATA_HEADER_SIZE + payload_size, get_pcpu_id());
@ -394,7 +394,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
sbuf = per_cpu(sbuf, get_pcpu_id())[ACRN_SOCWATCH];

if (sbuf == NULL) {
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: socwatch buffers not initialized?", __func__);
return 0;
}
@ -418,7 +418,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)

switch (type) {
case SOCWATCH_MSR_OP:
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: generating cstate/pstate sample socwatch cpu %d",
__func__, sw_msrop->cpu_id);
pkt_header.cpu_id = (uint16_t)sw_msrop->cpu_id;
@ -429,7 +429,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
break;

case SOCWATCH_VM_SWITCH_TRACING:
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: generating vm-switch sample", __func__);
payload_size = VM_SWITCH_TRACE_SIZE;
payload = &get_cpu_var(profiling_info.vm_trace);
@ -460,7 +460,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)

spinlock_irqrestore_release(sw_lock, rflags);
} else {
dev_dbg(ACRN_ERR_PROFILING,
dev_dbg(DBG_LEVEL_ERR_PROFILING,
"%s: Unknown collector type", __func__);
return 0;
}
@ -478,19 +478,19 @@ static void profiling_handle_msrops(void)
struct sw_msr_op_info *sw_msrop
= &(get_cpu_var(profiling_info.sw_msr_info));

dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d",
__func__, get_pcpu_id());

if ((my_msr_node == NULL) ||
(my_msr_node->msr_op_state != (int32_t)MSR_OP_REQUESTED)) {
dev_dbg(ACRN_DBG_PROFILING, "%s: invalid my_msr_node on cpu%d",
dev_dbg(DBG_LEVEL_PROFILING, "%s: invalid my_msr_node on cpu%d",
__func__, get_pcpu_id());
return;
}

if ((my_msr_node->num_entries == 0U) ||
(my_msr_node->num_entries >= MAX_MSR_LIST_NUM)) {
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: invalid num_entries on cpu%d",
__func__, get_pcpu_id());
return;
@ -501,7 +501,7 @@ static void profiling_handle_msrops(void)
case MSR_OP_READ:
my_msr_node->entries[i].value
= msr_read(my_msr_node->entries[i].msr_id);
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: MSRREAD cpu%d, msr_id=0x%x, msr_val=0x%lx",
__func__, get_pcpu_id(), my_msr_node->entries[i].msr_id,
my_msr_node->entries[i].value);
@ -509,7 +509,7 @@ static void profiling_handle_msrops(void)
case MSR_OP_READ_CLEAR:
my_msr_node->entries[i].value
= msr_read(my_msr_node->entries[i].msr_id);
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: MSRREADCLEAR cpu%d, msr_id=0x%x, msr_val=0x%lx",
__func__, get_pcpu_id(), my_msr_node->entries[i].msr_id,
my_msr_node->entries[i].value);
@ -518,7 +518,7 @@ static void profiling_handle_msrops(void)
case MSR_OP_WRITE:
msr_write(my_msr_node->entries[i].msr_id,
my_msr_node->entries[i].value);
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%lx",
__func__, get_pcpu_id(), my_msr_node->entries[i].msr_id,
my_msr_node->entries[i].value);
@ -564,7 +564,7 @@ static void profiling_handle_msrops(void)
my_msr_node->msr_op_state = (int32_t)MSR_OP_REQUESTED;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting cpu%d",
__func__, get_pcpu_id());
}

@ -582,7 +582,7 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
struct sep_state *ss = &(get_cpu_var(profiling_info.s_state));

if ((ss == NULL) || (psample == NULL)) {
dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
dev_dbg(DBG_LEVEL_ERR_PROFILING, "%s: exiting cpu%d",
__func__, get_pcpu_id());
return;
}
@ -726,7 +726,7 @@ static void profiling_start_pmu(void)
uint16_t i;
uint16_t pcpu_nums = get_pcpu_nums();

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (in_pmu_profiling) {
return;
@ -757,7 +757,7 @@ static void profiling_start_pmu(void)

in_pmu_profiling = true;

dev_dbg(ACRN_DBG_PROFILING, "%s: done", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: done", __func__);
}

/*
@ -768,7 +768,7 @@ static void profiling_stop_pmu(void)
uint16_t i;
uint16_t pcpu_nums = get_pcpu_nums();

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (in_pmu_profiling) {
for (i = 0U; i < pcpu_nums; i++) {
@ -777,19 +777,19 @@ static void profiling_stop_pmu(void)
per_cpu(profiling_info.s_state, i).pmu_state = PMU_SETUP;
}

dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: pmi_cnt[%d] = total:%u valid=%u, vmexit_cnt=%u",
__func__, i, per_cpu(profiling_info.s_state, i).total_pmi_count,
per_cpu(profiling_info.s_state, i).valid_pmi_count,
per_cpu(profiling_info.s_state, i).total_vmexit_count);

dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: cpu%d frozen well:%u frozen delayed=%u, nofrozen_pmi=%u",
__func__, i, per_cpu(profiling_info.s_state, i).frozen_well,
per_cpu(profiling_info.s_state, i).frozen_delayed,
per_cpu(profiling_info.s_state, i).nofrozen_pmi);

dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: cpu%d samples captured:%u samples dropped=%u",
__func__, i, per_cpu(profiling_info.s_state, i).samples_logged,
per_cpu(profiling_info.s_state, i).samples_dropped);
@ -800,7 +800,7 @@ static void profiling_stop_pmu(void)

in_pmu_profiling = false;

dev_dbg(ACRN_DBG_PROFILING, "%s: done.", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: done.", __func__);
}

}
@ -815,7 +815,7 @@ int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
struct profiling_msr_ops_list msr_list[MAX_PCPU_NUM];
uint16_t pcpu_nums = get_pcpu_nums();

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &msr_list, addr, (uint32_t)pcpu_nums * sizeof(struct profiling_msr_ops_list)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
@ -834,7 +834,7 @@ int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);
return 0;
}

@ -850,7 +850,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
struct profiling_vm_info_list vm_info_list;
uint16_t pcpu_nums = get_pcpu_nums();

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &vm_info_list, addr, sizeof(vm_info_list)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
@ -899,7 +899,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);
return 0;
}

@ -910,7 +910,7 @@ int32_t profiling_get_version_info(struct acrn_vm *vm, uint64_t addr)
{
struct profiling_version_info ver_info;

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &ver_info, addr, sizeof(ver_info)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
@ -930,7 +930,7 @@ int32_t profiling_get_version_info(struct acrn_vm *vm, uint64_t addr)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);

return 0;
}
@ -942,7 +942,7 @@ int32_t profiling_get_control(struct acrn_vm *vm, uint64_t addr)
{
struct profiling_control prof_control;

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &prof_control, addr, sizeof(prof_control)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
@ -966,7 +966,7 @@ int32_t profiling_get_control(struct acrn_vm *vm, uint64_t addr)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);

return 0;
}
@ -982,7 +982,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
uint16_t pcpu_nums = get_pcpu_nums();
struct profiling_control prof_control;

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &prof_control, addr, sizeof(prof_control)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
@ -995,7 +995,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
new_switch = prof_control.switches;
sep_collection_switch = prof_control.switches;

dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
" old_switch: %lu sep_collection_switch: %lu!",
old_switch, sep_collection_switch);

@ -1015,7 +1015,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
case VM_SWITCH_TRACING:
break;
default:
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: feature not supported %u",
__func__, i);
break;
@ -1024,17 +1024,17 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
}
break;
case COLLECT_POWER_DATA:
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: configuring socwatch", __func__);

socwatch_collection_switch = prof_control.switches;

dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"socwatch_collection_switch: %lu!",
socwatch_collection_switch);

if (socwatch_collection_switch != 0UL) {
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: socwatch start collection invoked!", __func__);
for (i = 0U; i < (uint16_t)MAX_SOCWATCH_FEATURE_ID; i++) {
if ((socwatch_collection_switch & (0x1UL << i)) != 0UL) {
@ -1042,12 +1042,12 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
case SOCWATCH_COMMAND:
break;
case SOCWATCH_VM_SWITCH_TRACING:
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: socwatch vm-switch feature requested!",
__func__);
break;
default:
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: socwatch feature not supported %u",
__func__, i);
break;
@ -1059,7 +1059,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
= SW_RUNNING;
}
} else { /* stop socwatch collection */
dev_dbg(ACRN_DBG_PROFILING,
dev_dbg(DBG_LEVEL_PROFILING,
"%s: socwatch stop collection invoked or collection switch not set!",
__func__);
for (i = 0U; i < pcpu_nums ; i++) {
@ -1079,7 +1079,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);

return 0;
}
@ -1093,7 +1093,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
struct profiling_pmi_config pmi_config;
uint16_t pcpu_nums = get_pcpu_nums();

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &pmi_config, addr, sizeof(pmi_config)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
@ -1156,7 +1156,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);
return 0;
}

@ -1170,7 +1170,7 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
struct profiling_vmsw_config vmsw_config;
uint16_t pcpu_nums = get_pcpu_nums();

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &vmsw_config, addr, sizeof(vmsw_config)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
@ -1218,7 +1218,7 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);

return ret;
}
@ -1230,7 +1230,7 @@ int32_t profiling_get_pcpu_id(struct acrn_vm *vm, uint64_t addr)
{
struct profiling_pcpuid pcpuid;

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &pcpuid, addr, sizeof(pcpuid)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
@ -1245,7 +1245,7 @@ int32_t profiling_get_pcpu_id(struct acrn_vm *vm, uint64_t addr)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);

return 0;
}
@ -1259,7 +1259,7 @@ int32_t profiling_get_status_info(struct acrn_vm *vm, uint64_t gpa)
struct profiling_status pstats[MAX_PCPU_NUM];
uint16_t pcpu_nums = get_pcpu_nums();

dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);

if (copy_from_gpa(vm, &pstats, gpa,
pcpu_nums*sizeof(struct profiling_status)) != 0) {
@ -1280,7 +1280,7 @@ int32_t profiling_get_status_info(struct acrn_vm *vm, uint64_t gpa)
return -EINVAL;
}

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);

return 0;
}
@ -1409,7 +1409,7 @@ void profiling_setup(void)
{
uint16_t cpu;
int32_t retval;
dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);
cpu = get_pcpu_id();
/* support PMI notification, SOS_VM will register all CPU */
if ((cpu == BOOT_CPU_ID) && (profiling_pmi_irq == IRQ_INVALID)) {
@ -1437,7 +1437,7 @@ void profiling_setup(void)
msr_write(MSR_IA32_EXT_APIC_LVT_PMI,
VECTOR_PMI | LVT_PERFCTR_BIT_MASK);

dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
dev_dbg(DBG_LEVEL_PROFILING, "%s: exiting", __func__);
}

#endif

@ -8,7 +8,7 @@
#include <errno.h>
#include <logmsg.h>

#define ACRN_DBG_IOREQUEST 6U
#define DBG_LEVEL_IOREQ 6U

static uint32_t acrn_vhm_notification_vector = VECTOR_HYPERVISOR_CALLBACK_VHM;
#define MMIO_DEFAULT_VALUE_SIZE_1 (0xFFUL)
@ -21,8 +21,8 @@ __unused static void acrn_print_request(uint16_t vcpu_id, const struct vhm_reque
{
switch (req->type) {
case REQ_MMIO:
dev_dbg(ACRN_DBG_IOREQUEST, "[vcpu_id=%hu type=MMIO]", vcpu_id);
dev_dbg(ACRN_DBG_IOREQUEST,
dev_dbg(DBG_LEVEL_IOREQ, "[vcpu_id=%hu type=MMIO]", vcpu_id);
dev_dbg(DBG_LEVEL_IOREQ,
"gpa=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
req->reqs.mmio.address,
req->reqs.mmio.direction,
@ -31,8 +31,8 @@ __unused static void acrn_print_request(uint16_t vcpu_id, const struct vhm_reque
req->processed);
break;
case REQ_PORTIO:
dev_dbg(ACRN_DBG_IOREQUEST, "[vcpu_id=%hu type=PORTIO]", vcpu_id);
dev_dbg(ACRN_DBG_IOREQUEST,
dev_dbg(DBG_LEVEL_IOREQ, "[vcpu_id=%hu type=PORTIO]", vcpu_id);
dev_dbg(DBG_LEVEL_IOREQ,
"IO=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
req->reqs.pio.address,
req->reqs.pio.direction,
@ -41,7 +41,7 @@ __unused static void acrn_print_request(uint16_t vcpu_id, const struct vhm_reque
req->processed);
break;
default:
dev_dbg(ACRN_DBG_IOREQUEST, "[vcpu_id=%hu type=%d] NOT support type",
dev_dbg(DBG_LEVEL_IOREQ, "[vcpu_id=%hu type=%d] NOT support type",
vcpu_id, req->type);
break;
}

@ -39,7 +39,7 @@

#define RTBL_RO_BITS ((uint32_t)0x00004000U | (uint32_t)0x00001000U) /*Remote IRR and Delivery Status bits*/

#define ACRN_DBG_IOAPIC 6U
#define DBG_LEVEL_VIOAPIC 6U
#define ACRN_IOAPIC_VERSION 0x11U

#define IOAPIC_ID_MASK 0x0f000000U
@ -63,7 +63,7 @@ vioapic_generate_intr(struct acrn_vioapic *vioapic, uint32_t pin)
rte = vioapic->rtbl[pin];

if (rte.bits.intr_mask == IOAPIC_RTE_MASK_SET) {
dev_dbg(ACRN_DBG_IOAPIC, "ioapic pin%hhu: masked", pin);
dev_dbg(DBG_LEVEL_VIOAPIC, "ioapic pin%hhu: masked", pin);
} else {
phys = (rte.bits.dest_mode == IOAPIC_RTE_DESTMODE_PHY);
delmode = rte.bits.delivery_mode;
@ -310,7 +310,7 @@ static void vioapic_indirect_write(struct acrn_vioapic *vioapic, uint32_t addr,
if ((vioapic->vm->wire_mode == VPIC_WIRE_NULL) ||
(vioapic->vm->wire_mode == VPIC_WIRE_INTR)) {
vioapic->vm->wire_mode = VPIC_WIRE_IOAPIC;
dev_dbg(ACRN_DBG_IOAPIC, "vpic wire mode -> IOAPIC");
dev_dbg(DBG_LEVEL_VIOAPIC, "vpic wire mode -> IOAPIC");
} else {
pr_err("WARNING: invalid vpic wire mode change");
wire_mode_valid = false;
@ -319,14 +319,14 @@ static void vioapic_indirect_write(struct acrn_vioapic *vioapic, uint32_t addr,
} else {
if (vioapic->vm->wire_mode == VPIC_WIRE_IOAPIC) {
vioapic->vm->wire_mode = VPIC_WIRE_INTR;
dev_dbg(ACRN_DBG_IOAPIC, "vpic wire mode -> INTR");
dev_dbg(DBG_LEVEL_VIOAPIC, "vpic wire mode -> INTR");
}
}
}

if (wire_mode_valid) {
vioapic->rtbl[pin] = new;
dev_dbg(ACRN_DBG_IOAPIC, "ioapic pin%hhu: redir table entry %#lx",
dev_dbg(DBG_LEVEL_VIOAPIC, "ioapic pin%hhu: redir table entry %#lx",
pin, vioapic->rtbl[pin].full);

/* remap for ptdev */
@ -345,7 +345,7 @@ static void vioapic_indirect_write(struct acrn_vioapic *vioapic, uint32_t addr,
if ((vioapic->rtbl[pin].bits.intr_mask == IOAPIC_RTE_MASK_CLR) &&
(vioapic->rtbl[pin].bits.remote_irr == 0UL) &&
vioapic_need_intr(vioapic, (uint16_t)pin)) {
dev_dbg(ACRN_DBG_IOAPIC, "ioapic pin%hhu: asserted at rtbl write", pin);
dev_dbg(DBG_LEVEL_VIOAPIC, "ioapic pin%hhu: asserted at rtbl write", pin);
vioapic_generate_intr(vioapic, pin);
}
}
@ -410,7 +410,7 @@ vioapic_process_eoi(struct acrn_vm *vm, uint32_t vector)
}

vioapic = vm_ioapic(vm);
dev_dbg(ACRN_DBG_IOAPIC, "ioapic processing eoi for vector %u", vector);
dev_dbg(DBG_LEVEL_VIOAPIC, "ioapic processing eoi for vector %u", vector);

/* notify device to ack if assigned pin */
for (pin = 0U; pin < pincount; pin++) {
@ -437,7 +437,7 @@ vioapic_process_eoi(struct acrn_vm *vm, uint32_t vector)

vioapic->rtbl[pin].bits.remote_irr = 0U;
if (vioapic_need_intr(vioapic, (uint16_t)pin)) {
dev_dbg(ACRN_DBG_IOAPIC,
dev_dbg(DBG_LEVEL_VIOAPIC,
"ioapic pin%hhu: asserted at eoi", pin);
vioapic_generate_intr(vioapic, pin);
}

@ -33,7 +33,7 @@
#include <spinlock.h>
#include <logmsg.h>

#define ACRN_DBG_PIC 6U
#define DBG_LEVEL_PIC 6U

static void vpic_set_pinstate(struct acrn_vpic *vpic, uint32_t pin, uint8_t level);

@ -149,7 +149,7 @@ static void vpic_notify_intr(struct acrn_vpic *vpic)
i8259 = &vpic->i8259[1];
pin = vpic_get_highest_irrpin(i8259);
if (!i8259->intr_raised && (pin < NR_VPIC_PINS_PER_CHIP)) {
dev_dbg(ACRN_DBG_PIC,
dev_dbg(DBG_LEVEL_PIC,
"pic slave notify pin = %hhu (imr 0x%x irr 0x%x isr 0x%x)\n",
pin, i8259->mask, i8259->request, i8259->service);

@ -160,7 +160,7 @@ static void vpic_notify_intr(struct acrn_vpic *vpic)
vpic_set_pinstate(vpic, 2U, 1U);
vpic_set_pinstate(vpic, 2U, 0U);
} else {
dev_dbg(ACRN_DBG_PIC,
dev_dbg(DBG_LEVEL_PIC,
"pic slave no eligible interrupt (imr 0x%x irr 0x%x isr 0x%x)",
i8259->mask, i8259->request, i8259->service);
}
@ -171,7 +171,7 @@ static void vpic_notify_intr(struct acrn_vpic *vpic)
i8259 = &vpic->i8259[0];
pin = vpic_get_highest_irrpin(i8259);
if (!i8259->intr_raised && (pin < NR_VPIC_PINS_PER_CHIP)) {
dev_dbg(ACRN_DBG_PIC,
dev_dbg(DBG_LEVEL_PIC,
"pic master notify pin = %hhu (imr 0x%x irr 0x%x isr 0x%x)\n",
pin, i8259->mask, i8259->request, i8259->service);

@ -218,7 +218,7 @@ static void vpic_notify_intr(struct acrn_vpic *vpic)
vioapic_set_irqline_lock(vpic->vm, 0U, GSI_RAISING_PULSE);
}
} else {
dev_dbg(ACRN_DBG_PIC,
dev_dbg(DBG_LEVEL_PIC,
"pic master no eligible interrupt (imr 0x%x irr 0x%x isr 0x%x)",
i8259->mask, i8259->request, i8259->service);
}
@ -228,7 +228,7 @@ static int32_t vpic_icw1(const struct acrn_vpic *vpic, struct i8259_reg_state *i
{
int32_t ret;

dev_dbg(ACRN_DBG_PIC, "vm 0x%x: i8259 icw1 0x%x\n",
dev_dbg(DBG_LEVEL_PIC, "vm 0x%x: i8259 icw1 0x%x\n",
vpic->vm, val);

i8259->ready = false;
@ -242,10 +242,10 @@ static int32_t vpic_icw1(const struct acrn_vpic *vpic, struct i8259_reg_state *i
i8259->smm = 0U;

if ((val & ICW1_SNGL) != 0U) {
dev_dbg(ACRN_DBG_PIC, "vpic cascade mode required\n");
dev_dbg(DBG_LEVEL_PIC, "vpic cascade mode required\n");
ret = -1;
} else if ((val & ICW1_IC4) == 0U) {
dev_dbg(ACRN_DBG_PIC, "vpic icw4 required\n");
dev_dbg(DBG_LEVEL_PIC, "vpic icw4 required\n");
ret = -1;
} else {
i8259->icw_num++;
@ -257,7 +257,7 @@ static int32_t vpic_icw1(const struct acrn_vpic *vpic, struct i8259_reg_state *i

static int32_t vpic_icw2(const struct acrn_vpic *vpic, struct i8259_reg_state *i8259, uint8_t val)
{
dev_dbg(ACRN_DBG_PIC, "vm 0x%x: i8259 icw2 0x%x\n",
dev_dbg(DBG_LEVEL_PIC, "vm 0x%x: i8259 icw2 0x%x\n",
vpic->vm, val);

i8259->irq_base = val & 0xf8U;
@ -269,7 +269,7 @@ static int32_t vpic_icw2(const struct acrn_vpic *vpic, struct i8259_reg_state *i

static int32_t vpic_icw3(const struct acrn_vpic *vpic, struct i8259_reg_state *i8259, uint8_t val)
{
dev_dbg(ACRN_DBG_PIC, "vm 0x%x: i8259 icw3 0x%x\n",
dev_dbg(DBG_LEVEL_PIC, "vm 0x%x: i8259 icw3 0x%x\n",
vpic->vm, val);

i8259->icw_num++;
@ -281,11 +281,11 @@ static int32_t vpic_icw4(const struct acrn_vpic *vpic, struct i8259_reg_state *i
{
int32_t ret;

dev_dbg(ACRN_DBG_PIC, "vm 0x%x: i8259 icw4 0x%x\n",
dev_dbg(DBG_LEVEL_PIC, "vm 0x%x: i8259 icw4 0x%x\n",
vpic->vm, val);

if ((val & ICW4_8086) == 0U) {
dev_dbg(ACRN_DBG_PIC,
dev_dbg(DBG_LEVEL_PIC,
"vpic microprocessor mode required\n");
ret = -1;
} else {
@ -297,7 +297,7 @@ static int32_t vpic_icw4(const struct acrn_vpic *vpic, struct i8259_reg_state *i
if (master_pic(vpic, i8259)) {
i8259->sfn = true;
} else {
dev_dbg(ACRN_DBG_PIC,
dev_dbg(DBG_LEVEL_PIC,
"Ignoring special fully nested mode on slave pic: %#x",
val);
}
@ -316,7 +316,7 @@ static int32_t vpic_ocw1(const struct acrn_vpic *vpic, struct i8259_reg_state *i
uint32_t pin, i, bit;
uint8_t old = i8259->mask;

dev_dbg(ACRN_DBG_PIC, "vm 0x%x: i8259 ocw1 0x%x\n",
dev_dbg(DBG_LEVEL_PIC, "vm 0x%x: i8259 ocw1 0x%x\n",
vpic->vm, val);

i8259->mask = val & 0xffU;
@ -352,7 +352,7 @@ static int32_t vpic_ocw1(const struct acrn_vpic *vpic, struct i8259_reg_state *i

static int32_t vpic_ocw2(const struct acrn_vpic *vpic, struct i8259_reg_state *i8259, uint8_t val)
{
dev_dbg(ACRN_DBG_PIC, "vm 0x%x: i8259 ocw2 0x%x\n",
dev_dbg(DBG_LEVEL_PIC, "vm 0x%x: i8259 ocw2 0x%x\n",
vpic->vm, val);

i8259->rotate = ((val & OCW2_R) != 0U);
@ -393,12 +393,12 @@ static int32_t vpic_ocw2(const struct acrn_vpic *vpic, struct i8259_reg_state *i

static int32_t vpic_ocw3(const struct acrn_vpic *vpic, struct i8259_reg_state *i8259, uint8_t val)
{
dev_dbg(ACRN_DBG_PIC, "vm 0x%x: i8259 ocw3 0x%x\n",
dev_dbg(DBG_LEVEL_PIC, "vm 0x%x: i8259 ocw3 0x%x\n",
vpic->vm, val);

if ((val & OCW3_ESMM) != 0U) {
i8259->smm = ((val & OCW3_SMM) != 0U) ? 1U : 0U;
dev_dbg(ACRN_DBG_PIC, "%s i8259 special mask mode %s\n",
dev_dbg(DBG_LEVEL_PIC, "%s i8259 special mask mode %s\n",
master_pic(vpic, i8259) ? "master" : "slave",
(i8259->smm != 0U) ? "enabled" : "disabled");
}
@ -436,16 +436,16 @@ static void vpic_set_pinstate(struct acrn_vpic *vpic, uint32_t pin, uint8_t leve

if (((old_lvl == 0U) && (level == 1U)) || ((level == 1U) && lvl_trigger)) {
/* raising edge or level */
dev_dbg(ACRN_DBG_PIC, "pic pin%hhu: asserted\n", pin);
dev_dbg(DBG_LEVEL_PIC, "pic pin%hhu: asserted\n", pin);
i8259->request |= (uint8_t)(1U << (pin & 0x7U));
} else if ((old_lvl == 1U) && (level == 0U)) {
/* falling edge */
dev_dbg(ACRN_DBG_PIC, "pic pin%hhu: deasserted\n", pin);
dev_dbg(DBG_LEVEL_PIC, "pic pin%hhu: deasserted\n", pin);
if (lvl_trigger) {
i8259->request &= ~(uint8_t)(1U << (pin & 0x7U));
}
} else {
dev_dbg(ACRN_DBG_PIC, "pic pin%hhu: %s, ignored\n",
dev_dbg(DBG_LEVEL_PIC, "pic pin%hhu: %s, ignored\n",
pin, (level != 0U) ? "asserted" : "deasserted");
}
}
@ -555,7 +555,7 @@ void vpic_pending_intr(struct acrn_vpic *vpic, uint32_t *vecptr)
} else {
*vecptr = i8259->irq_base + pin;

dev_dbg(ACRN_DBG_PIC, "Got pending vector 0x%x\n", *vecptr);
dev_dbg(DBG_LEVEL_PIC, "Got pending vector 0x%x\n", *vecptr);
}

spinlock_release(&(vpic->lock));

@ -17,8 +17,8 @@
* @brief public APIs for virtual IRQ
*/

#define ACRN_DBG_PTIRQ 6U
#define ACRN_DBG_IRQ 6U
#define DBG_LEVEL_PTIRQ 6U
#define DBG_LEVEL_IRQ 6U

/* vectors range for dynamic allocation, usually for devices */
#define VECTOR_DYNAMIC_START 0x20U

@ -27,7 +27,7 @@
*/
#define LOG_MESSAGE_MAX_SIZE (4U * LOG_ENTRY_SIZE)

#define ACRN_DBG_LAPICPT 5U
#define DBG_LEVEL_LAPICPT 5U
#if defined(HV_DEBUG)

extern uint16_t console_loglevel;