HV:treewide:fix "expression is not Boolean"

MISRA C explicitly requires that the controlling expression of a
branch or loop statement (if, while, ...) be Boolean.
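
For reference, the rewrite patterns applied across the tree look like the
sketch below (hypothetical example() and names, not code from this tree):
pointers are compared against NULL, unsigned values and mask tests against
0U, and signed status codes against 0.

    #include <stddef.h>
    #include <stdint.h>

    static void example(const char *ptr, uint32_t flags, int status)
    {
        /* pointer: explicit NULL comparison instead of implicit truth test */
        if (ptr != NULL) {
            /* ... */
        }
        /* unsigned value or bitmask: compare the result against 0U */
        if ((flags & 0x1U) != 0U) {
            /* ... */
        }
        /* signed status code: compare against plain 0 */
        if (status != 0) {
            /* ... */
        }
        /* a negated call, !f(x), becomes f(x) == 0 */
    }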

Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Huihuang Shi
Date: 2018-06-20 13:28:25 +08:00
Committed by: lijinxia
Parent: f92931c879
Commit: be0f5e6c16
17 changed files with 157 additions and 157 deletions

View File

@@ -224,12 +224,12 @@ int shutdown_vm(struct vm *vm)
free_io_emulation_resource(vm);
/* Free iommu_domain */
-if (vm->iommu_domain)
+if (vm->iommu_domain != NULL)
destroy_iommu_domain(vm->iommu_domain);
bitmap_clear(vm->attr.id, &vmid_bitmap);
-if (vm->vpic)
+if (vm->vpic != NULL)
vpic_cleanup(vm);
free(vm->hw.vcpu_array);

View File

@@ -90,7 +90,7 @@ void init_msr_emulation(struct vcpu *vcpu)
/* Allocate and initialize memory for MSR bitmap region*/
vcpu->vm->arch_vm.msr_bitmap = alloc_page();
-ASSERT(vcpu->vm->arch_vm.msr_bitmap, "");
+ASSERT(vcpu->vm->arch_vm.msr_bitmap != NULL, "");
memset(vcpu->vm->arch_vm.msr_bitmap, 0x0, CPU_PAGE_SIZE);
msr_bitmap = vcpu->vm->arch_vm.msr_bitmap;
@@ -307,7 +307,7 @@ int wrmsr_vmexit_handler(struct vcpu *vcpu)
}
case MSR_IA32_PERF_CTL:
{
-if (validate_pstate(vcpu->vm, v)) {
+if (validate_pstate(vcpu->vm, v) != 0) {
break;
}
msr_write(msr, v);
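
A note on the integer suffixes above: validate_pstate() returns a signed int,
so its result is compared against plain 0, while unsigned registers and
bit-fields elsewhere in this series are compared against 0U so that both
operands of ==/!= keep the same essential type. A minimal sketch with
hypothetical helpers (not this tree's functions):

    #include <stdint.h>

    static int check_status(void) { return -1; }        /* signed status code */
    static uint32_t read_register(void) { return 0U; }  /* unsigned register */

    static void dispatch(void)
    {
        if (check_status() != 0) {
            /* signed result: compare with 0 */
        }
        if (read_register() != 0U) {
            /* unsigned result: compare with 0U */
        }
    }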

View File

@@ -164,7 +164,7 @@ static void clear_lapic_isr(void)
* life, therefore we will ensure all the in-service bits are clear.
*/
do {
-if (read_lapic_reg32(isr_reg)) {
+if (read_lapic_reg32(isr_reg) != 0U) {
write_lapic_reg32(LAPIC_EOI_REGISTER, 0);
continue;
}
@@ -328,7 +328,7 @@ static void wait_for_delivery(void)
do {
tmp.value_32.lo_32 =
read_lapic_reg32(LAPIC_INT_COMMAND_REGISTER_0);
-} while (tmp.bits.delivery_status);
+} while (tmp.bits.delivery_status != 0U);
}
uint32_t get_cur_lapic_id(void)

View File

@@ -63,7 +63,7 @@ void setup_notification(void)
void cleanup_notification(void)
{
-if (notification_node)
+if (notification_node != NULL)
unregister_handler_common(notification_node);
notification_node = NULL;
}

View File

@@ -81,7 +81,7 @@ again:
CPU_IRQ_DISABLE();
-if (((*bitmap) & SOFTIRQ_MASK))
+if (((*bitmap) & SOFTIRQ_MASK) != 0U)
goto again;
enable_softirq(cpu_id);

View File

@@ -16,7 +16,7 @@ uint64_t tsc_hz = 1000000000;
static void run_timer(struct timer *timer)
{
/* deadline = 0 means stop timer, we should skip */
-if (timer->func && timer->fire_tsc != 0UL)
+if ((timer->func != NULL) && timer->fire_tsc != 0UL)
timer->func(timer->priv_data);
TRACE_2L(TRACE_TIMER_ACTION_PCKUP, timer->fire_tsc, 0);
@@ -62,7 +62,7 @@ static void __add_timer(struct per_cpu_timers *cpu_timer,
list_add(&timer->node, prev);
-if (need_update)
+if (need_update != NULL)
/* update the physical timer if we're on the timer_list head */
*need_update = (prev == &cpu_timer->timer_list);
}
@@ -95,7 +95,7 @@ int add_timer(struct timer *timer)
void del_timer(struct timer *timer)
{
-if (timer && !list_empty(&timer->node))
+if ((timer != NULL) && !list_empty(&timer->node))
list_del_init(&timer->node);
}
@@ -108,7 +108,7 @@ static int request_timer_irq(int pcpu_id,
if (pcpu_id >= phy_cpu_num)
return -EINVAL;
-if (per_cpu(timer_node, pcpu_id)) {
+if (per_cpu(timer_node, pcpu_id) != NULL) {
pr_err("CPU%d timer isr already added", pcpu_id);
unregister_handler_common(per_cpu(timer_node, pcpu_id));
}
@@ -165,7 +165,7 @@ void timer_cleanup(void)
{
int pcpu_id = get_cpu_id();
-if (per_cpu(timer_node, pcpu_id))
+if (per_cpu(timer_node, pcpu_id) != NULL)
unregister_handler_common(per_cpu(timer_node, pcpu_id));
per_cpu(timer_node, pcpu_id) = NULL;
@@ -286,7 +286,7 @@ static uint64_t native_calibrate_tsc(void)
void calibrate_tsc(void)
{
tsc_hz = native_calibrate_tsc();
-if (!tsc_hz)
+if (tsc_hz == 0U)
tsc_hz = pit_calibrate_tsc(CAL_MS);
printf("%s, tsc_hz=%lu\n", __func__, tsc_hz);
}
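
The timer changes show the compound-condition case: each operand of && must
itself be Boolean, so the pointer operand is parenthesized and compared
against NULL on its own while the counter keeps its != 0UL test. A hedged
sketch with a hypothetical struct (not this tree's struct timer):

    #include <stddef.h>
    #include <stdint.h>

    struct timer_like {
        void (*func)(void *data);
        void *priv_data;
        uint64_t fire_tsc;
    };

    static void run_if_armed(const struct timer_like *t)
    {
        /* both operands are Boolean before they are combined */
        if ((t->func != NULL) && (t->fire_tsc != 0UL)) {
            t->func(t->priv_data);
        }
    }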

View File

@@ -315,12 +315,12 @@ static bool setup_trusty_info(struct vcpu *vcpu,
/* Derive dvseed from dseed for Trusty */
key_info = &mem->first_page.data.key_info;
for (i = 0; i < g_key_info.num_seeds; i++) {
-if (!hkdf_sha256(key_info->dseed_list[i].seed,
+if (hkdf_sha256(key_info->dseed_list[i].seed,
BUP_MKHI_BOOTLOADER_SEED_LEN,
g_key_info.dseed_list[i].seed,
BUP_MKHI_BOOTLOADER_SEED_LEN,
NULL, 0,
-vcpu->vm->GUID, sizeof(vcpu->vm->GUID))) {
+vcpu->vm->GUID, sizeof(vcpu->vm->GUID)) == 0) {
memset(key_info, 0, sizeof(struct key_info));
pr_err("%s: derive dvseed failed!", __func__);
return false;
@@ -402,12 +402,12 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
return false;
}
-if (!boot_param->entry_point) {
+if (boot_param->entry_point == 0) {
pr_err("%s: Invalid entry point\n", __func__);
return false;
}
-if (!boot_param->base_addr) {
+if (boot_param->base_addr == 0) {
pr_err("%s: Invalid memory base address\n", __func__);
return false;
}
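
Note how the negated calls are handled: !hkdf_sha256(...) becomes
hkdf_sha256(...) == 0 rather than gaining a != 0, which keeps the failure
branch intact (assuming the function follows the usual convention of
returning nonzero on success and 0 on failure). A minimal sketch with a
hypothetical derive() standing in for the real call:

    /* hypothetical: returns 1 on success, 0 on failure */
    static int derive(void)
    {
        return 1;
    }

    static int caller(void)
    {
        /* before: if (!derive())     -- failure branch
         * after:  if (derive() == 0) -- same branch, Boolean form */
        if (derive() == 0) {
            return -1;
        }
        return 0;
    }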

View File

@@ -143,14 +143,14 @@ int vmexit_handler(struct vcpu *vcpu)
vcpu->arch_vcpu.idt_vectoring_info =
exec_vmread(VMX_IDT_VEC_INFO_FIELD);
/* Filter out HW exception & NMI */
-if (vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) {
+if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
uint32_t vector_info = vcpu->arch_vcpu.idt_vectoring_info;
uint32_t vector = vector_info & 0xff;
uint32_t type = (vector_info & VMX_INT_TYPE_MASK) >> 8;
uint32_t err_code = 0;
if (type == VMX_INT_TYPE_HW_EXP) {
-if (vector_info & VMX_INT_INFO_ERR_CODE_VALID)
+if ((vector_info & VMX_INT_INFO_ERR_CODE_VALID) != 0U)
err_code = exec_vmread(VMX_IDT_VEC_ERROR_CODE);
vcpu_queue_exception(vcpu, vector, err_code);
vcpu->arch_vcpu.idt_vectoring_info = 0;
@@ -180,7 +180,7 @@ int vmexit_handler(struct vcpu *vcpu)
/* See if an exit qualification is necessary for this exit
* handler
*/
-if (dispatch->need_exit_qualification) {
+if (dispatch->need_exit_qualification != 0U) {
/* Get exit qualification */
vcpu->arch_vcpu.exit_qualification =
exec_vmread(VMX_EXIT_QUALIFICATION);
@@ -306,7 +306,7 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
struct run_context *ctx_ptr;
val64 = exec_vmread(VMX_GUEST_CR4);
-if (!(val64 & CR4_OSXSAVE)) {
+if ((val64 & CR4_OSXSAVE) == 0U) {
vcpu_inject_gp(vcpu, 0);
return -1;
}
@@ -327,7 +327,7 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
(ctx_ptr->guest_cpu_regs.regs.rdx << 32);
/*bit 0(x87 state) of XCR0 can't be cleared*/
-if (!(val64 & 0x01)) {
+if ((val64 & 0x01) == 0U) {
vcpu_inject_gp(vcpu, 0);
return -1;
}

View File

@@ -47,9 +47,9 @@ static inline int exec_vmxon(void *addr)
tmp64 = msr_read(MSR_IA32_FEATURE_CONTROL);
/* Determine if feature control is locked */
-if (tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) {
+if ((tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) != 0U) {
/* See if VMX enabled */
-if (!(tmp64 & MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX)) {
+if ((tmp64 & MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX) == 0U) {
/* Return error - VMX can't be enabled */
pr_err("%s, VMX can't be enabled\n", __func__);
status = -EINVAL;
@@ -72,7 +72,7 @@ static inline int exec_vmxon(void *addr)
: "%rax", "cc", "memory");
/* if carry and zero flags are clear operation success */
-if (rflags & (RFLAGS_C | RFLAGS_Z)) {
+if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U) {
pr_err("%s, Turn VMX on failed\n", __func__);
status = -EINVAL;
}
@@ -117,7 +117,7 @@ int exec_vmxon_instr(uint32_t pcpu_id)
per_cpu(vmxon_region_pa, pcpu_id) = HVA2HPA(vmxon_region_va);
ret = exec_vmxon(&per_cpu(vmxon_region_pa, pcpu_id));
-if (vcpu) {
+if (vcpu != NULL) {
vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
ret = exec_vmptrld(&vmcs_pa);
}
@@ -135,10 +135,10 @@ int vmx_off(int pcpu_id)
struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
uint64_t vmcs_pa;
-if (vcpu) {
+if (vcpu != NULL) {
vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
ret = exec_vmclear((void *)&vmcs_pa);
-if (ret)
+if (ret != 0)
return ret;
}
@@ -165,7 +165,7 @@ int exec_vmclear(void *addr)
: "%rax", "cc", "memory");
/* if carry and zero flags are clear operation success */
-if (rflags & (RFLAGS_C | RFLAGS_Z))
+if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U)
status = -EINVAL;
return status;
@@ -190,7 +190,7 @@ int exec_vmptrld(void *addr)
: "%rax", "cc");
/* if carry and zero flags are clear operation success */
-if (rflags & (RFLAGS_C | RFLAGS_Z))
+if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U)
status = -EINVAL;
return status;
@@ -331,7 +331,7 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
uint32_t entry_ctrls;
bool paging_enabled = !!(context->cr0 & CR0_PG);
-if (cr0 & (cr0_always_off_mask | CR0_RESERVED_MASK)) {
+if ((cr0 & (cr0_always_off_mask | CR0_RESERVED_MASK)) != 0U) {
pr_err("Not allow to set always off / reserved bits for CR0");
vcpu_inject_gp(vcpu, 0);
return -EINVAL;
@@ -340,9 +340,9 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
/* TODO: Check all invalid guest statuses according to the change of
* CR0, and inject a #GP to guest */
-if ((context->ia32_efer & MSR_IA32_EFER_LME_BIT) &&
-!paging_enabled && (cr0 & CR0_PG)) {
-if (!(context->cr4 & CR4_PAE)) {
+if (((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0U) &&
+!paging_enabled && ((cr0 & CR0_PG) != 0U)) {
+if ((context->cr4 & CR4_PAE) == 0U) {
pr_err("Can't enable long mode when PAE disabled");
vcpu_inject_gp(vcpu, 0);
return -EINVAL;
@@ -355,8 +355,8 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
context->ia32_efer |= MSR_IA32_EFER_LMA_BIT;
exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
-} else if ((context->ia32_efer & MSR_IA32_EFER_LME_BIT) &&
-paging_enabled && !(cr0 & CR0_PG)){
+} else if (((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0U) &&
+paging_enabled && ((cr0 & CR0_PG) == 0U)){
/* Disable long mode */
pr_dbg("VMM: Disable long mode");
entry_ctrls = exec_vmread(VMX_ENTRY_CONTROLS);
@@ -436,14 +436,14 @@ int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
* CR4, and inject a #GP to guest */
/* Check if guest try to set fixed to 0 bits or reserved bits */
-if(cr4 & cr4_always_off_mask) {
+if((cr4 & cr4_always_off_mask) != 0U) {
pr_err("Not allow to set reserved/always off bits for CR4");
vcpu_inject_gp(vcpu, 0);
return -EINVAL;
}
/* Do NOT support nested guest */
-if (cr4 & CR4_VMXE) {
+if ((cr4 & CR4_VMXE) != 0U) {
pr_err("Nested guest not supported");
vcpu_inject_gp(vcpu, 0);
return -EINVAL;
@@ -620,7 +620,7 @@ static void init_guest_state(struct vcpu *vcpu)
value32 = gdtb.limit;
-if ((gdtb.base >> 47) & 0x1)
+if (((gdtb.base >> 47) & 0x1) != 0U)
gdtb.base |= 0xffff000000000000ull;
base = gdtb.base;
@@ -655,7 +655,7 @@ static void init_guest_state(struct vcpu *vcpu)
/* Limit */
limit = idtb.limit;
-if ((idtb.base >> 47) & 0x1)
+if (((idtb.base >> 47) & 0x1) != 0U)
idtb.base |= 0xffff000000000000ull;
/* Base */
@@ -953,7 +953,7 @@ static void init_host_state(__unused struct vcpu *vcpu)
asm volatile ("sgdt %0"::"m" (gdtb));
value32 = gdtb.limit;
-if ((gdtb.base >> 47) & 0x1)
+if (((gdtb.base >> 47) & 0x1) != 0U)
gdtb.base |= 0xffff000000000000ull;
/* Set up the guest and host GDTB base fields with current GDTB base */
@@ -963,7 +963,7 @@ static void init_host_state(__unused struct vcpu *vcpu)
/* TODO: Should guest TR point to host TR ? */
trbase = gdtb.base + tr_sel;
-if ((trbase >> 47) & 0x1)
+if (((trbase >> 47) & 0x1) != 0U)
trbase |= 0xffff000000000000ull;
/* SS segment override */
@@ -989,7 +989,7 @@ static void init_host_state(__unused struct vcpu *vcpu)
/* Obtain the current interrupt descriptor table base */
asm volatile ("sidt %0"::"m" (idtb));
/* base */
-if ((idtb.base >> 47) & 0x1)
+if (((idtb.base >> 47) & 0x1) != 0U)
idtb.base |= 0xffff000000000000ull;
field = VMX_HOST_IDTR_BASE;
@@ -1144,7 +1144,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
VMX_PROCBASED_CTLS2_RDTSCP |
VMX_PROCBASED_CTLS2_UNRESTRICT);
-if (vcpu->arch_vcpu.vpid)
+if (vcpu->arch_vcpu.vpid != 0)
value32 |= VMX_PROCBASED_CTLS2_VPID;
else
value32 &= ~VMX_PROCBASED_CTLS2_VPID;
@@ -1201,7 +1201,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
}
/* Check for EPT support */
-if (is_ept_supported())
+if (is_ept_supported() != 0)
pr_dbg("EPT is supported");
else
pr_err("Error: EPT is not supported");

View File

@@ -175,7 +175,7 @@ static int register_hrhd_units(void)
struct dmar_drhd_rt *drhd_rt;
uint32_t i;
-if (!info) {
+if (info == NULL) {
pr_warn("vtd: no dmar units found");
return -1;
}
@@ -232,7 +232,7 @@ static void iommu_flush_cache(struct dmar_drhd_rt *dmar_uint,
uint32_t i;
/* if vtd support page-walk coherency, no need to flush cacheline */
-if (iommu_ecap_c(dmar_uint->ecap))
+if (iommu_ecap_c(dmar_uint->ecap) != 0U)
return;
for (i = 0; i < size; i += CACHE_LINE_SIZE)
@@ -326,7 +326,7 @@ static uint8_t dmar_uint_get_msagw(struct dmar_drhd_rt *dmar_uint)
uint8_t sgaw = iommu_cap_sagaw(dmar_uint->cap);
for (i = 4; i >= 0; i--) {
-if ((1 << i) & sgaw)
+if (((1 << i) & sgaw) != 0)
break;
}
return (uint8_t)i;
@@ -351,7 +351,7 @@ static void dmar_enable_translation(struct dmar_drhd_rt *dmar_uint)
iommu_write32(dmar_uint, DMAR_GCMD_REG, dmar_uint->gcmd);
/* 32-bit register */
-DMAR_WAIT_COMPLETION(DMAR_GSTS_REG, status & DMA_GSTS_TES, status);
+DMAR_WAIT_COMPLETION(DMAR_GSTS_REG, (status & DMA_GSTS_TES) != 0U, status);
status = iommu_read32(dmar_uint, DMAR_GSTS_REG);
@@ -370,7 +370,7 @@ static void dmar_disable_translation(struct dmar_drhd_rt *dmar_uint)
iommu_write32(dmar_uint, DMAR_GCMD_REG, dmar_uint->gcmd);
/* 32-bit register */
-DMAR_WAIT_COMPLETION(DMAR_GSTS_REG, !(status & DMA_GSTS_TES), status);
+DMAR_WAIT_COMPLETION(DMAR_GSTS_REG, (status & DMA_GSTS_TES) == 0U, status);
IOMMU_UNLOCK(dmar_uint);
}
@@ -419,7 +419,7 @@ static void dmar_register_hrhd(struct dmar_drhd_rt *dmar_uint)
* How to guarantee it when EPT is used as second-level
* translation paging structures?
*/
-if (!iommu_ecap_sc(dmar_uint->ecap))
+if (iommu_ecap_sc(dmar_uint->ecap) == 0U)
dev_dbg(ACRN_DBG_IOMMU,
"dmar uint doesn't support snoop control!");
@@ -432,14 +432,14 @@ static void dmar_register_hrhd(struct dmar_drhd_rt *dmar_uint)
max_domain_id = dmar_uint->max_domain_id;
/* register operation is considered serial, no lock here */
-if (dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK)
+if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U)
list_add_tail(&dmar_uint->list, &dmar_drhd_units);
else
list_add(&dmar_uint->list, &dmar_drhd_units);
dmar_hdrh_unit_count++;
-if (dmar_uint->gcmd & DMA_GCMD_TE)
+if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0)
dmar_disable_translation(dmar_uint);
}
@@ -465,7 +465,7 @@ static struct dmar_drhd_rt *device_to_dmaru(uint16_t segment, uint8_t bus,
/* has the same segment number and
* the dmar unit has INCLUDE_PCI_ALL set
*/
-if (dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK)
+if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U)
return dmar_uint;
}
@@ -519,7 +519,7 @@ static void dmar_write_buffer_flush(struct dmar_drhd_rt *dmar_uint)
{
uint32_t status;
-if (!iommu_cap_rwbf(dmar_uint->cap))
+if (iommu_cap_rwbf(dmar_uint->cap) == 0U)
return;
IOMMU_LOCK(dmar_uint);
@@ -527,7 +527,7 @@ static void dmar_write_buffer_flush(struct dmar_drhd_rt *dmar_uint)
dmar_uint->gcmd | DMA_GCMD_WBF);
/* read lower 32 bits to check */
-DMAR_WAIT_COMPLETION(DMAR_GSTS_REG, !(status & DMA_GSTS_WBFS), status);
+DMAR_WAIT_COMPLETION(DMAR_GSTS_REG, (status & DMA_GSTS_WBFS) == 0U, status);
IOMMU_UNLOCK(dmar_uint);
}
@@ -562,7 +562,7 @@ static void dmar_invalid_context_cache(struct dmar_drhd_rt *dmar_uint,
IOMMU_LOCK(dmar_uint);
iommu_write64(dmar_uint, DMAR_CCMD_REG, cmd);
/* read upper 32bits to check */
-DMAR_WAIT_COMPLETION(DMAR_CCMD_REG + 4, !(status & DMA_CCMD_ICC_32),
+DMAR_WAIT_COMPLETION(DMAR_CCMD_REG + 4, (status & DMA_CCMD_ICC_32) == 0U,
status);
IOMMU_UNLOCK(dmar_uint);
@@ -605,16 +605,16 @@ static void dmar_invalid_iotlb(struct dmar_drhd_rt *dmar_uint,
return;
}
IOMMU_LOCK(dmar_uint);
-if (addr)
+if (addr != 0U)
iommu_write64(dmar_uint, dmar_uint->ecap_iotlb_offset, addr);
iommu_write64(dmar_uint, dmar_uint->ecap_iotlb_offset + 8, cmd);
/* read upper 32bits to check */
DMAR_WAIT_COMPLETION(dmar_uint->ecap_iotlb_offset + 12,
-!(status & DMA_IOTLB_IVT_32), status);
+(status & DMA_IOTLB_IVT_32) == 0U, status);
IOMMU_UNLOCK(dmar_uint);
-if (!DMA_IOTLB_GET_IAIG_32(status)) {
+if (DMA_IOTLB_GET_IAIG_32(status) == 0U) {
pr_err("fail to invalidate IOTLB!, 0x%x, 0x%x",
status, iommu_read32(dmar_uint, DMAR_FSTS_REG));
}
@@ -646,7 +646,7 @@ static void dmar_set_root_table(struct dmar_drhd_rt *dmar_uint)
dmar_uint->gcmd | DMA_GCMD_SRTP);
/* 32-bit register */
-DMAR_WAIT_COMPLETION(DMAR_GSTS_REG, status & DMA_GSTS_RTPS, status);
+DMAR_WAIT_COMPLETION(DMAR_GSTS_REG, (status & DMA_GSTS_RTPS) != 0U, status);
IOMMU_UNLOCK(dmar_uint);
}
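
The DMAR_WAIT_COMPLETION changes show that the rule also covers expressions
handed to macros that expand into loop conditions: the condition argument is
made Boolean at the call site. A hedged sketch of the idea (hypothetical
WAIT_UNTIL macro, not the tree's definition):

    #include <stdint.h>

    /* hypothetical polling macro: re-reads STATUS until COND becomes true */
    #define WAIT_UNTIL(COND, STATUS, READ_FN)   \
        do {                                    \
            (STATUS) = (READ_FN)();             \
        } while (!(COND))

    static uint32_t read_status(void)
    {
        return 0x1U;
    }

    static void example_wait(void)
    {
        uint32_t status;

        /* the condition is passed in already-Boolean form */
        WAIT_UNTIL((status & 0x1U) != 0U, status, read_status);
    }
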
@@ -714,12 +714,12 @@ static void fault_status_analysis(uint32_t status)
static void fault_record_analysis(__unused uint64_t low, uint64_t high)
{
-if (!DMA_FRCD_UP_F(high))
+if (DMA_FRCD_UP_F(high) == 0U)
return;
/* currently skip PASID related parsing */
pr_info("%s, Reason: 0x%x, SID: %x.%x.%x @0x%llx",
-DMA_FRCD_UP_T(high) ? "Read/Atomic" : "Write",
+(DMA_FRCD_UP_T(high) != 0U) ? "Read/Atomic" : "Write",
DMA_FRCD_UP_FR(high),
DMA_FRCD_UP_SID(high) >> 8,
(DMA_FRCD_UP_SID(high) >> 3) & 0x1f,
@@ -749,7 +749,7 @@ static int dmar_fault_handler(int irq, void *data)
fault_status_analysis(fsr);
#endif
-while (DMA_FSTS_PPF(fsr)) {
+while (DMA_FSTS_PPF(fsr) != 0U) {
loop++;
index = DMA_FSTS_FRI(fsr);
record_reg_offset = dmar_uint->cap_fault_reg_offset
@@ -792,7 +792,7 @@ static int dmar_setup_interrupt(struct dmar_drhd_rt *dmar_uint)
{
uint32_t vector;
-if (dmar_uint->dmar_irq_node) {
+if (dmar_uint->dmar_irq_node != NULL) {
dev_dbg(ACRN_DBG_IOMMU, "%s: irq already setup", __func__);
return 0;
}
@@ -802,7 +802,7 @@ static int dmar_setup_interrupt(struct dmar_drhd_rt *dmar_uint)
dmar_uint, true, false,
"dmar_fault_event");
-if (!dmar_uint->dmar_irq_node) {
+if (dmar_uint->dmar_irq_node == NULL) {
pr_err("%s: fail to setup interrupt", __func__);
return 1;
}
@@ -832,7 +832,7 @@ static void dmar_enable(struct dmar_drhd_rt *dmar_uint)
static void dmar_disable(struct dmar_drhd_rt *dmar_uint)
{
-if (dmar_uint->gcmd & DMA_GCMD_TE)
+if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0U)
dmar_disable_translation(dmar_uint);
dmar_fault_event_mask(dmar_uint);
@@ -846,7 +846,7 @@ struct iommu_domain *create_iommu_domain(int vm_id, uint64_t translation_table,
/* TODO: check if a domain with the vm_id exists */
-if (!translation_table) {
+if (translation_table == 0) {
pr_err("translation table is NULL");
return NULL;
}
@@ -882,7 +882,7 @@ struct iommu_domain *create_iommu_domain(int vm_id, uint64_t translation_table,
int destroy_iommu_domain(struct iommu_domain *domain)
{
-if (!domain)
+if (domain == NULL)
return 1;
/* currently only support ept */
@@ -913,11 +913,11 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
uint64_t upper = 0;
uint64_t lower = 0;
-if (!domain)
+if (domain == NULL)
return 1;
dmar_uint = device_to_dmaru(segment, bus, devfun);
-if (!dmar_uint) {
+if (dmar_uint == NULL) {
pr_err("no dmar unit found for device:0x%x:%x.%x",
bus, devfun >> 3, devfun & 0x7);
return 1;
@@ -938,10 +938,10 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
if (dmar_uint->root_table_addr == 0) {
void *root_table_vaddr = alloc_paging_struct();
-if (root_table_vaddr) {
+if (root_table_vaddr != NULL) {
dmar_uint->root_table_addr = HVA2HPA(root_table_vaddr);
} else {
ASSERT(0, "failed to allocate root table!");
ASSERT(false, "failed to allocate root table!");
return 1;
}
}
@@ -950,10 +950,10 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
root_entry = (struct dmar_root_entry *)&root_table[bus * 2];
-if (!DMAR_GET_BITSLICE(root_entry->lower, ROOT_ENTRY_LOWER_PRESENT)) {
+if (DMAR_GET_BITSLICE(root_entry->lower, ROOT_ENTRY_LOWER_PRESENT) == 0U) {
void *vaddr = alloc_paging_struct();
-if (vaddr) {
+if (vaddr != NULL) {
/* create context table for the bus if not present */
context_table_addr = HVA2HPA(vaddr);
@@ -969,7 +969,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
iommu_flush_cache(dmar_uint, root_entry,
sizeof(struct dmar_root_entry));
} else {
ASSERT(0, "failed to allocate context table!");
ASSERT(false, "failed to allocate context table!");
return 1;
}
} else {
@@ -983,7 +983,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
context_entry = (struct dmar_context_entry *)&context_table[devfun * 2];
/* the context entry should not be present */
-if (DMAR_GET_BITSLICE(context_entry->lower, CTX_ENTRY_LOWER_P)) {
+if (DMAR_GET_BITSLICE(context_entry->lower, CTX_ENTRY_LOWER_P) != 0U) {
pr_err("%s: context entry@0x%llx (Lower:%x) ",
__func__, context_entry, context_entry->lower);
pr_err("already present for %x:%x.%x",
@@ -995,7 +995,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
upper = 0;
lower = 0;
if (domain->is_host) {
-if (iommu_ecap_pt(dmar_uint->ecap)) {
+if (iommu_ecap_pt(dmar_uint->ecap) != 0U) {
/* When the Translation-type (T) field indicates
* pass-through processing (10b), AW field must be
* programmed to indicate the largest AGAW value
@@ -1042,11 +1042,11 @@ remove_iommu_device(struct iommu_domain *domain, uint16_t segment,
struct dmar_root_entry *root_entry;
struct dmar_context_entry *context_entry;
-if (!domain)
+if (domain == NULL)
return 1;
dmar_uint = device_to_dmaru(segment, bus, devfun);
-if (!dmar_uint) {
+if (dmar_uint == NULL) {
pr_err("no dmar unit found for device:0x%x:%x",
bus, devfun);
return 1;
@@ -1085,7 +1085,7 @@ remove_iommu_device(struct iommu_domain *domain, uint16_t segment,
int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,
uint8_t devfun)
{
-if (!domain)
+if (domain == NULL)
return 1;
/* TODO: check if the device assigned */
@@ -1098,7 +1098,7 @@ int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,
int unassign_iommu_device(struct iommu_domain *domain, uint8_t bus,
uint8_t devfun)
{
-if (!domain)
+if (domain == NULL)
return 1;
/* TODO: check if the device assigned */
@@ -1223,7 +1223,7 @@ int init_iommu(void)
spinlock_init(&domain_lock);
-if (register_hrhd_units())
+if (register_hrhd_units() != 0)
return -1;
host_domain = create_host_domain();