HV: Fix missing braces for MISRA C violations

Patch 5 of 7
Added the missing braces so that the MISRA C violations of
rules 11S and 12S are fixed.

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
Author:    Arindam Roy
Date:      2018-07-12 15:02:33 -07:00
Committer: wenlingz
Parent:    d16d9e5751
Commit:    4aa6cdacf7

8 changed files with 249 additions and 130 deletions
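Every hunk below applies the same transformation: a single-statement `if`, `else`, or loop body is wrapped in explicit braces. A minimal sketch of the before/after shape, with hypothetical cond/do_work names standing in for the real conditions and calls:

	/* before: single-statement body, flagged by the checker */
	if (cond)
		do_work();

	/* after: body is an explicit compound statement */
	if (cond) {
		do_work();
	}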


@@ -47,22 +47,25 @@ void exec_softirq(void)
 	if (cpu_id >= phys_cpu_num)
 		return;
-	if (((*bitmap) & SOFTIRQ_MASK) == 0UL)
+	if (((*bitmap) & SOFTIRQ_MASK) == 0UL) {
 		return;
+	}
 	/* Disable softirq
 	 * SOFTIRQ_ATOMIC bit = 0 means softirq already in execution
 	 */
-	if (!bitmap_test_and_clear(SOFTIRQ_ATOMIC, bitmap))
+	if (!bitmap_test_and_clear(SOFTIRQ_ATOMIC, bitmap)) {
 		return;
+	}
 again:
 	CPU_IRQ_ENABLE();
 	while (1) {
 		softirq_id = ffs64(*bitmap);
-		if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX))
+		if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX)) {
 			break;
+		}
 		bitmap_clear(softirq_id, bitmap);
@@ -81,8 +84,9 @@ again:
 	CPU_IRQ_DISABLE();
-	if (((*bitmap) & SOFTIRQ_MASK) != 0U)
+	if (((*bitmap) & SOFTIRQ_MASK) != 0U) {
 		goto again;
+	}
 	enable_softirq(cpu_id);
 }


@@ -16,8 +16,9 @@ uint32_t tsc_khz = 0U;
 static void run_timer(struct timer *timer)
 {
 	/* deadline = 0 means stop timer, we should skip */
-	if ((timer->func != NULL) && timer->fire_tsc != 0UL)
+	if ((timer->func != NULL) && timer->fire_tsc != 0UL) {
 		timer->func(timer->priv_data);
+	}
 	TRACE_2L(TRACE_TIMER_ACTION_PCKUP, timer->fire_tsc, 0);
 }
@@ -54,17 +55,20 @@ static void __add_timer(struct per_cpu_timers *cpu_timer,
 	prev = &cpu_timer->timer_list;
 	list_for_each(pos, &cpu_timer->timer_list) {
 		tmp = list_entry(pos, struct timer, node);
-		if (tmp->fire_tsc < tsc)
+		if (tmp->fire_tsc < tsc) {
 			prev = &tmp->node;
-		else
+		}
+		else {
 			break;
+		}
 	}
 	list_add(&timer->node, prev);
-	if (need_update != NULL)
+	if (need_update != NULL) {
 		/* update the physical timer if we're on the timer_list head */
 		*need_update = (prev == &cpu_timer->timer_list);
+	}
 }
 int add_timer(struct timer *timer)
@@ -73,20 +77,23 @@ int add_timer(struct timer *timer)
 	uint16_t pcpu_id;
 	bool need_update;
-	if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0UL)
+	if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0UL) {
 		return -EINVAL;
+	}
 	/* limit minimal periodic timer cycle period */
-	if (timer->mode == TICK_MODE_PERIODIC)
+	if (timer->mode == TICK_MODE_PERIODIC) {
 		timer->period_in_cycle = max(timer->period_in_cycle,
 			us_to_ticks(MIN_TIMER_PERIOD_US));
+	}
 	pcpu_id = get_cpu_id();
 	cpu_timer = &per_cpu(cpu_timers, pcpu_id);
 	__add_timer(cpu_timer, timer, &need_update);
-	if (need_update)
+	if (need_update) {
 		update_physical_timer(cpu_timer);
+	}
 	TRACE_2L(TRACE_TIMER_ACTION_ADDED, timer->fire_tsc, 0);
 	return 0;
@@ -95,8 +102,9 @@ int add_timer(struct timer *timer)
 void del_timer(struct timer *timer)
 {
-	if ((timer != NULL) && !list_empty(&timer->node))
+	if ((timer != NULL) && !list_empty(&timer->node)) {
 		list_del_init(&timer->node);
+	}
 }
 static int request_timer_irq(uint16_t pcpu_id,
@@ -105,8 +113,9 @@ static int request_timer_irq(uint16_t pcpu_id,
 {
 	struct dev_handler_node *node = NULL;
-	if (pcpu_id >= phys_cpu_num)
+	if (pcpu_id >= phys_cpu_num) {
 		return -EINVAL;
+	}
 	if (per_cpu(timer_node, pcpu_id) != NULL) {
 		pr_err("CPU%d timer isr already added", pcpu_id);
@@ -165,8 +174,9 @@ void timer_cleanup(void)
 {
 	uint16_t pcpu_id = get_cpu_id();
-	if (per_cpu(timer_node, pcpu_id) != NULL)
+	if (per_cpu(timer_node, pcpu_id) != NULL) {
 		unregister_handler_common(per_cpu(timer_node, pcpu_id));
+	}
 	per_cpu(timer_node, pcpu_id) = NULL;
 }
@@ -202,8 +212,9 @@ void timer_softirq(uint16_t pcpu_id)
 				timer->fire_tsc += timer->period_in_cycle;
 				__add_timer(cpu_timer, timer, NULL);
 			}
-		} else
+		} else {
 			break;
+		}
 	}
 	/* update nearest timer */
@@ -276,9 +287,10 @@ static uint64_t native_calibrate_tsc(void)
 		cpuid(0x15, &eax_denominator, &ebx_numerator,
 			&ecx_hz, &reserved);
-		if (eax_denominator != 0U && ebx_numerator != 0U)
+		if (eax_denominator != 0U && ebx_numerator != 0U) {
 			return (uint64_t) ecx_hz *
 				ebx_numerator / eax_denominator;
+		}
 	}
 	return 0;
@@ -288,8 +300,9 @@ void calibrate_tsc(void)
 {
 	uint64_t tsc_hz;
 	tsc_hz = native_calibrate_tsc();
-	if (tsc_hz == 0U)
+	if (tsc_hz == 0U) {
 		tsc_hz = pit_calibrate_tsc(CAL_MS);
+	}
 	tsc_khz = (uint32_t)(tsc_hz / 1000UL);
 	printf("%s, tsc_khz=%lu\n", __func__, tsc_khz);
 }


@@ -86,10 +86,12 @@ int exec_vmxon_instr(uint16_t pcpu_id)
 	struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
 	/* Allocate page aligned memory for VMXON region */
-	if (per_cpu(vmxon_region_pa, pcpu_id) == 0)
+	if (per_cpu(vmxon_region_pa, pcpu_id) == 0) {
 		vmxon_region_va = alloc_page();
-	else
+	}
+	else {
 		vmxon_region_va = HPA2HVA(per_cpu(vmxon_region_pa, pcpu_id));
+	}
 	if (vmxon_region_va != NULL) {
 		/* Initialize vmxon page with revision id from IA32 VMX BASIC
@@ -112,9 +114,10 @@ int exec_vmxon_instr(uint16_t pcpu_id)
 			vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
 			ret = exec_vmptrld(&vmcs_pa);
 		}
-	} else
+	} else {
 		pr_err("%s, alloc memory for VMXON region failed\n",
 			__func__);
+	}
 	return ret;
 }
@@ -129,8 +132,9 @@ int vmx_off(uint16_t pcpu_id)
 	if (vcpu != NULL) {
 		vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
 		ret = exec_vmclear((void *)&vmcs_pa);
-		if (ret != 0)
+		if (ret != 0) {
 			return ret;
+		}
 	}
 	asm volatile ("vmxoff" : : : "memory");
@@ -143,8 +147,9 @@ int exec_vmclear(void *addr)
 	uint64_t rflags;
 	int status = 0;
-	if (addr == NULL)
+	if (addr == NULL) {
 		status = -EINVAL;
+	}
 	ASSERT(status == 0, "Incorrect arguments");
 	asm volatile (
@@ -156,8 +161,9 @@ int exec_vmclear(void *addr)
 		: "%rax", "cc", "memory");
 	/* if carry and zero flags are clear operation success */
-	if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U)
+	if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U) {
 		status = -EINVAL;
+	}
 	return status;
 }
@@ -167,8 +173,9 @@ int exec_vmptrld(void *addr)
 	uint64_t rflags;
 	int status = 0;
-	if (addr == NULL)
+	if (addr == NULL) {
 		status = -EINVAL;
+	}
 	ASSERT(status == 0, "Incorrect arguments");
 	asm volatile (
@@ -181,8 +188,9 @@ int exec_vmptrld(void *addr)
 		: "%rax", "cc");
 	/* if carry and zero flags are clear operation success */
-	if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U)
+	if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U) {
 		status = -EINVAL;
+	}
 	return status;
 }
@@ -664,13 +672,16 @@ static void init_guest_state(struct vcpu *vcpu)
 	if (vcpu_mode == CPU_MODE_REAL) {
 		/* RIP is set here */
 		if (is_vcpu_bsp(vcpu)) {
-			if ((uint64_t)vcpu->entry_addr < 0x100000UL)
+			if ((uint64_t)vcpu->entry_addr < 0x100000UL) {
 				value32 = (uint64_t)vcpu->entry_addr & 0x0FUL;
-			else
+			}
+			else {
 				value32 = 0x0000FFF0U;
+			}
 		}
-	} else
+	} else {
 		value32 = (uint32_t)((uint64_t)vcpu->entry_addr);
+	}
 	pr_dbg("GUEST RIP on VMEntry %x ", value32);
 	exec_vmwrite(field, value32);
@@ -706,8 +717,9 @@ static void init_guest_state(struct vcpu *vcpu)
 	value32 = gdtb.limit;
-	if (((gdtb.base >> 47) & 0x1UL) != 0UL)
+	if (((gdtb.base >> 47) & 0x1UL) != 0UL) {
 		gdtb.base |= 0xffff000000000000UL;
+	}
 	base = gdtb.base;
@@ -741,8 +753,9 @@ static void init_guest_state(struct vcpu *vcpu)
 	/* Limit */
 	limit = idtb.limit;
-	if (((idtb.base >> 47) & 0x1UL) != 0UL)
+	if (((idtb.base >> 47) & 0x1UL) != 0UL) {
 		idtb.base |= 0xffff000000000000UL;
+	}
 	/* Base */
 	base = idtb.base;
@@ -835,10 +848,12 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_GS_LIMIT: 0x%x ", limit);
 	/* Access */
-	if (vcpu_mode == CPU_MODE_REAL)
+	if (vcpu_mode == CPU_MODE_REAL) {
 		value32 = REAL_MODE_DATA_SEG_AR;
-	else /* same value for protected mode and long mode */
+	}
+	else { /* same value for protected mode and long mode */
 		value32 = PROTECTED_MODE_DATA_SEG_AR;
+	}
 	field = VMX_GUEST_ES_ATTR;
 	exec_vmwrite(field, value32);
@@ -1046,8 +1061,9 @@ static void init_host_state(__unused struct vcpu *vcpu)
 	asm volatile ("sgdt %0":"=m"(gdtb)::"memory");
 	value32 = gdtb.limit;
-	if (((gdtb.base >> 47) & 0x1UL) != 0UL)
+	if (((gdtb.base >> 47) & 0x1UL) != 0UL) {
 		gdtb.base |= 0xffff000000000000UL;
+	}
 	/* Set up the guest and host GDTB base fields with current GDTB base */
 	field = VMX_HOST_GDTR_BASE;
@@ -1056,8 +1072,9 @@ static void init_host_state(__unused struct vcpu *vcpu)
 	/* TODO: Should guest TR point to host TR ? */
 	trbase = gdtb.base + tr_sel;
-	if (((trbase >> 47) & 0x1UL) != 0UL)
+	if (((trbase >> 47) & 0x1UL) != 0UL) {
 		trbase |= 0xffff000000000000UL;
+	}
 	/* SS segment override */
 	asm volatile ("mov %0,%%rax\n"
@@ -1082,8 +1099,9 @@ static void init_host_state(__unused struct vcpu *vcpu)
 	/* Obtain the current interrupt descriptor table base */
 	asm volatile ("sidt %0":"=m"(idtb)::"memory");
 	/* base */
-	if (((idtb.base >> 47) & 0x1UL) != 0UL)
+	if (((idtb.base >> 47) & 0x1UL) != 0UL) {
 		idtb.base |= 0xffff000000000000UL;
+	}
 	field = VMX_HOST_IDTR_BASE;
 	exec_vmwrite(field, idtb.base);
@@ -1235,20 +1253,23 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 			VMX_PROCBASED_CTLS2_RDTSCP |
 			VMX_PROCBASED_CTLS2_UNRESTRICT);
-	if (vcpu->arch_vcpu.vpid != 0U)
+	if (vcpu->arch_vcpu.vpid != 0U) {
 		value32 |= VMX_PROCBASED_CTLS2_VPID;
-	else
+	} else {
 		value32 &= ~VMX_PROCBASED_CTLS2_VPID;
+	}
 	if (is_vapic_supported()) {
 		value32 |= VMX_PROCBASED_CTLS2_VAPIC;
-		if (is_vapic_virt_reg_supported())
+		if (is_vapic_virt_reg_supported()) {
 			value32 |= VMX_PROCBASED_CTLS2_VAPIC_REGS;
+		}
-		if (is_vapic_intr_delivery_supported())
+		if (is_vapic_intr_delivery_supported()) {
 			value32 |= VMX_PROCBASED_CTLS2_VIRQ;
-		else
+		}
+		else {
 			/*
 			 * This field exists only on processors that support
 			 * the 1-setting of the "use TPR shadow"
@@ -1258,6 +1279,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 			 * - pg 2904 24.6.8
 			 */
 			exec_vmwrite(VMX_TPR_THRESHOLD, 0);
+		}
 	}
 	if (cpu_has_cap(X86_FEATURE_OSXSAVE)) {
@@ -1292,10 +1314,12 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	}
 	/* Check for EPT support */
-	if (is_ept_supported())
+	if (is_ept_supported()) {
 		pr_dbg("EPT is supported");
-	else
+	}
+	else {
 		pr_err("Error: EPT is not supported");
+	}
 	/* Load EPTP execution control
 	 * TODO: introduce API to make this data driven based
@@ -1380,8 +1404,9 @@ static void init_entry_ctrl(__unused struct vcpu *vcpu)
 	 * IA32_PAT and IA32_EFER
 	 */
 	value32 = msr_read(MSR_IA32_VMX_ENTRY_CTLS);
-	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT)
+	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		value32 |= (VMX_ENTRY_CTLS_IA32E_MODE);
+	}
 	value32 |= (VMX_ENTRY_CTLS_LOAD_EFER |
 		VMX_ENTRY_CTLS_LOAD_PAT);
@@ -1540,8 +1565,9 @@ int init_vmcs(struct vcpu *vcpu)
 	int status = 0;
 	uint64_t vmcs_pa;
-	if (vcpu == NULL)
+	if (vcpu == NULL) {
 		status = -EINVAL;
+	}
 	ASSERT(status == 0, "Incorrect arguments");
 	/* Log message */
@@ -1569,8 +1595,9 @@ int init_vmcs(struct vcpu *vcpu)
 	init_exit_ctrl(vcpu);
 #ifdef CONFIG_EFI_STUB
-	if (is_vm0(vcpu->vm) && vcpu->pcpu_id == 0)
+	if (is_vm0(vcpu->vm) && vcpu->pcpu_id == 0) {
 		override_uefi_vmcs(vcpu);
+	}
 #endif
 	/* Return status to caller */
 	return status;


@@ -232,8 +232,9 @@ static void iommu_flush_cache(struct dmar_drhd_rt *dmar_uint,
 	uint32_t i;
 	/* if vtd support page-walk coherency, no need to flush cacheline */
-	if (iommu_ecap_c(dmar_uint->ecap) != 0U)
+	if (iommu_ecap_c(dmar_uint->ecap) != 0U) {
 		return;
+	}
 	for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
 		clflush((char *)p + i);
@@ -327,8 +328,9 @@ static uint8_t dmar_uint_get_msagw(struct dmar_drhd_rt *dmar_uint)
 	uint8_t sgaw = iommu_cap_sagaw(dmar_uint->cap);
 	for (i = 4; i >= 0; i--) {
-		if (((1 << i) & sgaw) != 0)
+		if (((1 << i) & sgaw) != 0) {
 			break;
+		}
 	}
 	return (uint8_t)i;
 }
@@ -420,28 +422,34 @@ static void dmar_register_hrhd(struct dmar_drhd_rt *dmar_uint)
 	 * How to guarantee it when EPT is used as second-level
 	 * translation paging structures?
 	 */
-	if (iommu_ecap_sc(dmar_uint->ecap) == 0U)
+	if (iommu_ecap_sc(dmar_uint->ecap) == 0U) {
 		dev_dbg(ACRN_DBG_IOMMU,
 			"dmar uint doesn't support snoop control!");
+	}
 	dmar_uint->max_domain_id = iommu_cap_ndoms(dmar_uint->cap) - 1;
-	if (dmar_uint->max_domain_id > 63U)
+	if (dmar_uint->max_domain_id > 63U) {
 		dmar_uint->max_domain_id = 63U;
+	}
-	if (max_domain_id > dmar_uint->max_domain_id)
+	if (max_domain_id > dmar_uint->max_domain_id) {
 		max_domain_id = dmar_uint->max_domain_id;
+	}
 	/* register operation is considered serial, no lock here */
-	if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U)
+	if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U) {
 		list_add_tail(&dmar_uint->list, &dmar_drhd_units);
-	else
+	}
+	else {
 		list_add(&dmar_uint->list, &dmar_drhd_units);
+	}
 	dmar_hdrh_unit_count++;
-	if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0)
+	if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0) {
 		dmar_disable_translation(dmar_uint);
+	}
 }
 static struct dmar_drhd_rt *device_to_dmaru(uint16_t segment, uint8_t bus,
@@ -454,20 +462,23 @@ static struct dmar_drhd_rt *device_to_dmaru(uint16_t segment, uint8_t bus,
 	list_for_each(pos, &dmar_drhd_units) {
 		dmar_uint = list_entry(pos, struct dmar_drhd_rt, list);
-		if (dmar_uint->drhd->segment != segment)
+		if (dmar_uint->drhd->segment != segment) {
 			continue;
+		}
 		for (i = 0U; i < dmar_uint->drhd->dev_cnt; i++) {
 			if ((dmar_uint->drhd->devices[i].bus == bus) &&
-				(dmar_uint->drhd->devices[i].devfun == devfun))
+				(dmar_uint->drhd->devices[i].devfun == devfun)) {
 				return dmar_uint;
+			}
 		}
 		/* has the same segment number and
 		 * the dmar unit has INCLUDE_PCI_ALL set
 		 */
-		if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U)
+		if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U) {
 			return dmar_uint;
+		}
 	}
 	return NULL;
@@ -520,8 +531,9 @@ static void dmar_write_buffer_flush(struct dmar_drhd_rt *dmar_uint)
 {
 	uint32_t status;
-	if (iommu_cap_rwbf(dmar_uint->cap) == 0U)
+	if (iommu_cap_rwbf(dmar_uint->cap) == 0U) {
 		return;
+	}
 	IOMMU_LOCK(dmar_uint);
 	iommu_write64(dmar_uint, DMAR_GCMD_REG,
@@ -606,8 +618,9 @@ static void dmar_invalid_iotlb(struct dmar_drhd_rt *dmar_uint,
 		return;
 	}
 	IOMMU_LOCK(dmar_uint);
-	if (addr != 0U)
+	if (addr != 0U) {
 		iommu_write64(dmar_uint, dmar_uint->ecap_iotlb_offset, addr);
+	}
 	iommu_write64(dmar_uint, dmar_uint->ecap_iotlb_offset + 8, cmd);
 	/* read upper 32bits to check */
@@ -687,29 +700,37 @@ static void dmar_fault_msi_write(struct dmar_drhd_rt *dmar_uint,
 #if DBG_IOMMU
 static void fault_status_analysis(uint32_t status)
 {
-	if (DMA_FSTS_PFO(status))
+	if (DMA_FSTS_PFO(status)) {
 		pr_info("Primary Fault Overflow");
+	}
-	if (DMA_FSTS_PPF(status))
+	if (DMA_FSTS_PPF(status)) {
 		pr_info("Primary Pending Fault");
+	}
-	if (DMA_FSTS_AFO(status))
+	if (DMA_FSTS_AFO(status)) {
 		pr_info("Advanced Fault Overflow");
+	}
-	if (DMA_FSTS_APF(status))
+	if (DMA_FSTS_APF(status)) {
 		pr_info("Advanced Pending Fault");
+	}
-	if (DMA_FSTS_IQE(status))
+	if (DMA_FSTS_IQE(status)) {
 		pr_info("Invalidation Queue Error");
+	}
-	if (DMA_FSTS_ICE(status))
+	if (DMA_FSTS_ICE(status)) {
 		pr_info("Invalidation Completion Error");
+	}
-	if (DMA_FSTS_ITE(status))
+	if (DMA_FSTS_ITE(status)) {
 		pr_info("Invalidation Time-out Error");
+	}
-	if (DMA_FSTS_PRO(status))
+	if (DMA_FSTS_PRO(status)) {
 		pr_info("Page Request Overflow");
+	}
 }
 #endif
@@ -727,9 +748,10 @@ static void fault_record_analysis(__unused uint64_t low, uint64_t high)
 		DMA_FRCD_UP_SID(high) & 0x7UL,
 		low);
 #if DBG_IOMMU
-	if (iommu_ecap_dt(dmar_uint->ecap))
+	if (iommu_ecap_dt(dmar_uint->ecap)) {
 		pr_info("Address Type: 0x%x",
 			DMA_FRCD_UP_AT(high));
+	}
 #endif
 }
@@ -833,8 +855,9 @@ static void dmar_enable(struct dmar_drhd_rt *dmar_uint)
 static void dmar_disable(struct dmar_drhd_rt *dmar_uint)
 {
-	if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0U)
+	if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0U) {
 		dmar_disable_translation(dmar_uint);
+	}
 	dmar_fault_event_mask(dmar_uint);
 }
@@ -883,12 +906,14 @@ struct iommu_domain *create_iommu_domain(int vm_id, uint64_t translation_table,
 int destroy_iommu_domain(struct iommu_domain *domain)
 {
-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}
 	/* currently only support ept */
-	if (!domain->is_tt_ept)
+	if (!domain->is_tt_ept) {
 		ASSERT(false, "translation_table is not EPT!");
+	}
 	/* TODO: check if any device assigned to this domain */
@@ -914,8 +939,9 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
 	uint64_t upper = 0UL;
 	uint64_t lower = 0UL;
-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}
 	dmar_uint = device_to_dmaru(segment, bus, devfun);
 	if (dmar_uint == NULL) {
@@ -1006,9 +1032,10 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
 				dmar_uint->cap_msagaw);
 			lower = DMAR_SET_BITSLICE(lower, CTX_ENTRY_LOWER_TT,
 				DMAR_CTX_TT_PASSTHROUGH);
-		} else
+		} else {
 			ASSERT(false,
 				"dmaru doesn't support trans passthrough");
+		}
 	} else {
 		/* TODO: add Device TLB support */
 		upper =
@@ -1043,8 +1070,9 @@ remove_iommu_device(struct iommu_domain *domain, uint16_t segment,
 	struct dmar_root_entry *root_entry;
 	struct dmar_context_entry *context_entry;
-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}
 	dmar_uint = device_to_dmaru(segment, bus, devfun);
 	if (dmar_uint == NULL) {
@@ -1086,8 +1114,9 @@ remove_iommu_device(struct iommu_domain *domain, uint16_t segment,
 int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,
 	uint8_t devfun)
 {
-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}
 	/* TODO: check if the device assigned */
@@ -1099,8 +1128,9 @@ int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,
 int unassign_iommu_device(struct iommu_domain *domain, uint8_t bus,
 	uint8_t devfun)
 {
-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}
 	/* TODO: check if the device assigned */
@@ -1116,11 +1146,13 @@ void enable_iommu(void)
 	list_for_each(pos, &dmar_drhd_units) {
 		dmar_uint = list_entry(pos, struct dmar_drhd_rt, list);
-		if (!dmar_uint->drhd->ignore)
+		if (!dmar_uint->drhd->ignore) {
 			dmar_enable(dmar_uint);
-		else
+		}
+		else {
 			dev_dbg(ACRN_DBG_IOMMU, "ignore dmar_uint @0x%x",
 				dmar_uint->drhd->reg_base_addr);
+		}
 	}
 }
@@ -1149,8 +1181,9 @@ void suspend_iommu(void)
 	list_for_each(pos, &dmar_drhd_units) {
 		dmar_unit = list_entry(pos, struct dmar_drhd_rt, list);
-		if (dmar_unit->drhd->ignore)
+		if (dmar_unit->drhd->ignore) {
 			continue;
+		}
 		/* flush */
 		dmar_write_buffer_flush(dmar_unit);
@@ -1187,8 +1220,9 @@ void resume_iommu(void)
 	list_for_each(pos, &dmar_drhd_units) {
 		dmar_unit = list_entry(pos, struct dmar_drhd_rt, list);
-		if (dmar_unit->drhd->ignore)
+		if (dmar_unit->drhd->ignore) {
 			continue;
+		}
 		/* set root table */
 		dmar_set_root_table(dmar_unit);
@@ -1228,8 +1262,9 @@ int init_iommu(void)
 	spinlock_init(&domain_lock);
-	if (register_hrhd_units() != 0)
+	if (register_hrhd_units() != 0) {
 		return -1;
+	}
 	host_domain = create_host_domain();
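
For background, the brace requirement these changes satisfy guards against a classic maintenance hazard: a statement later added under an unbraced `if` is indented like the body but executes unconditionally. A small illustration, with hypothetical needs_flush/flush_cache/update_stats names not taken from the patch:

	/* unbraced: update_stats() always runs, despite the indentation */
	if (needs_flush)
		flush_cache();
		update_stats();

	/* braced: both calls are clearly conditional */
	if (needs_flush) {
		flush_cache();
		update_stats();
	}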