mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-09-08 12:19:06 +00:00
HV: Logical conjunction needs brackets
The bracket is required when the level of precedence of the operators is less than 13. Add the bracket to logical conjunctions. The commit applies the rule to the affected files. Signed-off-by: Yang, Yu-chu <yu-chu.yang@intel.com> Reviewed-by: Junjie Mao <junjie.mao@intel.com> Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
@@ -187,7 +187,7 @@ ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
|
||||
static bool ptdev_hv_owned_intx(struct vm *vm, struct ptdev_intx_info *info)
|
||||
{
|
||||
/* vm0 pin 4 (uart) is owned by hypervisor under debug version */
|
||||
if (is_vm0(vm) && (vm->vuart != NULL) && info->virt_pin == 4U) {
|
||||
if (is_vm0(vm) && (vm->vuart != NULL) && (info->virt_pin == 4U)) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
@@ -210,7 +210,7 @@ static void ptdev_build_physical_msi(struct vm *vm, struct ptdev_msi_info *info,
|
||||
|
||||
/* get physical delivery mode */
|
||||
delmode = info->vmsi_data & APIC_DELMODE_MASK;
|
||||
if (delmode != APIC_DELMODE_FIXED && delmode != APIC_DELMODE_LOWPRIO) {
|
||||
if ((delmode != APIC_DELMODE_FIXED) && (delmode != APIC_DELMODE_LOWPRIO)) {
|
||||
delmode = APIC_DELMODE_LOWPRIO;
|
||||
}
|
||||
|
||||
@@ -659,7 +659,7 @@ int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
|
||||
}
|
||||
|
||||
/* handle destroy case */
|
||||
if (is_entry_active(entry) && info->vmsi_data == 0U) {
|
||||
if (is_entry_active(entry) && (info->vmsi_data == 0U)) {
|
||||
info->pmsi_data = 0U;
|
||||
ptdev_deactivate_entry(entry);
|
||||
goto END;
|
||||
|
@@ -87,7 +87,7 @@ static inline bool get_monitor_cap(void)
|
||||
* in hypervisor, but still expose it to the guests and
|
||||
* let them handle it correctly
|
||||
*/
|
||||
if (boot_cpu_data.family != 0x6U || boot_cpu_data.model != 0x5cU) {
|
||||
if ((boot_cpu_data.family != 0x6U) || (boot_cpu_data.model != 0x5cU)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@@ -28,7 +28,7 @@ static inline struct vcpuid_entry *find_vcpuid_entry(struct vcpu *vcpu,
|
||||
if (tmp->leaf < leaf) {
|
||||
continue;
|
||||
} else if (tmp->leaf == leaf) {
|
||||
if ((tmp->flags & CPUID_CHECK_SUBLEAF) != 0U &&
|
||||
if (((tmp->flags & CPUID_CHECK_SUBLEAF) != 0U) &&
|
||||
(tmp->subleaf != subleaf)) {
|
||||
continue;
|
||||
}
|
||||
@@ -182,7 +182,7 @@ int set_vcpuid_entries(struct vm *vm)
|
||||
|
||||
for (i = 1U; i <= limit; i++) {
|
||||
/* cpuid 1/0xb is percpu related */
|
||||
if (i == 1U || i == 0xbU) {
|
||||
if ((i == 1U) || (i == 0xbU)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -213,16 +213,16 @@ int set_vcpuid_entries(struct vm *vm)
|
||||
case 0x04U:
|
||||
case 0x0dU:
|
||||
for (j = 0U; ; j++) {
|
||||
if (i == 0x0dU && j == 64U) {
|
||||
if ((i == 0x0dU) && (j == 64U)) {
|
||||
break;
|
||||
}
|
||||
|
||||
init_vcpuid_entry(vm, i, j,
|
||||
CPUID_CHECK_SUBLEAF, &entry);
|
||||
if (i == 0x04U && entry.eax == 0U) {
|
||||
if ((i == 0x04U) && (entry.eax == 0U)) {
|
||||
break;
|
||||
}
|
||||
if (i == 0x0dU && entry.eax == 0U) {
|
||||
if ((i == 0x0dU) && (entry.eax == 0U)) {
|
||||
continue;
|
||||
}
|
||||
result = set_vcpuid_entry(vm, &entry);
|
||||
@@ -281,7 +281,7 @@ void guest_cpuid(struct vcpu *vcpu,
|
||||
uint32_t subleaf = *ecx;
|
||||
|
||||
/* vm related */
|
||||
if (leaf != 0x1U && leaf != 0xbU && leaf != 0xdU) {
|
||||
if ((leaf != 0x1U) && (leaf != 0xbU) && (leaf != 0xdU)) {
|
||||
struct vcpuid_entry *entry =
|
||||
find_vcpuid_entry(vcpu, leaf, subleaf);
|
||||
|
||||
|
@@ -21,8 +21,8 @@ static uint64_t find_next_table(uint32_t table_offset, void *table_base)
|
||||
+ (table_offset * IA32E_COMM_ENTRY_SIZE));
|
||||
|
||||
/* If bit 7 is set, entry is not a subtable. */
|
||||
if ((table_entry & IA32E_PDPTE_PS_BIT) != 0U
|
||||
|| (table_entry & IA32E_PDE_PS_BIT) != 0U) {
|
||||
if (((table_entry & IA32E_PDPTE_PS_BIT) != 0U)
|
||||
|| ((table_entry & IA32E_PDE_PS_BIT) != 0U)) {
|
||||
return sub_table_addr;
|
||||
}
|
||||
|
||||
|
@@ -490,7 +490,7 @@ void register_io_emulation_handler(struct vm *vm, struct vm_io_range *range,
|
||||
{
|
||||
struct vm_io_handler *handler = NULL;
|
||||
|
||||
if (io_read_fn_ptr == NULL || io_write_fn_ptr == NULL) {
|
||||
if ((io_read_fn_ptr == NULL) || (io_write_fn_ptr == NULL)) {
|
||||
pr_err("Invalid IO handler.");
|
||||
return;
|
||||
}
|
||||
@@ -512,7 +512,7 @@ int register_mmio_emulation_handler(struct vm *vm,
|
||||
int status = -EINVAL;
|
||||
struct mem_io_node *mmio_node;
|
||||
|
||||
if (vm->hw.created_vcpus > 0U && vm->hw.vcpu_array[0]->launched) {
|
||||
if ((vm->hw.created_vcpus > 0U) && vm->hw.vcpu_array[0]->launched) {
|
||||
ASSERT(false, "register mmio handler after vm launched");
|
||||
return status;
|
||||
}
|
||||
|
@@ -174,7 +174,7 @@ irq_desc_append_dev(struct irq_desc *desc, void *node, bool share)
|
||||
if (desc->irq_handler == NULL) {
|
||||
desc->irq_handler = common_handler_edge;
|
||||
}
|
||||
} else if (!share || desc->used == IRQ_ASSIGNED_NOSHARE) {
|
||||
} else if (!share || (desc->used == IRQ_ASSIGNED_NOSHARE)) {
|
||||
/* dev node added failed */
|
||||
added = false;
|
||||
} else {
|
||||
@@ -259,8 +259,8 @@ common_register_handler(uint32_t irq_arg,
|
||||
OUT:
|
||||
if (added) {
|
||||
/* it is safe to call irq_desc_alloc_vector multiple times*/
|
||||
if (info->vector >= VECTOR_FIXED_START &&
|
||||
info->vector <= VECTOR_FIXED_END) {
|
||||
if ((info->vector >= VECTOR_FIXED_START) &&
|
||||
(info->vector <= VECTOR_FIXED_END)) {
|
||||
irq_desc_set_vector(irq, info->vector);
|
||||
} else if (info->vector > NR_MAX_VECTOR) {
|
||||
irq_desc_alloc_vector(irq);
|
||||
@@ -324,7 +324,7 @@ void irq_desc_try_free_vector(uint32_t irq)
|
||||
spinlock_rflags;
|
||||
|
||||
/* legacy irq's vector is reserved and should not be freed */
|
||||
if (irq >= NR_IRQS || irq < NR_LEGACY_IRQ) {
|
||||
if ((irq >= NR_IRQS) || (irq < NR_LEGACY_IRQ)) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -419,7 +419,7 @@ void dispatch_interrupt(struct intr_excp_ctx *ctx)
|
||||
goto ERR;
|
||||
}
|
||||
|
||||
if (desc->used == IRQ_NOT_ASSIGNED || desc->irq_handler == NULL) {
|
||||
if ((desc->used == IRQ_NOT_ASSIGNED) || (desc->irq_handler == NULL)) {
|
||||
/* mask irq if possible */
|
||||
goto ERR;
|
||||
}
|
||||
@@ -681,7 +681,7 @@ pri_register_handler(uint32_t irq,
|
||||
{
|
||||
struct irq_request_info info;
|
||||
|
||||
if (vector < VECTOR_FIXED_START || vector > VECTOR_FIXED_END) {
|
||||
if ((vector < VECTOR_FIXED_START) || (vector > VECTOR_FIXED_END)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -718,8 +718,8 @@ void get_cpu_interrupt_info(char *str_arg, int str_max)
|
||||
for (irq = 0U; irq < NR_IRQS; irq++) {
|
||||
desc = &irq_desc_array[irq];
|
||||
vector = irq_to_vector(irq);
|
||||
if (desc->used != IRQ_NOT_ASSIGNED &&
|
||||
vector != VECTOR_INVALID) {
|
||||
if ((desc->used != IRQ_NOT_ASSIGNED) &&
|
||||
(vector != VECTOR_INVALID)) {
|
||||
len = snprintf(str, size, "\r\n%d\t0x%X", irq, vector);
|
||||
size -= len;
|
||||
str += len;
|
||||
|
@@ -235,7 +235,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
|
||||
uint32_t table_offset;
|
||||
uint32_t mapped_size;
|
||||
|
||||
if (table_base == NULL || table_level >= IA32E_UNKNOWN) {
|
||||
if ((table_base == NULL) || (table_level >= IA32E_UNKNOWN)) {
|
||||
/* Shouldn't go here */
|
||||
ASSERT(false, "Incorrect Arguments. Failed to map region");
|
||||
return 0;
|
||||
@@ -281,7 +281,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
|
||||
|
||||
/* If not a EPT entry, see if the PAT bit is set for PDPT entry
|
||||
*/
|
||||
if ((table_type == PTT_HOST) && (attr & IA32E_PDPTE_PAT_BIT) != 0U) {
|
||||
if ((table_type == PTT_HOST) && ((attr & IA32E_PDPTE_PAT_BIT) != 0U)) {
|
||||
/* The PAT bit is set; Clear it and set the page table
|
||||
* PAT bit instead
|
||||
*/
|
||||
@@ -409,7 +409,7 @@ static int get_table_entry(void *addr, void *table_base,
|
||||
{
|
||||
uint32_t table_offset;
|
||||
|
||||
if (table_base == NULL || table_level >= IA32E_UNKNOWN) {
|
||||
if ((table_base == NULL) || (table_level >= IA32E_UNKNOWN)) {
|
||||
ASSERT(false, "Incorrect Arguments");
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -434,8 +434,8 @@ static void *walk_paging_struct(void *addr, void *table_base,
|
||||
*/
|
||||
void *sub_table_addr = (table_level == IA32E_PT) ? table_base : NULL;
|
||||
|
||||
if (table_base == NULL || table_level >= IA32E_UNKNOWN
|
||||
|| map_params == NULL) {
|
||||
if ((table_base == NULL) || (table_level >= IA32E_UNKNOWN)
|
||||
|| (map_params == NULL)) {
|
||||
ASSERT(false, "Incorrect Arguments");
|
||||
return NULL;
|
||||
}
|
||||
@@ -947,7 +947,7 @@ static int modify_paging(struct mem_map_params *map_params, void *paddr_arg,
|
||||
/* Maybe need to recursive breaking in this case
|
||||
* e.g. 1GB->2MB->4KB
|
||||
*/
|
||||
while ((uint64_t)remaining_size < page_size
|
||||
while (((uint64_t)remaining_size < page_size)
|
||||
|| (!MEM_ALIGNED_CHECK(vaddr, page_size))
|
||||
|| (!MEM_ALIGNED_CHECK(paddr, page_size))) {
|
||||
/* The breaking function return the page size
|
||||
|
@@ -17,7 +17,7 @@ static struct dev_handler_node *timer_node;
|
||||
static void run_timer(struct hv_timer *timer)
|
||||
{
|
||||
/* deadline = 0 means stop timer, we should skip */
|
||||
if ((timer->func != NULL) && timer->fire_tsc != 0UL) {
|
||||
if ((timer->func != NULL) && (timer->fire_tsc != 0UL)) {
|
||||
timer->func(timer->priv_data);
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ int add_timer(struct hv_timer *timer)
|
||||
uint16_t pcpu_id;
|
||||
bool need_update;
|
||||
|
||||
if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0UL) {
|
||||
if ((timer == NULL) || (timer->func == NULL) || (timer->fire_tsc == 0UL)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -169,7 +169,7 @@ static void timer_softirq(uint16_t pcpu_id)
|
||||
timer = list_entry(pos, struct hv_timer, node);
|
||||
/* timer expried */
|
||||
tries--;
|
||||
if (timer->fire_tsc <= current_tsc && tries > 0) {
|
||||
if ((timer->fire_tsc <= current_tsc) && (tries > 0)) {
|
||||
del_timer(timer);
|
||||
|
||||
run_timer(timer);
|
||||
@@ -212,7 +212,7 @@ void timer_cleanup(void)
|
||||
{
|
||||
uint16_t pcpu_id = get_cpu_id();
|
||||
|
||||
if (pcpu_id == BOOT_CPU_ID && timer_node != NULL) {
|
||||
if ((pcpu_id == BOOT_CPU_ID) && (timer_node != NULL)) {
|
||||
unregister_handler_common(timer_node);
|
||||
timer_node = NULL;
|
||||
}
|
||||
@@ -288,7 +288,7 @@ static uint64_t native_calibrate_tsc(void)
|
||||
cpuid(0x15U, &eax_denominator, &ebx_numerator,
|
||||
&ecx_hz, &reserved);
|
||||
|
||||
if (eax_denominator != 0U && ebx_numerator != 0U) {
|
||||
if ((eax_denominator != 0U) && (ebx_numerator != 0U)) {
|
||||
return ((uint64_t) ecx_hz *
|
||||
ebx_numerator) / eax_denominator;
|
||||
}
|
||||
|
@@ -267,7 +267,7 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
|
||||
uint64_t reg;
|
||||
int idx = VM_EXIT_CR_ACCESS_REG_IDX(vcpu->arch_vcpu.exit_qualification);
|
||||
|
||||
ASSERT(idx>=0 && idx<=15, "index out of range");
|
||||
ASSERT((idx>=0) && (idx<=15), "index out of range");
|
||||
reg = vcpu_get_gpreg(vcpu, idx);
|
||||
|
||||
switch ((VM_EXIT_CR_ACCESS_ACCESS_TYPE
|
||||
|
@@ -314,8 +314,8 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
|
||||
|
||||
for (i = 0U; i < 8U; i++) {
|
||||
field = (value >> (i * 8U)) & 0xffUL;
|
||||
if ((PAT_MEM_TYPE_INVALID(field) ||
|
||||
(PAT_FIELD_RSV_BITS & field) != 0UL)) {
|
||||
if (PAT_MEM_TYPE_INVALID(field) ||
|
||||
((PAT_FIELD_RSV_BITS & field) != 0UL)) {
|
||||
pr_err("invalid guest IA32_PAT: 0x%016llx", value);
|
||||
vcpu_inject_gp(vcpu, 0U);
|
||||
return 0;
|
||||
|
Reference in New Issue
Block a user