HV: Fix missing braces for MISRA C violations

Patch 4 of 7.
Added changes to ensure that MISRA C violations are fixed
for rules 11S and 12S (missing braces around single-statement bodies).

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
This commit is contained in:
Arindam Roy 2018-07-12 15:02:16 -07:00 committed by wenlingz
parent 82e0cdb808
commit d16d9e5751
7 changed files with 229 additions and 129 deletions

View File

@ -99,8 +99,9 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
}
for (id = 0U; id < (size_t)(sizeof(long) * 8U); id++) {
if (!bitmap_test_and_set(id, &vmid_bitmap))
if (!bitmap_test_and_set(id, &vmid_bitmap)) {
break;
}
}
vm->attr.id = id;
vm->attr.boot_idx = id;
@ -113,12 +114,14 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
/* For UOS: This VM software information is configure in DM */
if (is_vm0(vm)) {
status = prepare_vm0_memmap_and_e820(vm);
if (status != 0)
if (status != 0) {
goto err2;
}
#ifndef CONFIG_EFI_STUB
status = init_vm0_boot_info(vm);
if (status != 0)
if (status != 0) {
goto err2;
}
#endif
} else {
/* populate UOS vm fields according to vm_desc */
@ -143,8 +146,9 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
if (is_vm0(vm)) {
/* Load pm S state data */
if (vm_load_pm_s_state(vm) == 0)
if (vm_load_pm_s_state(vm) == 0) {
register_pm1ab_handler(vm);
}
/* Create virtual uart */
vm->vuart = vuart_init(vm);
@ -166,8 +170,9 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
vm->sw.io_shared_page = NULL;
status = set_vcpuid_entries(vm);
if (status != 0)
if (status != 0) {
goto err4;
}
vm->state = VM_CREATED;
@ -190,14 +195,16 @@ int shutdown_vm(struct vm *vm)
uint16_t i;
struct vcpu *vcpu = NULL;
if (vm == NULL)
if (vm == NULL) {
return -EINVAL;
}
pause_vm(vm);
/* Only allow shutdown paused vm */
if (vm->state != VM_PAUSED)
if (vm->state != VM_PAUSED) {
return -EINVAL;
}
foreach_vcpu(i, vm, vcpu) {
reset_vcpu(vcpu);
@ -226,13 +233,15 @@ int shutdown_vm(struct vm *vm)
free_io_emulation_resource(vm);
/* Free iommu_domain */
if (vm->iommu_domain != NULL)
if (vm->iommu_domain != NULL) {
destroy_iommu_domain(vm->iommu_domain);
}
bitmap_clear(vm->attr.id, &vmid_bitmap);
if (vm->vpic != NULL)
if (vm->vpic != NULL) {
vpic_cleanup(vm);
}
free(vm->hw.vcpu_array);
@ -267,8 +276,9 @@ void pause_vm(struct vm *vm)
uint16_t i;
struct vcpu *vcpu = NULL;
if (vm->state == VM_PAUSED)
if (vm->state == VM_PAUSED) {
return;
}
vm->state = VM_PAUSED;
@ -320,14 +330,16 @@ int prepare_vm0(void)
struct vm_description *vm_desc = &vm0_desc;
err = create_vm(vm_desc, &vm);
if (err != 0)
if (err != 0) {
return err;
}
/* Allocate all cpus to vm0 at the beginning */
for (i = 0U; i < phys_cpu_num; i++) {
err = prepare_vcpu(vm, i);
if (err != 0)
if (err != 0) {
return err;
}
}
/* start vm0 BSP automatically */
@ -344,8 +356,9 @@ static inline bool vcpu_in_vm_desc(struct vcpu *vcpu,
int i;
for (i = 0; i < vm_desc->vm_hw_num_cores; i++) {
if (vcpu->pcpu_id == vm_desc->vm_hw_logical_core_ids[i])
if (vcpu->pcpu_id == vm_desc->vm_hw_logical_core_ids[i]) {
return true;
}
}
return false;

View File

@ -131,9 +131,10 @@ void init_msr_emulation(struct vcpu *vcpu)
exec_vmwrite64(VMX_MSR_BITMAP_FULL, value64);
pr_dbg("VMX_MSR_BITMAP: 0x%016llx ", value64);
if (!vcpu->guest_msrs)
if (!vcpu->guest_msrs) {
vcpu->guest_msrs =
(uint64_t *)calloc(msrs_count, sizeof(uint64_t));
}
ASSERT(vcpu->guest_msrs != NULL, "");
(void)memset(vcpu->guest_msrs, 0U, msrs_count * sizeof(uint64_t));
@ -313,8 +314,9 @@ int wrmsr_vmexit_handler(struct vcpu *vcpu)
case MSR_IA32_BIOS_UPDT_TRIG:
{
/* We only allow SOS to do uCode update */
if (is_vm0(vcpu->vm))
if (is_vm0(vcpu->vm)) {
acrn_update_ucode(vcpu, v);
}
break;
}
case MSR_IA32_PERF_CTL:

View File

@ -91,10 +91,11 @@ static void vpic_set_pinstate(struct vpic *vpic, uint8_t pin, bool newstate);
static inline bool master_pic(struct vpic *vpic, struct pic *pic)
{
if (pic == &vpic->pic[0])
if (pic == &vpic->pic[0]) {
return true;
else
} else {
return false;
}
}
static inline uint8_t vpic_get_highest_isrpin(struct pic *pic)
@ -109,10 +110,11 @@ static inline uint8_t vpic_get_highest_isrpin(struct pic *pic)
* An IS bit that is masked by an IMR bit will not be
* cleared by a non-specific EOI in Special Mask Mode.
*/
if ((pic->smm != 0U) && (pic->mask & bit) != 0U)
if ((pic->smm != 0U) && (pic->mask & bit) != 0U) {
continue;
else
} else {
return pin;
}
}
}
@ -129,8 +131,9 @@ static inline uint8_t vpic_get_highest_irrpin(struct pic *pic)
* master's priority logic.
*/
serviced = pic->service;
if (pic->sfn)
if (pic->sfn) {
serviced &= ~(uint8_t)(1U << 2U);
}
/*
* In 'Special Mask Mode', when a mask bit is set in OCW1 it inhibits
@ -138,8 +141,9 @@ static inline uint8_t vpic_get_highest_irrpin(struct pic *pic)
* other levels that are not masked. In other words the ISR has no
* bearing on the levels that can generate interrupts.
*/
if (pic->smm != 0U)
if (pic->smm != 0U) {
serviced = 0U;
}
PIC_PIN_FOREACH(pin, pic, tmp) {
bit = (uint8_t)(1U << pin);
@ -148,15 +152,17 @@ static inline uint8_t vpic_get_highest_irrpin(struct pic *pic)
* If there is already an interrupt in service at the same
* or higher priority then bail.
*/
if ((serviced & bit) != 0)
if ((serviced & bit) != 0) {
break;
}
/*
* If an interrupt is asserted and not masked then return
* the corresponding 'pin' to the caller.
*/
if ((pic->request & bit) != 0 && (pic->mask & bit) == 0)
if ((pic->request & bit) != 0 && (pic->mask & bit) == 0) {
return pin;
}
}
return VPIC_INVALID_PIN;
@ -309,8 +315,9 @@ static int vpic_icw4(struct vpic *vpic, struct pic *pic, uint8_t val)
return -1;
}
if ((val & ICW4_AEOI) != 0U)
if ((val & ICW4_AEOI) != 0U) {
pic->aeoi = true;
}
if ((val & ICW4_SFNM) != 0U) {
if (master_pic(vpic, pic)) {
@ -332,18 +339,20 @@ bool vpic_is_pin_mask(struct vpic *vpic, uint8_t virt_pin)
{
struct pic *pic;
if (virt_pin < 8U)
if (virt_pin < 8U) {
pic = &vpic->pic[0];
else if (virt_pin < 16U) {
} else if (virt_pin < 16U) {
pic = &vpic->pic[1];
virt_pin -= 8U;
} else
} else {
return true;
}
if ((pic->mask & (1U << virt_pin)) != 0U)
if ((pic->mask & (1U << virt_pin)) != 0U) {
return true;
else
} else {
return false;
}
}
static int vpic_ocw1(struct vpic *vpic, struct pic *pic, uint8_t val)
@ -369,13 +378,15 @@ static int vpic_ocw1(struct vpic *vpic, struct pic *pic, uint8_t val)
/* master pic pin2 connect with slave pic,
* not device, so not need pt remap
*/
if ((pin == 2U) && master_pic(vpic, pic))
if ((pin == 2U) && master_pic(vpic, pic)) {
continue;
}
intx.virt_pin = pin;
intx.vpin_src = PTDEV_VPIN_PIC;
if (!master_pic(vpic, pic))
if (!master_pic(vpic, pic)) {
intx.virt_pin += 8U;
}
ptdev_intx_pin_remap(vpic->vm, &intx);
}
}
@ -404,8 +415,9 @@ static int vpic_ocw2(struct vpic *vpic, struct pic *pic, uint8_t val)
if (isr_bit < NR_VPIC_PINS_PER_CHIP) {
pic->service &= ~(uint8_t)(1U << isr_bit);
if (pic->rotate)
if (pic->rotate) {
pic->lowprio = isr_bit;
}
}
/* if level ack PTDEV */
@ -457,10 +469,11 @@ static void vpic_set_pinstate(struct vpic *vpic, uint8_t pin, bool newstate)
pic = &vpic->pic[pin >> 3U];
oldcnt = pic->acnt[pin & 0x7U];
if (newstate)
if (newstate) {
pic->acnt[pin & 0x7U]++;
else
} else {
pic->acnt[pin & 0x7U]--;
}
newcnt = pic->acnt[pin & 0x7U];
if (newcnt < 0) {
@ -493,15 +506,17 @@ static int vpic_set_irqstate(struct vm *vm, uint32_t irq, enum irqstate irqstate
struct pic *pic;
uint8_t pin;
if (irq >= NR_VPIC_PINS_TOTAL)
if (irq >= NR_VPIC_PINS_TOTAL) {
return -EINVAL;
}
vpic = vm_pic(vm);
pic = &vpic->pic[irq >> 3U];
pin = (uint8_t)irq;
if (pic->ready == false)
if (pic->ready == false) {
return 0;
}
VPIC_LOCK(vpic);
switch (irqstate) {
@ -544,8 +559,9 @@ int vpic_set_irq_trigger(struct vm *vm, uint32_t irq, enum vpic_trigger trigger)
struct vpic *vpic;
uint8_t pin_mask;
if (irq >= NR_VPIC_PINS_TOTAL)
if (irq >= NR_VPIC_PINS_TOTAL) {
return -EINVAL;
}
/*
* See comment in vpic_elc_handler. These IRQs must be
@ -567,10 +583,11 @@ int vpic_set_irq_trigger(struct vm *vm, uint32_t irq, enum vpic_trigger trigger)
VPIC_LOCK(vpic);
if (trigger == LEVEL_TRIGGER)
if (trigger == LEVEL_TRIGGER) {
vpic->pic[irq >> 3U].elc |= pin_mask;
else
} else {
vpic->pic[irq >> 3U].elc &= ~pin_mask;
}
VPIC_UNLOCK(vpic);
@ -585,13 +602,15 @@ int vpic_get_irq_trigger(struct vm *vm, uint32_t irq, enum vpic_trigger *trigger
return -EINVAL;
vpic = vm_pic(vm);
if (vpic == NULL)
if (vpic == NULL) {
return -EINVAL;
}
if ((vpic->pic[irq >> 3U].elc & (1U << (irq & 0x7U))) != 0U)
if ((vpic->pic[irq >> 3U].elc & (1U << (irq & 0x7U))) != 0U) {
*trigger = LEVEL_TRIGGER;
else
} else {
*trigger = EDGE_TRIGGER;
}
return 0;
}
@ -741,15 +760,17 @@ static int vpic_write(struct vpic *vpic, struct pic *pic,
error = vpic_icw1(vpic, pic, val);
if (pic->ready) {
if ((val & (1U << 3U)) != 0U)
if ((val & (1U << 3U)) != 0U) {
error = vpic_ocw3(vpic, pic, val);
else
} else {
error = vpic_ocw2(vpic, pic, val);
}
}
}
if (pic->ready)
if (pic->ready) {
vpic_notify_intr(vpic);
}
VPIC_UNLOCK(vpic);
@ -765,11 +786,13 @@ static int vpic_master_handler(struct vm *vm, bool in, uint16_t port,
vpic = vm_pic(vm);
pic = &vpic->pic[0];
if (bytes != 1U)
if (bytes != 1U) {
return -1;
}
if (in)
if (in) {
return vpic_read(vpic, pic, port, eax);
}
return vpic_write(vpic, pic, port, eax);
}
@ -779,9 +802,10 @@ static uint32_t vpic_master_io_read(__unused struct vm_io_handler *hdlr,
{
uint32_t val = 0U;
if (vpic_master_handler(vm, true, addr, width, &val) < 0)
if (vpic_master_handler(vm, true, addr, width, &val) < 0) {
pr_err("pic master read port 0x%x width=%d failed\n",
addr, width);
}
return val;
}
@ -790,9 +814,10 @@ static void vpic_master_io_write(__unused struct vm_io_handler *hdlr,
{
uint32_t val = v;
if (vpic_master_handler(vm, false, addr, width, &val) < 0)
if (vpic_master_handler(vm, false, addr, width, &val) < 0) {
pr_err("%s: write port 0x%x width=%d value 0x%x failed\n",
__func__, addr, width, val);
}
}
static int vpic_slave_handler(struct vm *vm, bool in, uint16_t port,
@ -804,11 +829,13 @@ static int vpic_slave_handler(struct vm *vm, bool in, uint16_t port,
vpic = vm_pic(vm);
pic = &vpic->pic[1];
if (bytes != 1U)
if (bytes != 1U) {
return -1;
}
if (in)
if (in) {
return vpic_read(vpic, pic, port, eax);
}
return vpic_write(vpic, pic, port, eax);
}
@ -818,9 +845,10 @@ static uint32_t vpic_slave_io_read(__unused struct vm_io_handler *hdlr,
{
uint32_t val = 0U;
if (vpic_slave_handler(vm, true, addr, width, &val) < 0)
if (vpic_slave_handler(vm, true, addr, width, &val) < 0) {
pr_err("pic slave read port 0x%x width=%d failed\n",
addr, width);
}
return val;
}
@ -829,9 +857,10 @@ static void vpic_slave_io_write(__unused struct vm_io_handler *hdlr,
{
uint32_t val = v;
if (vpic_slave_handler(vm, false, addr, width, &val) < 0)
if (vpic_slave_handler(vm, false, addr, width, &val) < 0) {
pr_err("%s: write port 0x%x width=%d value 0x%x failed\n",
__func__, addr, width, val);
}
}
static int vpic_elc_handler(struct vm *vm, bool in, uint16_t port, size_t bytes,
@ -843,16 +872,18 @@ static int vpic_elc_handler(struct vm *vm, bool in, uint16_t port, size_t bytes,
vpic = vm_pic(vm);
is_master = (port == IO_ELCR1);
if (bytes != 1U)
if (bytes != 1U) {
return -1;
}
VPIC_LOCK(vpic);
if (in) {
if (is_master)
if (is_master) {
*eax = vpic->pic[0].elc;
else
} else {
*eax = vpic->pic[1].elc;
}
} else {
/*
* For the master PIC the cascade channel (IRQ2), the
@ -864,10 +895,11 @@ static int vpic_elc_handler(struct vm *vm, bool in, uint16_t port, size_t bytes,
* the floating point error interrupt (IRQ13) cannot
* be programmed for level mode.
*/
if (is_master)
if (is_master) {
vpic->pic[0].elc = (uint8_t)(*eax & 0xf8U);
else
} else {
vpic->pic[1].elc = (uint8_t)(*eax & 0xdeU);
}
}
VPIC_UNLOCK(vpic);
@ -880,8 +912,9 @@ static uint32_t vpic_elc_io_read(__unused struct vm_io_handler *hdlr,
{
uint32_t val = 0U;
if (vpic_elc_handler(vm, true, addr, width, &val) < 0)
if (vpic_elc_handler(vm, true, addr, width, &val) < 0) {
pr_err("pic elc read port 0x%x width=%d failed", addr, width);
}
return val;
}
@ -890,9 +923,10 @@ static void vpic_elc_io_write(__unused struct vm_io_handler *hdlr,
{
uint32_t val = v;
if (vpic_elc_handler(vm, false, addr, width, &val) < 0)
if (vpic_elc_handler(vm, false, addr, width, &val) < 0) {
pr_err("%s: write port 0x%x width=%d value 0x%x failed\n",
__func__, addr, width, val);
}
}
void vpic_register_io_handler(struct vm *vm)

View File

@ -24,12 +24,14 @@ int dm_emulate_pio_post(struct vcpu *vcpu)
/* VHM emulation data already copy to req, mark to free slot now */
req_buf->req_queue[cur].valid = false;
if (req_buf->req_queue[cur].processed != REQ_STATE_SUCCESS)
if (req_buf->req_queue[cur].processed != REQ_STATE_SUCCESS) {
return -1;
}
if (vcpu->req.reqs.pio_request.direction == REQUEST_READ)
if (vcpu->req.reqs.pio_request.direction == REQUEST_READ) {
*rax = ((*rax) & ~mask) |
(vcpu->req.reqs.pio_request.value & mask);
}
return 0;
}
@ -38,10 +40,11 @@ static void dm_emulate_pio_pre(struct vcpu *vcpu, uint64_t exit_qual,
uint32_t sz, uint64_t req_value)
{
vcpu->req.type = REQ_PORTIO;
if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) != 0U)
if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) != 0U) {
vcpu->req.reqs.pio_request.direction = REQUEST_READ;
else
} else {
vcpu->req.reqs.pio_request.direction = REQUEST_WRITE;
}
vcpu->req.reqs.pio_request.address =
VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
@ -77,9 +80,9 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
handler; handler = handler->next) {
if ((port >= handler->desc.addr + handler->desc.len) ||
(port + sz <= handler->desc.addr))
(port + sz <= handler->desc.addr)) {
continue;
else if (!((port >= handler->desc.addr) && ((port + sz)
} else if (!((port >= handler->desc.addr) && ((port + sz)
<= (handler->desc.addr + handler->desc.len)))) {
pr_fatal("Err:IO, port 0x%04x, size=%u spans devices",
port, sz);
@ -129,8 +132,9 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
static void register_io_handler(struct vm *vm, struct vm_io_handler *hdlr)
{
if (vm->arch_vm.io_handler != NULL)
if (vm->arch_vm.io_handler != NULL) {
hdlr->next = vm->arch_vm.io_handler;
}
vm->arch_vm.io_handler = hdlr;
}
@ -165,8 +169,9 @@ void allow_guest_io_access(struct vm *vm, uint32_t address, uint32_t nbytes)
b = vm->arch_vm.iobitmap[0];
for (i = 0U; i < nbytes; i++) {
if ((address & 0x8000U) != 0U)
if ((address & 0x8000U) != 0U) {
b = vm->arch_vm.iobitmap[1];
}
a = address & 0x7fffU;
b[a >> 5] &= ~(1 << (a & 0x1fU));
address++;
@ -181,8 +186,9 @@ static void deny_guest_io_access(struct vm *vm, uint32_t address, uint32_t nbyte
b = vm->arch_vm.iobitmap[0];
for (i = 0U; i < nbytes; i++) {
if ((address & 0x8000U) != 0U)
if ((address & 0x8000U) != 0U) {
b = vm->arch_vm.iobitmap[1];
}
a = address & 0x7fffU;
b[a >> 5U] |= (1U << (a & 0x1fU));
address++;
@ -240,8 +246,9 @@ void register_io_emulation_handler(struct vm *vm, struct vm_io_range *range,
return;
}
if (is_vm0(vm))
if (is_vm0(vm)) {
deny_guest_io_access(vm, range->base, range->len);
}
handler = create_io_handler(range->base,
range->len, io_read_fn_ptr, io_write_fn_ptr);

View File

@ -139,14 +139,15 @@ get_ioapic_base(uint8_t apic_id)
uint64_t addr = 0xffffffffffffffffUL;
/* should extract next ioapic from ACPI MADT table */
if (apic_id == 0U)
if (apic_id == 0U) {
addr = DEFAULT_IO_APIC_BASE;
else if (apic_id == 1U)
} else if (apic_id == 1U) {
addr = 0xfec3f000UL;
else if (apic_id == 2U)
} else if (apic_id == 2U) {
addr = 0xfec7f000UL;
else
} else {
ASSERT(apic_id <= 2U, "ACPI MADT table missing");
}
return addr;
}
@ -200,8 +201,9 @@ create_rte_for_gsi_irq(uint32_t irq, uint32_t vr)
{
struct ioapic_rte rte = {0, 0};
if (irq < NR_LEGACY_IRQ)
if (irq < NR_LEGACY_IRQ) {
return create_rte_for_legacy_irq(irq, vr);
}
/* irq default masked, level trig */
rte.lo_32 |= IOAPIC_RTE_INTMSET;
@ -228,10 +230,11 @@ static void ioapic_set_routing(uint32_t gsi, uint32_t vr)
rte = create_rte_for_gsi_irq(gsi, vr);
ioapic_set_rte_entry(addr, gsi_table[gsi].pin, &rte);
if ((rte.lo_32 & IOAPIC_RTE_TRGRMOD) != 0U)
if ((rte.lo_32 & IOAPIC_RTE_TRGRMOD) != 0U) {
update_irq_handler(gsi, handle_level_interrupt_common);
else
} else {
update_irq_handler(gsi, common_handler_edge);
}
dev_dbg(ACRN_DBG_IRQ, "GSI: irq:%d pin:%hhu rte:%x",
gsi, gsi_table[gsi].pin,
@ -243,8 +246,9 @@ void ioapic_get_rte(uint32_t irq, uint64_t *rte)
void *addr;
struct ioapic_rte _rte;
if (!irq_is_gsi(irq))
if (!irq_is_gsi(irq)) {
return;
}
addr = gsi_table[irq].addr;
ioapic_get_rte_entry(addr, gsi_table[irq].pin, &_rte);
@ -258,8 +262,9 @@ void ioapic_set_rte(uint32_t irq, uint64_t raw_rte)
void *addr;
struct ioapic_rte rte;
if (!irq_is_gsi(irq))
if (!irq_is_gsi(irq)) {
return;
}
addr = gsi_table[irq].addr;
rte.lo_32 = (uint32_t)raw_rte;
@ -283,10 +288,11 @@ bool irq_is_gsi(uint32_t irq)
uint8_t irq_to_pin(uint32_t irq)
{
if (irq_is_gsi(irq))
if (irq_is_gsi(irq)) {
return gsi_table[irq].pin;
else
} else {
return IOAPIC_INVALID_PIN;
}
}
uint32_t pin_to_irq(uint8_t pin)
@ -294,8 +300,9 @@ uint32_t pin_to_irq(uint8_t pin)
uint32_t i;
for (i = 0U; i < nr_gsi; i++) {
if (gsi_table[i].pin == pin)
if (gsi_table[i].pin == pin) {
return i;
}
}
return IRQ_INVALID;
}
@ -307,14 +314,16 @@ irq_gsi_mask_unmask(uint32_t irq, bool mask)
uint8_t pin = gsi_table[irq].pin;
struct ioapic_rte rte;
if (!irq_is_gsi(irq))
if (!irq_is_gsi(irq)) {
return;
}
ioapic_get_rte_entry(addr, pin, &rte);
if (mask)
if (mask) {
rte.lo_32 |= IOAPIC_RTE_INTMSET;
else
} else {
rte.lo_32 &= ~IOAPIC_RTE_INTMASK;
}
ioapic_set_rte_entry(addr, pin, &rte);
dev_dbg(ACRN_DBG_PTIRQ, "update: irq:%d pin:%hhu rte:%x",
irq, pin, rte.lo_32);
@ -360,11 +369,12 @@ void setup_ioapic_irq(void)
gsi_table[gsi].ioapic_id = ioapic_id;
gsi_table[gsi].addr = addr;
if (gsi < NR_LEGACY_IRQ)
if (gsi < NR_LEGACY_IRQ) {
gsi_table[gsi].pin =
legacy_irq_to_pin[gsi] & 0xffU;
else
} else {
gsi_table[gsi].pin = pin;
}
/* pinned irq before use it */
if (irq_mark_used(gsi) > NR_MAX_IRQS) {
@ -383,8 +393,9 @@ void setup_ioapic_irq(void)
gsi++;
continue;
}
} else
} else {
vr = 0U; /* not to allocate VR right now */
}
ioapic_set_routing(gsi, vr);
gsi++;

View File

@ -48,8 +48,9 @@ static uint32_t find_available_vector(bool lowpri)
/* TODO: vector lock required */
for (i = start; i < end; i++) {
if (vector_to_irq[i] == IRQ_INVALID)
if (vector_to_irq[i] == IRQ_INVALID) {
return i;
}
}
return VECTOR_INVALID;
}
@ -64,13 +65,15 @@ uint32_t irq_mark_used(uint32_t irq)
spinlock_rflags;
if (irq > NR_MAX_IRQS)
if (irq > NR_MAX_IRQS) {
return IRQ_INVALID;
}
desc = &irq_desc_array[irq];
spinlock_irqsave_obtain(&desc->irq_lock);
if (desc->used == IRQ_NOT_ASSIGNED)
if (desc->used == IRQ_NOT_ASSIGNED) {
desc->used = IRQ_ASSIGNED_NOSHARE;
}
spinlock_irqrestore_release(&desc->irq_lock);
return irq;
}
@ -130,8 +133,9 @@ static void _irq_desc_free_vector(uint32_t irq)
uint32_t vr;
uint16_t pcpu_id;
if (irq > NR_MAX_IRQS)
if (irq > NR_MAX_IRQS) {
return;
}
desc = &irq_desc_array[irq];
@ -141,8 +145,9 @@ static void _irq_desc_free_vector(uint32_t irq)
desc->vector = VECTOR_INVALID;
vr &= NR_MAX_VECTOR;
if (vector_to_irq[vr] == irq)
if (vector_to_irq[vr] == irq) {
vector_to_irq[vr] = IRQ_INVALID;
}
for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
per_cpu(irq_count, pcpu_id)[irq] = 0UL;
@ -175,8 +180,9 @@ irq_desc_append_dev(struct irq_desc *desc, void *node, bool share)
* ioapic setup.
* caller can later update it with update_irq_handler()
*/
if (desc->irq_handler == NULL)
if (desc->irq_handler == NULL) {
desc->irq_handler = common_handler_edge;
}
} else if (!share || desc->used == IRQ_ASSIGNED_NOSHARE) {
/* dev node added failed */
added = false;
@ -233,10 +239,11 @@ common_register_handler(uint32_t irq,
/* HV select a irq for device if irq < 0
* this vector/irq match to APCI DSDT or PCI INTx/MSI
*/
if (irq == IRQ_INVALID)
if (irq == IRQ_INVALID) {
irq = alloc_irq();
else
} else {
irq = irq_mark_used(irq);
}
if (irq > NR_MAX_IRQS) {
pr_err("failed to assign IRQ");
@ -261,11 +268,11 @@ OUT:
if (added) {
/* it is safe to call irq_desc_alloc_vector multiple times*/
if (info->vector >= VECTOR_FOR_PRI_START &&
info->vector <= VECTOR_FOR_PRI_END)
info->vector <= VECTOR_FOR_PRI_END) {
irq_desc_set_vector(irq, info->vector);
else if (info->vector > NR_MAX_VECTOR)
} else if (info->vector > NR_MAX_VECTOR) {
irq_desc_alloc_vector(irq, info->lowpri);
else {
} else {
pr_err("the input vector is not correct");
free(node);
return NULL;
@ -295,8 +302,9 @@ uint32_t irq_desc_alloc_vector(uint32_t irq, bool lowpri)
spinlock_rflags;
/* irq should be always available at this time */
if (irq > NR_MAX_IRQS)
if (irq > NR_MAX_IRQS) {
return VECTOR_INVALID;
}
desc = &irq_desc_array[irq];
spinlock_irqsave_obtain(&desc->irq_lock);
@ -324,13 +332,15 @@ void irq_desc_try_free_vector(uint32_t irq)
spinlock_rflags;
/* legacy irq's vector is reserved and should not be freed */
if (irq > NR_MAX_IRQS || irq < NR_LEGACY_IRQ)
if (irq > NR_MAX_IRQS || irq < NR_LEGACY_IRQ) {
return;
}
desc = &irq_desc_array[irq];
spinlock_irqsave_obtain(&desc->irq_lock);
if (desc->dev_list == NULL)
if (desc->dev_list == NULL) {
_irq_desc_free_vector(irq);
}
spinlock_irqrestore_release(&desc->irq_lock);
@ -338,10 +348,11 @@ void irq_desc_try_free_vector(uint32_t irq)
uint32_t irq_to_vector(uint32_t irq)
{
if (irq < NR_MAX_IRQS)
if (irq < NR_MAX_IRQS) {
return irq_desc_array[irq].vector;
else
} else {
return VECTOR_INVALID;
}
}
uint32_t dev_to_irq(struct dev_handler_node *node)
@ -356,8 +367,9 @@ uint32_t dev_to_vector(struct dev_handler_node *node)
int init_default_irqs(uint16_t cpu_id)
{
if (cpu_id != BOOT_CPU_ID)
if (cpu_id != BOOT_CPU_ID) {
return 0;
}
init_irq_desc();
@ -394,8 +406,9 @@ void handle_spurious_interrupt(uint32_t vector)
pr_warn("Spurious vector: 0x%x.", vector);
if (spurious_handler != NULL)
if (spurious_handler != NULL) {
spurious_handler(vector);
}
}
/* do_IRQ() */
@ -405,14 +418,16 @@ void dispatch_interrupt(struct intr_excp_ctx *ctx)
uint32_t irq = vector_to_irq[vr];
struct irq_desc *desc;
if (irq == IRQ_INVALID)
if (irq == IRQ_INVALID) {
goto ERR;
}
desc = &irq_desc_array[irq];
per_cpu(irq_count, get_cpu_id())[irq]++;
if (vr != desc->vector)
if (vr != desc->vector) {
goto ERR;
}
if (desc->used == IRQ_NOT_ASSIGNED || desc->irq_handler == NULL) {
/* mask irq if possible */
@ -446,20 +461,23 @@ int handle_level_interrupt_common(struct irq_desc *desc,
desc->state = IRQ_DESC_IN_PROCESS;
/* mask iopaic pin */
if (irq_is_gsi(desc->irq))
if (irq_is_gsi(desc->irq)) {
GSI_MASK_IRQ(desc->irq);
}
/* Send EOI to LAPIC/IOAPIC IRR */
send_lapic_eoi();
while (dev != NULL) {
if (dev->dev_handler != NULL)
if (dev->dev_handler != NULL) {
dev->dev_handler(desc->irq, dev->dev_data);
}
dev = dev->next;
}
if (irq_is_gsi(desc->irq))
if (irq_is_gsi(desc->irq)) {
GSI_UNMASK_IRQ(desc->irq);
}
desc->state = IRQ_DESC_PENDING;
spinlock_irqrestore_release(&desc->irq_lock);
@ -489,8 +507,9 @@ int common_handler_edge(struct irq_desc *desc, __unused void *handler_data)
send_lapic_eoi();
while (dev != NULL) {
if (dev->dev_handler != NULL)
if (dev->dev_handler != NULL) {
dev->dev_handler(desc->irq, dev->dev_data);
}
dev = dev->next;
}
@ -519,15 +538,17 @@ int common_dev_handler_level(struct irq_desc *desc, __unused void *handler_data)
desc->state = IRQ_DESC_IN_PROCESS;
/* mask iopaic pin */
if (irq_is_gsi(desc->irq))
if (irq_is_gsi(desc->irq)) {
GSI_MASK_IRQ(desc->irq);
}
/* Send EOI to LAPIC/IOAPIC IRR */
send_lapic_eoi();
while (dev != NULL) {
if (dev->dev_handler != NULL)
if (dev->dev_handler != NULL) {
dev->dev_handler(desc->irq, dev->dev_data);
}
dev = dev->next;
}
@ -547,8 +568,9 @@ int quick_handler_nolock(struct irq_desc *desc, __unused void *handler_data)
send_lapic_eoi();
while (dev != NULL) {
if (dev->dev_handler != NULL)
if (dev->dev_handler != NULL) {
dev->dev_handler(desc->irq, dev->dev_data);
}
dev = dev->next;
}
@ -561,8 +583,9 @@ void update_irq_handler(uint32_t irq, irq_handler_t func)
spinlock_rflags;
if (irq >= NR_MAX_IRQS)
if (irq >= NR_MAX_IRQS) {
return;
}
desc = &irq_desc_array[irq];
spinlock_irqsave_obtain(&desc->irq_lock);
@ -577,8 +600,9 @@ void unregister_handler_common(struct dev_handler_node *node)
spinlock_rflags;
if (node == NULL)
if (node == NULL) {
return;
}
dev_dbg(ACRN_DBG_IRQ, "[%s] %s irq%d vr:0x%x",
__func__, node->name,
@ -646,8 +670,9 @@ pri_register_handler(uint32_t irq,
{
struct irq_request_info info;
if (vector < VECTOR_FOR_PRI_START || vector > VECTOR_FOR_PRI_END)
if (vector < VECTOR_FOR_PRI_START || vector > VECTOR_FOR_PRI_END) {
return NULL;
}
info.vector = vector;
info.lowpri = false;
@ -714,13 +739,15 @@ int interrupt_init(uint16_t pcpu_id)
status = init_lapic(pcpu_id);
ASSERT(status == 0, "lapic init failed");
if (status != 0)
if (status != 0) {
return -ENODEV;
}
status = init_default_irqs(pcpu_id);
ASSERT(status == 0, "irqs init failed");
if (status != 0)
if (status != 0) {
return -ENODEV;
}
#ifndef CONFIG_EFI_STUB
CPU_IRQ_ENABLE();

View File

@ -48,7 +48,7 @@ static struct vmx_capability {
/*
* If the logical processor is in VMX non-root operation and
* the enable VPID VM-execution control is 1, the current VPID
* the "enable VPID" VM-execution control is 1, the current VPID
* is the value of the VPID VM-execution control field in the VMCS.
* (VM entry ensures that this value is never 0000H).
*/
@ -163,8 +163,9 @@ uint16_t allocate_vpid(void)
void flush_vpid_single(uint16_t vpid)
{
if (vpid == 0U)
if (vpid == 0U) {
return;
}
_invvpid(VMX_VPID_TYPE_SINGLE_CONTEXT, vpid, 0UL);
}
@ -187,18 +188,20 @@ void invept(struct vcpu *vcpu)
| (3UL << 3U) | 6UL;
_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
}
} else if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT))
} else if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT)) {
_invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
}
}
bool check_mmu_1gb_support(enum _page_table_type page_table_type)
{
bool status = false;
if (page_table_type == PTT_EPT)
if (page_table_type == PTT_EPT) {
status = cpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
else
} else {
status = cpu_has_cap(X86_FEATURE_PAGE1GB);
}
return status;
}
@ -218,8 +221,9 @@ check_page_table_present(enum _page_table_type page_table_type,
if ((table_entry == IA32E_EPT_W_BIT) ||
(table_entry == (IA32E_EPT_W_BIT | IA32E_EPT_X_BIT)) ||
((table_entry == IA32E_EPT_X_BIT) &&
!cpu_has_vmx_ept_cap(VMX_EPT_EXECUTE_ONLY)))
!cpu_has_vmx_ept_cap(VMX_EPT_EXECUTE_ONLY))) {
return PT_MISCFG_PRESENT;
}
} else {
table_entry &= (IA32E_COMM_P_BIT);
}
@ -383,10 +387,11 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
{
if (prev_entry_present) {
/* modify the memory type related fields only */
if (table_type == PTT_EPT)
if (table_type == PTT_EPT) {
table_entry = entry & ~IA32E_EPT_MT_MASK;
else
} else {
table_entry = entry & ~MMU_MEM_ATTR_TYPE_MASK;
}
table_entry |= attr;
@ -396,8 +401,9 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
/* Modify, need to invalidate TLB and
* page-structure cache
*/
if (table_type == PTT_HOST)
if (table_type == PTT_HOST) {
mmu_need_invtlb = true;
}
}
break;
}