HV: Rename functions beginning with "_"

V1:
To remove MISRA C violations of rule 219S,
renamed functions and macros whose names
start with "_".
Also removed some __mmio functions, since they
were unused duplicates.
Renamed functions such as __assert to asm_assert,
since they were only wrappers around asm calls.
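
To illustrate the __assert -> asm_assert case, here is a minimal sketch of a C helper that does nothing but wrap asm statements (hence the asm_ prefix). The signature and the body below (logging stubs, then parking the CPU with cli/hlt) are assumptions for illustration, not the hypervisor's actual implementation:

    #include <stdint.h>

    /* Hypothetical sketch: formerly __assert; renamed because it is
     * nothing but a wrapper around asm statements. */
    static inline void asm_assert(int32_t line, const char *file, const char *txt)
    {
            (void)line;     /* a real build would log these */
            (void)file;
            (void)txt;
            /* park the CPU: mask interrupts, then halt forever */
            for (;;) {
                    asm volatile ("cli; hlt");
            }
    }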

V2:
On top of the V1 changes, renamed the bitmap
functions with a _lock suffix, matching the
_nolock variants introduced in V1.
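
The convention this introduces: the _lock variants use a locked (atomic) read-modify-write, while the _nolock variants use a plain one for bits that cannot race (e.g. early boot). A minimal sketch assuming x86-64 inline asm; the real functions in the tree are macro-generated and may differ:

    #include <stdint.h>

    /* Sketch only: atomic variant, safe against concurrent CPUs. */
    static inline void bitmap_set_lock(uint16_t nr, volatile uint64_t *addr)
    {
            asm volatile ("lock orq %1, %0"
                          : "+m" (*addr)
                          : "r" ((uint64_t)1UL << nr)
                          : "cc", "memory");
    }

    /* Sketch only: non-atomic variant, for bits only touched by the
     * local CPU or under some other form of exclusion. */
    static inline void bitmap_set_nolock(uint16_t nr, volatile uint64_t *addr)
    {
            asm volatile ("orq %1, %0"
                          : "+m" (*addr)
                          : "r" ((uint64_t)1UL << nr)
                          : "cc", "memory");
    }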

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
Author: Arindam Roy, 2018-07-24 18:11:26 -07:00
Committed by: lijinxia
Parent: d40a6b9e93
Commit: a2fe964de8
17 changed files with 116 additions and 180 deletions


@@ -431,7 +431,7 @@ void bsp_boot_init(void)
CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL,
"run_context ia32_spec_ctrl offset not match");
-__bitmap_set(BOOT_CPU_ID, &pcpu_active_bitmap);
+bitmap_set_nolock(BOOT_CPU_ID, &pcpu_active_bitmap);
misc_en = msr_read(MSR_IA32_MISC_ENABLE);
if ((misc_en & TURBO_MODE_DISABLE) == 0UL) {
@@ -540,7 +540,7 @@ static void bsp_boot_post(void)
start_cpus();
/* Trigger event to allow secondary CPUs to continue */
-__bitmap_set(0U, &pcpu_sync);
+bitmap_set_nolock(0U, &pcpu_sync);
ASSERT(get_cpu_id() == BOOT_CPU_ID, "");
@@ -580,7 +580,7 @@ void cpu_secondary_init(void)
cpu_set_current_state(get_cpu_id_from_lapic_id(get_cur_lapic_id()),
CPU_STATE_INITIALIZING);
-__bitmap_set(get_cpu_id(), &pcpu_active_bitmap);
+bitmap_set_nolock(get_cpu_id(), &pcpu_active_bitmap);
misc_en = msr_read(MSR_IA32_MISC_ENABLE);
if ((misc_en & TURBO_MODE_DISABLE) == 0UL) {
@@ -742,7 +742,7 @@ void cpu_dead(uint16_t pcpu_id)
*/
int halt = 1;
-if (bitmap_test_and_clear(pcpu_id, &pcpu_active_bitmap) == false) {
+if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap) == false) {
pr_err("pcpu%hu already dead", pcpu_id);
return;
}


@@ -85,10 +85,10 @@ inline uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
for (vcpu_id = ffs64(vdmask); vcpu_id != INVALID_BIT_INDEX;
vcpu_id = ffs64(vdmask)) {
-bitmap_clear(vcpu_id, &vdmask);
+bitmap_clear_lock(vcpu_id, &vdmask);
vcpu = vcpu_from_vid(vm, vcpu_id);
ASSERT(vcpu != NULL, "vcpu_from_vid failed");
-bitmap_set(vcpu->pcpu_id, &dmask);
+bitmap_set_lock(vcpu->pcpu_id, &dmask);
}
return dmask;


@@ -391,5 +391,5 @@ int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
void request_vcpu_pre_work(struct vcpu *vcpu, uint16_t pre_work_id)
{
-bitmap_set(pre_work_id, &vcpu->pending_pre_work);
+bitmap_set_lock(pre_work_id, &vcpu->pending_pre_work);
}


@@ -146,7 +146,7 @@ vm_active_cpus(struct vm *vm)
struct vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
-bitmap_set(vcpu->vcpu_id, &dmask);
+bitmap_set_lock(vcpu->vcpu_id, &dmask);
}
return dmask;
@@ -485,7 +485,7 @@ vlapic_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
irrptr = &lapic->irr[0];
/* If the interrupt is set, don't try to do it again */
-if (bitmap32_test_and_set((uint16_t)(vector % 32U), &irrptr[idx].val)) {
+if (bitmap32_test_and_set_lock((uint16_t)(vector % 32U), &irrptr[idx].val)) {
return 0;
}
@@ -934,7 +934,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
*dmask = 0UL;
vcpu_id = vm_apicid2vcpu_id(vm, (uint8_t)dest);
if (vcpu_id < phys_cpu_num) {
-bitmap_set(vcpu_id, dmask);
+bitmap_set_lock(vcpu_id, dmask);
}
} else {
/*
@@ -958,7 +958,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
amask = vm_active_cpus(vm);
for (vcpu_id = ffs64(amask); vcpu_id != INVALID_BIT_INDEX;
vcpu_id = ffs64(amask)) {
-bitmap_clear(vcpu_id, &amask);
+bitmap_clear_lock(vcpu_id, &amask);
vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
dfr = vlapic->apic_page->dfr;
@@ -1001,13 +1001,13 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
/* target is the dest */
}
} else {
-bitmap_set(vcpu_id, dmask);
+bitmap_set_lock(vcpu_id, dmask);
}
}
}
if (lowprio && (target != NULL)) {
-bitmap_set(target->vcpu->vcpu_id, dmask);
+bitmap_set_lock(target->vcpu->vcpu_id, dmask);
}
}
}
@@ -1108,19 +1108,19 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
vlapic_calcdest(vlapic->vm, &dmask, dest, phys, false);
break;
case APIC_DEST_SELF:
-bitmap_set(vlapic->vcpu->vcpu_id, &dmask);
+bitmap_set_lock(vlapic->vcpu->vcpu_id, &dmask);
break;
case APIC_DEST_ALLISELF:
dmask = vm_active_cpus(vlapic->vm);
break;
case APIC_DEST_ALLESELF:
dmask = vm_active_cpus(vlapic->vm);
-bitmap_clear(vlapic->vcpu->vcpu_id, &dmask);
+bitmap_clear_lock(vlapic->vcpu->vcpu_id, &dmask);
break;
}
while ((vcpu_id = ffs64(dmask)) != INVALID_BIT_INDEX) {
-bitmap_clear(vcpu_id, &dmask);
+bitmap_clear_lock(vcpu_id, &dmask);
target_vcpu = vcpu_from_vid(vlapic->vm, vcpu_id);
if (target_vcpu == NULL) {
continue;
@@ -1690,7 +1690,7 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
for (vcpu_id = ffs64(dmask); vcpu_id != INVALID_BIT_INDEX;
vcpu_id = ffs64(dmask)) {
struct acrn_vlapic *vlapic;
-bitmap_clear(vcpu_id, &dmask);
+bitmap_clear_lock(vcpu_id, &dmask);
target_vcpu = vcpu_from_vid(vm, vcpu_id);
if (target_vcpu == NULL) {
return;
@@ -1846,12 +1846,12 @@ vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id_arg, uint32_t vector)
if (vcpu_id == BROADCAST_CPU_ID) {
dmask = vm_active_cpus(vm);
} else {
-bitmap_set(vcpu_id, &dmask);
+bitmap_set_lock(vcpu_id, &dmask);
}
error = 0;
for (vcpu_id = ffs64(dmask); vcpu_id != INVALID_BIT_INDEX;
vcpu_id = ffs64(dmask)) {
-bitmap_clear(vcpu_id, &dmask);
+bitmap_clear_lock(vcpu_id, &dmask);
vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
error = vlapic_trigger_lvt(vlapic, vector);
if (error != 0) {


@@ -103,7 +103,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
}
for (id = 0U; id < (size_t)(sizeof(vmid_bitmap) * 8U); id++) {
-if (!bitmap_test_and_set(id, &vmid_bitmap)) {
+if (!bitmap_test_and_set_lock(id, &vmid_bitmap)) {
break;
}
}
@@ -273,7 +273,7 @@ int shutdown_vm(struct vm *vm)
destroy_iommu_domain(vm->iommu_domain);
}
-bitmap_clear(vm->attr.id, &vmid_bitmap);
+bitmap_clear_lock(vm->attr.id, &vmid_bitmap);
if (vm->vpic != NULL) {
vpic_cleanup(vm);


@@ -8,12 +8,12 @@
void disable_softirq(uint16_t cpu_id)
{
-bitmap_clear(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
+bitmap_clear_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
}
void enable_softirq(uint16_t cpu_id)
{
-bitmap_set(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
+bitmap_set_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
}
void init_softirq(void)
@@ -22,7 +22,7 @@ void init_softirq(void)
for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
per_cpu(softirq_pending, pcpu_id) = 0UL;
-bitmap_set(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, pcpu_id));
+bitmap_set_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, pcpu_id));
}
}
@@ -35,7 +35,7 @@ void raise_softirq(uint16_t softirq_id)
return;
}
-bitmap_set(softirq_id, bitmap);
+bitmap_set_lock(softirq_id, bitmap);
}
void exec_softirq(void)
@@ -56,7 +56,7 @@ void exec_softirq(void)
/* Disable softirq
* SOFTIRQ_ATOMIC bit = 0 means softirq already in execution
*/
-if (!bitmap_test_and_clear(SOFTIRQ_ATOMIC, bitmap)) {
+if (!bitmap_test_and_clear_lock(SOFTIRQ_ATOMIC, bitmap)) {
return;
}
@@ -69,7 +69,7 @@ again:
break;
}
-bitmap_clear(softirq_id, bitmap);
+bitmap_clear_lock(softirq_id, bitmap);
switch (softirq_id) {
case SOFTIRQ_TIMER:


@@ -96,7 +96,7 @@ static bool vcpu_pending_request(struct vcpu *vcpu)
void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid)
{
-bitmap_set(eventid, &vcpu->arch_vcpu.pending_req);
+bitmap_set_lock(eventid, &vcpu->arch_vcpu.pending_req);
/*
* if current hostcpu is not the target vcpu's hostcpu, we need
* to invoke IPI to wake up target vcpu
@@ -372,18 +372,18 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
struct vcpu_arch * arch_vcpu = &vcpu->arch_vcpu;
uint64_t *pending_req_bits = &arch_vcpu->pending_req;
-if (bitmap_test_and_clear(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
pr_fatal("Triple fault happen -> shutdown!");
return -EFAULT;
}
-if (bitmap_test_and_clear(ACRN_REQUEST_EPT_FLUSH, pending_req_bits))
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_EPT_FLUSH, pending_req_bits))
invept(vcpu);
-if (bitmap_test_and_clear(ACRN_REQUEST_VPID_FLUSH, pending_req_bits))
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_VPID_FLUSH, pending_req_bits))
flush_vpid_single(arch_vcpu->vpid);
-if (bitmap_test_and_clear(ACRN_REQUEST_TMR_UPDATE, pending_req_bits))
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_TMR_UPDATE, pending_req_bits))
vioapic_update_tmr(vcpu);
/* handling cancelled event injection when vcpu is switched out */
@@ -408,7 +408,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
goto INTR_WIN;
/* inject NMI before maskable hardware interrupt */
-if (bitmap_test_and_clear(ACRN_REQUEST_NMI, pending_req_bits)) {
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_NMI, pending_req_bits)) {
/* Inject NMI vector = 2 */
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
VMX_INT_INFO_VALID | (VMX_INT_TYPE_NMI << 8U) | IDT_NMI);
@@ -432,7 +432,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
/* Guest interruptable or not */
if (is_guest_irq_enabled(vcpu)) {
/* Inject external interrupt first */
-if (bitmap_test_and_clear(ACRN_REQUEST_EXTINT,
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_EXTINT,
pending_req_bits)) {
/* has pending external interrupts */
ret = vcpu_do_pending_extint(vcpu);
@@ -440,7 +440,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
}
/* Inject vLAPIC vectors */
-if (bitmap_test_and_clear(ACRN_REQUEST_EVENT,
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT,
pending_req_bits)) {
/* has pending vLAPIC interrupts */
ret = vcpu_do_pending_event(vcpu);
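
Taken together, the call sites above imply roughly the following renamed bitmap API surface. These prototypes are inferred from usage in the hunks, not copied from the headers; parameter names and exact types are assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Inferred, not authoritative: signatures assumed from the
     * call sites in this diff. */
    void bitmap_set_lock(uint16_t nr, volatile uint64_t *addr);
    void bitmap_set_nolock(uint16_t nr, volatile uint64_t *addr);
    void bitmap_clear_lock(uint16_t nr, volatile uint64_t *addr);
    bool bitmap_test_and_set_lock(uint16_t nr, volatile uint64_t *addr);
    bool bitmap_test_and_clear_lock(uint16_t nr, volatile uint64_t *addr);
    bool bitmap32_test_and_set_lock(uint16_t nr, volatile uint32_t *addr);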