hv: Simplify the for-loop when walking through the vcpus

-- Do not return NULL from vcpu_from_vid
  The vcpu array is now allocated statically rather than dynamically,
  so as long as vcpu_id is valid this API can no longer return NULL.
-- Simplify the for-loop used to walk through the vcpus (a sketch of the
  old and new loop idioms follows)
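In short, the patch replaces the ffs64()-driven walk over a scratch copy of
the destination mask with a bounded index loop that tests each bit, and it
moves vcpu_id validation to the call sites so vcpu_from_vid() never has to
return NULL. A minimal sketch of the two loop idioms, using the helpers that
appear in the hunks below; handle_one_vcpu() is a hypothetical stand-in for
the per-vcpu work:

    /* Old idiom: peel set bits off a scratch copy of the mask. */
    for (vcpu_id = ffs64(mask); vcpu_id != INVALID_BIT_INDEX;
            vcpu_id = ffs64(mask)) {
        bitmap_clear_lock(vcpu_id, &mask);   /* consumes the local mask */
        handle_one_vcpu(vm, vcpu_id);        /* hypothetical per-vcpu work */
    }

    /* New idiom: iterate over the created vcpus and test each bit in place. */
    for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
        if ((mask & (1UL << vcpu_id)) != 0UL) {
            handle_one_vcpu(vm, vcpu_id);
        }
    }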

Tracked-On: #861
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Reviewed-by: Li, Fei1 <fei1.li@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Mingqiang Chi 2018-10-17 13:56:37 +08:00 committed by wenlingz
parent 813e3abccc
commit 85ececd2d7
5 changed files with 127 additions and 132 deletions

View File

@@ -38,12 +38,12 @@ uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
     uint64_t dmask = 0UL;
     struct vcpu *vcpu;
-    for (vcpu_id = ffs64(vdmask); vcpu_id != INVALID_BIT_INDEX;
-            vcpu_id = ffs64(vdmask)) {
-        bitmap_clear_lock(vcpu_id, &vdmask);
+    for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
+        if (vdmask & (1U << vcpu_id)) {
             vcpu = vcpu_from_vid(vm, vcpu_id);
             bitmap_set_lock(vcpu->pcpu_id, &dmask);
+        }
     }
     return dmask;
 }

View File

@@ -1020,10 +1020,8 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
      */
     *dmask = 0UL;
     amask = vm_active_cpus(vm);
-    for (vcpu_id = ffs64(amask); vcpu_id != INVALID_BIT_INDEX;
-            vcpu_id = ffs64(amask)) {
-        bitmap_clear_lock(vcpu_id, &amask);
+    for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
+        if (amask & (1U << vcpu_id)) {
             vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
             dfr = vlapic->apic_page.dfr.v;
             ldr = vlapic->apic_page.ldr.v;
@@ -1069,6 +1067,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
                 }
             }
+        }
     }
     if (lowprio && (target != NULL)) {
         bitmap_set_lock(target->vcpu->vcpu_id, dmask);
@@ -1189,13 +1188,9 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
         break;
     }
-    for (vcpu_id = ffs64(dmask); vcpu_id != INVALID_BIT_INDEX;
-            vcpu_id = ffs64(dmask)) {
-        bitmap_clear_lock(vcpu_id, &dmask);
+    for (vcpu_id = 0U; vcpu_id < vlapic->vm->hw.created_vcpus; vcpu_id++) {
+        if (dmask & (1U << vcpu_id)) {
             target_vcpu = vcpu_from_vid(vlapic->vm, vcpu_id);
-            if (target_vcpu == NULL) {
-                continue;
-            }
             if (mode == APIC_DELMODE_FIXED) {
                 vlapic_set_intr(target_vcpu, vec,
@@ -1251,6 +1246,7 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
             pr_err("Unhandled icrlo write with mode %u\n", mode);
         }
+        }
     }
     return 0; /* handled completely in the kernel */
 }
@@ -1723,14 +1719,10 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
      */
     vlapic_calcdest(vm, &dmask, dest, phys, lowprio);
-    for (vcpu_id = ffs64(dmask); vcpu_id != INVALID_BIT_INDEX;
-            vcpu_id = ffs64(dmask)) {
+    for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
         struct acrn_vlapic *vlapic;
-        bitmap_clear_lock(vcpu_id, &dmask);
+        if (dmask & (1U << vcpu_id)) {
             target_vcpu = vcpu_from_vid(vm, vcpu_id);
-            if (target_vcpu == NULL) {
-                return;
-            }
             /* only make request when vlapic enabled */
             vlapic = vcpu_vlapic(target_vcpu);
@@ -1743,6 +1735,7 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
             }
+        }
     }
 }
 bool
 vlapic_enabled(struct acrn_vlapic *vlapic)
@@ -1875,15 +1868,15 @@ vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id_arg, uint32_t vector)
         bitmap_set_lock(vcpu_id, &dmask);
     }
     error = 0;
-    for (vcpu_id = ffs64(dmask); vcpu_id != INVALID_BIT_INDEX;
-            vcpu_id = ffs64(dmask)) {
-        bitmap_clear_lock(vcpu_id, &dmask);
+    for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
+        if (dmask & (1U << vcpu_id)) {
             vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
             error = vlapic_trigger_lvt(vlapic, vector);
             if (error != 0) {
                 break;
             }
+        }
     }
     return error;
 }

View File

@@ -319,12 +319,12 @@ int32_t hcall_set_vcpu_regs(struct vm *vm, uint16_t vmid, uint64_t param)
         return -1;
     }
-    vcpu = vcpu_from_vid(target_vm, vcpu_regs.vcpu_id);
-    if (vcpu == NULL) {
+    if (vcpu_regs.vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
         pr_err("%s: invalid vcpu_id for set_vcpu_regs\n", __func__);
         return -1;
     }
+    vcpu = vcpu_from_vid(target_vm, vcpu_regs.vcpu_id);
     set_vcpu_regs(vcpu, &(vcpu_regs.vcpu_regs));
     return 0;
@@ -437,13 +437,13 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
     dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
             vmid, vcpu_id);
-    vcpu = vcpu_from_vid(target_vm, vcpu_id);
-    if (vcpu == NULL) {
+    if (vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
         pr_err("%s, failed to get VCPU %d context from VM %d\n",
                 __func__, vcpu_id, target_vm->vm_id);
         return -EINVAL;
     }
+    vcpu = vcpu_from_vid(target_vm, vcpu_id);
     emulate_io_post(vcpu);
     return 0;

View File

@@ -651,13 +651,13 @@ static int shell_vcpu_dumpreg(int argc, char **argv)
         goto out;
     }
-    vcpu = vcpu_from_vid(vm, vcpu_id);
-    if (vcpu == NULL) {
+    if (vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
         shell_puts("No vcpu found in the input <vm_id, vcpu_id>\r\n");
         status = -EINVAL;
         goto out;
     }
+    vcpu = vcpu_from_vid(vm, vcpu_id);
     dump.vcpu = vcpu;
     dump.str = shell_log_buf;
     dump.str_max = CPU_PAGE_SIZE;

View File

@@ -199,6 +199,9 @@ static inline bool is_vm0(struct vm *vm)
     return (vm->vm_id) == 0U;
 }
 
+/*
+ * @pre vcpu_id < CONFIG_MAX_VCPUS_PER_VM
+ */
 static inline struct vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
 {
     uint16_t i;
@@ -206,12 +209,11 @@ static inline struct vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
     foreach_vcpu(i, vm, vcpu) {
         if (vcpu->vcpu_id == vcpu_id) {
-            return vcpu;
+            break;
         }
     }
-    return NULL;
+    return vcpu;
 }
 
 static inline struct vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id)
 {
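With the @pre contract on vcpu_from_vid() above, validating an untrusted
vcpu_id becomes the caller's job, which is what the hypercall and shell
hunks in this commit do. A minimal caller-side sketch (the exact error
handling is the caller's choice):

    if (vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
        return -EINVAL;                    /* reject before the lookup */
    }
    vcpu = vcpu_from_vid(vm, vcpu_id);     /* guaranteed non-NULL here */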