hv: clean up vmid-related code

Remove the vm_attr structure and use vm->vm_id directly.
Wrap VM ID allocation and release into two APIs: alloc_vm_id and free_vm_id.

Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author:    Mingqiang Chi
Date:      2018-08-03 15:43:25 +08:00
Committer: lijinxia
Parent:    2299926a88
Commit:    7345677bbb
17 changed files with 73 additions and 65 deletions
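
For orientation before the diff: the new alloc_vm_id() claims a VM ID by locating the first zero bit in a 64-bit bitmap and setting it with an atomic test-and-set, retrying if another CPU wins the race, while free_vm_id() simply clears the bit; exhaustion is reported as INVALID_VM_ID so create_vm() can fail with -ENODEV. The standalone sketch below illustrates the same pattern with C11 atomics and the GCC/Clang __builtin_ctzll(); the names (demo_alloc_id, demo_free_id, id_bitmap, DEMO_INVALID_ID) are illustrative assumptions, not the hypervisor's ffz64()/bitmap_test_and_set_lock()/bitmap_clear_lock() primitives.

/*
 * Sketch only: bitmap-based ID allocation, analogous in spirit to the
 * alloc_vm_id()/free_vm_id() helpers added by this commit.
 */
#include <stdint.h>
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_INVALID_ID	0xFFFFU

static _Atomic uint64_t id_bitmap;	/* bit n set => ID n is in use */

static uint16_t demo_alloc_id(void)
{
	uint64_t snapshot = atomic_load(&id_bitmap);

	/* Retry until a free bit is claimed or all 64 IDs are taken. */
	while (snapshot != UINT64_MAX) {
		uint16_t id = (uint16_t)__builtin_ctzll(~snapshot); /* first zero bit */

		/* fetch_or returns the old value: bit was clear => we own this ID. */
		if ((atomic_fetch_or(&id_bitmap, 1ULL << id) & (1ULL << id)) == 0ULL) {
			return id;
		}
		snapshot = atomic_load(&id_bitmap);	/* lost the race, retry */
	}
	return DEMO_INVALID_ID;
}

static void demo_free_id(uint16_t id)
{
	atomic_fetch_and(&id_bitmap, ~(1ULL << id));	/* clear the bit */
}

int main(void)
{
	uint16_t a = demo_alloc_id();	/* 0 */
	uint16_t b = demo_alloc_id();	/* 1 */

	demo_free_id(a);
	/* The freed bit is the lowest zero again, so it is handed out next. */
	printf("a=%u b=%u next=%u\n", (unsigned)a, (unsigned)b,
	       (unsigned)demo_alloc_id());
	return 0;
}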

View File

@@ -32,7 +32,7 @@ struct page_walk_info {
 inline bool
 is_vm0(struct vm *vm)
 {
-	return (vm->attr.boot_idx & 0x7FU) == 0U;
+	return (vm->vm_id) == 0U;
 }
 
 inline struct vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)

View File

@@ -210,7 +210,7 @@ register_gas_io_handler(struct vm *vm, struct acpi_generic_address *gas)
 			&pm1ab_io_read, &pm1ab_io_write);
 
 	pr_dbg("Enable PM1A trap for VM %d, port 0x%x, size %d\n",
-		vm->attr.id, gas_io.base, gas_io.len);
+		vm->vm_id, gas_io.base, gas_io.len);
 }
 
 void register_pm1ab_handler(struct vm *vm)

View File

@@ -75,7 +75,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	per_cpu(vcpu, pcpu_id) = vcpu;
 
 	pr_info("PCPU%d is working as VM%d VCPU%d, Role: %s",
-			vcpu->pcpu_id, vcpu->vm->attr.id, vcpu->vcpu_id,
+			vcpu->pcpu_id, vcpu->vm->vm_id, vcpu->vcpu_id,
 			is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY");
 
 #ifdef CONFIG_START_VM0_BSP_64BIT
@@ -84,7 +84,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 		/* Set up temporary guest page tables */
 		vm->arch_vm.guest_init_pml4 = create_guest_initial_paging(vm);
 		pr_info("VM %d VCPU %hu CR3: 0x%016llx ",
-			vm->attr.id, vcpu->vcpu_id,
+			vm->vm_id, vcpu->vcpu_id,
 			vm->arch_vm.guest_init_pml4);
 	}
 #endif
@@ -156,7 +156,7 @@ int start_vcpu(struct vcpu *vcpu)
 	/* If this VCPU is not already launched, launch it */
 	if (!vcpu->launched) {
 		pr_info("VM %d Starting VCPU %hu",
-				vcpu->vm->attr.id, vcpu->vcpu_id);
+				vcpu->vm->vm_id, vcpu->vcpu_id);
 
 		if (vcpu->arch_vcpu.vpid)
 			exec_vmwrite16(VMX_VPID, vcpu->arch_vcpu.vpid);
@@ -185,7 +185,7 @@ int start_vcpu(struct vcpu *vcpu)
 		if (status == 0) {
 			if (is_vcpu_bsp(vcpu)) {
 				pr_info("VM %d VCPU %hu successfully launched",
-					vcpu->vm->attr.id, vcpu->vcpu_id);
+					vcpu->vm->vm_id, vcpu->vcpu_id);
 			}
 		}
 	} else {

View File

@@ -105,7 +105,7 @@ vm_lapic_from_vcpu_id(struct vm *vm, uint16_t vcpu_id)
 	struct vcpu *vcpu;
 
 	vcpu = vcpu_from_vid(vm, vcpu_id);
-	ASSERT(vcpu != NULL, "vm%d, vcpu%hu", vm->attr.id, vcpu_id);
+	ASSERT(vcpu != NULL, "vm%d, vcpu%hu", vm->vm_id, vcpu_id);
 
 	return vcpu->arch_vcpu.vlapic;
 }
@@ -116,7 +116,7 @@ vm_lapic_from_pcpuid(struct vm *vm, uint16_t pcpu_id)
 	struct vcpu *vcpu;
 
 	vcpu = vcpu_from_pid(vm, pcpu_id);
-	ASSERT(vcpu != NULL, "vm%d, pcpu%hu", vm->attr.id, pcpu_id);
+	ASSERT(vcpu != NULL, "vm%d, pcpu%hu", vm->vm_id, pcpu_id);
 
 	return vcpu->arch_vcpu.vlapic;
 }
@@ -1173,7 +1173,7 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
 			target_vcpu->arch_vcpu.sipi_vector = vec;
 			pr_err("Start Secondary VCPU%hu for VM[%d]...",
 					target_vcpu->vcpu_id,
-					target_vcpu->vm->attr.id);
+					target_vcpu->vm->vm_id);
 			schedule_vcpu(target_vcpu);
 		} else {
 			pr_err("Unhandled icrlo write with mode %u\n", mode);

View File

@@ -25,6 +25,25 @@ spinlock_t vm_list_lock = {
 /* used for vmid allocation. And this means the max vm number is 64 */
 static uint64_t vmid_bitmap;
 
+static inline uint16_t alloc_vm_id(void)
+{
+	uint16_t id = ffz64(vmid_bitmap);
+
+	while (id < (size_t)(sizeof(vmid_bitmap) * 8U)) {
+		if (!bitmap_test_and_set_lock(id, &vmid_bitmap)) {
+			return id;
+		}
+		id = ffz64(vmid_bitmap);
+	}
+
+	return INVALID_VM_ID;
+}
+
+static inline void free_vm_id(struct vm *vm)
+{
+	bitmap_clear_lock(vm->vm_id, &vmid_bitmap);
+}
+
 static void init_vm(struct vm_description *vm_desc,
 		struct vm *vm_handle)
 {
@@ -54,7 +73,7 @@ struct vm *get_vm_from_vmid(uint16_t vm_id)
 	spinlock_obtain(&vm_list_lock);
 	list_for_each(pos, &vm_list) {
 		vm = list_entry(pos, struct vm, list);
-		if (vm->attr.id == vm_id) {
+		if (vm->vm_id == vm_id) {
 			spinlock_release(&vm_list_lock);
 			return vm;
 		}
@@ -66,7 +85,6 @@ struct vm *get_vm_from_vmid(uint16_t vm_id)
 int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
 {
-	uint16_t id;
 	struct vm *vm;
 	int status;
@@ -102,21 +120,13 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
 		goto err;
 	}
 
-	for (id = 0U; id < (size_t)(sizeof(vmid_bitmap) * 8U); id++) {
-		if (!bitmap_test_and_set_lock(id, &vmid_bitmap)) {
-			break;
-		}
-	}
-
-	if (id >= (size_t)(sizeof(vmid_bitmap) * 8U)) {
+	vm->vm_id = alloc_vm_id();
+	if (vm->vm_id == INVALID_VM_ID) {
 		pr_err("%s, no more VMs can be supported\n", __func__);
-		status = -EINVAL;
+		status = -ENODEV;
 		goto err;
 	}
 
-	vm->attr.id = id;
-	vm->attr.boot_idx = id;
-
 	atomic_store16(&vm->hw.created_vcpus, 0U);
 
 	/* gpa_lowtop are used for system start up */
@@ -273,7 +283,8 @@ int shutdown_vm(struct vm *vm)
 		destroy_iommu_domain(vm->iommu);
 	}
 
-	bitmap_clear_lock(vm->attr.id, &vmid_bitmap);
+	/* Free vm id */
+	free_vm_id(vm);
 
 	if (vm->vpic != NULL) {
 		vpic_cleanup(vm);
@@ -300,7 +311,7 @@ int start_vm(struct vm *vm)
 	/* Only start BSP (vid = 0) and let BSP start other APs */
 	vcpu = vcpu_from_vid(vm, 0U);
-	ASSERT(vcpu != NULL, "vm%d, vcpu0", vm->attr.id);
+	ASSERT(vcpu != NULL, "vm%d, vcpu0", vm->vm_id);
 	schedule_vcpu(vcpu);
 
 	return 0;

View File

@@ -181,7 +181,7 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
 out:
 	cur_context->guest_cpu_regs.regs.rax = (uint64_t)ret;
 
-	TRACE_2L(TRACE_VMEXIT_VMCALL, vm->attr.id, hypcall_id);
+	TRACE_2L(TRACE_VMEXIT_VMCALL, vm->vm_id, hypcall_id);
 
 	return 0;
 }

View File

@@ -234,7 +234,7 @@ static void vpic_notify_intr(struct acrn_vpic *vpic)
 	if (vpic->vm->wire_mode == VPIC_WIRE_INTR) {
 		struct vcpu *vcpu = vcpu_from_vid(vpic->vm, 0U);
 
-		ASSERT(vcpu != NULL, "vm%d, vcpu0", vpic->vm->attr.id);
+		ASSERT(vcpu != NULL, "vm%d, vcpu0", vpic->vm->vm_id);
 		vcpu_inject_extint(vcpu);
 	} else {
 		vlapic_set_local_intr(vpic->vm, BROADCAST_CPU_ID, APIC_LVT_LINT0);