hv: Replace dynamic memory with static for vcpu

-- Replace dynamic memory allocation with static memory
-- Remove NULL-pointer parameter checks for vcpu

Tracked-On: #861
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Reviewed-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Mingqiang Chi
2018-10-11 09:33:13 +08:00
committed by wenlingz
parent 7dd35cb72e
commit 1d725c89c0
17 changed files with 81 additions and 117 deletions

View File

@@ -34,6 +34,12 @@ config RELEASE
config MAX_VM_NUM
int "Maximum number of VM"
range 1 8
default 4
config MAX_VCPUS_PER_VM
int "Maximum number of VCPUS per VM"
range 1 8
default 4
config NR_IOAPICS

View File

@@ -42,7 +42,6 @@ uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
vcpu_id = ffs64(vdmask)) {
bitmap_clear_lock(vcpu_id, &vdmask);
vcpu = vcpu_from_vid(vm, vcpu_id);
ASSERT(vcpu != NULL, "vcpu_from_vid failed");
bitmap_set_lock(vcpu->pcpu_id, &dmask);
}
@@ -383,6 +382,9 @@ static inline int copy_gpa(struct vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
return 0;
}
/*
* @pre vcpu != NULL && err_code != NULL
*/
static inline int copy_gva(struct vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
uint32_t size_arg, uint32_t *err_code, uint64_t *fault_addr,
bool cp_from_vm)
@@ -394,15 +396,6 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
uint64_t gva = gva_arg;
uint32_t size = size_arg;
if (vcpu == NULL) {
pr_err("guest virt addr copy need vcpu param");
return -EINVAL;
}
if (err_code == NULL) {
pr_err("guest virt addr copy need err_code param");
return -EINVAL;
}
while (size > 0U) {
ret = gva2gpa(vcpu, gva, &gpa, err_code);
if (ret < 0) {

View File

@@ -311,15 +311,25 @@ void set_ap_entry(struct vcpu *vcpu, uint64_t entry)
int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
{
struct vcpu *vcpu;
uint16_t vcpu_id;
pr_info("Creating VCPU working on PCPU%hu", pcpu_id);
/*
* vcpu->vcpu_id = vm->hw.created_vcpus;
* vm->hw.created_vcpus++;
*/
vcpu_id = atomic_xadd16(&vm->hw.created_vcpus, 1U);
if (vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
pr_err("%s, vcpu id is invalid!\n", __func__);
return -EINVAL;
}
/* Allocate memory for VCPU */
vcpu = calloc(1U, sizeof(struct vcpu));
ASSERT(vcpu != NULL, "");
vcpu = &(vm->hw.vcpu_array[vcpu_id]);
(void)memset((void *)vcpu, 0U, sizeof(struct vcpu));
/* Initialize the physical CPU ID for this VCPU */
/* Initialize CPU ID for this VCPU */
vcpu->vcpu_id = vcpu_id;
vcpu->pcpu_id = pcpu_id;
per_cpu(ever_run_vcpu, pcpu_id) = vcpu;
@@ -334,19 +344,6 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
* needs revise.
*/
/*
* vcpu->vcpu_id = vm->hw.created_vcpus;
* vm->hw.created_vcpus++;
*/
vcpu->vcpu_id = atomic_xadd16(&vm->hw.created_vcpus, 1U);
/* vm->hw.vcpu_array[vcpu->vcpu_id] = vcpu; */
atomic_store64(
(uint64_t *)&vm->hw.vcpu_array[vcpu->vcpu_id],
(uint64_t)vcpu);
ASSERT(vcpu->vcpu_id < vm->hw.num_vcpus,
"Allocated vcpu_id is out of range!");
per_cpu(vcpu, pcpu_id) = vcpu;
pr_info("PCPU%d is working as VM%d VCPU%d, Role: %s",
@@ -384,6 +381,9 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
return 0;
}
/*
* @pre vcpu != NULL
*/
int run_vcpu(struct vcpu *vcpu)
{
uint32_t instlen, cs_attr;
@@ -392,8 +392,6 @@ int run_vcpu(struct vcpu *vcpu)
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
int64_t status = 0;
ASSERT(vcpu != NULL, "Incorrect arguments");
if (bitmap_test_and_clear_lock(CPU_REG_RIP, &vcpu->reg_updated))
exec_vmwrite(VMX_GUEST_RIP, ctx->rip);
if (bitmap_test_and_clear_lock(CPU_REG_RSP, &vcpu->reg_updated))
@@ -486,21 +484,15 @@ int shutdown_vcpu(__unused struct vcpu *vcpu)
return 0;
}
void destroy_vcpu(struct vcpu *vcpu)
/*
* @pre vcpu != NULL
*/
void offline_vcpu(struct vcpu *vcpu)
{
ASSERT(vcpu != NULL, "Incorrect arguments");
/* vcpu->vm->hw.vcpu_array[vcpu->vcpu_id] = NULL; */
atomic_store64(
(uint64_t *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
(uint64_t)NULL);
atomic_dec16(&vcpu->vm->hw.created_vcpus);
vlapic_free(vcpu);
per_cpu(ever_run_vcpu, vcpu->pcpu_id) = NULL;
free_pcpu(vcpu->pcpu_id);
free(vcpu);
vcpu->state = VCPU_OFFLINE;
}
/* NOTE:
@@ -602,7 +594,9 @@ int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
struct vcpu *vcpu = NULL;
ret = create_vcpu(pcpu_id, vm, &vcpu);
ASSERT(ret == 0, "vcpu create failed");
if (ret != 0) {
return ret;
}
if (!vm_sw_loader) {
vm_sw_loader = general_sw_loader;

View File

@@ -108,7 +108,6 @@ vm_lapic_from_vcpu_id(struct vm *vm, uint16_t vcpu_id)
struct vcpu *vcpu;
vcpu = vcpu_from_vid(vm, vcpu_id);
ASSERT(vcpu != NULL, "vm%d, vcpu%hu", vm->vm_id, vcpu_id);
return vcpu_vlapic(vcpu);
}
@@ -119,7 +118,6 @@ vm_lapic_from_pcpuid(struct vm *vm, uint16_t pcpu_id)
struct vcpu *vcpu;
vcpu = vcpu_from_pid(vm, pcpu_id);
ASSERT(vcpu != NULL, "vm%d, pcpu%hu", vm->vm_id, pcpu_id);
return vcpu_vlapic(vcpu);
}
@@ -1996,14 +1994,13 @@ int vlapic_create(struct vcpu *vcpu)
return 0;
}
/*
* @pre vcpu != NULL
*/
void vlapic_free(struct vcpu *vcpu)
{
struct acrn_vlapic *vlapic = NULL;
if (vcpu == NULL) {
return;
}
vlapic = vcpu_vlapic(vcpu);
del_timer(&vlapic->vtimer.timer);

View File

@@ -40,17 +40,6 @@ static inline bool is_vm_valid(uint16_t vm_id)
return bitmap_test(vm_id, &vmid_bitmap);
}
static void init_vm(struct vm_description *vm_desc,
struct vm *vm_handle)
{
/* Populate VM attributes from VM description */
vm_handle->hw.num_vcpus = vm_desc->vm_hw_num_cores;
#ifdef CONFIG_PARTITION_MODE
vm_handle->vm_desc = vm_desc;
#endif
}
/* return a pointer to the virtual machine structure associated with
* this VM ID
*/
@@ -87,25 +76,12 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
vm = &vm_array[vm_id];
(void)memset((void *)vm, 0U, sizeof(struct vm));
vm->vm_id = vm_id;
/*
* Map Virtual Machine to its VM Description
*/
init_vm(vm_desc, vm);
#ifdef CONFIG_PARTITION_MODE
/* Map Virtual Machine to its VM Description */
vm->vm_desc = vm_desc;
#endif
/* Init mmio list */
INIT_LIST_HEAD(&vm->mmio_list);
if (vm->hw.num_vcpus == 0U) {
vm->hw.num_vcpus = phys_cpu_num;
}
vm->hw.vcpu_array =
calloc(1U, sizeof(struct vcpu *) * vm->hw.num_vcpus);
if (vm->hw.vcpu_array == NULL) {
pr_err("%s, vcpu_array allocation failed\n", __func__);
status = -ENOMEM;
goto err;
}
atomic_store16(&vm->hw.created_vcpus, 0U);
/* gpa_lowtop are used for system start up */
@@ -211,9 +187,6 @@ err:
free(vm->arch_vm.nworld_eptp);
}
if (vm->hw.vcpu_array != NULL) {
free(vm->hw.vcpu_array);
}
return status;
}
@@ -235,7 +208,7 @@ int shutdown_vm(struct vm *vm)
foreach_vcpu(i, vm, vcpu) {
reset_vcpu(vcpu);
destroy_vcpu(vcpu);
offline_vcpu(vcpu);
}
ptdev_release_all_entries(vm);
@@ -267,7 +240,6 @@ int shutdown_vm(struct vm *vm)
#ifdef CONFIG_PARTITION_MODE
vpci_cleanup(vm);
#endif
free(vm->hw.vcpu_array);
/* Return status to caller */
return status;
@@ -284,7 +256,6 @@ int start_vm(struct vm *vm)
/* Only start BSP (vid = 0) and let BSP start other APs */
vcpu = vcpu_from_vid(vm, 0U);
ASSERT(vcpu != NULL, "vm%d, vcpu0", vm->vm_id);
schedule_vcpu(vcpu);
return 0;

View File

@@ -479,7 +479,7 @@ int register_mmio_emulation_handler(struct vm *vm,
int status = -EINVAL;
struct mem_io_node *mmio_node;
if ((vm->hw.created_vcpus > 0U) && vm->hw.vcpu_array[0]->launched) {
if ((vm->hw.created_vcpus > 0U) && vm->hw.vcpu_array[0].launched) {
ASSERT(false, "register mmio handler after vm launched");
return status;
}

View File

@@ -563,6 +563,9 @@ void cancel_event_injection(struct vcpu *vcpu)
}
}
/*
* @pre vcpu != NULL
*/
int exception_vmexit_handler(struct vcpu *vcpu)
{
uint32_t intinfo, int_err_code = 0U;
@@ -570,15 +573,6 @@ int exception_vmexit_handler(struct vcpu *vcpu)
uint32_t cpl;
int status = 0;
if (vcpu == NULL) {
TRACE_4I(TRACE_VMEXIT_EXCEPTION_OR_NMI, 0U, 0U, 0U, 0U);
status = -EINVAL;
}
if (status != 0) {
return status;
}
pr_dbg(" Handling guest exception");
/* Obtain VM-Exit information field pg 2912 */

View File

@@ -99,10 +99,8 @@ void exec_vmxon_instr(uint16_t pcpu_id)
vmxon_region_pa = hva2hpa(vmxon_region_va);
exec_vmxon(&vmxon_region_pa);
if (vcpu != NULL) {
vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
exec_vmptrld(&vmcs_pa);
}
vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
exec_vmptrld(&vmcs_pa);
}
void vmx_off(uint16_t pcpu_id)
@@ -111,10 +109,8 @@ void vmx_off(uint16_t pcpu_id)
struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
uint64_t vmcs_pa;
if (vcpu != NULL) {
vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
exec_vmclear((void *)&vmcs_pa);
}
vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
exec_vmclear((void *)&vmcs_pa);
asm volatile ("vmxoff" : : : "memory");
}