HV: common: cleanup of remaining integral-type issues

This is the final cleanup of the integral-type issues reported by the static
checker under common/, mainly:

    * make the narrowing of vm_ids passed by register explicit.
    * work around the static checker's confusion by abstracting
      sub-expressions into local variables (both sketched below).

The remaining reports that are not trivial to suppress will be covered by a
separate document.
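
For illustration, a minimal sketch of the two patterns (the helper names and
the toy per_cpu() macro below are hypothetical stand-ins, not code from this
commit):

    #include <stdint.h>

    /* Toy stand-in for the hypervisor's per-CPU scheduler context. */
    struct sched_context {
            uint64_t flags;
    };
    static struct sched_context sched_ctx[8];
    #define per_cpu(name, pcpu_id) ((name)[(pcpu_id)])

    /* Pattern 1: vm_ids travel in a 64-bit register but are 16-bit
     * values, so the narrowing is spelled out instead of left implicit. */
    static inline uint16_t vmid_from_reg(uint64_t param1)
    {
            return (uint16_t)param1;
    }

    /* Pattern 2: hoist the repeated macro sub-expression into a local so
     * the checker sees one well-typed lvalue instead of a compound
     * expression in every statement. */
    static void reset_sched_flags(uint16_t pcpu_id)
    {
            struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);

            ctx->flags = 0UL;
    }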

v1 -> v2:

    * Instead of converting vm_ids inside hcall_xxx, update the prototypes of
      these functions and do the conversion in vmcall_vmexit_handler.
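
A hedged sketch of the v2 shape (the HC_PAUSE_VM value and the stub body are
illustrative only; the prototypes match the ones changed below):

    #include <stdint.h>

    #define HC_PAUSE_VM 0x12UL      /* illustrative value, not the real ID */

    /* v2: the prototype already takes the narrowed vmid ... */
    int32_t hcall_pause_vm(uint16_t vmid)
    {
            (void)vmid;
            return 0;               /* stub standing in for the real handler */
    }

    /* ... and the single explicit narrowing happens at the dispatch site
     * in vmcall_vmexit_handler, rather than inside each hcall_xxx (v1). */
    static int32_t dispatch_hypercall(uint64_t hypcall_id, uint64_t param1)
    {
            int32_t ret = -1;

            switch (hypcall_id) {
            case HC_PAUSE_VM:
                    /* param1: vmid */
                    ret = hcall_pause_vm((uint16_t)param1);
                    break;
            default:
                    break;
            }

            return ret;
    }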

Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit f0a3585ebf (parent 112b5b820c)
Author: Junjie Mao <junjie.mao@intel.com>  2018-07-19 23:35:34 +08:00
Committed by: lijinxia
7 changed files with 154 additions and 124 deletions

---

@@ -55,47 +55,60 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
break;
case HC_DESTROY_VM:
ret = hcall_destroy_vm(param1);
/* param1: vmid */
ret = hcall_destroy_vm((uint16_t)param1);
break;
case HC_START_VM:
ret = hcall_resume_vm(param1);
/* param1: vmid */
ret = hcall_resume_vm((uint16_t)param1);
break;
case HC_PAUSE_VM:
ret = hcall_pause_vm(param1);
/* param1: vmid */
ret = hcall_pause_vm((uint16_t)param1);
break;
case HC_CREATE_VCPU:
ret = hcall_create_vcpu(vm, param1, param2);
/* param1: vmid */
ret = hcall_create_vcpu(vm, (uint16_t)param1, param2);
break;
case HC_ASSERT_IRQLINE:
ret = hcall_assert_irqline(vm, param1, param2);
/* param1: vmid */
ret = hcall_assert_irqline(vm, (uint16_t)param1, param2);
break;
case HC_DEASSERT_IRQLINE:
ret = hcall_deassert_irqline(vm, param1, param2);
/* param1: vmid */
ret = hcall_deassert_irqline(vm, (uint16_t)param1, param2);
break;
case HC_PULSE_IRQLINE:
ret = hcall_pulse_irqline(vm, param1, param2);
/* param1: vmid */
ret = hcall_pulse_irqline(vm, (uint16_t)param1, param2);
break;
case HC_INJECT_MSI:
ret = hcall_inject_msi(vm, param1, param2);
/* param1: vmid */
ret = hcall_inject_msi(vm, (uint16_t)param1, param2);
break;
case HC_SET_IOREQ_BUFFER:
ret = hcall_set_ioreq_buffer(vm, param1, param2);
/* param1: vmid */
ret = hcall_set_ioreq_buffer(vm, (uint16_t)param1, param2);
break;
case HC_NOTIFY_REQUEST_FINISH:
ret = hcall_notify_req_finish(param1, param2);
/* param1: vmid
* param2: vcpu_id */
ret = hcall_notify_req_finish((uint16_t)param1,
(uint16_t)param2);
break;
case HC_VM_SET_MEMMAP:
ret = hcall_set_vm_memmap(vm, param1, param2);
/* param1: vmid */
ret = hcall_set_vm_memmap(vm, (uint16_t)param1, param2);
break;
case HC_VM_SET_MEMMAPS:
@@ -103,27 +116,33 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
break;
case HC_VM_PCI_MSIX_REMAP:
ret = hcall_remap_pci_msix(vm, param1, param2);
/* param1: vmid */
ret = hcall_remap_pci_msix(vm, (uint16_t)param1, param2);
break;
case HC_VM_GPA2HPA:
ret = hcall_gpa_to_hpa(vm, param1, param2);
/* param1: vmid */
ret = hcall_gpa_to_hpa(vm, (uint16_t)param1, param2);
break;
case HC_ASSIGN_PTDEV:
ret = hcall_assign_ptdev(vm, param1, param2);
/* param1: vmid */
ret = hcall_assign_ptdev(vm, (uint16_t)param1, param2);
break;
case HC_DEASSIGN_PTDEV:
ret = hcall_deassign_ptdev(vm, param1, param2);
/* param1: vmid */
ret = hcall_deassign_ptdev(vm, (uint16_t)param1, param2);
break;
case HC_SET_PTDEV_INTR_INFO:
ret = hcall_set_ptdev_intr_info(vm, param1, param2);
/* param1: vmid */
ret = hcall_set_ptdev_intr_info(vm, (uint16_t)param1, param2);
break;
case HC_RESET_PTDEV_INTR_INFO:
ret = hcall_reset_ptdev_intr_info(vm, param1, param2);
/* param1: vmid */
ret = hcall_reset_ptdev_intr_info(vm, (uint16_t)param1, param2);
break;
case HC_SETUP_SBUF:

---

@@ -113,8 +113,8 @@ handle_virt_irqline(struct vm *vm, uint16_t target_vmid,
/* Call vpic for pic injection */
ret = handle_vpic_irqline(target_vm, param->pic_irq, mode);
/* call vioapic for ioapic injection if ioapic_irq != ~0UL*/
if (param->ioapic_irq != (~0UL)) {
/* call vioapic for ioapic injection if ioapic_irq != ~0U*/
if (param->ioapic_irq != (~0U)) {
/* handle IOAPIC irqline */
ret = handle_vioapic_irqline(target_vm,
param->ioapic_irq, mode);
@@ -172,7 +172,7 @@ int32_t hcall_create_vm(struct vm *vm, uint64_t param)
return ret;
}
int32_t hcall_destroy_vm(uint64_t vmid)
int32_t hcall_destroy_vm(uint16_t vmid)
{
int32_t ret = 0;
struct vm *target_vm = get_vm_from_vmid(vmid);
@@ -185,7 +185,7 @@ int32_t hcall_destroy_vm(uint64_t vmid)
return ret;
}
int32_t hcall_resume_vm(uint64_t vmid)
int32_t hcall_resume_vm(uint16_t vmid)
{
int32_t ret = 0;
struct vm *target_vm = get_vm_from_vmid(vmid);
@@ -202,7 +202,7 @@ int32_t hcall_resume_vm(uint64_t vmid)
return ret;
}
int32_t hcall_pause_vm(uint64_t vmid)
int32_t hcall_pause_vm(uint16_t vmid)
{
struct vm *target_vm = get_vm_from_vmid(vmid);
@@ -215,7 +215,7 @@ int32_t hcall_pause_vm(uint64_t vmid)
return 0;
}
int32_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_create_vcpu(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret;
uint16_t pcpu_id;
@@ -242,7 +242,7 @@ int32_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
return ret;
}
int32_t hcall_assert_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_assert_irqline(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct acrn_irqline irqline;
@@ -251,12 +251,12 @@ int32_t hcall_assert_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
pr_err("%s: Unable copy param to vm\n", __func__);
return -1;
}
ret = handle_virt_irqline(vm, (uint16_t)vmid, &irqline, IRQ_ASSERT);
ret = handle_virt_irqline(vm, vmid, &irqline, IRQ_ASSERT);
return ret;
}
int32_t hcall_deassert_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_deassert_irqline(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct acrn_irqline irqline;
@@ -265,12 +265,12 @@ int32_t hcall_deassert_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
pr_err("%s: Unable copy param to vm\n", __func__);
return -1;
}
ret = handle_virt_irqline(vm, (uint16_t)vmid, &irqline, IRQ_DEASSERT);
ret = handle_virt_irqline(vm, vmid, &irqline, IRQ_DEASSERT);
return ret;
}
int32_t hcall_pulse_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_pulse_irqline(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct acrn_irqline irqline;
@@ -279,12 +279,12 @@ int32_t hcall_pulse_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
pr_err("%s: Unable copy param to vm\n", __func__);
return -1;
}
ret = handle_virt_irqline(vm, (uint16_t)vmid, &irqline, IRQ_PULSE);
ret = handle_virt_irqline(vm, vmid, &irqline, IRQ_PULSE);
return ret;
}
int32_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_inject_msi(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct acrn_msi_entry msi;
@@ -304,7 +304,7 @@ int32_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param)
return ret;
}
int32_t hcall_set_ioreq_buffer(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
uint64_t hpa = 0UL;
@@ -370,7 +370,7 @@ static void complete_request(struct vcpu *vcpu)
resume_vcpu(vcpu);
}
int32_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)
int32_t hcall_notify_req_finish(uint16_t vmid, uint16_t vcpu_id)
{
union vhm_request_buffer *req_buf;
struct vhm_request *req;
@@ -386,7 +386,7 @@ int32_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)
dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
vmid, vcpu_id);
vcpu = vcpu_from_vid(target_vm, (uint16_t)vcpu_id);
vcpu = vcpu_from_vid(target_vm, vcpu_id);
if (vcpu == NULL) {
pr_err("%s, failed to get VCPU %d context from VM %d\n",
__func__, vcpu_id, target_vm->attr.id);
@@ -410,7 +410,7 @@ _set_vm_memmap(struct vm *vm, struct vm *target_vm,
struct vm_set_memmap *memmap)
{
uint64_t hpa, base_paddr;
uint32_t attr, prot;
uint64_t attr, prot;
if ((memmap->length & 0xFFFUL) != 0UL) {
pr_err("%s: ERROR! [vm%d] map size 0x%x is not page aligned",
@@ -462,7 +462,7 @@ _set_vm_memmap(struct vm *vm, struct vm *target_vm,
memmap->remote_gpa, memmap->length, memmap->type, attr);
}
int32_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_set_vm_memmap(struct vm *vm, uint16_t vmid, uint64_t param)
{
struct vm_set_memmap memmap;
struct vm *target_vm = get_vm_from_vmid(vmid);
@@ -534,7 +534,7 @@ int32_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param)
return 0;
}
int32_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_remap_pci_msix(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct acrn_vm_pci_msix_remap remap;
@@ -575,7 +575,7 @@ int32_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
return ret;
}
int32_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_gpa_to_hpa(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct vm_gpa2hpa v_gpa2hpa;
@@ -600,7 +600,7 @@ int32_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param)
return ret;
}
int32_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret;
uint16_t bdf;
@@ -638,7 +638,7 @@ int32_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
return ret;
}
int32_t hcall_deassign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_deassign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
uint16_t bdf;
@@ -658,7 +658,7 @@ int32_t hcall_deassign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
return ret;
}
int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct hc_ptdev_irq irq;
@@ -693,7 +693,7 @@ int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
}
int32_t
hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct hc_ptdev_irq irq;
@@ -752,7 +752,7 @@ int32_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
uint16_t target_vm_id;
struct vm *target_vm;
target_vm_id = (cmd & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT;
target_vm_id = (uint16_t)((cmd & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT);
target_vm = get_vm_from_vmid(target_vm_id);
if (target_vm == NULL) {
@@ -820,7 +820,8 @@ int32_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
return -1;
}
cx_idx = (cmd & PMCMD_STATE_NUM_MASK) >> PMCMD_STATE_NUM_SHIFT;
cx_idx = (uint8_t)
((cmd & PMCMD_STATE_NUM_MASK) >> PMCMD_STATE_NUM_SHIFT);
if ((cx_idx == 0U) || (cx_idx > target_vm->pm.cx_cnt)) {
return -1;
}

---

@@ -87,9 +87,9 @@ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
* before we perform upcall.
* because VHM can work in pulling mode without wait for upcall
*/
req_buf->req_queue[cur].valid = true;
req_buf->req_queue[cur].valid = 1;
acrn_print_request(vcpu->vcpu_id, req_buf->req_queue + cur);
acrn_print_request(vcpu->vcpu_id, &req_buf->req_queue[cur]);
/* signal VHM */
fire_vhm_interrupt();
@@ -99,11 +99,11 @@ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
#ifdef HV_DEBUG
static void _get_req_info_(struct vhm_request *req, int *id, char *type,
char *state, char *dir, int64_t *addr, long *val)
char *state, char *dir, uint64_t *addr, uint64_t *val)
{
(void)strcpy_s(dir, 16U, "NONE");
*addr = 0;
*val = 0;
*addr = 0UL;
*val = 0UL;
*id = req->client;
switch (req->type) {
@@ -158,7 +158,7 @@ void get_req_info(char *str, int str_max)
union vhm_request_buffer *req_buf;
struct vhm_request *req;
char type[16], state[16], dir[16];
int64_t addr, val;
uint64_t addr, val;
struct list_head *pos;
struct vm *vm;

---

@@ -177,5 +177,3 @@ void ptdev_release_all_entries(struct vm *vm)
release_all_entries(vm);
spinlock_release(&ptdev_lock);
}

---

@@ -11,25 +11,30 @@ static unsigned long pcpu_used_bitmap;
void init_scheduler(void)
{
struct sched_context *ctx;
uint32_t i;
for (i = 0U; i < phys_cpu_num; i++) {
spinlock_init(&per_cpu(sched_ctx, i).runqueue_lock);
spinlock_init(&per_cpu(sched_ctx, i).scheduler_lock);
INIT_LIST_HEAD(&per_cpu(sched_ctx, i).runqueue);
per_cpu(sched_ctx, i).flags = 0UL;
per_cpu(sched_ctx, i).curr_vcpu = NULL;
ctx = &per_cpu(sched_ctx, i);
spinlock_init(&ctx->runqueue_lock);
spinlock_init(&ctx->scheduler_lock);
INIT_LIST_HEAD(&ctx->runqueue);
ctx->flags = 0UL;
ctx->curr_vcpu = NULL;
}
}
void get_schedule_lock(uint16_t pcpu_id)
{
spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).scheduler_lock);
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
spinlock_obtain(&ctx->scheduler_lock);
}
void release_schedule_lock(uint16_t pcpu_id)
{
spinlock_release(&per_cpu(sched_ctx, pcpu_id).scheduler_lock);
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
spinlock_release(&ctx->scheduler_lock);
}
uint16_t allocate_pcpu(void)
@@ -57,50 +62,53 @@ void free_pcpu(uint16_t pcpu_id)
void add_vcpu_to_runqueue(struct vcpu *vcpu)
{
int pcpu_id = vcpu->pcpu_id;
uint16_t pcpu_id = vcpu->pcpu_id;
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
spinlock_obtain(&ctx->runqueue_lock);
if (list_empty(&vcpu->run_list)) {
list_add_tail(&vcpu->run_list,
&per_cpu(sched_ctx, pcpu_id).runqueue);
list_add_tail(&vcpu->run_list, &ctx->runqueue);
}
spinlock_release(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
spinlock_release(&ctx->runqueue_lock);
}
void remove_vcpu_from_runqueue(struct vcpu *vcpu)
{
int pcpu_id = vcpu->pcpu_id;
uint16_t pcpu_id = vcpu->pcpu_id;
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
spinlock_obtain(&ctx->runqueue_lock);
list_del_init(&vcpu->run_list);
spinlock_release(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
spinlock_release(&ctx->runqueue_lock);
}
static struct vcpu *select_next_vcpu(uint16_t pcpu_id)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
struct vcpu *vcpu = NULL;
spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
if (!list_empty(&per_cpu(sched_ctx, pcpu_id).runqueue)) {
vcpu = get_first_item(&per_cpu(sched_ctx, pcpu_id).runqueue,
struct vcpu, run_list);
spinlock_obtain(&ctx->runqueue_lock);
if (!list_empty(&ctx->runqueue)) {
vcpu = get_first_item(&ctx->runqueue, struct vcpu, run_list);
}
spinlock_release(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
spinlock_release(&ctx->runqueue_lock);
return vcpu;
}
void make_reschedule_request(struct vcpu *vcpu)
{
bitmap_set(NEED_RESCHEDULE,
&per_cpu(sched_ctx, vcpu->pcpu_id).flags);
struct sched_context *ctx = &per_cpu(sched_ctx, vcpu->pcpu_id);
bitmap_set(NEED_RESCHEDULE, &ctx->flags);
send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
}
int need_reschedule(uint16_t pcpu_id)
{
return bitmap_test_and_clear(NEED_RESCHEDULE,
&per_cpu(sched_ctx, pcpu_id).flags);
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
return bitmap_test_and_clear(NEED_RESCHEDULE, &ctx->flags);
}
static void context_switch_out(struct vcpu *vcpu)
@@ -142,15 +150,17 @@ static void context_switch_in(struct vcpu *vcpu)
void make_pcpu_offline(uint16_t pcpu_id)
{
bitmap_set(NEED_OFFLINE,
&per_cpu(sched_ctx, pcpu_id).flags);
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
bitmap_set(NEED_OFFLINE, &ctx->flags);
send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
}
int need_offline(uint16_t pcpu_id)
{
return bitmap_test_and_clear(NEED_OFFLINE,
&per_cpu(sched_ctx, pcpu_id).flags);
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
return bitmap_test_and_clear(NEED_OFFLINE, &ctx->flags);
}
void default_idle(void)

---

@@ -27,8 +27,9 @@ static uint64_t create_zero_page(struct vm *vm)
{
struct zero_page *zeropage;
struct sw_linux *sw_linux = &(vm->sw.linux_info);
struct sw_kernel_info *sw_kernel = &(vm->sw.kernel_info);
struct zero_page *hva;
uint64_t gpa;
uint64_t gpa, addr;
/* Set zeropage in Linux Guest RAM region just past boot args */
hva = GPA2HVA(vm, (uint64_t)sw_linux->bootargs_load_addr);
@@ -38,7 +39,7 @@ static uint64_t create_zero_page(struct vm *vm)
(void)memset(zeropage, 0U, MEM_2K);
/* copy part of the header into the zero page */
hva = GPA2HVA(vm, (uint64_t)vm->sw.kernel_info.kernel_load_addr);
hva = GPA2HVA(vm, (uint64_t)sw_kernel->kernel_load_addr);
(void)memcpy_s(&(zeropage->hdr), sizeof(zeropage->hdr),
&(hva->hdr), sizeof(hva->hdr));
@@ -46,21 +47,21 @@ static uint64_t create_zero_page(struct vm *vm)
if (sw_linux->ramdisk_src_addr != NULL) {
/* Copy ramdisk load_addr and size in zeropage header structure
*/
zeropage->hdr.ramdisk_addr =
(uint32_t)(uint64_t)sw_linux->ramdisk_load_addr;
addr = (uint64_t)sw_linux->ramdisk_load_addr;
zeropage->hdr.ramdisk_addr = (uint32_t)addr;
zeropage->hdr.ramdisk_size = (uint32_t)sw_linux->ramdisk_size;
}
/* Copy bootargs load_addr in zeropage header structure */
zeropage->hdr.bootargs_addr =
(uint32_t)(uint64_t)sw_linux->bootargs_load_addr;
addr = (uint64_t)sw_linux->bootargs_load_addr;
zeropage->hdr.bootargs_addr = (uint32_t)addr;
/* set constant arguments in zero page */
zeropage->hdr.loader_type = 0xffU;
zeropage->hdr.load_flags |= (1U << 5U); /* quiet */
/* Create/add e820 table entries in zeropage */
zeropage->e820_nentries = create_e820_table(zeropage->e820);
zeropage->e820_nentries = (uint8_t)create_e820_table(zeropage->e820);
/* Get the host physical address of the zeropage */
gpa = hpa2gpa(vm, HVA2HPA((uint64_t)zeropage));
@@ -81,7 +82,7 @@ int load_guest(struct vm *vm, struct vcpu *vcpu)
lowmem_gpa_top = *(uint64_t *)hva;
/* hardcode vcpu entry addr(kernel entry) & rsi (zeropage)*/
(void)memset(cur_context->guest_cpu_regs.longs,
(void)memset((void*)cur_context->guest_cpu_regs.longs,
0U, sizeof(uint64_t)*NUM_GPRS);
hva = GPA2HVA(vm, lowmem_gpa_top -
@@ -109,6 +110,8 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
char dyn_bootargs[100] = {0};
uint32_t kernel_entry_offset;
struct zero_page *zeropage;
struct sw_linux *sw_linux = &(vm->sw.linux_info);
struct sw_kernel_info *sw_kernel = &(vm->sw.kernel_info);
ASSERT(vm != NULL, "Incorrect argument");
@@ -120,31 +123,30 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
}
/* calculate the kernel entry point */
zeropage = (struct zero_page *)
vm->sw.kernel_info.kernel_src_addr;
zeropage = (struct zero_page *)sw_kernel->kernel_src_addr;
kernel_entry_offset = (uint32_t)(zeropage->hdr.setup_sects + 1U) * 512U;
if (vcpu->arch_vcpu.cpu_mode == CPU_MODE_64BIT) {
/* 64bit entry is the 512bytes after the start */
kernel_entry_offset += 512U;
}
vm->sw.kernel_info.kernel_entry_addr =
(void *)((uint64_t)vm->sw.kernel_info.kernel_load_addr
sw_kernel->kernel_entry_addr =
(void *)((uint64_t)sw_kernel->kernel_load_addr
+ kernel_entry_offset);
if (is_vcpu_bsp(vcpu)) {
/* Set VCPU entry point to kernel entry */
vcpu->entry_addr = vm->sw.kernel_info.kernel_entry_addr;
vcpu->entry_addr = sw_kernel->kernel_entry_addr;
pr_info("%s, VM *d VCPU %hu Entry: 0x%016llx ",
__func__, vm->attr.id, vcpu->vcpu_id, vcpu->entry_addr);
}
/* Calculate the host-physical address where the guest will be loaded */
hva = GPA2HVA(vm, (uint64_t)vm->sw.kernel_info.kernel_load_addr);
hva = GPA2HVA(vm, (uint64_t)sw_kernel->kernel_load_addr);
/* Copy the guest kernel image to its run-time location */
(void)memcpy_s((void *)hva, vm->sw.kernel_info.kernel_size,
vm->sw.kernel_info.kernel_src_addr,
vm->sw.kernel_info.kernel_size);
(void)memcpy_s((void *)hva, sw_kernel->kernel_size,
sw_kernel->kernel_src_addr,
sw_kernel->kernel_size);
/* See if guest is a Linux guest */
if (vm->sw.kernel_type == VM_LINUX_GUEST) {
@@ -156,11 +158,11 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
/* Get host-physical address for guest bootargs */
hva = GPA2HVA(vm,
(uint64_t)vm->sw.linux_info.bootargs_load_addr);
(uint64_t)sw_linux->bootargs_load_addr);
/* Copy Guest OS bootargs to its load location */
(void)strcpy_s((char *)hva, MEM_2K,
vm->sw.linux_info.bootargs_src_addr);
sw_linux->bootargs_src_addr);
#ifdef CONFIG_CMA
/* add "cma=XXXXM@0xXXXXXXXX" to cmdline*/
@@ -169,8 +171,8 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
(e820_mem.max_ram_blk_size >> 20),
e820_mem.max_ram_blk_base);
(void)strcpy_s((char *)hva
+vm->sw.linux_info.bootargs_size,
100, dyn_bootargs);
+ sw_linux->bootargs_size,
100U, dyn_bootargs);
}
#else
/* add "hugepagesz=1G hugepages=x" to cmdline for 1G hugepage
@@ -192,23 +194,23 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
" hugepagesz=1G hugepages=%d",
reserving_1g_pages);
(void)strcpy_s((char *)hva
+vm->sw.linux_info.bootargs_size,
+ sw_linux->bootargs_size,
100U, dyn_bootargs);
}
}
#endif
/* Check if a RAM disk is present with Linux guest */
if (vm->sw.linux_info.ramdisk_src_addr != NULL) {
if (sw_linux->ramdisk_src_addr != NULL) {
/* Get host-physical address for guest RAM disk */
hva = GPA2HVA(vm,
(uint64_t)vm->sw.linux_info.ramdisk_load_addr);
(uint64_t)sw_linux->ramdisk_load_addr);
/* Copy RAM disk to its load location */
(void)memcpy_s((void *)hva,
vm->sw.linux_info.ramdisk_size,
vm->sw.linux_info.ramdisk_src_addr,
vm->sw.linux_info.ramdisk_size);
sw_linux->ramdisk_size,
sw_linux->ramdisk_src_addr,
sw_linux->ramdisk_size);
}

---

@@ -62,7 +62,7 @@ int32_t hcall_create_vm(struct vm *vm, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_destroy_vm(uint64_t vmid);
int32_t hcall_destroy_vm(uint16_t vmid);
/**
* @brief resume virtual machine
@@ -75,7 +75,7 @@ int32_t hcall_destroy_vm(uint64_t vmid);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_resume_vm(uint64_t vmid);
int32_t hcall_resume_vm(uint16_t vmid);
/**
* @brief pause virtual machine
@@ -88,7 +88,7 @@ int32_t hcall_resume_vm(uint64_t vmid);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_pause_vm(uint64_t vmid);
int32_t hcall_pause_vm(uint16_t vmid);
/**
* @brief create vcpu
@@ -104,7 +104,7 @@ int32_t hcall_pause_vm(uint64_t vmid);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_create_vcpu(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief assert IRQ line
@@ -119,7 +119,7 @@ int32_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assert_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_assert_irqline(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief deassert IRQ line
@@ -134,7 +134,7 @@ int32_t hcall_assert_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassert_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_deassert_irqline(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief trigger a pulse on IRQ line
@@ -149,7 +149,7 @@ int32_t hcall_deassert_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_pulse_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_pulse_irqline(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief inject MSI interrupt
@@ -163,7 +163,7 @@ int32_t hcall_pulse_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_inject_msi(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief set ioreq shared buffer
@@ -178,7 +178,7 @@ int32_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_ioreq_buffer(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief notify request done
@@ -187,11 +187,11 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint64_t vmid, uint64_t param);
* The function will return -1 if the target VM does not exist.
*
* @param vmid ID of the VM
* @param param vcpu ID of the requestor
* @param vcpu_id vcpu ID of the requestor
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_notify_req_finish(uint64_t vmid, uint64_t param);
int32_t hcall_notify_req_finish(uint16_t vmid, uint16_t vcpu_id);
/**
* @brief setup ept memory mapping
@@ -206,7 +206,7 @@ int32_t hcall_notify_req_finish(uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_set_vm_memmap(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief setup ept memory mapping for multi regions
@@ -232,7 +232,7 @@ int32_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_remap_pci_msix(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief translate guest physical address to host physical address
@@ -246,7 +246,7 @@ int32_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_gpa_to_hpa(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Assign one passthrough dev to VM.
@@ -258,7 +258,7 @@ int32_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Deassign one passthrough dev from VM.
@@ -270,7 +270,7 @@ int32_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_deassign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Set interrupt mapping info of ptdev.
@@ -282,7 +282,7 @@ int32_t hcall_deassign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param);
int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Clear interrupt mapping info of ptdev.
@@ -294,7 +294,7 @@ int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid,
int32_t hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid,
uint64_t param);
/**