acrn-hypervisor/hypervisor/arch/x86/guest/vmcall.c
Jason Chen CJ 3d5d6c96ec vcpu: add get/set register APIs
there are 3 types of vcpu runtime contexts:
- runtime contexts always saved/restored during VM exit/entry, which
  include general registers rax/rcx/rdx/rbx/rbp/rsi/rdi/r8~r15, cr2 and
  msr for spectre control (ia32_spec_ctrl)
- runtime contexts on-demand cached/updated during VM exit/entry, which
  include frequently used registers rsp, rip, efer, rflags, cr0 and cr4
- runtime contexts always read from / written to the VMCS, which include the
  remaining registers not covered above

this patch adds get/set register APIs for vcpu runtime contexts, and unifies
the save/restore method for them according to the above description.

v3:
- update vcpu_get/set_cr0/4 as the unified interface to get/set guest cr0/cr4:
  reads use the on-demand cache, while writes always go to the VMCS (see the
  sketch below the commit message).

v2:
- use reg_cached/reg_updated for on-demand runtime contexts
- always read/write cr3 from/to VMCS

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
2018-08-07 09:55:13 +08:00
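
To make the v3 note above concrete, the following is a minimal, self-contained sketch of the read-through-cache / write-through-VMCS pattern it describes. All names here (demo_vcpu, vmcs_read_cr0, vmcs_write_cr0, demo_vcpu_get/set_cr0) are illustrative stand-ins, not the actual ACRN implementation:

#include <stdint.h>
#include <stdbool.h>

struct demo_vcpu {
	uint64_t cr0_cache;	/* last value read from the VMCS */
	bool cr0_cached;	/* cleared on every VM exit */
};

/* stand-ins for the real VMCS accessors (e.g. exec_vmread/exec_vmwrite) */
static uint64_t vmcs_read_cr0(void) { return 0x80000031UL; }
static void vmcs_write_cr0(uint64_t val) { (void)val; }

/* read path: fetch from the VMCS only on first access after a VM exit */
static uint64_t demo_vcpu_get_cr0(struct demo_vcpu *vcpu)
{
	if (!vcpu->cr0_cached) {
		vcpu->cr0_cache = vmcs_read_cr0();
		vcpu->cr0_cached = true;
	}
	return vcpu->cr0_cache;
}

/* write path: always go straight to the VMCS, keeping the cache coherent */
static void demo_vcpu_set_cr0(struct demo_vcpu *vcpu, uint64_t val)
{
	vmcs_write_cr0(val);
	vcpu->cr0_cache = val;
	vcpu->cr0_cached = true;
}

On each VM exit the cached flag would be cleared, so the first read after the exit refreshes the value from the VMCS; the actual patch tracks this per register with the reg_cached/reg_updated bitmaps mentioned in the v2 notes.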


/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <hypervisor.h>
#include <hypercall.h>

/*
 * Pass return value to SOS by register rax.
 * This function should always return 0 since we shouldn't
 * deal with hypercall error in hypervisor.
 */
int vmcall_vmexit_handler(struct vcpu *vcpu)
{
	int32_t ret = -EACCES;
	struct vm *vm = vcpu->vm;
	/* hypercall ID from guest*/
	uint64_t hypcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8);
	/* hypercall param1 from guest*/
	uint64_t param1 = vcpu_get_gpreg(vcpu, CPU_REG_RDI);
	/* hypercall param2 from guest*/
	uint64_t param2 = vcpu_get_gpreg(vcpu, CPU_REG_RSI);

	if (!is_hypercall_from_ring0()) {
		pr_err("hypercall is only allowed from RING-0!\n");
		goto out;
	}

	if (!is_vm0(vm) && hypcall_id != HC_WORLD_SWITCH &&
		hypcall_id != HC_INITIALIZE_TRUSTY) {
pr_err("hypercall %d is only allowed from VM0!\n", hypcall_id);
		goto out;
	}

	/* Dispatch the hypercall handler */
	switch (hypcall_id) {
	case HC_SOS_OFFLINE_CPU:
		ret = hcall_sos_offline_cpu(vm, param1);
		break;

	case HC_GET_API_VERSION:
#ifdef CONFIG_VM0_DESC
		/* vm0 will call HC_GET_API_VERSION as first hypercall, fixup
		 * vm0 vcpu here.
		 */
		vm_fixup(vm);
#endif
		ret = hcall_get_api_version(vm, param1);
		break;

	case HC_CREATE_VM:
		ret = hcall_create_vm(vm, param1);
		break;

	case HC_DESTROY_VM:
		/* param1: vmid */
		ret = hcall_destroy_vm((uint16_t)param1);
		break;

	case HC_START_VM:
		/* param1: vmid */
		ret = hcall_start_vm((uint16_t)param1);
		break;

	case HC_RESET_VM:
		/* param1: vmid */
		ret = hcall_reset_vm((uint16_t)param1);
		break;

	case HC_PAUSE_VM:
		/* param1: vmid */
		ret = hcall_pause_vm((uint16_t)param1);
		break;

	case HC_CREATE_VCPU:
		/* param1: vmid */
		ret = hcall_create_vcpu(vm, (uint16_t)param1, param2);
		break;

	case HC_ASSERT_IRQLINE:
		/* param1: vmid */
		ret = hcall_assert_irqline(vm, (uint16_t)param1, param2);
		break;

	case HC_DEASSERT_IRQLINE:
		/* param1: vmid */
		ret = hcall_deassert_irqline(vm, (uint16_t)param1, param2);
		break;

	case HC_PULSE_IRQLINE:
		/* param1: vmid */
		ret = hcall_pulse_irqline(vm, (uint16_t)param1, param2);
		break;

	case HC_INJECT_MSI:
		/* param1: vmid */
		ret = hcall_inject_msi(vm, (uint16_t)param1, param2);
		break;

	case HC_SET_IOREQ_BUFFER:
		/* param1: vmid */
		ret = hcall_set_ioreq_buffer(vm, (uint16_t)param1, param2);
		break;

	case HC_NOTIFY_REQUEST_FINISH:
		/* param1: vmid
		 * param2: vcpu_id
		 */
		ret = hcall_notify_ioreq_finish((uint16_t)param1,
			(uint16_t)param2);
		break;

	case HC_VM_SET_MEMORY_REGION:
		/* param1: vmid */
		ret = hcall_set_vm_memory_region(vm, (uint16_t)param1, param2);
		break;

	case HC_VM_SET_MEMORY_REGIONS:
		ret = hcall_set_vm_memory_regions(vm, param1);
		break;

	case HC_VM_WRITE_PROTECT_PAGE:
		ret = hcall_write_protect_page(vm, (uint16_t)param1, param2);
		break;

	case HC_VM_PCI_MSIX_REMAP:
		/* param1: vmid */
		ret = hcall_remap_pci_msix(vm, (uint16_t)param1, param2);
		break;

	case HC_VM_GPA2HPA:
		/* param1: vmid */
		ret = hcall_gpa_to_hpa(vm, (uint16_t)param1, param2);
		break;

	case HC_ASSIGN_PTDEV:
		/* param1: vmid */
		ret = hcall_assign_ptdev(vm, (uint16_t)param1, param2);
		break;

	case HC_DEASSIGN_PTDEV:
		/* param1: vmid */
		ret = hcall_deassign_ptdev(vm, (uint16_t)param1, param2);
		break;

	case HC_SET_PTDEV_INTR_INFO:
		/* param1: vmid */
		ret = hcall_set_ptdev_intr_info(vm, (uint16_t)param1, param2);
		break;

	case HC_RESET_PTDEV_INTR_INFO:
		/* param1: vmid */
		ret = hcall_reset_ptdev_intr_info(vm, (uint16_t)param1, param2);
		break;

	case HC_SETUP_SBUF:
		ret = hcall_setup_sbuf(vm, param1);
		break;

	case HC_WORLD_SWITCH:
		ret = hcall_world_switch(vcpu);
		break;

	case HC_INITIALIZE_TRUSTY:
		ret = hcall_initialize_trusty(vcpu, param1);
		break;

	case HC_PM_GET_CPU_STATE:
		ret = hcall_get_cpu_pm_state(vm, param1, param2);
		break;

	default:
pr_err("op %d: Invalid hypercall\n", hypcall_id);
		ret = -EPERM;
		break;
	}

out:
	vcpu_set_gpreg(vcpu, CPU_REG_RAX, (uint64_t)ret);
	TRACE_2L(TRACE_VMEXIT_VMCALL, vm->vm_id, hypcall_id);

	return 0;
}
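
The calling convention visible in this handler (hypercall ID in r8, parameters in rdi and rsi, result returned to the guest in rax via vcpu_set_gpreg()) implies a guest-side invocation along the following lines. This is only an illustrative sketch of a ring-0 caller, not code taken from the ACRN service-OS driver:

#include <stdint.h>

/* Issue a two-parameter ACRN hypercall from guest ring 0: the ID goes in r8,
 * the parameters in rdi/rsi, and the handler above places its int32_t result
 * in rax before resuming the guest.
 */
static inline int64_t acrn_hypercall2(uint64_t id, uint64_t param1,
				      uint64_t param2)
{
	register uint64_t r8  asm("r8")  = id;
	register uint64_t rdi asm("rdi") = param1;
	register uint64_t rsi asm("rsi") = param2;
	register int64_t  rax asm("rax");

	asm volatile ("vmcall"
		      : "=r" (rax)
		      : "r" (r8), "r" (rdi), "r" (rsi)
		      : "memory");

	return rax;
}

Note that a ring-0 guest other than VM0 may only use HC_WORLD_SWITCH and HC_INITIALIZE_TRUSTY; any other ID comes back as -EACCES in rax, and an unknown ID as -EPERM, exactly as enforced in the handler above.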