ACRN: hv: Unify terminology for Service VM

Rename is_sos_vm to is_service_vm

Tracked-On: #6744
Signed-off-by: Liu Long <longliu@intel.com>
Liu Long 2021-10-19 01:24:54 +08:00 committed by wenlingz
parent 2554c8f8cc
commit 26e507a06e
21 changed files with 113 additions and 113 deletions
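
The rename is mechanical: the predicate keeps its implementation and every call site simply switches to the new name. A condensed sketch of the renamed helper and one representative caller, assembled from the is_service_vm() definition and the wrmsr_vmexit_handler() hunk shown below (illustrative summary only, not an additional change made by this commit):

    /* Renamed predicate; the body is unchanged, only the name differs from is_sos_vm() */
    bool is_service_vm(const struct acrn_vm *vm)
    {
            return (vm != NULL) && (get_vm_config(vm->vm_id)->load_order == SOS_VM);
    }

    /* Example caller after the rename (condensed from wrmsr_vmexit_handler()):
     * only the Service VM is allowed to trigger a microcode update.
     */
    if (is_service_vm(vcpu->vm)) {
            acrn_update_ucode(vcpu, v);
    }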

View File

@ -381,7 +381,7 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
pr_err("INTX re-add vpin %d", virt_gsi);
}
} else if (entry->vm != vm) {
if (is_sos_vm(entry->vm)) {
if (is_service_vm(entry->vm)) {
entry->vm = vm;
entry->virt_sid.value = virt_sid.value;
entry->polarity = 0U;
@ -700,7 +700,7 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ct
* Device Model should pre-hold the mapping entries by calling
* ptirq_add_intx_remapping for UOS.
*
* For SOS(sos_vm), it adds the mapping entries at runtime, if the
* For Service VM, it adds the mapping entries at runtime, if the
* entry already be held by others, return error.
*/
@ -710,9 +710,9 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ct
spinlock_obtain(&ptdev_lock);
entry = find_ptirq_entry(PTDEV_INTR_INTX, &virt_sid, vm);
if (entry == NULL) {
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
/* for sos_vm, there is chance of vpin source switch
/* for Service VM, there is chance of vpin source switch
* between vPIC & vIOAPIC for one legacy phys_pin.
*
* here checks if there is already mapping entry from

View File

@ -16,7 +16,7 @@
int32_t validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl)
{
/* Note:
* 1. We don't validate Px request from SOS_VM for now;
* 1. We don't validate Px request from Service VM for now;
* 2. Px request will be rejected if no VM Px data is set, even guest is running intel_pstate driver;
* 3. The Pstate frequency varies from LFM to HFM and then TFM, but not all frequencies between
* LFM to TFM are mapped in ACPI table. For acpi-cpufreq driver, the target Px value in MSR
@ -27,7 +27,7 @@ int32_t validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl)
*/
int32_t ret = -1;
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
ret = 0;
} else {
uint8_t px_cnt = vm->pm.px_cnt;
@ -157,11 +157,11 @@ static inline void enter_s5(struct acrn_vcpu *vcpu, uint32_t pm1a_cnt_val, uint3
get_vm_lock(vm);
/*
* Currently, we assume SOS has full ACPI power management stack.
* That means the value from SOS should be saved and used to shut
* Currently, we assume Service VM has full ACPI power management stack.
* That means the value from Service VM should be saved and used to shut
* down the system.
*/
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
save_s5_reg_val(pm1a_cnt_val, pm1b_cnt_val);
}
pause_vm(vm);
@ -382,8 +382,8 @@ void init_guest_pm(struct acrn_vm *vm)
/*
* In enter_s5(), it will call save_s5_reg_val() to initialize system_pm1a_cnt_val/system_pm1b_cnt_val when the
* vm is SOS.
* If there is no SOS, save_s5_reg_val() will not be called and these 2 variables will not be initialized properly
* vm is Service VM.
* If there is no Service VM, save_s5_reg_val() will not be called and these 2 variables will not be initialized properly
* so shutdown_system() will fail, explicitly init here to avoid this
*/
save_s5_reg_val((sx_data->s5_pkg.val_pm1a << BIT_SLP_TYPx) | (1U << BIT_SLP_EN),
@ -391,7 +391,7 @@ void init_guest_pm(struct acrn_vm *vm)
vm_setup_cpu_state(vm);
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
/* Load pm S state data */
if (vm_load_pm_s_state(vm) == 0) {
register_pm1ab_handler(vm);

View File

@ -442,7 +442,7 @@ static int32_t set_vcpuid_extended_function(struct acrn_vm *vm)
if (result == 0) {
init_vcpuid_entry(0x40000001U, 0U, 0U, &entry);
/* EAX: Guest capability flags (e.g. whether it is a privilege VM) */
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
entry.eax |= GUEST_CAPS_PRIVILEGE_VM;
}
#ifdef CONFIG_HYPERV_ENABLED

View File

@ -2184,8 +2184,8 @@ void vlapic_create(struct acrn_vcpu *vcpu, uint16_t pcpu_id)
if (is_vcpu_bsp(vcpu)) {
uint64_t *pml4_page =
(uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
/* only need unmap it from SOS as UOS never mapped it */
if (is_sos_vm(vcpu->vm)) {
/* only need unmap it from Service VM as User VM never mapped it */
if (is_service_vm(vcpu->vm)) {
ept_del_mr(vcpu->vm, pml4_page,
DEFAULT_APIC_BASE, PAGE_SIZE);
}

View File

@ -94,7 +94,7 @@ bool is_paused_vm(const struct acrn_vm *vm)
return (vm->state == VM_PAUSED);
}
bool is_sos_vm(const struct acrn_vm *vm)
bool is_service_vm(const struct acrn_vm *vm)
{
return (vm != NULL) && (get_vm_config(vm->vm_id)->load_order == SOS_VM);
}
@ -210,7 +210,7 @@ bool vm_hide_mtrr(const struct acrn_vm *vm)
*/
static void setup_io_bitmap(struct acrn_vm *vm)
{
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
(void)memset(vm->arch_vm.io_bitmap, 0x00U, PAGE_SIZE * 2U);
} else {
/* block all IO port access from Guest */
@ -393,7 +393,7 @@ static void deny_hv_owned_devices(struct acrn_vm *sos)
* @retval 0 on success
*
* @pre vm != NULL
* @pre is_sos_vm(vm) == true
* @pre is_service_vm(vm) == true
*/
static void prepare_sos_vm_memmap(struct acrn_vm *vm)
{
@ -547,8 +547,8 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
(void)memcpy_s(&vm->uuid[0], sizeof(vm->uuid),
&vm_config->uuid[0], sizeof(vm_config->uuid));
if (is_sos_vm(vm)) {
/* Only for SOS_VM */
if (is_service_vm(vm)) {
/* Only for Service VM */
create_sos_vm_e820(vm);
prepare_sos_vm_memmap(vm);
@ -610,7 +610,7 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
vrtc_init(vm);
}
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
deny_hv_owned_devices(vm);
}
@ -763,7 +763,7 @@ int32_t shutdown_vm(struct acrn_vm *vm)
/* Only allow shutdown paused vm */
vm->state = VM_POWERED_OFF;
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
sbuf_reset();
}
@ -841,7 +841,7 @@ int32_t reset_vm(struct acrn_vm *vm)
*/
vm->arch_vm.vlapic_mode = VM_VLAPIC_XAPIC;
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
(void)prepare_os_image(vm);
}
@ -897,7 +897,7 @@ void pause_vm(struct acrn_vm *vm)
* @wakeup_vec[in] The resume address of vm
*
* @pre vm != NULL
* @pre is_sos_vm(vm) && vm->state == VM_PAUSED
* @pre is_service_vm(vm) && vm->state == VM_PAUSED
*/
void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec)
{
@ -938,7 +938,7 @@ void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
build_vrsdp(vm);
}
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
/* We need to ensure all modules of pre-launched VMs have been loaded already
* before loading SOS VM modules, otherwise the module of pre-launched VMs could
* be corrupted because SOS VM kernel might pick any usable RAM to extract kernel

View File

@ -31,7 +31,7 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
/* Inject pm1a S5 request to SOS to shut down the guest */
(void)emulate_io(vcpu, io_req);
} else {
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
uint16_t vm_id;
/* Shut down all non real time post-launched VMs */
@ -137,7 +137,7 @@ static bool handle_kb_write(struct acrn_vcpu *vcpu, __unused uint16_t addr, size
static bool handle_kb_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t bytes)
{
if (is_sos_vm(vcpu->vm) && (bytes == 1U)) {
if (is_service_vm(vcpu->vm) && (bytes == 1U)) {
/* In case i8042 is defined as ACPI PNP device in BIOS, HV need expose physical 0x64 port. */
vcpu->req.reqs.pio_request.value = pio_read8(addr);
} else {
@ -219,7 +219,7 @@ void register_reset_port_handler(struct acrn_vm *vm)
* Don't support MMIO or PCI based reset register for now.
* ACPI Spec: Register_Bit_Width must be 8 and Register_Bit_Offset must be 0.
*/
if (is_sos_vm(vm) &&
if (is_service_vm(vm) &&
(gas->space_id == SPACE_SYSTEM_IO) &&
(gas->bit_width == 8U) && (gas->bit_offset == 0U) &&
(gas->address != 0xcf9U) && (gas->address != 0x64U)) {

View File

@ -172,7 +172,7 @@ static int32_t dispatch_hypercall(struct acrn_vcpu *vcpu)
uint64_t param1 = vcpu_get_gpreg(vcpu, CPU_REG_RDI); /* hypercall param1 from guest */
uint64_t param2 = vcpu_get_gpreg(vcpu, CPU_REG_RSI); /* hypercall param2 from guest */
if ((permission_flags == 0UL) && is_sos_vm(vm)) {
if ((permission_flags == 0UL) && is_service_vm(vm)) {
/* A permission_flags of 0 indicates that this hypercall is for SOS to manage
* post-launched VMs.
*/
@ -221,7 +221,7 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
* guest flags. Attempts to invoke an unpermitted hypercall will make a vCPU see -EINVAL as the return
* value. No exception is triggered in this case.
*/
if (!is_sos_vm(vm) && ((guest_flags & GUEST_FLAGS_ALLOWING_HYPERCALLS) == 0UL)) {
if (!is_service_vm(vm) && ((guest_flags & GUEST_FLAGS_ALLOWING_HYPERCALLS) == 0UL)) {
vcpu_inject_ud(vcpu);
ret = -ENODEV;
} else if (!is_hypercall_from_ring0()) {

View File

@ -977,7 +977,7 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
case MSR_IA32_BIOS_UPDT_TRIG:
{
/* We only allow SOS to do uCode update */
if (is_sos_vm(vcpu->vm)) {
if (is_service_vm(vcpu->vm)) {
acrn_update_ucode(vcpu, v);
}
break;

View File

@ -108,7 +108,7 @@ void init_vmtrr(struct acrn_vcpu *vcpu)
vmtrr->def_type.bits.fixed_enable = 1U;
vmtrr->def_type.bits.type = MTRR_MEM_TYPE_UC;
if (is_sos_vm(vcpu->vm)) {
if (is_service_vm(vcpu->vm)) {
cap.value = msr_read(MSR_IA32_MTRR_CAP);
}

View File

@ -68,7 +68,7 @@ static void *get_initrd_load_addr(struct acrn_vm *vm, uint64_t kernel_start)
ramdisk_gpa_max = initrd_addr_max;
}
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
uint64_t mods_start, mods_end;
get_boot_mods_range(&mods_start, &mods_end);
@ -138,7 +138,7 @@ static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
zeropage = (struct zero_page *)sw_info->kernel_info.kernel_src_addr;
stac();
if ((is_sos_vm(vm)) && (zeropage->hdr.relocatable_kernel != 0U)) {
if ((is_service_vm(vm)) && (zeropage->hdr.relocatable_kernel != 0U)) {
uint64_t mods_start, mods_end;
uint64_t kernel_load_gpa = INVALID_GPA;
uint32_t kernel_align = zeropage->hdr.kernel_alignment;
@ -167,7 +167,7 @@ static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
}
} else {
load_addr = (void *)zeropage->hdr.pref_addr;
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
/* The non-relocatable SOS kernel might overlap with boot modules. */
pr_err("Non-relocatable kernel found, risk to boot!");
}
@ -275,7 +275,7 @@ static uint64_t create_zero_page(struct acrn_vm *vm, uint64_t load_params_gpa)
(void)memset(zeropage, 0U, MEM_2K);
#ifdef CONFIG_MULTIBOOT2
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
struct acrn_boot_info *abi = get_acrn_boot_info();
if (boot_from_uefi(abi)) {

View File

@ -82,7 +82,7 @@ inline static bool is_severity_pass(uint16_t target_vmid)
* @param vcpu Pointer to vCPU that initiates the hypercall
* @param param1 lapic id of the vcpu which wants to offline
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
@ -119,7 +119,7 @@ int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, __unused struct acrn_vm *t
* @param param1 guest physical memory address. The api version returned
* will be copied to this gpa
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_get_api_version(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
@ -188,7 +188,7 @@ void get_cache_shift(uint32_t *l2_shift, uint32_t *l3_shift)
* @param vcpu Pointer to vCPU that initiates the hypercall.
* @param param1 GPA pointer to struct acrn_platform_info.
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non zero in case of error.
*/
int32_t hcall_get_platform_info(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
@ -243,7 +243,7 @@ int32_t hcall_get_platform_info(struct acrn_vcpu *vcpu, __unused struct acrn_vm
* @param param1 guest physical memory address. This gpa points to
* struct acrn_vm_creation
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @pre get_vm_config(target_vm->vm_id) != NULL
* @return 0 on success, non-zero on error.
*/
@ -412,7 +412,7 @@ int32_t hcall_reset_vm(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_v
* @param param2 guest physical address. This gpa points to
* struct acrn_vcpu_regs
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -459,7 +459,7 @@ int32_t hcall_create_vcpu(__unused struct acrn_vcpu *vcpu, __unused struct acrn_
* @param target_vm Pointer to target VM data structure
* @param param2 info for irqline
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_irqline(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -500,7 +500,7 @@ int32_t hcall_set_irqline(__unused struct acrn_vcpu *vcpu, struct acrn_vm *targe
* @param target_vm Pointer to target VM data structure
* @param param2 guest physical address. This gpa points to struct acrn_msi_entry
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2)
@ -529,7 +529,7 @@ int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __un
* @param target_vm Pointer to target VM data structure
* @param param2 guest physical address. This gpa points to buffer address
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -605,7 +605,7 @@ int32_t hcall_notify_ioreq_finish(__unused struct acrn_vcpu *vcpu, struct acrn_v
}
/**
*@pre is_sos_vm(vm)
*@pre is_service_vm(vm)
*@pre gpa2hpa(vm, region->sos_vm_gpa) != INVALID_HPA
*/
static void add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_vm,
@ -656,7 +656,7 @@ static void add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_vm,
}
/**
*@pre is_sos_vm(vm)
*@pre is_service_vm(vm)
*/
static int32_t set_vm_memory_region(struct acrn_vm *vm,
struct acrn_vm *target_vm, const struct vm_memory_region *region)
@ -696,7 +696,7 @@ static int32_t set_vm_memory_region(struct acrn_vm *vm,
* @param param1 guest physical address. This gpa points to
* struct set_memmaps
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_vm_memory_regions(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -734,7 +734,7 @@ int32_t hcall_set_vm_memory_regions(struct acrn_vcpu *vcpu, struct acrn_vm *targ
}
/**
*@pre is_sos_vm(vm)
*@pre is_service_vm(vm)
*/
static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
{
@ -785,7 +785,7 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
* @param param2 guest physical address. This gpa points to
* struct wp_data
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_write_protect_page(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -818,7 +818,7 @@ int32_t hcall_write_protect_page(struct acrn_vcpu *vcpu, struct acrn_vm *target_
* @param target_vm Pointer to target VM data structure
* @param param2 guest physical address. This gpa points to struct vm_gpa2hpa
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_gpa_to_hpa(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2)
@ -852,7 +852,7 @@ int32_t hcall_gpa_to_hpa(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __un
* @param param2 guest physical address. This gpa points to data structure of
* acrn_pcidev including assign PCI device info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -882,7 +882,7 @@ int32_t hcall_assign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
* @param param2 guest physical address. This gpa points to data structure of
* acrn_pcidev including deassign PCI device info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -912,7 +912,7 @@ int32_t hcall_deassign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
* @param param2 guest physical address. This gpa points to data structure of
* acrn_mmiodev including assign MMIO device info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -945,7 +945,7 @@ int32_t hcall_assign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
* @param param2 guest physical address. This gpa points to data structure of
* acrn_mmiodev including deassign MMIO device info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -978,7 +978,7 @@ int32_t hcall_deassign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm
* @param param2 guest physical address. This gpa points to data structure of
* hc_ptdev_irq including intr remapping info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -1030,7 +1030,7 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target
* @param param2 guest physical address. This gpa points to data structure of
* hc_ptdev_irq including intr remapping info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_reset_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -1082,7 +1082,7 @@ int32_t hcall_reset_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *targ
* @param param1 cmd to show get which VCPU power state data
* @param param2 VCPU power state data
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_get_cpu_pm_state(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2)
@ -1161,7 +1161,7 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm
* @param param2 guest physical address. This gpa points to data structure of
* acrn_intr_monitor
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_vm_intr_monitor(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -1215,7 +1215,7 @@ int32_t hcall_vm_intr_monitor(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
* @param vcpu Pointer to vCPU that initiates the hypercall
* @param param1 the expected notifier vector from guest
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_callback_vector(__unused struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
@ -1259,7 +1259,7 @@ static struct emul_dev_ops *find_emul_dev_ops(struct acrn_vdev *dev)
* @param param guest physical address. This gpa points to data structure of
* acrn_vdev including information about PCI or legacy devices
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_add_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2)
@ -1291,7 +1291,7 @@ int32_t hcall_add_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unus
* @param param guest physical address. This gpa points to data structure of
* acrn_vdev including information about PCI or legacy devices
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_remove_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2)

View File

@ -103,8 +103,8 @@ struct ptirq_remapping_info *ptirq_dequeue_softirq(uint16_t pcpu_id)
list_del_init(&entry->softirq_node);
/* if sos vm, just dequeue, if uos, check delay timer */
if (is_sos_vm(entry->vm) || timer_expired(&entry->intr_delay_timer, cpu_ticks(), NULL)) {
/* if Service VM, just dequeue, if User VM, check delay timer */
if (is_service_vm(entry->vm) || timer_expired(&entry->intr_delay_timer, cpu_ticks(), NULL)) {
break;
} else {
/* add it into timer list; dequeue next one */
@ -164,10 +164,10 @@ static void ptirq_interrupt_handler(__unused uint32_t irq, void *data)
bool to_enqueue = true;
/*
* "interrupt storm" detection & delay intr injection just for UOS
* "interrupt storm" detection & delay intr injection just for User VM
* pass-thru devices, collect its data and delay injection if needed
*/
if (!is_sos_vm(entry->vm)) {
if (!is_service_vm(entry->vm)) {
entry->intr_count++;
/* if delta > 0, set the delay TSC, dequeue to handle */

View File

@ -22,7 +22,7 @@
* @param param2 guest physical address. This gpa points to
* data structure required by each command
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_profiling_ops(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
@ -76,7 +76,7 @@ int32_t hcall_profiling_ops(struct acrn_vcpu *vcpu, __unused struct acrn_vm *tar
* @param param1 guest physical address. This gpa points to
* struct sbuf_setup_param
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_setup_sbuf(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
@ -106,7 +106,7 @@ int32_t hcall_setup_sbuf(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target
* @param param1 guest physical address. This gpa points to
* struct hv_npk_log_param
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_setup_hv_npk_log(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
@ -130,7 +130,7 @@ int32_t hcall_setup_hv_npk_log(struct acrn_vcpu *vcpu, __unused struct acrn_vm *
* @param vcpu Pointer to vCPU that initiates the hypercall
* @param param1 Guest physical address pointing to struct acrn_hw_info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @pre param1 shall be a valid physical address
*
* @retval 0 on success

View File

@ -1196,7 +1196,7 @@ static void get_vioapic_info(char *str_arg, size_t str_max, uint16_t vmid)
gsi_count = get_vm_gsicount(vm);
rte.full = 0UL;
for (gsi = 0U; gsi < gsi_count; gsi++) {
if (is_sos_vm(vm) && (!is_gsi_valid(gsi))) {
if (is_service_vm(vm) && (!is_gsi_valid(gsi))) {
continue;
}
vioapic_get_rte(vm, gsi, &rte);

View File

@ -404,7 +404,7 @@ hv_emulate_pio(struct acrn_vcpu *vcpu, struct io_request *io_req)
io_read_fn_t io_read = NULL;
io_write_fn_t io_write = NULL;
if (is_sos_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) {
if (is_service_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) {
io_read = pio_default_read;
io_write = pio_default_write;
}
@ -468,7 +468,7 @@ hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
hv_mem_io_handler_t read_write = NULL;
void *handler_private_data = NULL;
if (is_sos_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) {
if (is_service_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) {
read_write = mmio_default_access_handler;
}
@ -600,7 +600,7 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
void register_pio_emulation_handler(struct acrn_vm *vm, uint32_t pio_idx,
const struct vm_io_range *range, io_read_fn_t io_read_fn_ptr, io_write_fn_t io_write_fn_ptr)
{
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
deny_guest_pio_access(vm, range->base, range->len);
}
vm->emul_pio[pio_idx].port_start = range->base;

View File

@ -24,7 +24,7 @@ int32_t assign_mmio_dev(struct acrn_vm *vm, const struct acrn_mmiodev *mmiodev)
mem_aligned_check(res->host_pa, PAGE_SIZE) &&
mem_aligned_check(res->size, PAGE_SIZE)) {
ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, res->host_pa,
is_sos_vm(vm) ? res->host_pa : res->user_vm_pa,
is_service_vm(vm) ? res->host_pa : res->user_vm_pa,
res->size, EPT_RWX | (res->mem_type & EPT_MT_MASK));
} else {
pr_err("%s invalid mmio res[%d] gpa:0x%lx hpa:0x%lx size:0x%lx",
@ -45,7 +45,7 @@ int32_t deassign_mmio_dev(struct acrn_vm *vm, const struct acrn_mmiodev *mmiodev
for (i = 0; i < MMIODEV_RES_NUM; i++) {
res = &mmiodev->res[i];
gpa = is_sos_vm(vm) ? res->host_pa : res->user_vm_pa;
gpa = is_service_vm(vm) ? res->host_pa : res->user_vm_pa;
if (ept_is_valid_mr(vm, gpa, res->size)) {
if (mem_aligned_check(gpa, PAGE_SIZE) &&
mem_aligned_check(res->size, PAGE_SIZE)) {

View File

@ -120,7 +120,7 @@ vgsi_to_vioapic_and_vpin(const struct acrn_vm *vm, uint32_t vgsi, uint32_t *vpin
struct acrn_single_vioapic *vioapic;
uint8_t vioapic_index = 0U;
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
/*
* Utilize platform ioapic_info for SOS VM
*/
@ -532,7 +532,7 @@ vioapic_init(struct acrn_vm *vm)
uint8_t vioapic_index;
struct acrn_single_vioapic *vioapic = NULL;
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
vm->arch_vm.vioapics.ioapic_num = get_platform_ioapic_info(&vioapic_info);
} else {
vm->arch_vm.vioapics.ioapic_num = 1U;

View File

@ -259,7 +259,7 @@ static void vdev_pt_allow_io_vbar(struct pci_vdev *vdev, uint32_t idx)
struct acrn_vm *vm = vpci2vm(vdev->vpci);
/* For SOS, all port IO access is allowed by default, so skip SOS here */
if (!is_sos_vm(vm)) {
if (!is_service_vm(vm)) {
struct pci_vbar *vbar = &vdev->vbars[idx];
if (vbar->base_gpa != 0UL) {
allow_guest_pio_access(vm, (uint16_t)vbar->base_gpa, (uint32_t)(vbar->size));
@ -277,7 +277,7 @@ static void vdev_pt_deny_io_vbar(struct pci_vdev *vdev, uint32_t idx)
struct acrn_vm *vm = vpci2vm(vdev->vpci);
/* For SOS, all port IO access is allowed by default, so skip SOS here */
if (!is_sos_vm(vm)) {
if (!is_service_vm(vm)) {
struct pci_vbar *vbar = &vdev->vbars[idx];
if (vbar->base_gpa != 0UL) {
deny_guest_pio_access(vm, (uint16_t)(vbar->base_gpa), (uint32_t)(vbar->size));
@ -540,7 +540,7 @@ void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev)
if (vdev->phyfun == NULL) {
init_bars(vdev, is_pf_vdev);
init_vmsix_on_msi(vdev);
if (is_sos_vm(vpci2vm(vdev->vpci)) && (vdev->pdev->bdf.value == CONFIG_GPU_SBDF)) {
if (is_service_vm(vpci2vm(vdev->vpci)) && (vdev->pdev->bdf.value == CONFIG_GPU_SBDF)) {
pci_vdev_write_vcfg(vdev, PCIR_ASLS_CTL, 4U, pci_pdev_read_cfg(vdev->pdev->bdf, PCIR_ASLS_CTL, 4U));
}
if (is_prelaunched_vm(vpci2vm(vdev->vpci)) && (!is_pf_vdev)) {
@ -576,7 +576,7 @@ void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev)
}
}
if (!is_sos_vm(vpci2vm(vdev->vpci)) && (has_sriov_cap(vdev))) {
if (!is_service_vm(vpci2vm(vdev->vpci)) && (has_sriov_cap(vdev))) {
vdev_pt_hide_sriov_cap(vdev);
}

View File

@ -325,7 +325,7 @@ static uint32_t vpin_to_vgsi(const struct acrn_vm *vm, uint32_t vpin)
* Remap depending on the type of VM
*/
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
/*
* For SOS VM vPIC pin to GSI is same as the one
* that is used for platform
@ -361,7 +361,7 @@ static uint32_t vgsi_to_vpin(const struct acrn_vm *vm, uint32_t vgsi)
* Remap depending on the type of VM
*/
if (is_sos_vm(vm)) {
if (is_service_vm(vm)) {
/*
* For SOS VM vPIC pin to GSI is same as the one
* that is used for platform

View File

@ -238,7 +238,7 @@ void launch_vms(uint16_t pcpu_id);
bool is_poweroff_vm(const struct acrn_vm *vm);
bool is_created_vm(const struct acrn_vm *vm);
bool is_paused_vm(const struct acrn_vm *vm);
bool is_sos_vm(const struct acrn_vm *vm);
bool is_service_vm(const struct acrn_vm *vm);
bool is_postlaunched_vm(const struct acrn_vm *vm);
bool is_prelaunched_vm(const struct acrn_vm *vm);
uint16_t get_vmid_by_uuid(const uint8_t *uuid);

View File

@ -23,16 +23,16 @@ bool is_hypercall_from_ring0(void);
*/
/**
* @brief offline vcpu from SOS
* @brief offline vcpu from Service VM
*
* The function offline specific vcpu from SOS.
* The function offline specific vcpu from Service VM.
*
* @param vcpu Pointer to vCPU that initiates the hypercall
* @param target_vm not used
* @param param1 lapic id of the vcpu which wants to offline
* @param param2 not used
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -40,7 +40,7 @@ int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
/**
* @brief Get hypervisor api version
*
* The function only return api version information when VM is SOS_VM.
* The function only return api version information when VM is Service VM.
*
* @param vcpu Pointer to vCPU that initiates the hypercall
* @param target_vm not used
@ -48,7 +48,7 @@ int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
* will be copied to this gpa
* @param param2 not used
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_get_api_version(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -64,7 +64,7 @@ int32_t hcall_get_api_version(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
* @param param1 GPA pointer to struct acrn_platform_info.
* @param param2 not used
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, -1 in case of error.
*/
int32_t hcall_get_platform_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -82,7 +82,7 @@ int32_t hcall_get_platform_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_v
* struct acrn_vm_creation
* @param param2 not used
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_create_vm(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -180,7 +180,7 @@ int32_t hcall_set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, u
* @param param1 not used
* @param param2 info for irqline
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_irqline(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -196,7 +196,7 @@ int32_t hcall_set_irqline(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uin
* @param param1 not used
* @param param2 guest physical address. This gpa points to struct acrn_msi_entry
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -212,7 +212,7 @@ int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint
* @param param1 not used
* @param param2 guest physical address. This gpa points to buffer address
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -241,7 +241,7 @@ int32_t hcall_notify_ioreq_finish(struct acrn_vcpu *vcpu, struct acrn_vm *target
* struct set_memmaps
* @param param2 not used
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_vm_memory_regions(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -256,7 +256,7 @@ int32_t hcall_set_vm_memory_regions(struct acrn_vcpu *vcpu, struct acrn_vm *targ
* @param param2 guest physical address. This gpa points to
* struct wp_data
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_write_protect_page(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -272,7 +272,7 @@ int32_t hcall_write_protect_page(struct acrn_vcpu *vcpu, struct acrn_vm *target_
* @param param1 not used
* @param param2 guest physical address. This gpa points to struct vm_gpa2hpa
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_gpa_to_hpa(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -286,7 +286,7 @@ int32_t hcall_gpa_to_hpa(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint
* @param param2 guest physical address. This gpa points to data structure of
* acrn_pcidev including assign PCI device info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -300,7 +300,7 @@ int32_t hcall_assign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, u
* @param param2 guest physical address. This gpa points to data structure of
* acrn_pcidev including deassign PCI device info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -314,7 +314,7 @@ int32_t hcall_deassign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
* @param param2 guest physical address. This gpa points to data structure of
* acrn_mmiodev including assign MMIO device info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -328,7 +328,7 @@ int32_t hcall_assign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
* @param param2 guest physical address. This gpa points to data structure of
* acrn_mmiodev including deassign MMIO device info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -342,7 +342,7 @@ int32_t hcall_deassign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm
* @param param2 guest physical address. This gpa points to data structure of
* acrn_vdev including information about PCI or legacy devices
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_add_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -356,7 +356,7 @@ int32_t hcall_add_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64
* @param param2 guest physical address. This gpa points to data structure of
* acrn_vdev including information about PCI or legacy devices
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_remove_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -370,7 +370,7 @@ int32_t hcall_remove_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uin
* @param param2 guest physical address. This gpa points to data structure of
* hc_ptdev_irq including intr remapping info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -384,7 +384,7 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target
* @param param2 guest physical address. This gpa points to data structure of
* hc_ptdev_irq including intr remapping info
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_reset_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
@ -398,7 +398,7 @@ int32_t hcall_reset_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *targ
* @param param1 cmd to show get which VCPU power state data
* @param param2 VCPU power state data
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_get_cpu_pm_state(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -412,7 +412,7 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm
* @param param2 guest physical address. This gpa points to data structure of
* acrn_intr_monitor
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_vm_intr_monitor(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -487,14 +487,14 @@ int32_t hcall_save_restore_sworld_ctx(struct acrn_vcpu *vcpu, struct acrn_vm *ta
*
* This is the API that helps to switch the notifer vecotr. If this API is
* not called, the hypervisor will use the default notifier vector(0xF7)
* to notify the SOS kernel.
* to notify the Service VM OS kernel.
*
* @param vcpu not used
* @param target_vm not used
* @param param1 the expected notifier vector from guest
* @param param2 not used
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_callback_vector(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -508,7 +508,7 @@ int32_t hcall_set_callback_vector(struct acrn_vcpu *vcpu, struct acrn_vm *target
* struct sbuf_setup_param
* @param param2 not used
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_setup_sbuf(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -522,7 +522,7 @@ int32_t hcall_setup_sbuf(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint
* struct hv_npk_log_param
* @param param2 not used
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_setup_hv_npk_log(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
@ -535,7 +535,7 @@ int32_t hcall_setup_hv_npk_log(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm
* @param param1 Guest physical address pointing to struct acrn_hw_info
* @param param2 not used
*
* @pre vm shall point to SOS_VM
* @pre vm shall point to Service VM
* @pre param1 shall be a valid physical address
*
* @retval 0 on success
@ -552,7 +552,7 @@ int32_t hcall_get_hw_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uin
* @param param2 guest physical address. This gpa points to
* data structure required by each command
*
* @pre is_sos_vm(vcpu->vm)
* @pre is_service_vm(vcpu->vm)
* @return 0 on success, non-zero on error.
*/
int32_t hcall_profiling_ops(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);