mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2026-01-05 15:45:05 +00:00)

initial import

internal commit: 14ac2bc2299032fa6714d1fefa7cf0987b3e3085
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
hypervisor/common/hv_main.c (new file, 206 lines)
@@ -0,0 +1,206 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <hypervisor.h>
#include <hv_lib.h>
#include <acrn_common.h>
#include <hv_arch.h>
#include <schedule.h>
#include <hv_debug.h>

bool x2apic_enabled;

static DEFINE_CPU_DATA(uint64_t[64], vmexit_cnt);
static DEFINE_CPU_DATA(uint64_t[64], vmexit_time);

static void run_vcpu_pre_work(struct vcpu *vcpu)
{
	unsigned long *pending_pre_work = &vcpu->pending_pre_work;

	if (bitmap_test_and_clear(ACRN_VCPU_MMIO_COMPLETE, pending_pre_work))
		dm_emulate_mmio_post(vcpu);
}

void vcpu_thread(struct vcpu *vcpu)
{
	uint64_t vmexit_begin, vmexit_end;
	uint16_t exit_reason;
	uint64_t tsc_aux_hyp_cpu = vcpu->pcpu_id;
	struct vm_exit_dispatch *vmexit_hdlr;
	int ret = 0;

	vmexit_begin = vmexit_end = exit_reason = 0;
	/* If the vcpu is not launched yet, do init_vmcs first */
	if (!vcpu->launched)
		init_vmcs(vcpu);

	run_vcpu_pre_work(vcpu);

	do {
		/* handle pending softirqs */
		CPU_IRQ_ENABLE();
		exec_softirq();
		CPU_IRQ_DISABLE();

		/* Check and process interrupts */
		acrn_do_intr_process(vcpu);

		if (need_rescheduled(vcpu->pcpu_id)) {
			/*
			 * In the extreme case, schedule() may return, which
			 * means the vcpu resume happens before the schedule()
			 * triggered by the vcpu suspend. In that case, do the
			 * pre-work and continue the vcpu loop after schedule()
			 * returns.
			 */
			schedule();
			run_vcpu_pre_work(vcpu);
			continue;
		}

		vmexit_end = rdtsc();
		if (vmexit_begin > 0)
			per_cpu(vmexit_time, vcpu->pcpu_id)[exit_reason]
				+= (vmexit_end - vmexit_begin);
		TRACE_2L(TRACE_VM_ENTER, 0, 0);

		/* Restore guest TSC_AUX */
		if (vcpu->launched) {
			CPU_MSR_WRITE(MSR_IA32_TSC_AUX,
					vcpu->msr_tsc_aux_guest);
		}

		ret = start_vcpu(vcpu);
		ASSERT(ret == 0, "vcpu resume failed");

		vmexit_begin = rdtsc();

		vcpu->arch_vcpu.nrexits++;
		/* Save guest TSC_AUX */
		CPU_MSR_READ(MSR_IA32_TSC_AUX, &vcpu->msr_tsc_aux_guest);
		/* Restore native TSC_AUX */
		CPU_MSR_WRITE(MSR_IA32_TSC_AUX, tsc_aux_hyp_cpu);
		ASSERT((int)get_cpu_id() == vcpu->pcpu_id, "");

		/* Dispatch handler */
		vmexit_hdlr = vmexit_handler(vcpu);
		ASSERT(vmexit_hdlr != 0,
			"Unable to dispatch VM exit handler!");

		exit_reason = vcpu->arch_vcpu.exit_reason & 0xFFFF;
		per_cpu(vmexit_cnt, vcpu->pcpu_id)[exit_reason]++;
		TRACE_2L(TRACE_VM_EXIT, exit_reason,
			vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip);

		if (exit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) {
			/* External interrupts must be handled with
			 * interrupts disabled.
			 */
			vmexit_hdlr->handler(vcpu);
		} else {
			CPU_IRQ_ENABLE();
			vmexit_hdlr->handler(vcpu);
			CPU_IRQ_DISABLE();
		}
	} while (1);
}

static bool is_vm0_bsp(int pcpu_id)
{
	struct vm_description *vm_desc = get_vm_desc(0);

	ASSERT(vm_desc, "get vm desc failed");
	return pcpu_id == vm_desc->vm_hw_logical_core_ids[0];
}

int hv_main(int cpu_id)
{
	int ret = 0;

	pr_info("%s, Starting common entry point for CPU %d",
		__func__, cpu_id);
	ASSERT(cpu_id < phy_cpu_num, "cpu_id out of range");

	ASSERT((uint64_t) cpu_id == get_cpu_id(),
		"cpu_id/tsc_aux mismatch");

	/* Check if virtualization extensions are supported */
	ret = check_vmx_support();
	ASSERT(ret == 0, "VMX not supported!");

	/* Enable virtualization extensions */
	ret = exec_vmxon_instr();
	ASSERT(ret == 0, "Unable to enable VMX!");

	/* x2APIC mode is disabled by default. */
	x2apic_enabled = false;

	if (is_vm0_bsp(cpu_id))
		prepare_vm0();

	default_idle();

	return ret;
}

int get_vmexit_profile(char *str, int str_max)
{
	int cpu, i, len, size = str_max;

	len = snprintf(str, size, "\r\nNow(us) = %16lld\r\n",
		TICKS_TO_US(rdtsc()));
	size -= len;
	str += len;

	len = snprintf(str, size, "\r\nREASON");
	size -= len;
	str += len;

	for (cpu = 0; cpu < phy_cpu_num; cpu++) {
		len = snprintf(str, size, "\t CPU%d\t US", cpu);
		size -= len;
		str += len;
	}

	for (i = 0; i < 64; i++) {
		len = snprintf(str, size, "\r\n0x%x", i);
		size -= len;
		str += len;
		for (cpu = 0; cpu < phy_cpu_num; cpu++) {
			len = snprintf(str, size, "\t%10lld\t%10lld",
				per_cpu(vmexit_cnt, cpu)[i],
				TICKS_TO_US(per_cpu(vmexit_time, cpu)[i]));
			size -= len;
			str += len;
		}
	}
	snprintf(str, size, "\r\n");
	return 0;
}
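get_vmexit_profile() above formats the per-CPU exit counts and accumulated cycle times into a caller-supplied buffer. A minimal usage sketch, not part of this commit — the buffer size and the shell_puts() output helper are assumptions for illustration:

/* Hypothetical caller: dump the VM-exit profile through a debug console.
 * shell_puts() is an assumed output routine; only get_vmexit_profile()
 * comes from hv_main.c above.
 */
static char profile_buf[4096];

void dump_vmexit_profile(void)
{
	get_vmexit_profile(profile_buf, sizeof(profile_buf));
	shell_puts(profile_buf);	/* assumed output helper */
}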
hypervisor/common/hypercall.c (new file, 868 lines)
@@ -0,0 +1,868 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <hypervisor.h>
#include <hv_lib.h>
#include <acrn_common.h>
#include <hv_arch.h>
#include <schedule.h>
#include <hypercall.h>
#include <acrn_hv_defs.h>
#include <hv_debug.h>
#include <version.h>

#define ACRN_DBG_HYCALL 6

int64_t hcall_get_api_version(struct vm *vm, uint64_t param)
{
	struct hc_api_version version;

	if (!is_vm0(vm))
		return -1;

	version.major_version = HV_MAJOR_VERSION;
	version.minor_version = HV_MINOR_VERSION;

	if (copy_to_vm(vm, &version, param)) {
		pr_err("%s: Unable to copy param to vm\n", __func__);
		return -1;
	}

	return 0;
}

static int handle_vpic_irqline(struct vm *vm, int irq, enum irq_mode mode)
{
	int ret = -1;

	if (!vm)
		return ret;

	switch (mode) {
	case IRQ_ASSERT:
		ret = vpic_assert_irq(vm, irq);
		break;
	case IRQ_DEASSERT:
		ret = vpic_deassert_irq(vm, irq);
		break;
	case IRQ_PULSE:
		ret = vpic_pulse_irq(vm, irq);
		break;
	default:
		break;
	}

	return ret;
}

static int
handle_vioapic_irqline(struct vm *vm, int irq, enum irq_mode mode)
{
	int ret = -1;

	if (!vm)
		return ret;

	switch (mode) {
	case IRQ_ASSERT:
		ret = vioapic_assert_irq(vm, irq);
		break;
	case IRQ_DEASSERT:
		ret = vioapic_deassert_irq(vm, irq);
		break;
	case IRQ_PULSE:
		ret = vioapic_pulse_irq(vm, irq);
		break;
	default:
		break;
	}
	return ret;
}

static int handle_virt_irqline(struct vm *vm, uint64_t target_vmid,
		struct acrn_irqline *param, enum irq_mode mode)
{
	int ret = 0;
	long intr_type;
	struct vm *target_vm = get_vm_from_vmid(target_vmid);

	if (!vm || !param)
		return -1;

	intr_type = param->intr_type;

	switch (intr_type) {
	case ACRN_INTR_TYPE_ISA:
		/* Call vpic for PIC injection */
		ret = handle_vpic_irqline(target_vm, param->pic_irq, mode);

		/* call vioapic for IOAPIC injection if ioapic_irq != -1 */
		if (param->ioapic_irq != -1UL) {
			/* handle the IOAPIC irqline */
			ret = handle_vioapic_irqline(target_vm,
				param->ioapic_irq, mode);
		}
		break;
	case ACRN_INTR_TYPE_IOAPIC:
		/* handle the IOAPIC irqline */
		ret = handle_vioapic_irqline(target_vm,
			param->ioapic_irq, mode);
		break;
	default:
		dev_dbg(ACRN_DBG_HYCALL, "vINTR inject failed. type=%d",
			intr_type);
		ret = -1;
	}
	return ret;
}

int64_t hcall_create_vm(struct vm *vm, uint64_t param)
{
	int64_t ret = 0;
	struct vm *target_vm = NULL;
	/* VMs are created from hv_main() directly;
	 * here we just return the vmid to the DM.
	 */
	struct acrn_create_vm cv;
	struct vm_description vm_desc;

	memset((void *)&cv, 0, sizeof(cv));
	if (copy_from_vm(vm, &cv, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	memset(&vm_desc, 0, sizeof(vm_desc));
	vm_desc.secure_world_enabled = cv.secure_world_enabled;
	memcpy_s(&vm_desc.GUID[0], 16, &cv.GUID[0], 16);
	ret = create_vm(&vm_desc, &target_vm);

	if (ret != 0) {
		dev_dbg(ACRN_DBG_HYCALL, "HCALL: Create VM failed");
		cv.vmid = ACRN_INVALID_VMID;
		ret = -1;
	} else {
		cv.vmid = target_vm->attr.id;
		ret = 0;
	}

	if (copy_to_vm(vm, &cv.vmid, param)) {
		pr_err("%s: Unable to copy param to vm\n", __func__);
		return -1;
	}

	return ret;
}

int64_t hcall_destroy_vm(uint64_t vmid)
{
	int64_t ret = 0;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	ret = shutdown_vm(target_vm);
	return ret;
}

int64_t hcall_resume_vm(uint64_t vmid)
{
	int64_t ret = 0;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;
	if (target_vm->sw.req_buf == 0)
		ret = -1;
	else
		ret = start_vm(target_vm);

	return ret;
}

int64_t hcall_pause_vm(uint64_t vmid)
{
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	pause_vm(target_vm);

	return 0;
}

int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int ret, pcpu_id;
	struct acrn_create_vcpu cv;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (!target_vm || !param)
		return -1;

	if (copy_from_vm(vm, &cv, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	pcpu_id = allocate_pcpu();
	if (-1 == pcpu_id) {
		pr_err("%s: No physical CPU available\n", __func__);
		return -1;
	}

	ret = prepare_vcpu(target_vm, pcpu_id);

	return ret;
}

int64_t hcall_assert_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	struct acrn_irqline irqline;

	if (copy_from_vm(vm, &irqline, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}
	ret = handle_virt_irqline(vm, vmid, &irqline, IRQ_ASSERT);

	return ret;
}

int64_t hcall_deassert_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	struct acrn_irqline irqline;

	if (copy_from_vm(vm, &irqline, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}
	ret = handle_virt_irqline(vm, vmid, &irqline, IRQ_DEASSERT);

	return ret;
}

int64_t hcall_pulse_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	struct acrn_irqline irqline;

	if (copy_from_vm(vm, &irqline, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}
	ret = handle_virt_irqline(vm, vmid, &irqline, IRQ_PULSE);

	return ret;
}

int64_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int ret = 0;
	struct acrn_msi_entry msi;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	memset((void *)&msi, 0, sizeof(msi));
	if (copy_from_vm(vm, &msi, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}
	ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data);

	return ret;
}

int64_t hcall_set_ioreq_buffer(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	struct acrn_set_ioreq_buffer iobuf;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	memset((void *)&iobuf, 0, sizeof(iobuf));

	if (copy_from_vm(vm, &iobuf, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	dev_dbg(ACRN_DBG_HYCALL, "[%d] SET BUFFER=0x%x",
		vmid, iobuf.req_buf);

	/* store the host-physical address of the guest request buffer */
	target_vm->sw.req_buf = gpa2hpa(vm, iobuf.req_buf);

	return ret;
}

static void complete_request(struct vcpu *vcpu)
{
	/*
	 * If the vcpu is in Zombie state, it will be destroyed soon;
	 * just mark the ioreq as done and don't resume the vcpu.
	 */
	if (vcpu->state == VCPU_ZOMBIE) {
		struct vhm_request_buffer *req_buf;

		req_buf = (struct vhm_request_buffer *)vcpu->vm->sw.req_buf;
		req_buf->req_queue[vcpu->vcpu_id].valid = false;
		atomic_store_rel_32(&vcpu->ioreq_pending, 0);

		return;
	}

	switch (vcpu->req.type) {
	case REQ_MMIO:
		request_vcpu_pre_work(vcpu, ACRN_VCPU_MMIO_COMPLETE);
		break;

	case REQ_PORTIO:
		dm_emulate_pio_post(vcpu);
		break;

	default:
		break;
	}

	resume_vcpu(vcpu);
}

int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)
{
	int64_t ret = 0;
	struct vhm_request_buffer *req_buf;
	struct vhm_request *req;
	struct vcpu *vcpu;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	/* make sure req_buf has been set */
	if (!target_vm || target_vm->sw.req_buf == 0)
		return -1;

	dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
		vmid, vcpu_id);

	vcpu = vcpu_from_vid(target_vm, vcpu_id);
	ASSERT(vcpu != NULL, "Failed to get VCPU context.");

	req_buf = (struct vhm_request_buffer *)target_vm->sw.req_buf;
	req = req_buf->req_queue + vcpu_id;

	if (req->valid &&
		((req->processed == REQ_STATE_SUCCESS) ||
		 (req->processed == REQ_STATE_FAILED)))
		complete_request(vcpu);

	return ret;
}

int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	uint64_t hpa;
	uint32_t attr, prot;
	struct vm_set_memmap memmap;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (!vm || !target_vm)
		return -1;

	memset((void *)&memmap, 0, sizeof(memmap));

	if (copy_from_vm(vm, &memmap, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	if (!is_vm0(vm)) {
		pr_err("%s: ERROR! Not coming from service vm", __func__);
		return -1;
	}

	if (is_vm0(target_vm)) {
		pr_err("%s: ERROR! Targeting to service vm", __func__);
		return -1;
	}

	if ((memmap.length & 0xFFF) != 0) {
		pr_err("%s: ERROR! [vm%d] map size 0x%x is not page aligned",
			__func__, vmid, memmap.length);
		return -1;
	}

	hpa = gpa2hpa(vm, memmap.vm0_gpa);
	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x size=0x%x",
		vmid, memmap.remote_gpa, hpa, memmap.length);

	/* Check the requested protection bits */
	attr = 0;
	if (memmap.type != MAP_UNMAP) {
		prot = memmap.prot;
		if (prot & MEM_ACCESS_READ)
			attr |= MMU_MEM_ATTR_READ;
		if (prot & MEM_ACCESS_WRITE)
			attr |= MMU_MEM_ATTR_WRITE;
		if (prot & MEM_ACCESS_EXEC)
			attr |= MMU_MEM_ATTR_EXECUTE;
		if (prot & MEM_TYPE_WB)
			attr |= MMU_MEM_ATTR_WB_CACHE;
		else if (prot & MEM_TYPE_WT)
			attr |= MMU_MEM_ATTR_WT_CACHE;
		else if (prot & MEM_TYPE_UC)
			attr |= MMU_MEM_ATTR_UNCACHED;
		else if (prot & MEM_TYPE_WC)
			attr |= MMU_MEM_ATTR_WC;
		else if (prot & MEM_TYPE_WP)
			attr |= MMU_MEM_ATTR_WP;
		else
			attr |= MMU_MEM_ATTR_UNCACHED;
	}

	/* create the gpa-to-hpa EPT mapping */
	ret = ept_mmap(target_vm, hpa,
		memmap.remote_gpa, memmap.length, memmap.type, attr);

	return ret;
}

int64_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	struct acrn_vm_pci_msix_remap remap;
	struct ptdev_msi_info info;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	memset((void *)&remap, 0, sizeof(remap));

	if (copy_from_vm(vm, &remap, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	if (!is_vm0(vm))
		ret = -1;
	else {
		info.msix = remap.msix;
		info.msix_entry_index = remap.msix_entry_index;
		info.vmsi_ctl = remap.msi_ctl;
		info.vmsi_addr = remap.msi_addr;
		info.vmsi_data = remap.msi_data;

		ret = ptdev_msix_remap(target_vm,
				remap.virt_bdf, &info);
		remap.msi_data = info.pmsi_data;
		remap.msi_addr = info.pmsi_addr;

		if (copy_to_vm(vm, &remap, param)) {
			pr_err("%s: Unable to copy param to vm\n", __func__);
			return -1;
		}
	}

	return ret;
}

int64_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	struct vm_gpa2hpa v_gpa2hpa;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	memset((void *)&v_gpa2hpa, 0, sizeof(v_gpa2hpa));

	if (copy_from_vm(vm, &v_gpa2hpa, param)) {
		pr_err("HCALL gpa2hpa: Unable to copy param from vm\n");
		return -1;
	}
	v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
	if (copy_to_vm(vm, &v_gpa2hpa, param)) {
		pr_err("%s: Unable to copy param to vm\n", __func__);
		return -1;
	}

	return ret;
}

int64_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	uint16_t bdf;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	if (copy_from_vm(vm, &bdf, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	/* create an iommu domain for the target VM if not created yet */
	if (!target_vm->iommu_domain) {
		ASSERT(target_vm->arch_vm.ept, "EPT of VM not set!");
		/* TODO: how to get the vm's address width? */
		target_vm->iommu_domain = create_iommu_domain(vmid,
			target_vm->arch_vm.ept, 48);
		ASSERT(target_vm->iommu_domain,
			"failed to create iommu domain!");
	}
	ret = assign_iommu_device(target_vm->iommu_domain,
		(uint8_t)(bdf >> 8), (uint8_t)(bdf & 0xff));

	return ret;
}

int64_t hcall_deassign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	uint16_t bdf;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	if (copy_from_vm(vm, &bdf, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}
	ret = unassign_iommu_device(target_vm->iommu_domain,
		(uint8_t)(bdf >> 8), (uint8_t)(bdf & 0xff));

	return ret;
}

int64_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	struct hc_ptdev_irq irq;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	memset((void *)&irq, 0, sizeof(irq));

	if (copy_from_vm(vm, &irq, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	if (irq.type == IRQ_INTX)
		ptdev_add_intx_remapping(target_vm,
			irq.virt_bdf, irq.phys_bdf,
			irq.is.intx.virt_pin, irq.is.intx.phys_pin,
			irq.is.intx.pic_pin);
	else if (irq.type == IRQ_MSI || irq.type == IRQ_MSIX)
		ptdev_add_msix_remapping(target_vm,
			irq.virt_bdf, irq.phys_bdf,
			irq.is.msix.vector_cnt);

	return ret;
}

int64_t
hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
{
	int64_t ret = 0;
	struct hc_ptdev_irq irq;
	struct vm *target_vm = get_vm_from_vmid(vmid);

	if (target_vm == NULL)
		return -1;

	memset((void *)&irq, 0, sizeof(irq));

	if (copy_from_vm(vm, &irq, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	if (irq.type == IRQ_INTX)
		ptdev_remove_intx_remapping(target_vm,
			irq.is.intx.virt_pin,
			irq.is.intx.pic_pin);
	else if (irq.type == IRQ_MSI || irq.type == IRQ_MSIX)
		ptdev_remove_msix_remapping(target_vm,
			irq.virt_bdf,
			irq.is.msix.vector_cnt);

	return ret;
}

#ifdef HV_DEBUG
int64_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
{
	struct sbuf_setup_param ssp;
	uint64_t *hva;

	memset((void *)&ssp, 0, sizeof(ssp));

	if (copy_from_vm(vm, &ssp, param)) {
		pr_err("%s: Unable to copy param from vm\n", __func__);
		return -1;
	}

	if (ssp.gpa)
		hva = (uint64_t *)GPA2HVA(vm, ssp.gpa);
	else
		hva = (uint64_t *)NULL;

	return sbuf_share_setup(ssp.pcpu_id, ssp.sbuf_id, hva);
}
#else /* HV_DEBUG */
int64_t hcall_setup_sbuf(__unused struct vm *vm,
		__unused uint64_t param)
{
	return -1;
}
#endif /* HV_DEBUG */

static void fire_vhm_interrupt(void)
{
	/*
	 * Use the vLAPIC to inject the vector to SOS vcpu0 if the vlapic
	 * is enabled; otherwise, send an IPI hardcoded to CPU_BOOT_ID.
	 */
	struct vm *vm0;
	struct vcpu *vcpu;

	vm0 = get_vm_from_vmid(0);
	ASSERT(vm0, "VM Pointer is NULL");

	vcpu = vcpu_from_vid(vm0, 0);
	ASSERT(vcpu, "vcpu_from_vid failed");

	vlapic_intr_edge(vcpu, VECTOR_VIRT_IRQ_VHM);
}

#ifdef HV_DEBUG
static void acrn_print_request(int vcpu_id, struct vhm_request *req)
{
	switch (req->type) {
	case REQ_MMIO:
		dev_dbg(ACRN_DBG_HYCALL, "[vcpu_id=%d type=MMIO]", vcpu_id);
		dev_dbg(ACRN_DBG_HYCALL,
			"gpa=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
			req->reqs.mmio_request.address,
			req->reqs.mmio_request.direction,
			req->reqs.mmio_request.size,
			req->reqs.mmio_request.value,
			req->processed);
		break;
	case REQ_PORTIO:
		dev_dbg(ACRN_DBG_HYCALL, "[vcpu_id=%d type=PORTIO]", vcpu_id);
		dev_dbg(ACRN_DBG_HYCALL,
			"IO=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
			req->reqs.pio_request.address,
			req->reqs.pio_request.direction,
			req->reqs.pio_request.size,
			req->reqs.pio_request.value,
			req->processed);
		break;
	default:
		dev_dbg(ACRN_DBG_HYCALL, "[vcpu_id=%d type=%d] NOT support type",
			vcpu_id, req->type);
		break;
	}
}
#else
static void acrn_print_request(__unused int vcpu_id,
		__unused struct vhm_request *req)
{
}
#endif

int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
{
	struct vhm_request_buffer *req_buf;
	long cur;

	ASSERT(sizeof(*req) == (4096/VHM_REQUEST_MAX),
		"vhm_request page broken!");

	if (!vcpu || !req || vcpu->vm->sw.req_buf == 0)
		return -1;

	req_buf = (void *)HPA2HVA(vcpu->vm->sw.req_buf);

	/* ACRN inserts the request to VHM and injects an upcall */
	cur = vcpu->vcpu_id;
	req_buf->req_queue[cur] = *req;

	/* Must clear the signal before we mark the req as valid.
	 * Once it is marked valid, VHM may process the req and signal us
	 * before we perform the upcall, because VHM can work in polling
	 * mode without waiting for the upcall.
	 */
	req_buf->req_queue[cur].valid = true;

	acrn_print_request(vcpu->vcpu_id, req_buf->req_queue + cur);

	/* signal VHM */
	fire_vhm_interrupt();

	/* pause the vcpu and wait for VHM to handle the MMIO request */
	atomic_store_rel_32(&vcpu->ioreq_pending, 1);
	pause_vcpu(vcpu, VCPU_PAUSED);

	return 0;
}

int acrn_insert_request_nowait(struct vcpu *vcpu, struct vhm_request *req)
{
	struct vhm_request_buffer *req_buf;
	long cur;

	if (!vcpu || !req || !vcpu->vm->sw.req_buf)
		return -1;

	req_buf = (void *)gpa2hpa(vcpu->vm, vcpu->vm->sw.req_buf);

	/* ACRN inserts the request to VHM and injects an upcall */
	cur = vcpu->vcpu_id;
	req_buf->req_queue[cur] = *req;
	req_buf->req_queue[cur].valid = true;

	/* signal VHM and yield the CPU */
	fire_vhm_interrupt();

	return 0;
}

static void _get_req_info_(struct vhm_request *req, int *id, char *type,
	char *state, char *dir, long *addr, long *val)
{
	strcpy_s(dir, 16, "NONE");
	*addr = *val = 0;
	*id = req->client;

	switch (req->type) {
	case REQ_PORTIO:
		strcpy_s(type, 16, "PORTIO");
		if (req->reqs.pio_request.direction == REQUEST_READ)
			strcpy_s(dir, 16, "READ");
		else
			strcpy_s(dir, 16, "WRITE");
		*addr = req->reqs.pio_request.address;
		*val = req->reqs.pio_request.value;
		break;
	case REQ_MMIO:
	case REQ_WP:
		strcpy_s(type, 16, "MMIO/WP");
		if (req->reqs.mmio_request.direction == REQUEST_READ)
			strcpy_s(dir, 16, "READ");
		else
			strcpy_s(dir, 16, "WRITE");
		*addr = req->reqs.mmio_request.address;
		*val = req->reqs.mmio_request.value;
		break;
	default:
		strcpy_s(type, 16, "UNKNOWN");
	}

	switch (req->processed) {
	case REQ_STATE_SUCCESS:
		strcpy_s(state, 16, "SUCCESS");
		break;
	case REQ_STATE_PENDING:
		strcpy_s(state, 16, "PENDING");
		break;
	case REQ_STATE_PROCESSING:
		strcpy_s(state, 16, "PROCESS");
		break;
	case REQ_STATE_FAILED:
		strcpy_s(state, 16, "FAILED");
		break;
	default:
		strcpy_s(state, 16, "UNKNOWN");
	}
}

int get_req_info(char *str, int str_max)
{
	int i, len, size = str_max, client_id;
	struct vhm_request_buffer *req_buf;
	struct vhm_request *req;
	char type[16], state[16], dir[16];
	long addr, val;
	struct list_head *pos;
	struct vm *vm;

	len = snprintf(str, size,
		"\r\nVM\tVCPU\tCID\tTYPE\tSTATE\tDIR\tADDR\t\t\tVAL");
	size -= len;
	str += len;

	spinlock_obtain(&vm_list_lock);
	list_for_each(pos, &vm_list) {
		vm = list_entry(pos, struct vm, list);
		req_buf = (struct vhm_request_buffer *)vm->sw.req_buf;
		if (req_buf) {
			for (i = 0; i < VHM_REQUEST_MAX; i++) {
				req = req_buf->req_queue + i;
				if (req->valid) {
					_get_req_info_(req, &client_id, type,
						state, dir, &addr, &val);
					len = snprintf(str, size,
						"\r\n%d\t%d\t%d\t%s\t%s\t%s",
						vm->attr.id, i, client_id, type,
						state, dir);
					size -= len;
					str += len;

					len = snprintf(str, size,
						"\t0x%016llx\t0x%016llx",
						addr, val);
					size -= len;
					str += len;
				}
			}
		}
	}
	spinlock_release(&vm_list_lock);
	snprintf(str, size, "\r\n");
	return 0;
}
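Every hypercall above follows the same convention: param carries a guest-physical address, and the handler copies the argument structure in with copy_from_vm() and, when there is a result, back out with copy_to_vm(). A sketch of that pattern with a hypothetical handler — hc_echo_param and hcall_echo are illustrative names, not part of this commit:

struct hc_echo_param {
	uint64_t in;	/* filled by the guest */
	uint64_t out;	/* filled by the hypervisor */
};

int64_t hcall_echo(struct vm *vm, uint64_t param)
{
	struct hc_echo_param p;

	/* param is a guest-physical address; never dereference it directly */
	if (copy_from_vm(vm, &p, param))
		return -1;

	p.out = p.in;	/* the real handler's work goes here */

	if (copy_to_vm(vm, &p, param))
		return -1;

	return 0;
}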
hypervisor/common/schedule.c (new file, 234 lines)
@@ -0,0 +1,234 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <hv_lib.h>
#include <acrn_common.h>
#include <hv_arch.h>
#include <hv_debug.h>
#include <schedule.h>

struct sched_context {
	spinlock_t runqueue_lock;
	struct list_head runqueue;
	unsigned long need_scheduled;
	struct vcpu *curr_vcpu;
	spinlock_t scheduler_lock;
};

static DEFINE_CPU_DATA(struct sched_context, sched_ctx);
static unsigned long pcpu_used_bitmap;

void init_scheduler(void)
{
	int i;

	for (i = 0; i < phy_cpu_num; i++) {
		spinlock_init(&per_cpu(sched_ctx, i).runqueue_lock);
		spinlock_init(&per_cpu(sched_ctx, i).scheduler_lock);
		INIT_LIST_HEAD(&per_cpu(sched_ctx, i).runqueue);
		per_cpu(sched_ctx, i).need_scheduled = 0;
		per_cpu(sched_ctx, i).curr_vcpu = NULL;
	}
}

void get_schedule_lock(int pcpu_id)
{
	spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).scheduler_lock);
}

void release_schedule_lock(int pcpu_id)
{
	spinlock_release(&per_cpu(sched_ctx, pcpu_id).scheduler_lock);
}

int allocate_pcpu(void)
{
	int i;

	for (i = 0; i < phy_cpu_num; i++) {
		if (bitmap_test_and_set(i, &pcpu_used_bitmap) == 0) {
#ifdef CONFIG_EFI_STUB
			efi_deferred_wakeup_pcpu(i);
#endif
			return i;
		}
	}

	return -1;
}

void set_pcpu_used(int pcpu_id)
{
	bitmap_set(pcpu_id, &pcpu_used_bitmap);
}

void free_pcpu(int pcpu_id)
{
	bitmap_clr(pcpu_id, &pcpu_used_bitmap);
}

void add_vcpu_to_runqueue(struct vcpu *vcpu)
{
	int pcpu_id = vcpu->pcpu_id;

	spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
	if (list_empty(&vcpu->run_list))
		list_add_tail(&vcpu->run_list,
			&per_cpu(sched_ctx, pcpu_id).runqueue);
	spinlock_release(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
}

void remove_vcpu_from_runqueue(struct vcpu *vcpu)
{
	int pcpu_id = vcpu->pcpu_id;

	spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
	list_del_init(&vcpu->run_list);
	spinlock_release(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
}

static struct vcpu *select_next_vcpu(int pcpu_id)
{
	struct vcpu *vcpu = NULL;

	spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
	if (!list_empty(&per_cpu(sched_ctx, pcpu_id).runqueue)) {
		vcpu = get_first_item(&per_cpu(sched_ctx, pcpu_id).runqueue,
			struct vcpu, run_list);
	}
	spinlock_release(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);

	return vcpu;
}

void make_reschedule_request(struct vcpu *vcpu)
{
	bitmap_set(NEED_RESCHEDULED,
		&per_cpu(sched_ctx, vcpu->pcpu_id).need_scheduled);
	send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
}

int need_rescheduled(int pcpu_id)
{
	return bitmap_test_and_clear(NEED_RESCHEDULED,
		&per_cpu(sched_ctx, pcpu_id).need_scheduled);
}

static void context_switch_out(struct vcpu *vcpu)
{
	/* if it's the idle thread, no action on switch-out */
	if (vcpu == NULL)
		return;

	atomic_store_rel_32(&vcpu->running, 0);
	/* do the prev vcpu context switch out */
	/* For now we don't need to invalidate the EPT.
	 * But if we ever run more than one vcpu on one pcpu,
	 * an EPT invalidation operation must be added here.
	 */
}

static void context_switch_in(struct vcpu *vcpu)
{
	/* update current_vcpu */
	get_cpu_var(sched_ctx).curr_vcpu = vcpu;

	/* if it's the idle thread, no action on switch-in */
	if (vcpu == NULL)
		return;

	atomic_store_rel_32(&vcpu->running, 1);
	/* FIXME:
	 * For now we don't need to load the new vcpu's VMCS because
	 * we only switch between the vcpu loop and the idle loop.
	 * If we ever run more than one vcpu on one pcpu, a VMCS load
	 * operation must be added here.
	 */
}

void default_idle(void)
{
	int pcpu_id = get_cpu_id();

	while (1) {
		if (need_rescheduled(pcpu_id))
			schedule();
		else
			__asm __volatile("pause" ::: "memory");
	}
}

static void switch_to(struct vcpu *curr)
{
	/*
	 * Reset the stack pointer here. Otherwise schedule() would be
	 * a recursive call and the stack would eventually overflow.
	 */
	uint64_t cur_sp = (uint64_t)&get_cpu_var(stack)[STACK_SIZE];

	if (curr == NULL) {
		asm volatile ("movq %1, %%rsp\n"
				"movq $0, %%rdi\n"
				"jmp *%0\n"
				:
				: "a"(default_idle), "r"(cur_sp)
				: "memory");
	} else {
		asm volatile ("movq %2, %%rsp\n"
				"movq %0, %%rdi\n"
				"jmp *%1\n"
				:
				: "c"(curr), "a"(vcpu_thread), "r"(cur_sp)
				: "memory");
	}
}

void schedule(void)
{
	int pcpu_id = get_cpu_id();
	struct vcpu *next = NULL;
	struct vcpu *prev = per_cpu(sched_ctx, pcpu_id).curr_vcpu;

	get_schedule_lock(pcpu_id);
	next = select_next_vcpu(pcpu_id);

	if (prev == next) {
		release_schedule_lock(pcpu_id);
		return;
	}

	context_switch_out(prev);
	context_switch_in(next);
	release_schedule_lock(pcpu_id);

	switch_to(next);

	ASSERT(false, "Shouldn't go here");
}
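The scheduler exports a small API: allocate a physical CPU, queue the vcpu, and send a reschedule IPI so the target pcpu leaves default_idle() and switches to vcpu_thread(). A usage sketch of an assumed caller — the function name and field assignment are illustrative; compare hcall_create_vcpu() in hypercall.c, which pairs allocate_pcpu() with prepare_vcpu():

int start_vcpu_on_free_pcpu(struct vcpu *vcpu)
{
	int pcpu_id = allocate_pcpu();

	if (pcpu_id == -1)
		return -1;		/* no free physical CPU left */

	vcpu->pcpu_id = pcpu_id;
	add_vcpu_to_runqueue(vcpu);
	make_reschedule_request(vcpu);	/* IPI kicks the target pcpu */

	return 0;
}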
hypervisor/common/stack_protector.c (new file, 39 lines)
@@ -0,0 +1,39 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <hv_lib.h>
#include <acrn_common.h>
#include <hv_arch.h>
#include <hv_debug.h>

void __stack_chk_fail(void)
{
	ASSERT(0, "stack check fails in HV\n");
}
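With -fstack-protector, the compiler emits a call to __stack_chk_fail() when a function's stack canary is corrupted; it also references a guard value. A definition like the one below is typically needed somewhere in the image — whether this commit provides __stack_chk_guard elsewhere is not shown here, so this is only an assumption for context:

/* Canary value checked by compiler-generated prologue/epilogue code.
 * The constant is arbitrary; a hardened build would randomize it at boot.
 */
unsigned long __stack_chk_guard = 0x595e9fbd94fda766UL;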
hypervisor/common/vm_load.c (new file, 254 lines)
@@ -0,0 +1,254 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <hypervisor.h>
#include <hv_lib.h>
#include <acrn_common.h>
#include <hv_arch.h>
#include <bsp_extern.h>
#include <hv_debug.h>

struct zero_page {
	uint8_t pad1[0x1e8];			/* 0x000 */
	uint8_t e820_nentries;			/* 0x1e8 */
	uint8_t pad2[0x8];			/* 0x1e9 */

	struct {
		uint8_t setup_sects;		/* 0x1f1 */
		uint8_t hdr_pad1[0x1e];		/* 0x1f2 */
		uint8_t loader_type;		/* 0x210 */
		uint8_t load_flags;		/* 0x211 */
		uint8_t hdr_pad2[0x6];		/* 0x212 */
		uint32_t ramdisk_addr;		/* 0x218 */
		uint32_t ramdisk_size;		/* 0x21c */
		uint8_t hdr_pad3[0x8];		/* 0x220 */
		uint32_t bootargs_addr;		/* 0x228 */
		uint8_t hdr_pad4[0x1c];		/* 0x22c */
		uint32_t payload_offset;	/* 0x248 */
		uint32_t payload_length;	/* 0x24c */
		uint8_t hdr_pad5[0x18];		/* 0x250 */
	} __packed hdr;

	uint8_t pad3[0x68];			/* 0x268 */
	struct e820_entry e820[0x80];		/* 0x2d0 */
	uint8_t pad4[0x330];			/* 0xcd0 */
} __packed;

static uint32_t create_e820_table(struct e820_entry *_e820)
{
	uint32_t i;

	ASSERT(e820_entries > 0,
		"e820 should be initialized");

	for (i = 0; i < e820_entries; i++) {
		_e820[i].baseaddr = e820[i].baseaddr;
		_e820[i].length = e820[i].length;
		_e820[i].type = e820[i].type;
	}

	return e820_entries;
}

static uint64_t create_zero_page(struct vm *vm)
{
	struct zero_page *zeropage;
	struct sw_linux *sw_linux = &(vm->sw.linux_info);
	struct zero_page *hva;
	uint64_t gpa;

	/* Set the zeropage in the Linux guest RAM region just past the
	 * boot args
	 */
	hva = GPA2HVA(vm, (uint64_t)sw_linux->bootargs_load_addr);
	zeropage = (struct zero_page *)((char *)hva + MEM_4K);

	/* clear the zeropage */
	memset(zeropage, 0, MEM_2K);

	/* copy part of the header into the zero page */
	hva = GPA2HVA(vm, (uint64_t)vm->sw.kernel_info.kernel_load_addr);
	memcpy_s(&(zeropage->hdr), sizeof(zeropage->hdr),
		&(hva->hdr), sizeof(hva->hdr));

	/* See if the kernel has a RAM disk */
	if (sw_linux->ramdisk_src_addr) {
		/* Copy the ramdisk load_addr and size into the zeropage
		 * header structure
		 */
		zeropage->hdr.ramdisk_addr =
			(uint32_t)(uint64_t)sw_linux->ramdisk_load_addr;
		zeropage->hdr.ramdisk_size = (uint32_t)sw_linux->ramdisk_size;
	}

	/* Copy the bootargs load_addr into the zeropage header structure */
	zeropage->hdr.bootargs_addr =
		(uint32_t)(uint64_t)sw_linux->bootargs_load_addr;

	/* set constant arguments in the zero page */
	zeropage->hdr.loader_type = 0xff;
	zeropage->hdr.load_flags |= (1 << 5);	/* quiet */

	/* Create/add e820 table entries in the zeropage */
	zeropage->e820_nentries = create_e820_table(zeropage->e820);

	/* Get the guest-physical address of the zeropage */
	gpa = hpa2gpa(vm, HVA2HPA((uint64_t)zeropage));

	/* Return the guest-physical base address of the zeropage */
	return gpa;
}

int load_guest(struct vm *vm, struct vcpu *vcpu)
{
	int ret = 0;
	void *hva;
	struct run_context *cur_context =
		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
	uint64_t lowmem_gpa_top;

	hva = GPA2HVA(vm, GUEST_CFG_OFFSET);
	lowmem_gpa_top = *(uint64_t *)hva;

	/* hardcode the vcpu entry addr (kernel entry) & rsi (zeropage) */
	memset(cur_context->guest_cpu_regs.longs,
		0, sizeof(uint64_t)*NUM_GPRS);

	hva = GPA2HVA(vm, lowmem_gpa_top -
		MEM_4K - MEM_2K);
	vcpu->entry_addr = (void *)(*((uint64_t *)hva));
	cur_context->guest_cpu_regs.regs.rsi =
		lowmem_gpa_top - MEM_4K;

	pr_info("%s, Set config according to predefined offset:",
		__func__);
	pr_info("VCPU%d Entry: 0x%llx, RSI: 0x%016llx, cr3: 0x%016llx",
		vcpu->vcpu_id, vcpu->entry_addr,
		cur_context->guest_cpu_regs.regs.rsi,
		vm->arch_vm.guest_pml4);

	return ret;
}

int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
{
	int ret = 0;
	void *hva;
	struct run_context *cur_context =
		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
	char dyn_bootargs[100] = {0};
	uint32_t kernel_entry_offset;
	struct zero_page *zeropage;

	ASSERT(vm != NULL, "Incorrect argument");

	pr_dbg("Loading guest to run-time location");

	/* FIXME: set config according to predefined offset */
	if (!is_vm0(vm))
		return load_guest(vm, vcpu);

	/* calculate the kernel entry point */
	zeropage = (struct zero_page *)
		vm->sw.kernel_info.kernel_src_addr;
	kernel_entry_offset = (zeropage->hdr.setup_sects + 1) * 512;
	/* the 64-bit entry point is 512 bytes after that */
	kernel_entry_offset += 512;
	vm->sw.kernel_info.kernel_entry_addr =
		(void *)((unsigned long)vm->sw.kernel_info.kernel_load_addr
			+ kernel_entry_offset);
	if (is_vcpu_bsp(vcpu)) {
		/* Set the VCPU entry point to the kernel entry */
		vcpu->entry_addr = vm->sw.kernel_info.kernel_entry_addr;
		pr_info("%s, VM %d VCPU %d Entry: 0x%016llx ",
			__func__, vm->attr.id, vcpu->vcpu_id, vcpu->entry_addr);
	}

	/* Calculate the host-virtual address where the guest will be loaded */
	hva = GPA2HVA(vm, (uint64_t)vm->sw.kernel_info.kernel_load_addr);

	/* Copy the guest kernel image to its run-time location */
	memcpy_s((void *)hva, vm->sw.kernel_info.kernel_size,
		vm->sw.kernel_info.kernel_src_addr,
		vm->sw.kernel_info.kernel_size);

	/* See if the guest is a Linux guest */
	if (vm->sw.kernel_type == VM_LINUX_GUEST) {
		/* Documentation states: ebx=0, edi=0, ebp=0, esi=ptr to
		 * zeropage
		 */
		memset(cur_context->guest_cpu_regs.longs,
			0, sizeof(uint64_t) * NUM_GPRS);

		/* Get the host-virtual address for the guest bootargs */
		hva = GPA2HVA(vm,
			(uint64_t)vm->sw.linux_info.bootargs_load_addr);

		/* Copy the guest OS bootargs to their load location */
		strcpy_s((char *)hva, MEM_2K,
			vm->sw.linux_info.bootargs_src_addr);

		/* add "cma=XXXXM@0xXXXXXXXX" to the cmdline */
		if (is_vm0(vm) && (e820_mem.max_ram_blk_size > 0)) {
			snprintf(dyn_bootargs, 100, " cma=%dM@0x%llx\n",
				(e820_mem.max_ram_blk_size >> 20),
				e820_mem.max_ram_blk_base);
			/* Delete the '\n' at the end of the cmdline */
			strcpy_s((char *)hva
				+ vm->sw.linux_info.bootargs_size - 1,
				100, dyn_bootargs);
		}

		/* Check if a RAM disk is present with the Linux guest */
		if (vm->sw.linux_info.ramdisk_src_addr) {
			/* Get the host-virtual address for the guest
			 * RAM disk
			 */
			hva = GPA2HVA(vm,
				(uint64_t)vm->sw.linux_info.ramdisk_load_addr);

			/* Copy the RAM disk to its load location */
			memcpy_s((void *)hva, vm->sw.linux_info.ramdisk_size,
				vm->sw.linux_info.ramdisk_src_addr,
				vm->sw.linux_info.ramdisk_size);
		}

		/* Create the zeropage and put its guest-physical base
		 * address in RSI
		 */
		cur_context->guest_cpu_regs.regs.rsi = create_zero_page(vm);

		pr_info("%s, RSI pointing to zero page for VM %d at GPA %X",
			__func__, vm->attr.id,
			cur_context->guest_cpu_regs.regs.rsi);

	} else {
		pr_err("%s, Loading VM SW failed", __func__);
		ret = -EINVAL;
	}

	return ret;
}
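A worked instance of the entry-point arithmetic in general_sw_loader(), assuming a bzImage header with setup_sects = 31 (a common value; the Linux boot protocol stores the real-mode setup size in 512-byte sectors):

/* kernel_entry_offset = (31 + 1) * 512 = 0x4000  -- past the real-mode setup
 * kernel_entry_offset += 512           = 0x4200  -- the 64-bit entry point
 */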