HV: Fix missing brackets for MISRA C Violations
Patch 5 of 7. Added changes to make sure MISRA C violations are fixed for rules 11S and 12S (missing brackets around conditional bodies).

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
parent d16d9e5751
commit 4aa6cdacf7
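The change is mechanical throughout: every `if`/`else` arm whose body is a single unbraced statement gains an explicit compound block, which is what the brace rules flag. A minimal sketch of the before/after shape — the helper functions and the local SOFTIRQ_MASK definition below are illustrative stand-ins, not code taken from this patch:

#include <stdbool.h>
#include <stdint.h>

#define SOFTIRQ_MASK 0x3FUL	/* illustrative value, not ACRN's definition */

/* Non-compliant shape: the if body is a bare statement, so a second
 * statement later added at the same indentation would silently fall
 * outside the conditional. */
bool pending_before(uint64_t bitmap)
{
	if ((bitmap & SOFTIRQ_MASK) == 0UL)
		return false;
	return true;
}

/* Compliant shape applied throughout this patch: every controlled
 * body is a braced compound statement. */
bool pending_after(uint64_t bitmap)
{
	if ((bitmap & SOFTIRQ_MASK) == 0UL) {
		return false;
	}
	return true;
}

Behavior is unchanged in both the sketch and the patch; the braces only make the extent of each conditional explicit.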
@@ -47,22 +47,25 @@ void exec_softirq(void)
 	if (cpu_id >= phys_cpu_num)
 		return;

-	if (((*bitmap) & SOFTIRQ_MASK) == 0UL)
+	if (((*bitmap) & SOFTIRQ_MASK) == 0UL) {
 		return;
+	}

 	/* Disable softirq
 	 * SOFTIRQ_ATOMIC bit = 0 means softirq already in execution
 	 */
-	if (!bitmap_test_and_clear(SOFTIRQ_ATOMIC, bitmap))
+	if (!bitmap_test_and_clear(SOFTIRQ_ATOMIC, bitmap)) {
 		return;
+	}

 again:
 	CPU_IRQ_ENABLE();

 	while (1) {
 		softirq_id = ffs64(*bitmap);
-		if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX))
+		if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX)) {
 			break;
+		}

 		bitmap_clear(softirq_id, bitmap);

@@ -81,8 +84,9 @@ again:

 	CPU_IRQ_DISABLE();

-	if (((*bitmap) & SOFTIRQ_MASK) != 0U)
+	if (((*bitmap) & SOFTIRQ_MASK) != 0U) {
 		goto again;
+	}

 	enable_softirq(cpu_id);
 }
@@ -16,8 +16,9 @@ uint32_t tsc_khz = 0U;
 static void run_timer(struct timer *timer)
 {
 	/* deadline = 0 means stop timer, we should skip */
-	if ((timer->func != NULL) && timer->fire_tsc != 0UL)
+	if ((timer->func != NULL) && timer->fire_tsc != 0UL) {
 		timer->func(timer->priv_data);
+	}

 	TRACE_2L(TRACE_TIMER_ACTION_PCKUP, timer->fire_tsc, 0);
 }
@@ -54,17 +55,20 @@ static void __add_timer(struct per_cpu_timers *cpu_timer,
 	prev = &cpu_timer->timer_list;
 	list_for_each(pos, &cpu_timer->timer_list) {
 		tmp = list_entry(pos, struct timer, node);
-		if (tmp->fire_tsc < tsc)
+		if (tmp->fire_tsc < tsc) {
 			prev = &tmp->node;
-		else
+		}
+		else {
 			break;
+		}
 	}

 	list_add(&timer->node, prev);

-	if (need_update != NULL)
+	if (need_update != NULL) {
 		/* update the physical timer if we're on the timer_list head */
 		*need_update = (prev == &cpu_timer->timer_list);
+	}
 }

 int add_timer(struct timer *timer)
@@ -73,20 +77,23 @@ int add_timer(struct timer *timer)
 	uint16_t pcpu_id;
 	bool need_update;

-	if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0UL)
+	if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0UL) {
 		return -EINVAL;
+	}

 	/* limit minimal periodic timer cycle period */
-	if (timer->mode == TICK_MODE_PERIODIC)
+	if (timer->mode == TICK_MODE_PERIODIC) {
 		timer->period_in_cycle = max(timer->period_in_cycle,
 				us_to_ticks(MIN_TIMER_PERIOD_US));
+	}

 	pcpu_id = get_cpu_id();
 	cpu_timer = &per_cpu(cpu_timers, pcpu_id);
 	__add_timer(cpu_timer, timer, &need_update);

-	if (need_update)
+	if (need_update) {
 		update_physical_timer(cpu_timer);
+	}

 	TRACE_2L(TRACE_TIMER_ACTION_ADDED, timer->fire_tsc, 0);
 	return 0;
@@ -95,8 +102,9 @@ int add_timer(struct timer *timer)

 void del_timer(struct timer *timer)
 {
-	if ((timer != NULL) && !list_empty(&timer->node))
+	if ((timer != NULL) && !list_empty(&timer->node)) {
 		list_del_init(&timer->node);
+	}
 }

 static int request_timer_irq(uint16_t pcpu_id,
@@ -105,8 +113,9 @@ static int request_timer_irq(uint16_t pcpu_id,
 {
 	struct dev_handler_node *node = NULL;

-	if (pcpu_id >= phys_cpu_num)
+	if (pcpu_id >= phys_cpu_num) {
 		return -EINVAL;
+	}

 	if (per_cpu(timer_node, pcpu_id) != NULL) {
 		pr_err("CPU%d timer isr already added", pcpu_id);
@@ -165,8 +174,9 @@ void timer_cleanup(void)
 {
 	uint16_t pcpu_id = get_cpu_id();

-	if (per_cpu(timer_node, pcpu_id) != NULL)
+	if (per_cpu(timer_node, pcpu_id) != NULL) {
 		unregister_handler_common(per_cpu(timer_node, pcpu_id));
+	}

 	per_cpu(timer_node, pcpu_id) = NULL;
 }
@@ -202,8 +212,9 @@ void timer_softirq(uint16_t pcpu_id)
 				timer->fire_tsc += timer->period_in_cycle;
 				__add_timer(cpu_timer, timer, NULL);
 			}
-		} else
+		} else {
 			break;
+		}
 	}

 	/* update nearest timer */
@@ -276,9 +287,10 @@ static uint64_t native_calibrate_tsc(void)
 		cpuid(0x15, &eax_denominator, &ebx_numerator,
 			&ecx_hz, &reserved);

-		if (eax_denominator != 0U && ebx_numerator != 0U)
+		if (eax_denominator != 0U && ebx_numerator != 0U) {
 			return (uint64_t) ecx_hz *
 				ebx_numerator / eax_denominator;
+		}
 	}

 	return 0;
@@ -288,8 +300,9 @@ void calibrate_tsc(void)
 {
 	uint64_t tsc_hz;
 	tsc_hz = native_calibrate_tsc();
-	if (tsc_hz == 0U)
+	if (tsc_hz == 0U) {
 		tsc_hz = pit_calibrate_tsc(CAL_MS);
+	}
 	tsc_khz = (uint32_t)(tsc_hz / 1000UL);
 	printf("%s, tsc_khz=%lu\n", __func__, tsc_khz);
 }
@@ -86,10 +86,12 @@ int exec_vmxon_instr(uint16_t pcpu_id)
 	struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);

 	/* Allocate page aligned memory for VMXON region */
-	if (per_cpu(vmxon_region_pa, pcpu_id) == 0)
+	if (per_cpu(vmxon_region_pa, pcpu_id) == 0) {
 		vmxon_region_va = alloc_page();
-	else
+	}
+	else {
 		vmxon_region_va = HPA2HVA(per_cpu(vmxon_region_pa, pcpu_id));
+	}

 	if (vmxon_region_va != NULL) {
 		/* Initialize vmxon page with revision id from IA32 VMX BASIC
@@ -112,9 +114,10 @@ int exec_vmxon_instr(uint16_t pcpu_id)
 			vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
 			ret = exec_vmptrld(&vmcs_pa);
 		}
-	} else
+	} else {
 		pr_err("%s, alloc memory for VMXON region failed\n",
 			__func__);
+	}

 	return ret;
 }
@@ -129,8 +132,9 @@ int vmx_off(uint16_t pcpu_id)
 	if (vcpu != NULL) {
 		vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
 		ret = exec_vmclear((void *)&vmcs_pa);
-		if (ret != 0)
+		if (ret != 0) {
 			return ret;
+		}
 	}

 	asm volatile ("vmxoff" : : : "memory");
@@ -143,8 +147,9 @@ int exec_vmclear(void *addr)
 	uint64_t rflags;
 	int status = 0;

-	if (addr == NULL)
+	if (addr == NULL) {
 		status = -EINVAL;
+	}
 	ASSERT(status == 0, "Incorrect arguments");

 	asm volatile (
@@ -156,8 +161,9 @@ int exec_vmclear(void *addr)
 		: "%rax", "cc", "memory");

 	/* if carry and zero flags are clear operation success */
-	if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U)
+	if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U) {
 		status = -EINVAL;
+	}

 	return status;
 }
@@ -167,8 +173,9 @@ int exec_vmptrld(void *addr)
 	uint64_t rflags;
 	int status = 0;

-	if (addr == NULL)
+	if (addr == NULL) {
 		status = -EINVAL;
+	}
 	ASSERT(status == 0, "Incorrect arguments");

 	asm volatile (
@@ -181,8 +188,9 @@ int exec_vmptrld(void *addr)
 		: "%rax", "cc");

 	/* if carry and zero flags are clear operation success */
-	if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U)
+	if ((rflags & (RFLAGS_C | RFLAGS_Z)) != 0U) {
 		status = -EINVAL;
+	}

 	return status;
 }
@@ -664,13 +672,16 @@ static void init_guest_state(struct vcpu *vcpu)
 	if (vcpu_mode == CPU_MODE_REAL) {
 		/* RIP is set here */
 		if (is_vcpu_bsp(vcpu)) {
-			if ((uint64_t)vcpu->entry_addr < 0x100000UL)
+			if ((uint64_t)vcpu->entry_addr < 0x100000UL) {
 				value32 = (uint64_t)vcpu->entry_addr & 0x0FUL;
-			else
+			}
+			else {
 				value32 = 0x0000FFF0U;
+			}
 		}
-	} else
+	} else {
 		value32 = (uint32_t)((uint64_t)vcpu->entry_addr);
+	}

 	pr_dbg("GUEST RIP on VMEntry %x ", value32);
 	exec_vmwrite(field, value32);
@@ -706,8 +717,9 @@ static void init_guest_state(struct vcpu *vcpu)

 	value32 = gdtb.limit;

-	if (((gdtb.base >> 47) & 0x1UL) != 0UL)
+	if (((gdtb.base >> 47) & 0x1UL) != 0UL) {
 		gdtb.base |= 0xffff000000000000UL;
+	}

 	base = gdtb.base;

@@ -741,8 +753,9 @@ static void init_guest_state(struct vcpu *vcpu)
 	/* Limit */
 	limit = idtb.limit;

-	if (((idtb.base >> 47) & 0x1UL) != 0UL)
+	if (((idtb.base >> 47) & 0x1UL) != 0UL) {
 		idtb.base |= 0xffff000000000000UL;
+	}

 	/* Base */
 	base = idtb.base;
@@ -835,10 +848,12 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_GS_LIMIT: 0x%x ", limit);

 	/* Access */
-	if (vcpu_mode == CPU_MODE_REAL)
+	if (vcpu_mode == CPU_MODE_REAL) {
 		value32 = REAL_MODE_DATA_SEG_AR;
-	else /* same value for protected mode and long mode */
+	}
+	else { /* same value for protected mode and long mode */
 		value32 = PROTECTED_MODE_DATA_SEG_AR;
+	}

 	field = VMX_GUEST_ES_ATTR;
 	exec_vmwrite(field, value32);
@@ -1046,8 +1061,9 @@ static void init_host_state(__unused struct vcpu *vcpu)
 	asm volatile ("sgdt %0":"=m"(gdtb)::"memory");
 	value32 = gdtb.limit;

-	if (((gdtb.base >> 47) & 0x1UL) != 0UL)
+	if (((gdtb.base >> 47) & 0x1UL) != 0UL) {
 		gdtb.base |= 0xffff000000000000UL;
+	}

 	/* Set up the guest and host GDTB base fields with current GDTB base */
 	field = VMX_HOST_GDTR_BASE;
@@ -1056,8 +1072,9 @@ static void init_host_state(__unused struct vcpu *vcpu)

 	/* TODO: Should guest TR point to host TR ? */
 	trbase = gdtb.base + tr_sel;
-	if (((trbase >> 47) & 0x1UL) != 0UL)
+	if (((trbase >> 47) & 0x1UL) != 0UL) {
 		trbase |= 0xffff000000000000UL;
+	}

 	/* SS segment override */
 	asm volatile ("mov %0,%%rax\n"
@@ -1082,8 +1099,9 @@ static void init_host_state(__unused struct vcpu *vcpu)
 	/* Obtain the current interrupt descriptor table base */
 	asm volatile ("sidt %0":"=m"(idtb)::"memory");
 	/* base */
-	if (((idtb.base >> 47) & 0x1UL) != 0UL)
+	if (((idtb.base >> 47) & 0x1UL) != 0UL) {
 		idtb.base |= 0xffff000000000000UL;
+	}

 	field = VMX_HOST_IDTR_BASE;
 	exec_vmwrite(field, idtb.base);
@@ -1235,20 +1253,23 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 			VMX_PROCBASED_CTLS2_RDTSCP |
 			VMX_PROCBASED_CTLS2_UNRESTRICT);

-	if (vcpu->arch_vcpu.vpid != 0U)
+	if (vcpu->arch_vcpu.vpid != 0U) {
 		value32 |= VMX_PROCBASED_CTLS2_VPID;
-	else
+	} else {
 		value32 &= ~VMX_PROCBASED_CTLS2_VPID;
+	}

 	if (is_vapic_supported()) {
 		value32 |= VMX_PROCBASED_CTLS2_VAPIC;

-		if (is_vapic_virt_reg_supported())
+		if (is_vapic_virt_reg_supported()) {
 			value32 |= VMX_PROCBASED_CTLS2_VAPIC_REGS;
+		}

-		if (is_vapic_intr_delivery_supported())
+		if (is_vapic_intr_delivery_supported()) {
 			value32 |= VMX_PROCBASED_CTLS2_VIRQ;
-		else
+		}
+		else {
 			/*
 			 * This field exists only on processors that support
 			 * the 1-setting of the "use TPR shadow"
@@ -1258,6 +1279,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 			 * - pg 2904 24.6.8
 			 */
 			exec_vmwrite(VMX_TPR_THRESHOLD, 0);
+		}
 	}

 	if (cpu_has_cap(X86_FEATURE_OSXSAVE)) {
@@ -1292,10 +1314,12 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	}

 	/* Check for EPT support */
-	if (is_ept_supported())
+	if (is_ept_supported()) {
 		pr_dbg("EPT is supported");
-	else
+	}
+	else {
 		pr_err("Error: EPT is not supported");
+	}

 	/* Load EPTP execution control
 	 * TODO: introduce API to make this data driven based
@@ -1380,8 +1404,9 @@ static void init_entry_ctrl(__unused struct vcpu *vcpu)
 	 * IA32_PAT and IA32_EFER
 	 */
 	value32 = msr_read(MSR_IA32_VMX_ENTRY_CTLS);
-	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT)
+	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		value32 |= (VMX_ENTRY_CTLS_IA32E_MODE);
+	}

 	value32 |= (VMX_ENTRY_CTLS_LOAD_EFER |
 			VMX_ENTRY_CTLS_LOAD_PAT);
@@ -1540,8 +1565,9 @@ int init_vmcs(struct vcpu *vcpu)
 	int status = 0;
 	uint64_t vmcs_pa;

-	if (vcpu == NULL)
+	if (vcpu == NULL) {
 		status = -EINVAL;
+	}
 	ASSERT(status == 0, "Incorrect arguments");

 	/* Log message */
@@ -1569,8 +1595,9 @@ int init_vmcs(struct vcpu *vcpu)
 	init_exit_ctrl(vcpu);

 #ifdef CONFIG_EFI_STUB
-	if (is_vm0(vcpu->vm) && vcpu->pcpu_id == 0)
+	if (is_vm0(vcpu->vm) && vcpu->pcpu_id == 0) {
 		override_uefi_vmcs(vcpu);
+	}
 #endif
 	/* Return status to caller */
 	return status;
@@ -232,8 +232,9 @@ static void iommu_flush_cache(struct dmar_drhd_rt *dmar_uint,
 	uint32_t i;

 	/* if vtd support page-walk coherency, no need to flush cacheline */
-	if (iommu_ecap_c(dmar_uint->ecap) != 0U)
+	if (iommu_ecap_c(dmar_uint->ecap) != 0U) {
 		return;
+	}

 	for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
 		clflush((char *)p + i);
@@ -327,8 +328,9 @@ static uint8_t dmar_uint_get_msagw(struct dmar_drhd_rt *dmar_uint)
 	uint8_t sgaw = iommu_cap_sagaw(dmar_uint->cap);

 	for (i = 4; i >= 0; i--) {
-		if (((1 << i) & sgaw) != 0)
+		if (((1 << i) & sgaw) != 0) {
 			break;
+		}
 	}
 	return (uint8_t)i;
 }
@@ -420,28 +422,34 @@ static void dmar_register_hrhd(struct dmar_drhd_rt *dmar_uint)
 	 * How to guarantee it when EPT is used as second-level
 	 * translation paging structures?
 	 */
-	if (iommu_ecap_sc(dmar_uint->ecap) == 0U)
+	if (iommu_ecap_sc(dmar_uint->ecap) == 0U) {
 		dev_dbg(ACRN_DBG_IOMMU,
 			"dmar uint doesn't support snoop control!");
+	}

 	dmar_uint->max_domain_id = iommu_cap_ndoms(dmar_uint->cap) - 1;

-	if (dmar_uint->max_domain_id > 63U)
+	if (dmar_uint->max_domain_id > 63U) {
 		dmar_uint->max_domain_id = 63U;
+	}

-	if (max_domain_id > dmar_uint->max_domain_id)
+	if (max_domain_id > dmar_uint->max_domain_id) {
 		max_domain_id = dmar_uint->max_domain_id;
+	}

 	/* register operation is considered serial, no lock here */
-	if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U)
+	if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U) {
 		list_add_tail(&dmar_uint->list, &dmar_drhd_units);
-	else
+	}
+	else {
 		list_add(&dmar_uint->list, &dmar_drhd_units);
+	}

 	dmar_hdrh_unit_count++;

-	if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0)
+	if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0) {
 		dmar_disable_translation(dmar_uint);
+	}
 }

 static struct dmar_drhd_rt *device_to_dmaru(uint16_t segment, uint8_t bus,
@@ -454,20 +462,23 @@ static struct dmar_drhd_rt *device_to_dmaru(uint16_t segment, uint8_t bus,
 	list_for_each(pos, &dmar_drhd_units) {
 		dmar_uint = list_entry(pos, struct dmar_drhd_rt, list);

-		if (dmar_uint->drhd->segment != segment)
+		if (dmar_uint->drhd->segment != segment) {
 			continue;
+		}

 		for (i = 0U; i < dmar_uint->drhd->dev_cnt; i++) {
 			if ((dmar_uint->drhd->devices[i].bus == bus) &&
-				(dmar_uint->drhd->devices[i].devfun == devfun))
+				(dmar_uint->drhd->devices[i].devfun == devfun)) {
 				return dmar_uint;
+			}
 		}

 		/* has the same segment number and
 		 * the dmar unit has INCLUDE_PCI_ALL set
 		 */
-		if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U)
+		if ((dmar_uint->drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) != 0U) {
 			return dmar_uint;
+		}
 	}

 	return NULL;
@@ -520,8 +531,9 @@ static void dmar_write_buffer_flush(struct dmar_drhd_rt *dmar_uint)
 {
 	uint32_t status;

-	if (iommu_cap_rwbf(dmar_uint->cap) == 0U)
+	if (iommu_cap_rwbf(dmar_uint->cap) == 0U) {
 		return;
+	}

 	IOMMU_LOCK(dmar_uint);
 	iommu_write64(dmar_uint, DMAR_GCMD_REG,
@@ -606,8 +618,9 @@ static void dmar_invalid_iotlb(struct dmar_drhd_rt *dmar_uint,
 		return;
 	}
 	IOMMU_LOCK(dmar_uint);
-	if (addr != 0U)
+	if (addr != 0U) {
 		iommu_write64(dmar_uint, dmar_uint->ecap_iotlb_offset, addr);
+	}

 	iommu_write64(dmar_uint, dmar_uint->ecap_iotlb_offset + 8, cmd);
 	/* read upper 32bits to check */
@@ -687,29 +700,37 @@ static void dmar_fault_msi_write(struct dmar_drhd_rt *dmar_uint,
 #if DBG_IOMMU
 static void fault_status_analysis(uint32_t status)
 {
-	if (DMA_FSTS_PFO(status))
+	if (DMA_FSTS_PFO(status)) {
 		pr_info("Primary Fault Overflow");
+	}

-	if (DMA_FSTS_PPF(status))
+	if (DMA_FSTS_PPF(status)) {
 		pr_info("Primary Pending Fault");
+	}

-	if (DMA_FSTS_AFO(status))
+	if (DMA_FSTS_AFO(status)) {
 		pr_info("Advanced Fault Overflow");
+	}

-	if (DMA_FSTS_APF(status))
+	if (DMA_FSTS_APF(status)) {
 		pr_info("Advanced Pending Fault");
+	}

-	if (DMA_FSTS_IQE(status))
+	if (DMA_FSTS_IQE(status)) {
 		pr_info("Invalidation Queue Error");
+	}

-	if (DMA_FSTS_ICE(status))
+	if (DMA_FSTS_ICE(status)) {
 		pr_info("Invalidation Completion Error");
+	}

-	if (DMA_FSTS_ITE(status))
+	if (DMA_FSTS_ITE(status)) {
 		pr_info("Invalidation Time-out Error");
+	}

-	if (DMA_FSTS_PRO(status))
+	if (DMA_FSTS_PRO(status)) {
 		pr_info("Page Request Overflow");
+	}
 }
 #endif

@@ -727,9 +748,10 @@ static void fault_record_analysis(__unused uint64_t low, uint64_t high)
 		DMA_FRCD_UP_SID(high) & 0x7UL,
 		low);
 #if DBG_IOMMU
-	if (iommu_ecap_dt(dmar_uint->ecap))
+	if (iommu_ecap_dt(dmar_uint->ecap)) {
 		pr_info("Address Type: 0x%x",
 			DMA_FRCD_UP_AT(high));
+	}
 #endif
 }

@@ -833,8 +855,9 @@ static void dmar_enable(struct dmar_drhd_rt *dmar_uint)

 static void dmar_disable(struct dmar_drhd_rt *dmar_uint)
 {
-	if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0U)
+	if ((dmar_uint->gcmd & DMA_GCMD_TE) != 0U) {
 		dmar_disable_translation(dmar_uint);
+	}

 	dmar_fault_event_mask(dmar_uint);
 }
@@ -883,12 +906,14 @@ struct iommu_domain *create_iommu_domain(int vm_id, uint64_t translation_table,

 int destroy_iommu_domain(struct iommu_domain *domain)
 {
-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}

 	/* currently only support ept */
-	if (!domain->is_tt_ept)
+	if (!domain->is_tt_ept) {
 		ASSERT(false, "translation_table is not EPT!");
+	}

 	/* TODO: check if any device assigned to this domain */

@@ -914,8 +939,9 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
 	uint64_t upper = 0UL;
 	uint64_t lower = 0UL;

-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}

 	dmar_uint = device_to_dmaru(segment, bus, devfun);
 	if (dmar_uint == NULL) {
@@ -1006,9 +1032,10 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
 				dmar_uint->cap_msagaw);
 			lower = DMAR_SET_BITSLICE(lower, CTX_ENTRY_LOWER_TT,
 				DMAR_CTX_TT_PASSTHROUGH);
-		} else
+		} else {
 			ASSERT(false,
 				"dmaru doesn't support trans passthrough");
+		}
 	} else {
 		/* TODO: add Device TLB support */
 		upper =
@@ -1043,8 +1070,9 @@ remove_iommu_device(struct iommu_domain *domain, uint16_t segment,
 	struct dmar_root_entry *root_entry;
 	struct dmar_context_entry *context_entry;

-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}

 	dmar_uint = device_to_dmaru(segment, bus, devfun);
 	if (dmar_uint == NULL) {
@@ -1086,8 +1114,9 @@ remove_iommu_device(struct iommu_domain *domain, uint16_t segment,
 int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,
 			uint8_t devfun)
 {
-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}

 	/* TODO: check if the device assigned */

@@ -1099,8 +1128,9 @@ int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,
 int unassign_iommu_device(struct iommu_domain *domain, uint8_t bus,
 			uint8_t devfun)
 {
-	if (domain == NULL)
+	if (domain == NULL) {
 		return 1;
+	}

 	/* TODO: check if the device assigned */

@@ -1116,11 +1146,13 @@ void enable_iommu(void)

 	list_for_each(pos, &dmar_drhd_units) {
 		dmar_uint = list_entry(pos, struct dmar_drhd_rt, list);
-		if (!dmar_uint->drhd->ignore)
+		if (!dmar_uint->drhd->ignore) {
 			dmar_enable(dmar_uint);
-		else
+		}
+		else {
 			dev_dbg(ACRN_DBG_IOMMU, "ignore dmar_uint @0x%x",
 				dmar_uint->drhd->reg_base_addr);
+		}
 	}
 }

@@ -1149,8 +1181,9 @@ void suspend_iommu(void)
 	list_for_each(pos, &dmar_drhd_units) {
 		dmar_unit = list_entry(pos, struct dmar_drhd_rt, list);

-		if (dmar_unit->drhd->ignore)
+		if (dmar_unit->drhd->ignore) {
 			continue;
+		}

 		/* flush */
 		dmar_write_buffer_flush(dmar_unit);
@@ -1187,8 +1220,9 @@ void resume_iommu(void)
 	list_for_each(pos, &dmar_drhd_units) {
 		dmar_unit = list_entry(pos, struct dmar_drhd_rt, list);

-		if (dmar_unit->drhd->ignore)
+		if (dmar_unit->drhd->ignore) {
 			continue;
+		}

 		/* set root table */
 		dmar_set_root_table(dmar_unit);
@@ -1228,8 +1262,9 @@ int init_iommu(void)

 	spinlock_init(&domain_lock);

-	if (register_hrhd_units() != 0)
+	if (register_hrhd_units() != 0) {
 		return -1;
+	}

 	host_domain = create_host_domain();

@@ -122,8 +122,9 @@ biosacpi_search_rsdp(char *base, int length)
 			sum += *(cp + idx);
 		}

-		if (sum != NULL)
+		if (sum != NULL) {
 			continue;
+		}

 		return rsdp;
 	}
@@ -139,21 +140,24 @@ static void *get_rsdp(void)

 #ifdef CONFIG_EFI_STUB
 	rsdp = get_rsdp_from_uefi();
-	if (rsdp)
+	if (rsdp) {
 		return rsdp;
+	}
 #endif

 	/* EBDA is addressed by the 16 bit pointer at 0x40E */
 	addr = (uint16_t *)HPA2HVA(0x40E);

 	rsdp = biosacpi_search_rsdp((char *)HPA2HVA((uint64_t)(*addr << 4)), 0x400);
-	if (rsdp != NULL)
+	if (rsdp != NULL) {
 		return rsdp;
+	}

 	/* Check the upper memory BIOS space, 0xe0000 - 0xfffff. */
 	rsdp = biosacpi_search_rsdp((char *)HPA2HVA(0xe0000), 0x20000);
-	if (rsdp != NULL)
+	if (rsdp != NULL) {
 		return rsdp;
+	}

 	return rsdp;
 }
@@ -164,8 +168,9 @@ probe_table(uint64_t address, const char *sig)
 	void *va = HPA2HVA(address);
 	struct acpi_table_header *table = (struct acpi_table_header *)va;

-	if (strncmp(table->signature, sig, ACPI_NAME_SIZE) != 0)
+	if (strncmp(table->signature, sig, ACPI_NAME_SIZE) != 0) {
 		return 0;
+	}

 	return 1;
 }
@@ -231,8 +236,9 @@ static uint16_t _parse_madt(void *madt, uint8_t *lapic_id_base)
 	end = (char *)madt_ptr + madt_ptr->header.length;

 	for (entry = first; (void *)entry < end; ) {
-		if (entry->length < sizeof(struct acpi_subtable_header))
+		if (entry->length < sizeof(struct acpi_subtable_header)) {
 			continue;
+		}

 		if (entry->type == ACPI_MADT_TYPE_LOCAL_APIC) {
 			processor = (struct acpi_madt_local_apic *)entry;
@@ -99,8 +99,9 @@ static void *get_kernel_load_addr(void *kernel_src_addr)
 	 * non-relocatable.
 	 */
 	zeropage = (struct zero_page *)kernel_src_addr;
-	if (zeropage->hdr.relocatable_kernel != 0U)
+	if (zeropage->hdr.relocatable_kernel != 0U) {
 		return (void *)zeropage->hdr.pref_addr;
+	}

 	return kernel_src_addr;
 }
@@ -13,8 +13,9 @@ static void run_vcpu_pre_work(struct vcpu *vcpu)
 {
 	uint64_t *pending_pre_work = &vcpu->pending_pre_work;

-	if (bitmap_test_and_clear(ACRN_VCPU_MMIO_COMPLETE, pending_pre_work))
+	if (bitmap_test_and_clear(ACRN_VCPU_MMIO_COMPLETE, pending_pre_work)) {
 		dm_emulate_mmio_post(vcpu);
+	}
 }

 void vcpu_thread(struct vcpu *vcpu)
@@ -25,8 +26,9 @@ void vcpu_thread(struct vcpu *vcpu)
 	int32_t ret = 0;

 	/* If vcpu is not launched, we need to do init_vmcs first */
-	if (!vcpu->launched)
+	if (!vcpu->launched) {
 		init_vmcs(vcpu);
+	}

 	run_vcpu_pre_work(vcpu);

@@ -56,9 +58,10 @@ void vcpu_thread(struct vcpu *vcpu)
 		}

 		vmexit_end = rdtsc();
-		if (vmexit_begin != 0UL)
+		if (vmexit_begin != 0UL) {
 			per_cpu(vmexit_time, vcpu->pcpu_id)[basic_exit_reason]
 				+= (vmexit_end - vmexit_begin);
+		}
 		TRACE_2L(TRACE_VM_ENTER, 0, 0);

 		/* Restore guest TSC_AUX */
@@ -124,16 +127,18 @@ int32_t hv_main(uint16_t pcpu_id)

 	/* Enable virtualization extensions */
 	ret = exec_vmxon_instr(pcpu_id);
-	if (ret != 0)
+	if (ret != 0) {
 		return ret;
+	}

 	/* X2APIC mode is disabled by default. */
 	x2apic_enabled = false;

 	if (is_vm0_bsp(pcpu_id)) {
 		ret = prepare_vm0();
-		if (ret != 0)
+		if (ret != 0) {
 			return ret;
+		}
 	}

 	default_idle();
@@ -18,8 +18,9 @@ bool is_hypercall_from_ring0(void)

 	cs_sel = exec_vmread(VMX_GUEST_CS_SEL);
 	/* cs_selector[1:0] is CPL */
-	if ((cs_sel & 0x3UL) == 0)
+	if ((cs_sel & 0x3UL) == 0) {
 		return true;
+	}

 	return false;
 }
@@ -28,8 +29,9 @@ int64_t hcall_get_api_version(struct vm *vm, uint64_t param)
 {
 	struct hc_api_version version;

-	if (!is_vm0(vm))
+	if (!is_vm0(vm)) {
 		return -1;
+	}

 	version.major_version = HV_API_MAJOR_VERSION;
 	version.minor_version = HV_API_MINOR_VERSION;
@@ -46,8 +48,9 @@ static int handle_vpic_irqline(struct vm *vm, int irq, enum irq_mode mode)
 {
 	int32_t ret = -1;

-	if (vm == NULL)
+	if (vm == NULL) {
 		return ret;
+	}

 	switch (mode) {
 	case IRQ_ASSERT:
@@ -70,8 +73,9 @@ handle_vioapic_irqline(struct vm *vm, int irq, enum irq_mode mode)
 {
 	int32_t ret = -1;

-	if (vm == NULL)
+	if (vm == NULL) {
 		return ret;
+	}

 	switch (mode) {
 	case IRQ_ASSERT:
@@ -96,8 +100,9 @@ static int handle_virt_irqline(struct vm *vm, uint64_t target_vmid,
 	uint32_t intr_type;
 	struct vm *target_vm = get_vm_from_vmid(target_vmid);

-	if ((vm == NULL) || (param == NULL))
+	if ((vm == NULL) || (param == NULL)) {
 		return -1;
+	}

 	intr_type = param->intr_type;

@@ -170,8 +175,9 @@ int64_t hcall_destroy_vm(uint64_t vmid)
 	int64_t ret = 0;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	ret = shutdown_vm(target_vm);
 	return ret;
@@ -182,12 +188,14 @@ int64_t hcall_resume_vm(uint64_t vmid)
 	int64_t ret = 0;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
-	if (target_vm->sw.io_shared_page == NULL)
+	}
+	if (target_vm->sw.io_shared_page == NULL) {
 		ret = -1;
-	else
+	} else {
 		ret = start_vm(target_vm);
+	}

 	return ret;
 }
@@ -196,8 +204,9 @@ int64_t hcall_pause_vm(uint64_t vmid)
 {
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	pause_vm(target_vm);

@@ -212,8 +221,9 @@ int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)

 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if ((target_vm == NULL) || (param == 0U))
+	if ((target_vm == NULL) || (param == 0U)) {
 		return -1;
+	}

 	if (copy_from_gpa(vm, &cv, param, sizeof(cv)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
@@ -279,8 +289,9 @@ int64_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param)
 	struct acrn_msi_entry msi;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	(void)memset((void *)&msi, 0, sizeof(msi));
 	if (copy_from_gpa(vm, &msi, param, sizeof(msi)) != 0) {
@@ -299,8 +310,9 @@ int64_t hcall_set_ioreq_buffer(struct vm *vm, uint64_t vmid, uint64_t param)
 	struct acrn_set_ioreq_buffer iobuf;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	(void)memset((void *)&iobuf, 0, sizeof(iobuf));

@@ -385,8 +397,9 @@ int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)

 	if ((req->valid != 0) &&
 		((req->processed == REQ_STATE_SUCCESS) ||
-		(req->processed == REQ_STATE_FAILED)))
+		(req->processed == REQ_STATE_FAILED))) {
 		complete_request(vcpu);
+	}

 	return 0;
 }
@@ -420,22 +433,26 @@ int64_t _set_vm_memmap(struct vm *vm, struct vm *target_vm,
 	attr = 0U;
 	if (memmap->type != MAP_UNMAP) {
 		prot = (memmap->prot != 0U) ? memmap->prot : memmap->prot_2;
-		if ((prot & MEM_ACCESS_READ) != 0U)
+		if ((prot & MEM_ACCESS_READ) != 0U) {
 			attr |= IA32E_EPT_R_BIT;
-		if ((prot & MEM_ACCESS_WRITE) != 0U)
+		}
+		if ((prot & MEM_ACCESS_WRITE) != 0U) {
 			attr |= IA32E_EPT_W_BIT;
-		if ((prot & MEM_ACCESS_EXEC) != 0U)
+		}
+		if ((prot & MEM_ACCESS_EXEC) != 0U) {
 			attr |= IA32E_EPT_X_BIT;
-		if ((prot & MEM_TYPE_WB) != 0U)
+		}
+		if ((prot & MEM_TYPE_WB) != 0U) {
 			attr |= IA32E_EPT_WB;
-		else if ((prot & MEM_TYPE_WT) != 0U)
+		} else if ((prot & MEM_TYPE_WT) != 0U) {
 			attr |= IA32E_EPT_WT;
-		else if ((prot & MEM_TYPE_WC) != 0U)
+		} else if ((prot & MEM_TYPE_WC) != 0U) {
 			attr |= IA32E_EPT_WC;
-		else if ((prot & MEM_TYPE_WP) != 0U)
+		} else if ((prot & MEM_TYPE_WP) != 0U) {
 			attr |= IA32E_EPT_WP;
-		else
+		} else {
 			attr |= IA32E_EPT_UNCACHED;
+		}
 	}

 	/* create gpa to hpa EPT mapping */
@@ -448,8 +465,9 @@ int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
 	struct vm_set_memmap memmap;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if ((vm == NULL) || (target_vm == NULL))
+	if ((vm == NULL) || (target_vm == NULL)) {
 		return -1;
+	}

 	(void)memset((void *)&memmap, 0, sizeof(memmap));

@@ -506,8 +524,9 @@ int64_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param)
 		 * to struct vm_set_memmap, it will be removed in the future
 		 */
 		if (_set_vm_memmap(vm, target_vm,
-			(struct vm_set_memmap *)&regions[idx]) < 0)
+			(struct vm_set_memmap *)&regions[idx]) < 0) {
 			return -1;
+		}
 		idx++;
 	}
 	return 0;
@@ -520,8 +539,9 @@ int64_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
 	struct ptdev_msi_info info;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	(void)memset((void *)&remap, 0, sizeof(remap));

@@ -530,9 +550,9 @@ int64_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
 		return -1;
 	}

-	if (!is_vm0(vm))
+	if (!is_vm0(vm)) {
 		ret = -1;
-	else {
+	} else {
 		info.msix = remap.msix;
 		info.msix_entry_index = remap.msix_entry_index;
 		info.vmsi_ctl = remap.msi_ctl;
@@ -559,8 +579,9 @@ int64_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param)
 	struct vm_gpa2hpa v_gpa2hpa;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	(void)memset((void *)&v_gpa2hpa, 0, sizeof(v_gpa2hpa));

@@ -604,8 +625,9 @@ int64_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
 		/* TODO: how to get vm's address width? */
 		target_vm->iommu_domain = create_iommu_domain(vmid,
 			target_vm->arch_vm.nworld_eptp, 48);
-		if (target_vm->iommu_domain == NULL)
+		if (target_vm->iommu_domain == NULL) {
 			return -ENODEV;
+		}

 	}
 	ret = assign_iommu_device(target_vm->iommu_domain,
@@ -620,8 +642,9 @@ int64_t hcall_deassign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
 	uint16_t bdf;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	if (copy_from_gpa(vm, &bdf, param, sizeof(bdf)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
@@ -639,8 +662,9 @@ int64_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
 	struct hc_ptdev_irq irq;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	(void)memset((void *)&irq, 0, sizeof(irq));

@@ -649,15 +673,16 @@ int64_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
 		return -1;
 	}

-	if (irq.type == IRQ_INTX)
+	if (irq.type == IRQ_INTX) {
 		ret = ptdev_add_intx_remapping(target_vm,
 			irq.virt_bdf, irq.phys_bdf,
 			irq.is.intx.virt_pin, irq.is.intx.phys_pin,
 			irq.is.intx.pic_pin);
-	else if (irq.type == IRQ_MSI || irq.type == IRQ_MSIX)
+	} else if (irq.type == IRQ_MSI || irq.type == IRQ_MSIX) {
 		ret = ptdev_add_msix_remapping(target_vm,
 			irq.virt_bdf, irq.phys_bdf,
 			irq.is.msix.vector_cnt);
+	}

 	return ret;
 }
@@ -669,8 +694,9 @@ hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
 	struct hc_ptdev_irq irq;
 	struct vm *target_vm = get_vm_from_vmid(vmid);

-	if (target_vm == NULL)
+	if (target_vm == NULL) {
 		return -1;
+	}

 	(void)memset((void *)&irq, 0, sizeof(irq));

@@ -679,14 +705,15 @@ hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
 		return -1;
 	}

-	if (irq.type == IRQ_INTX)
+	if (irq.type == IRQ_INTX) {
 		ptdev_remove_intx_remapping(target_vm,
 			irq.is.intx.virt_pin,
 			irq.is.intx.pic_pin);
-	else if (irq.type == IRQ_MSI || irq.type == IRQ_MSIX)
+	} else if (irq.type == IRQ_MSI || irq.type == IRQ_MSIX) {
 		ptdev_remove_msix_remapping(target_vm,
 			irq.virt_bdf,
 			irq.is.msix.vector_cnt);
+	}

 	return ret;
 }
@@ -703,10 +730,11 @@ int64_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
 		return -1;
 	}

-	if (ssp.gpa != 0U)
+	if (ssp.gpa != 0U) {
 		hva = (uint64_t *)GPA2HVA(vm, ssp.gpa);
-	else
+	} else {
 		hva = (uint64_t *)NULL;
+	}

 	return sbuf_share_setup(ssp.pcpu_id, ssp.sbuf_id, hva);
 }