mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: Change several APIs to void type

These APIs always return 0, so change their return type to void:

    vcpu_set_cr0()
    vmx_write_cr0()
    vcpu_set_cr4()
    vmx_write_cr4()
    kick_notification()
    tsc_deadline_handler()
    dmar_fault_handler()
    ptdev_interrupt_handler()

Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
commit c43d0e4f01
parent b75a7df415
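The change is mechanical and identical for every function listed above: a return type whose only possible value is 0 tells callers nothing, so the callee drops its "return 0;" and callers drop their error plumbing. A minimal before/after sketch of the pattern (hypothetical names, not ACRN code):

#include <stdint.h>

/* Hypothetical payload; stands in for the real handler body. */
static void do_work(void *data)
{
	(void)data;
}

/* Before: the handler can only ever return 0, so the value is noise. */
static int handler_before(uint32_t irq, void *data)
{
	(void)irq;
	do_work(data);
	return 0;
}

/* After: the signature states exactly what the function guarantees. */
static void handler_after(uint32_t irq, void *data)
{
	(void)irq;
	do_work(data);
}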
@@ -111,9 +111,9 @@ inline uint64_t vcpu_get_cr0(struct vcpu *vcpu)
 	return ctx->cr0;
 }
 
-inline int vcpu_set_cr0(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_cr0(struct vcpu *vcpu, uint64_t val)
 {
-	return vmx_write_cr0(vcpu, val);
+	vmx_write_cr0(vcpu, val);
 }
 
 inline uint64_t vcpu_get_cr2(struct vcpu *vcpu)
@@ -141,9 +141,9 @@ inline uint64_t vcpu_get_cr4(struct vcpu *vcpu)
 	return ctx->cr4;
 }
 
-inline int vcpu_set_cr4(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_cr4(struct vcpu *vcpu, uint64_t val)
 {
-	return vmx_write_cr4(vcpu, val);
+	vmx_write_cr4(vcpu, val);
 }
 
 inline uint64_t vcpu_get_pat_ext(struct vcpu *vcpu)
@@ -11,7 +11,7 @@ static uint32_t notification_irq = IRQ_INVALID;
 static uint64_t smp_call_mask = 0UL;
 
 /* run in interrupt context */
-static int kick_notification(__unused uint32_t irq, __unused void *data)
+static void kick_notification(__unused uint32_t irq, __unused void *data)
 {
 	/* Notification vector is used to kick taget cpu out of non-root mode.
 	 * And it also serves for smp call.
@@ -26,8 +26,6 @@ static int kick_notification(__unused uint32_t irq, __unused void *data)
 		smp_call->func(smp_call->data);
 		bitmap_clear_nolock(pcpu_id, &smp_call_mask);
 	}
-
-	return 0;
 }
 
 void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
@@ -24,10 +24,9 @@ static void run_timer(struct hv_timer *timer)
 }
 
 /* run in interrupt context */
-static int tsc_deadline_handler(__unused uint32_t irq, __unused void *data)
+static void tsc_deadline_handler(__unused uint32_t irq, __unused void *data)
 {
 	fire_softirq(SOFTIRQ_TIMER);
-	return 0;
 }
 
 static inline void update_physical_timer(struct per_cpu_timers *cpu_timer)
@@ -263,7 +263,6 @@ int cpuid_vmexit_handler(struct vcpu *vcpu)
 
 int cr_access_vmexit_handler(struct vcpu *vcpu)
 {
-	int err = 0;
 	uint64_t reg;
 	int idx = VM_EXIT_CR_ACCESS_REG_IDX(vcpu->arch_vcpu.exit_qualification);
 
@@ -275,11 +274,11 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
 		VM_EXIT_CR_ACCESS_CR_NUM(vcpu->arch_vcpu.exit_qualification)) {
 	case 0x00U:
 		/* mov to cr0 */
-		err = vcpu_set_cr0(vcpu, reg);
+		vcpu_set_cr0(vcpu, reg);
 		break;
 	case 0x04U:
 		/* mov to cr4 */
-		err = vcpu_set_cr4(vcpu, reg);
+		vcpu_set_cr4(vcpu, reg);
 		break;
 	case 0x08U:
 		/* mov to cr8 */
@@ -310,7 +309,7 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
 		VM_EXIT_CR_ACCESS_CR_NUM
 		(vcpu->arch_vcpu.exit_qualification));
 
-	return err;
+	return 0;
 }
 
 /*
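An asymmetry worth a sentence: cr_access_vmexit_handler() itself keeps its int return type, presumably because the vmexit dispatch mechanism still expects status-returning handlers; since vcpu_set_cr0()/vcpu_set_cr4() can no longer fail, the local err variable disappears and the handler ends in an unconditional return 0.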
@@ -387,7 +387,7 @@ static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
  * - PG (31) Trapped to track cpu/paging mode.
  *   Set the value according to the value from guest.
  */
-int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
+void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 {
 	uint64_t cr0_vmx;
 	uint32_t entry_ctrls;
@@ -396,7 +396,7 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 	if (!is_cr0_write_valid(vcpu, cr0)) {
 		pr_dbg("Invalid cr0 write operation from guest");
 		vcpu_inject_gp(vcpu, 0U);
-		return 0;
+		return;
 	}
 
 	/* SDM 2.5
@@ -467,8 +467,6 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 
 	pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0",
 		cr0, cr0_vmx);
-
-	return 0;
 }
 
 static bool is_cr4_write_valid(uint64_t cr4)
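Worth noting in the vmx_write_cr0()/vmx_write_cr4() hunks: an invalid guest write is no longer reported upward as an error code; it is handled in place by injecting #GP(0) and returning early, which is what makes the void return type safe. A self-contained sketch of that control flow (stand-in types and helpers, not the ACRN implementations; the validity rule here is arbitrary):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu { int id; };	/* minimal stand-in */

/* Stand-in for vcpu_inject_gp(): the fault is delivered to the guest. */
static void inject_gp(struct vcpu *vcpu)
{
	printf("vcpu%d: inject #GP(0)\n", vcpu->id);
}

/* Stand-in for is_cr0_write_valid()/is_cr4_write_valid(); demo rule only. */
static bool is_write_valid(uint64_t val)
{
	return (val & ~0xFFFFFFFFUL) == 0UL;
}

/* Stand-in for the VMCS update the real setters perform. */
static void commit_write(struct vcpu *vcpu, uint64_t val)
{
	printf("vcpu%d: commit 0x%llx\n", vcpu->id, (unsigned long long)val);
}

/* The guest sees the fault, so the caller has nothing to check:
 * void is the honest return type, and "return;" replaces "return 0;". */
static void write_guest_reg(struct vcpu *vcpu, uint64_t val)
{
	if (!is_write_valid(val)) {
		inject_gp(vcpu);
		return;
	}
	commit_write(vcpu, val);
}

int main(void)
{
	struct vcpu v = { 0 };
	write_guest_reg(&v, 0x80000011UL);	/* valid: committed */
	write_guest_reg(&v, 0x1FFFFFFFFUL);	/* invalid: #GP injected */
	return 0;
}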
@@ -523,14 +521,14 @@ static bool is_cr4_write_valid(uint64_t cr4)
  * - SMAP (21) Flexible to guest
  * - PKE (22) Flexible to guest
  */
-int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
+void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
 {
 	uint64_t cr4_vmx;
 
 	if (!is_cr4_write_valid(cr4)) {
 		pr_dbg("Invalid cr4 write operation from guest");
 		vcpu_inject_gp(vcpu, 0U);
-		return 0;
+		return;
 	}
 
 	/* Aways off bits and reserved bits has been filtered above */
@@ -543,8 +541,6 @@ int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
 
 	pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR4",
 		cr4, cr4_vmx);
-
-	return 0;
 }
 
 static void init_guest_context_real(struct vcpu *vcpu)
@@ -760,7 +760,7 @@ static void fault_record_analysis(__unused uint64_t low, uint64_t high)
 #endif
 }
 
-static int dmar_fault_handler(uint32_t irq, void *data)
+static void dmar_fault_handler(uint32_t irq, void *data)
 {
 	struct dmar_drhd_rt *dmar_uint = (struct dmar_drhd_rt *)data;
 	uint32_t fsr;
@@ -812,8 +812,6 @@ static int dmar_fault_handler(uint32_t irq, void *data)
 
 		fsr = iommu_read32(dmar_uint, DMAR_FSTS_REG);
 	}
-
-	return 0;
 }
 
 static int dmar_setup_interrupt(struct dmar_drhd_rt *dmar_uint)
@@ -119,13 +119,12 @@ release_all_entries(struct vm *vm)
 }
 
 /* interrupt context */
-static int ptdev_interrupt_handler(__unused uint32_t irq, void *data)
+static void ptdev_interrupt_handler(__unused uint32_t irq, void *data)
 {
 	struct ptdev_remapping_info *entry =
 		(struct ptdev_remapping_info *) data;
 
 	ptdev_enqueue_softirq(entry);
-	return 0;
 }
 
 /* active intr with irq registering */
@@ -273,11 +273,11 @@ void vcpu_set_efer(struct vcpu *vcpu, uint64_t val);
 uint64_t vcpu_get_rflags(struct vcpu *vcpu);
 void vcpu_set_rflags(struct vcpu *vcpu, uint64_t val);
 uint64_t vcpu_get_cr0(struct vcpu *vcpu);
-int vcpu_set_cr0(struct vcpu *vcpu, uint64_t val);
+void vcpu_set_cr0(struct vcpu *vcpu, uint64_t val);
 uint64_t vcpu_get_cr2(struct vcpu *vcpu);
 void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val);
 uint64_t vcpu_get_cr4(struct vcpu *vcpu);
-int vcpu_set_cr4(struct vcpu *vcpu, uint64_t val);
+void vcpu_set_cr4(struct vcpu *vcpu, uint64_t val);
 uint64_t vcpu_get_pat_ext(struct vcpu *vcpu);
 void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val);
 
@@ -446,8 +446,8 @@ int exec_vmptrld(void *addr);
 uint64_t vmx_rdmsr_pat(struct vcpu *vcpu);
 int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value);
 
-int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0);
-int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4);
+void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0);
+void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4);
 
 static inline enum vm_cpu_mode get_vcpu_mode(const struct vcpu *vcpu)
 {
@@ -17,7 +17,7 @@ enum irq_mode {
 	IRQ_DEASSERT,
 };
 
-typedef int (*irq_action_t)(uint32_t irq, void *priv_data);
+typedef void (*irq_action_t)(uint32_t irq, void *priv_data);
 
 /* any field change in below required irq_lock protection with irqsave */
 struct irq_desc {
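The irq_action_t typedef change above is what ties the whole commit together: every handler registered through it (kick_notification(), tsc_deadline_handler(), dmar_fault_handler(), ptdev_interrupt_handler()) must now match the void-returning signature. A sketch of how such table-driven dispatch looks with the new type (hypothetical registration and dispatch code, not ACRN's irq layer):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*irq_action_t)(uint32_t irq, void *priv_data);

struct irq_desc {
	irq_action_t action;	/* NULL when the line is unclaimed */
	void *priv_data;
};

#define NR_IRQS 16U
static struct irq_desc irq_descs[NR_IRQS];

/* Hypothetical registration helper. */
static void request_irq(uint32_t irq, irq_action_t action, void *priv_data)
{
	irq_descs[irq].action = action;
	irq_descs[irq].priv_data = priv_data;
}

/* Dispatch no longer collects a status: with a void action type there is
 * nothing the core irq code could meaningfully do with a return value. */
static void dispatch_irq(uint32_t irq)
{
	struct irq_desc *desc = &irq_descs[irq];

	if (desc->action != NULL) {
		desc->action(irq, desc->priv_data);
	}
}

static void demo_handler(uint32_t irq, void *priv_data)
{
	(void)priv_data;
	printf("irq %u handled\n", irq);
}

int main(void)
{
	request_irq(3U, demo_handler, NULL);
	dispatch_irq(3U);
	return 0;
}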