hv: use int32_t to replace int

Since it is typedef'd in "include/lib/types.h"

Tracked-On: #861
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
This commit is contained in:
Li, Fei1
2018-12-08 00:30:49 +08:00
committed by wenlingz
parent e8f3a2d42c
commit e3fc6c3c79
91 changed files with 472 additions and 471 deletions

View File

@@ -537,7 +537,7 @@ void ptirq_intx_ack(struct acrn_vm *vm, uint8_t virt_pin,
* entry_nr = 0 means first vector
* user must provide bdf and entry_nr
*/
int ptirq_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf,
int32_t ptirq_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf,
uint16_t entry_nr, struct ptirq_msi_info *info)
{
struct ptirq_remapping_info *entry;
@@ -625,7 +625,7 @@ static void activate_physical_ioapic(struct acrn_vm *vm,
/* Main entry for PCI/Legacy device assignment with INTx, calling from vIOAPIC
* or vPIC
*/
int ptirq_intx_pin_remap(struct acrn_vm *vm, uint8_t virt_pin,
int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint8_t virt_pin,
enum ptirq_vpin_source vpin_src)
{
struct ptirq_remapping_info *entry;
@@ -731,7 +731,7 @@ END:
* - currently, one phys_pin can only be held by one pin source (vPIC or
* vIOAPIC)
*/
int ptirq_add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin, uint8_t phys_pin,
int32_t ptirq_add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin, uint8_t phys_pin,
bool pic_pin)
{
struct ptirq_remapping_info *entry;
@@ -758,7 +758,7 @@ void ptirq_remove_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin, bool pic_
* - the entry is identified by phys_bdf:msi_idx:
* one entry vs. one phys_bdf:msi_idx
*/
int ptirq_add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf,
int32_t ptirq_add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf,
uint16_t phys_bdf, uint32_t vector_count)
{
struct ptirq_remapping_info *entry;

View File

@@ -52,7 +52,7 @@ static void cpu_xsave_init(void);
static void set_current_cpu_id(uint16_t pcpu_id);
static void print_hv_banner(void);
static uint16_t get_cpu_id_from_lapic_id(uint32_t lapic_id);
int ibrs_type;
int32_t ibrs_type;
static uint64_t start_tsc __attribute__((__section__(".bss_noinit")));
/* Push sp magic to top of stack for call trace */
@@ -184,9 +184,9 @@ static void get_cpu_capabilities(void)
* we should supplement which feature/capability we must support
* here later.
*/
static int hardware_detect_support(void)
static int32_t hardware_detect_support(void)
{
int ret;
int32_t ret;
/* Long Mode (x86-64, 64-bit support) */
if (!cpu_has_cap(X86_FEATURE_LM)) {
@@ -702,7 +702,7 @@ void cpu_dead(uint16_t pcpu_id)
/* For debug purposes, using a stack variable in the while loop enables
* us to modify the value using a JTAG probe and resume if needed.
*/
int halt = 1;
int32_t halt = 1;
if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap)) {
/* clean up native stuff */

View File

@@ -102,10 +102,10 @@ static const struct cpu_state_table {
}
};
static int get_state_tbl_idx(const char *cpuname)
static int32_t get_state_tbl_idx(const char *cpuname)
{
int i;
int count = ARRAY_SIZE(cpu_state_tbl);
int32_t i;
int32_t count = ARRAY_SIZE(cpu_state_tbl);
if (cpuname == NULL) {
return -1;
@@ -123,7 +123,7 @@ static int get_state_tbl_idx(const char *cpuname)
void load_cpu_state_data(void)
{
int tbl_idx;
int32_t tbl_idx;
const struct cpu_state_info *state_info;
(void)memset(&boot_cpu_data.state_info, 0U,

View File

@@ -176,9 +176,9 @@ static void init_vcpuid_entry(uint32_t leaf, uint32_t subleaf,
}
}
int set_vcpuid_entries(struct acrn_vm *vm)
int32_t set_vcpuid_entries(struct acrn_vm *vm)
{
int result;
int32_t result;
struct vcpuid_entry entry;
uint32_t limit;
uint32_t i, j;

View File

@@ -71,9 +71,9 @@ uint64_t vm0_hpa2gpa(uint64_t hpa)
return hpa;
}
int ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
{
int status = -EINVAL, ret;
int32_t status = -EINVAL, ret;
uint64_t exit_qual;
uint64_t gpa;
struct io_request *io_req = &vcpu->req;
@@ -158,9 +158,9 @@ out:
return status;
}
int ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu)
int32_t ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu)
{
int status;
int32_t status;
status = -EINVAL;

View File

@@ -68,7 +68,7 @@ enum vm_paging_mode get_vcpu_paging_mode(struct acrn_vcpu *vcpu)
/* TODO: Add code to check for Revserved bits, SMAP and PKE when do translation
* during page walk */
static int local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_walk_info *pw_info,
static int32_t local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_walk_info *pw_info,
uint64_t gva, uint64_t *gpa, uint32_t *err_code)
{
uint32_t i;
@@ -77,8 +77,8 @@ static int local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_walk_i
void *base;
uint64_t entry;
uint64_t addr, page_size;
int ret = 0;
int fault = 0;
int32_t ret = 0;
int32_t fault = 0;
bool is_user_mode_addr = true;
bool is_page_rw_flags_on = true;
@@ -215,14 +215,14 @@ out:
return ret;
}
static int local_gva2gpa_pae(struct acrn_vcpu *vcpu, struct page_walk_info *pw_info,
static int32_t local_gva2gpa_pae(struct acrn_vcpu *vcpu, struct page_walk_info *pw_info,
uint64_t gva, uint64_t *gpa, uint32_t *err_code)
{
int index;
int32_t index;
uint64_t *base;
uint64_t entry;
uint64_t addr;
int ret;
int32_t ret;
addr = pw_info->top_entry & 0xFFFFFFF0U;
base = (uint64_t *)gpa2hva(vcpu->vm, addr);
@@ -265,12 +265,12 @@ out:
* - Return -EFAULT for paging fault, and refer to err_code for paging fault
* error code.
*/
int gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa,
int32_t gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa,
uint32_t *err_code)
{
enum vm_paging_mode pm = get_vcpu_paging_mode(vcpu);
struct page_walk_info pw_info;
int ret = 0;
int32_t ret = 0;
if ((gpa == NULL) || (err_code == NULL)) {
return -EINVAL;
@@ -356,7 +356,7 @@ static inline uint32_t local_copy_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t
return len;
}
static inline int copy_gpa(struct acrn_vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
static inline int32_t copy_gpa(struct acrn_vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
uint32_t size_arg, bool cp_from_vm)
{
void *h_ptr = h_ptr_arg;
@@ -381,7 +381,7 @@ static inline int copy_gpa(struct acrn_vm *vm, void *h_ptr_arg, uint64_t gpa_arg
/*
* @pre vcpu != NULL && err_code != NULL
*/
static inline int copy_gva(struct acrn_vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
static inline int32_t copy_gva(struct acrn_vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
uint32_t size_arg, uint32_t *err_code, uint64_t *fault_addr,
bool cp_from_vm)
{
@@ -423,7 +423,7 @@ static inline int copy_gva(struct acrn_vcpu *vcpu, void *h_ptr_arg, uint64_t gva
* continuous
* @pre Pointer vm is non-NULL
*/
int copy_from_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
int32_t copy_from_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
{
return copy_gpa(vm, h_ptr, gpa, size, 1);
}
@@ -434,18 +434,18 @@ int copy_from_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
* continuous
* @pre Pointer vm is non-NULL
*/
int copy_to_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
int32_t copy_to_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
{
return copy_gpa(vm, h_ptr, gpa, size, 0);
}
int copy_from_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
int32_t copy_from_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr)
{
return copy_gva(vcpu, h_ptr, gva, size, err_code, fault_addr, 1);
}
int copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
int32_t copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr)
{
return copy_gva(vcpu, h_ptr, gva, size, err_code, fault_addr, 0);
@@ -459,7 +459,7 @@ int copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
* @pre vm != NULL
* @pre is_vm0(vm) == true
*/
int prepare_vm0_memmap(struct acrn_vm *vm)
int32_t prepare_vm0_memmap(struct acrn_vm *vm)
{
uint32_t i;
uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);

View File

@@ -399,7 +399,7 @@ static void get_guest_paging_info(struct acrn_vcpu *vcpu, struct instr_emul_ctxt
emul_ctxt->paging.paging_mode = get_vcpu_paging_mode(vcpu);
}
static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
static int32_t vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
{
uint64_t mask;
@@ -461,7 +461,7 @@ static bool is_desc_valid(struct seg_desc *desc, uint32_t prot)
*return 0 - on success
*return -1 - on failure
*/
static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
static int32_t vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
struct seg_desc *desc, uint64_t offset_arg, uint8_t addrsize,
uint64_t *gla)
{
@@ -509,7 +509,7 @@ static inline void vie_mmio_write(struct acrn_vcpu *vcpu, uint64_t wval)
}
static void vie_calc_bytereg(const struct instr_emul_vie *vie,
enum cpu_reg_name *reg, int *lhbr)
enum cpu_reg_name *reg, int32_t *lhbr)
{
*lhbr = 0;
*reg = (enum cpu_reg_name)(vie->reg);
@@ -536,7 +536,7 @@ static void vie_calc_bytereg(const struct instr_emul_vie *vie,
static uint8_t vie_read_bytereg(const struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int lhbr;
int32_t lhbr;
uint64_t val;
uint8_t reg_val;
enum cpu_reg_name reg;
@@ -562,7 +562,7 @@ static void vie_write_bytereg(struct acrn_vcpu *vcpu, const struct instr_emul_vi
{
uint64_t origval, val, mask;
enum cpu_reg_name reg;
int lhbr;
int32_t lhbr;
vie_calc_bytereg(vie, &reg, &lhbr);
origval = vm_get_register(vcpu, reg);
@@ -660,9 +660,9 @@ static uint64_t getcc(uint8_t opsize, uint64_t x, uint64_t y)
}
}
static int emulate_mov(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_mov(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
int32_t error;
uint8_t size;
enum cpu_reg_name reg;
uint8_t byte;
@@ -771,9 +771,9 @@ static int emulate_mov(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_movx(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_movx(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
int32_t error;
uint8_t size;
enum cpu_reg_name reg;
uint64_t val;
@@ -890,10 +890,10 @@ static void get_gva_si_nocheck(const struct acrn_vcpu *vcpu, uint8_t addrsize,
*
* It's only used by MOVS/STO
*/
static int get_gva_di_check(struct acrn_vcpu *vcpu, struct instr_emul_vie *vie,
static int32_t get_gva_di_check(struct acrn_vcpu *vcpu, struct instr_emul_vie *vie,
uint8_t addrsize, uint64_t *gva)
{
int ret;
int32_t ret;
uint32_t err_code;
struct seg_desc desc;
enum vm_cpu_mode cpu_mode;
@@ -962,14 +962,14 @@ exception_inject:
* For MOVs instruction, we always check RDI during instruction decoding phase.
* And access RSI without any check during instruction emulation phase.
*/
static int emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
uint64_t src_gva, gpa, val = 0UL;
uint64_t *dst_hva, *src_hva;
uint64_t rcx, rdi, rsi, rflags;
uint32_t err_code;
enum cpu_reg_name seg;
int error;
int32_t error;
uint8_t repeat, opsize = vie->opsize;
bool is_mmio_write;
@@ -1049,7 +1049,7 @@ done:
return error;
}
static int emulate_stos(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_stos(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
uint8_t repeat, opsize = vie->opsize;
uint64_t val;
@@ -1099,9 +1099,9 @@ static int emulate_stos(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie
return 0;
}
static int emulate_test(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_test(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
int32_t error;
uint8_t size;
enum cpu_reg_name reg;
uint64_t result, rflags2, val1, val2;
@@ -1163,9 +1163,9 @@ static int emulate_test(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie
return error;
}
static int emulate_and(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_and(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
int32_t error;
uint8_t size;
enum cpu_reg_name reg;
uint64_t result, rflags2, val1, val2;
@@ -1246,9 +1246,9 @@ static int emulate_and(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_or(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_or(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
int32_t error;
uint8_t size;
enum cpu_reg_name reg;
uint64_t val1, val2, result, rflags2;
@@ -1332,9 +1332,9 @@ static int emulate_or(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_cmp(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_cmp(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error = 0;
int32_t error = 0;
uint8_t size;
uint64_t regop, memop, op1, op2, rflags2;
enum cpu_reg_name reg;
@@ -1418,9 +1418,9 @@ static int emulate_cmp(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_sub(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_sub(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
int32_t error;
uint8_t size;
uint64_t nval, rflags2, val1, val2;
enum cpu_reg_name reg;
@@ -1468,9 +1468,9 @@ static int emulate_sub(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_group1(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
static int32_t emulate_group1(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
int32_t error;
switch (vie->reg & 7U) {
case 0x1U: /* OR */
@@ -1531,11 +1531,11 @@ static int32_t emulate_bittest(struct acrn_vcpu *vcpu, const struct instr_emul_v
return ret;
}
static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
static int32_t vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
{
struct instr_emul_vie *vie = &ctxt->vie;
struct acrn_vcpu *vcpu = ctxt->vcpu;
int error;
int32_t error;
if (vie->decoded != 0U) {
switch (vie->op.op_type) {
@@ -1584,13 +1584,13 @@ static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
return error;
}
static int vie_init(struct instr_emul_vie *vie, struct acrn_vcpu *vcpu)
static int32_t vie_init(struct instr_emul_vie *vie, struct acrn_vcpu *vcpu)
{
uint64_t guest_rip_gva = vcpu_get_rip(vcpu);
uint32_t inst_len = vcpu->arch.inst_len;
uint32_t err_code;
uint64_t fault_addr;
int ret;
int32_t ret;
if ((inst_len > VIE_INST_SIZE) || (inst_len == 0U)) {
pr_err("%s: invalid instruction length (%d)",
@@ -1620,7 +1620,7 @@ static int vie_init(struct instr_emul_vie *vie, struct acrn_vcpu *vcpu)
return 0;
}
static int vie_peek(const struct instr_emul_vie *vie, uint8_t *x)
static int32_t vie_peek(const struct instr_emul_vie *vie, uint8_t *x)
{
if (vie->num_processed < vie->num_valid) {
@@ -1665,7 +1665,7 @@ static bool segment_override(uint8_t x, enum cpu_reg_name *seg)
return true;
}
static int decode_prefixes(struct instr_emul_vie *vie,
static int32_t decode_prefixes(struct instr_emul_vie *vie,
enum vm_cpu_mode cpu_mode, bool cs_d)
{
uint8_t x, i;
@@ -1737,7 +1737,7 @@ static int decode_prefixes(struct instr_emul_vie *vie,
return 0;
}
static int decode_two_byte_opcode(struct instr_emul_vie *vie)
static int32_t decode_two_byte_opcode(struct instr_emul_vie *vie)
{
uint8_t x;
@@ -1757,9 +1757,9 @@ static int decode_two_byte_opcode(struct instr_emul_vie *vie)
return 0;
}
static int decode_opcode(struct instr_emul_vie *vie)
static int32_t decode_opcode(struct instr_emul_vie *vie)
{
int ret = 0;
int32_t ret = 0;
uint8_t x;
if (vie_peek(vie, &x) != 0) {
@@ -1792,7 +1792,7 @@ static int decode_opcode(struct instr_emul_vie *vie)
return ret;
}
static int decode_modrm(struct instr_emul_vie *vie, enum vm_cpu_mode cpu_mode)
static int32_t decode_modrm(struct instr_emul_vie *vie, enum vm_cpu_mode cpu_mode)
{
uint8_t x;
@@ -1885,7 +1885,7 @@ done:
return 0;
}
static int decode_sib(struct instr_emul_vie *vie)
static int32_t decode_sib(struct instr_emul_vie *vie)
{
uint8_t x;
@@ -1965,7 +1965,7 @@ static int decode_sib(struct instr_emul_vie *vie)
return 0;
}
static int decode_displacement(struct instr_emul_vie *vie)
static int32_t decode_displacement(struct instr_emul_vie *vie)
{
uint8_t n, i, x;
@@ -2004,7 +2004,7 @@ static int decode_displacement(struct instr_emul_vie *vie)
return 0;
}
static int decode_immediate(struct instr_emul_vie *vie)
static int32_t decode_immediate(struct instr_emul_vie *vie)
{
uint8_t i, n, x;
union {
@@ -2067,7 +2067,7 @@ static int decode_immediate(struct instr_emul_vie *vie)
return 0;
}
static int decode_moffset(struct instr_emul_vie *vie)
static int32_t decode_moffset(struct instr_emul_vie *vie)
{
uint8_t i, n, x;
union {
@@ -2102,7 +2102,7 @@ static int decode_moffset(struct instr_emul_vie *vie)
return 0;
}
static int local_decode_instruction(enum vm_cpu_mode cpu_mode,
static int32_t local_decode_instruction(enum vm_cpu_mode cpu_mode,
bool cs_d, struct instr_emul_vie *vie)
{
if (decode_prefixes(vie, cpu_mode, cs_d) != 0) {
@@ -2156,10 +2156,10 @@ static int32_t instr_check_di(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *em
return ret;
}
static int instr_check_gva(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
static int32_t instr_check_gva(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
enum vm_cpu_mode cpu_mode)
{
int ret;
int32_t ret;
uint64_t base, segbase, idx, gva, gpa;
uint32_t err_code;
enum cpu_reg_name seg;
@@ -2242,11 +2242,11 @@ static int instr_check_gva(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_
return 0;
}
int decode_instruction(struct acrn_vcpu *vcpu)
int32_t decode_instruction(struct acrn_vcpu *vcpu)
{
struct instr_emul_ctxt *emul_ctxt;
uint32_t csar;
int retval;
int32_t retval;
enum vm_cpu_mode cpu_mode;
emul_ctxt = &per_cpu(g_inst_ctxt, vcpu->pcpu_id);
@@ -2302,7 +2302,7 @@ int decode_instruction(struct acrn_vcpu *vcpu)
}
}
return (int)(emul_ctxt->vie.opsize);
return (int32_t)(emul_ctxt->vie.opsize);
}
int32_t emulate_instruction(const struct acrn_vcpu *vcpu)

View File

@@ -195,6 +195,6 @@ struct instr_emul_ctxt {
};
int32_t emulate_instruction(const struct acrn_vcpu *vcpu);
int decode_instruction(struct acrn_vcpu *vcpu);
int32_t decode_instruction(struct acrn_vcpu *vcpu);
#endif

View File

@@ -6,7 +6,7 @@
#include <hypervisor.h>
int validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl)
int32_t validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl)
{
const struct cpu_px_data *px_data;
uint8_t i, px_cnt;
@@ -103,7 +103,7 @@ void vm_setup_cpu_state(struct acrn_vm *vm)
/* This function is for power management Sx state implementation,
* VM need to load the Sx state data to implement S3/S5.
*/
int vm_load_pm_s_state(struct acrn_vm *vm)
int32_t vm_load_pm_s_state(struct acrn_vm *vm)
{
#ifdef ACPI_INFO_VALIDATED
vm->pm.sx_state_data = (struct pm_s_state_data *)&host_pm_s_state;

View File

@@ -37,7 +37,7 @@ void acrn_update_ucode(struct acrn_vcpu *vcpu, uint64_t v)
uint64_t gva, fault_addr;
struct ucode_header uhdr;
size_t data_size;
int err;
int32_t err;
uint32_t err_code;
spinlock_obtain(&micro_code_lock);

View File

@@ -322,7 +322,7 @@ void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry)
* for physical CPU 1 : vcpu->pcpu_id = 1, vcpu->vcpu_id = 1, vmid = 1;
*
***********************************************************************/
int create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
{
struct acrn_vcpu *vcpu;
uint16_t vcpu_id;
@@ -399,7 +399,7 @@ int create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcp
/*
* @pre vcpu != NULL
*/
int run_vcpu(struct acrn_vcpu *vcpu)
int32_t run_vcpu(struct acrn_vcpu *vcpu)
{
uint32_t instlen, cs_attr;
uint64_t rip, ia32_efer, cr0;
@@ -499,7 +499,7 @@ int run_vcpu(struct acrn_vcpu *vcpu)
return status;
}
int shutdown_vcpu(__unused struct acrn_vcpu *vcpu)
int32_t shutdown_vcpu(__unused struct acrn_vcpu *vcpu)
{
/* TODO : Implement VCPU shutdown sequence */
@@ -522,7 +522,7 @@ void offline_vcpu(struct acrn_vcpu *vcpu)
*/
void reset_vcpu(struct acrn_vcpu *vcpu)
{
int i;
int32_t i;
struct acrn_vlapic *vlapic;
pr_dbg("vcpu%hu reset", vcpu->vcpu_id);
@@ -610,9 +610,9 @@ void schedule_vcpu(struct acrn_vcpu *vcpu)
}
/* help function for vcpu create */
int prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
int ret = 0;
int32_t ret = 0;
struct acrn_vcpu *vcpu = NULL;
ret = create_vcpu(pcpu_id, vm, &vcpu);

View File

@@ -81,10 +81,10 @@ static inline void vlapic_dump_isr(__unused const struct acrn_vlapic *vlapic, __
/*APIC-v APIC-access address */
static uint8_t apicv_apic_access_addr[PAGE_SIZE] __aligned(PAGE_SIZE);
static int
static int32_t
apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector);
static int
static int32_t
apicv_pending_intr(const struct acrn_vlapic *vlapic);
static void
@@ -456,14 +456,14 @@ vlapic_esr_write_handler(struct acrn_vlapic *vlapic)
* Returns 1 if the vcpu needs to be notified of the interrupt and 0 otherwise.
* @pre vector >= 16
*/
static int
static int32_t
vlapic_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
{
struct lapic_regs *lapic;
struct lapic_reg *irrptr, *tmrptr;
uint32_t mask;
uint32_t idx;
int pending_intr;
int32_t pending_intr;
ASSERT(vector <= NR_MAX_VECTOR,
"invalid vector %u", vector);
@@ -926,7 +926,7 @@ vlapic_set_error(struct acrn_vlapic *vlapic, uint32_t mask)
/*
* @pre vector <= 255
*/
static int
static int32_t
vlapic_trigger_lvt(struct acrn_vlapic *vlapic, uint32_t vector)
{
uint32_t lvt;
@@ -1206,7 +1206,7 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode,
}
}
static int
static int32_t
vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
{
uint16_t vcpu_id;
@@ -1312,7 +1312,7 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
* @remark The vector does not automatically transition to the ISR as a
* result of calling this function.
*/
int
int32_t
vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr)
{
const struct lapic_regs *lapic = &(vlapic->apic_page);
@@ -1442,7 +1442,7 @@ vlapic_svr_write_handler(struct acrn_vlapic *vlapic)
}
}
static int
static int32_t
vlapic_read(struct acrn_vlapic *vlapic, uint32_t offset_arg,
uint64_t *data)
{
@@ -1566,14 +1566,14 @@ done:
return 0;
}
static int
static int32_t
vlapic_write(struct acrn_vlapic *vlapic, uint32_t offset,
uint64_t data)
{
struct lapic_regs *lapic = &(vlapic->apic_page);
uint32_t *regptr;
uint32_t data32 = (uint32_t)data;
int retval;
int32_t retval;
ASSERT(((offset & 0xfU) == 0U) && (offset < PAGE_SIZE),
"%s: invalid offset %#x", __func__, offset);
@@ -1728,7 +1728,7 @@ vlapic_init(struct acrn_vlapic *vlapic)
void vlapic_restore(struct acrn_vlapic *vlapic, const struct lapic_regs *regs)
{
struct lapic_regs *lapic;
int i;
int32_t i;
lapic = &(vlapic->apic_page);
@@ -1758,7 +1758,7 @@ vlapic_get_apicbase(const struct acrn_vlapic *vlapic)
return vlapic->msr_apicbase;
}
static int
static int32_t
vlapic_set_apicbase(struct acrn_vlapic *vlapic, uint64_t new)
{
@@ -1955,7 +1955,7 @@ vlapic_set_local_intr(struct acrn_vm *vm, uint16_t vcpu_id_arg, uint32_t vector)
{
struct acrn_vlapic *vlapic;
uint64_t dmask = 0UL;
int error;
int32_t error;
uint16_t vcpu_id = vcpu_id_arg;
if ((vcpu_id != BROADCAST_CPU_ID) && (vcpu_id >= vm->hw.created_vcpus)) {
@@ -2082,7 +2082,7 @@ static inline uint32_t x2apic_msr_to_regoff(uint32_t msr)
* No shorthand and Physical destination mode are only supported.
*/
static int
static int32_t
vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
{
uint64_t apic_id = (uint32_t) (val >> 32U);
@@ -2123,12 +2123,12 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
}
#endif
static int vlapic_x2apic_access(struct acrn_vcpu *vcpu, uint32_t msr, bool write,
static int32_t vlapic_x2apic_access(struct acrn_vcpu *vcpu, uint32_t msr, bool write,
uint64_t *val)
{
struct acrn_vlapic *vlapic;
uint32_t offset;
int error = -1;
int32_t error = -1;
/*
* If vLAPIC is in xAPIC mode and guest tries to access x2APIC MSRs
@@ -2159,10 +2159,10 @@ static int vlapic_x2apic_access(struct acrn_vcpu *vcpu, uint32_t msr, bool write
return error;
}
int
int32_t
vlapic_rdmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *rval)
{
int error = 0;
int32_t error = 0;
struct acrn_vlapic *vlapic;
dev_dbg(ACRN_DBG_LAPIC, "cpu[%hu] rdmsr: %x", vcpu->vcpu_id, msr);
@@ -2191,10 +2191,10 @@ vlapic_rdmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *rval)
return error;
}
int
int32_t
vlapic_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval)
{
int error = 0;
int32_t error = 0;
struct acrn_vlapic *vlapic;
vlapic = vcpu_vlapic(vcpu);
@@ -2224,7 +2224,7 @@ vlapic_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval)
return error;
}
int vlapic_create(struct acrn_vcpu *vcpu)
int32_t vlapic_create(struct acrn_vcpu *vcpu)
{
vcpu->arch.vlapic.vm = vcpu->vm;
vcpu->arch.vlapic.vcpu = vcpu;
@@ -2264,7 +2264,7 @@ void vlapic_free(struct acrn_vcpu *vcpu)
/**
* APIC-v functions
* **/
static int
static int32_t
apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector)
{
struct vlapic_pir_desc *pir_desc;
@@ -2282,7 +2282,7 @@ apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector)
return notify;
}
static int
static int32_t
apicv_pending_intr(const struct acrn_vlapic *vlapic)
{
const struct vlapic_pir_desc *pir_desc;
@@ -2428,9 +2428,9 @@ vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic)
}
}
int apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
{
int err = 0;
int32_t err = 0;
uint32_t offset = 0U;
uint64_t qual, access_type;
struct acrn_vlapic *vlapic;
@@ -2473,7 +2473,7 @@ int apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
return err;
}
int veoi_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t veoi_vmexit_handler(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic = NULL;
@@ -2514,10 +2514,10 @@ static void vlapic_x2apic_self_ipi_handler(struct acrn_vlapic *vlapic)
vlapic_set_intr(target_vcpu, vector, LAPIC_TRIG_EDGE);
}
int apic_write_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t apic_write_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t qual;
int error, handled;
int32_t error, handled;
uint32_t offset;
struct acrn_vlapic *vlapic = NULL;
@@ -2584,7 +2584,7 @@ int apic_write_vmexit_handler(struct acrn_vcpu *vcpu)
return handled;
}
int tpr_below_threshold_vmexit_handler(__unused struct acrn_vcpu *vcpu)
int32_t tpr_below_threshold_vmexit_handler(__unused struct acrn_vcpu *vcpu)
{
pr_err("Unhandled %s.", __func__);
return 0;

View File

@@ -60,10 +60,10 @@ struct acrn_vm *get_vm_from_vmid(uint16_t vm_id)
/**
* @pre vm_desc != NULL && rtn_vm != NULL
*/
int create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
int32_t create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
{
struct acrn_vm *vm;
int status;
int32_t status;
uint16_t vm_id;
#ifdef CONFIG_PARTITION_MODE
@@ -240,7 +240,7 @@ int32_t shutdown_vm(struct acrn_vm *vm)
/**
* * @pre vm != NULL
*/
int start_vm(struct acrn_vm *vm)
int32_t start_vm(struct acrn_vm *vm)
{
struct acrn_vcpu *vcpu = NULL;
@@ -348,9 +348,9 @@ void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec)
#ifdef CONFIG_PARTITION_MODE
/* Create vm/vcpu for vm */
int prepare_vm(uint16_t pcpu_id)
int32_t prepare_vm(uint16_t pcpu_id)
{
int ret = 0;
int32_t ret = 0;
uint16_t i;
struct acrn_vm *vm = NULL;
struct vm_description *vm_desc = NULL;
@@ -389,9 +389,9 @@ int prepare_vm(uint16_t pcpu_id)
#else
/* Create vm/vcpu for vm0 */
static int prepare_vm0(void)
static int32_t prepare_vm0(void)
{
int err;
int32_t err;
uint16_t i;
struct acrn_vm *vm = NULL;
struct vm_description vm0_desc;
@@ -428,9 +428,9 @@ static int prepare_vm0(void)
return err;
}
int prepare_vm(uint16_t pcpu_id)
int32_t prepare_vm(uint16_t pcpu_id)
{
int err = 0;
int32_t err = 0;
/* prepare vm0 if pcpu_id is BOOT_CPU_ID */
if (pcpu_id == BOOT_CPU_ID) {

View File

@@ -16,7 +16,7 @@ static spinlock_t vmm_hypercall_lock = {
* This function should always return 0 since we shouldn't
* deal with hypercall error in hypervisor.
*/
int vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
{
int32_t ret = -EACCES;
struct acrn_vm *vm = vcpu->vm;

View File

@@ -318,9 +318,9 @@ void init_msr_emulation(struct acrn_vcpu *vcpu)
init_msr_area(vcpu);
}
int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
{
int err = 0;
int32_t err = 0;
uint32_t msr;
uint64_t v = 0UL;
@@ -405,9 +405,9 @@ int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
return err;
}
int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
{
int err = 0;
int32_t err = 0;
uint32_t msr;
uint64_t v;

View File

@@ -241,7 +241,7 @@ hv_emulate_pio(const struct acrn_vcpu *vcpu, struct io_request *io_req)
static int32_t
hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
int status = -ENODEV;
int32_t status = -ENODEV;
uint16_t idx;
uint64_t address, size;
struct mmio_request *mmio_req = &io_req->reqs.mmio;
@@ -475,11 +475,11 @@ void register_io_emulation_handler(struct acrn_vm *vm, uint32_t pio_idx,
* @retval 0 Registration succeeds
* @retval -EINVAL \p read_write is NULL, \p end is not larger than \p start or \p vm has been launched
*/
int register_mmio_emulation_handler(struct acrn_vm *vm,
int32_t register_mmio_emulation_handler(struct acrn_vm *vm,
hv_mem_io_handler_t read_write, uint64_t start,
uint64_t end, void *handler_private_data)
{
int status = -EINVAL;
int32_t status = -EINVAL;
struct mem_io_node *mmio_node;
if ((vm->hw.created_vcpus > 0U) && vm->hw.vcpu_array[0].launched) {

View File

@@ -66,7 +66,7 @@ struct invept_desc {
static inline void local_invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
{
int error = 0;
int32_t error = 0;
struct {
uint32_t vpid : 16;
@@ -86,7 +86,7 @@ static inline void local_invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
static inline void local_invept(uint64_t type, struct invept_desc desc)
{
int error = 0;
int32_t error = 0;
asm volatile ("invept %1, %2\n"
VMFAIL_INVALID_EPT_VPID
@@ -107,7 +107,7 @@ static inline bool cpu_has_vmx_vpid_cap(uint32_t bit_mask)
return ((vmx_caps.vpid & bit_mask) != 0U);
}
int check_vmx_mmu_cap(void)
int32_t check_vmx_mmu_cap(void)
{
uint64_t val;

View File

@@ -55,7 +55,7 @@ void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
wait_sync_change(&smp_call_mask, 0UL);
}
static int request_notification_irq(irq_action_t func, void *data)
static int32_t request_notification_irq(irq_action_t func, void *data)
{
int32_t retval;

View File

@@ -9,7 +9,7 @@
#define CAT__(A,B) A ## B
#define CAT_(A,B) CAT__(A,B)
#define CTASSERT(expr) \
typedef int CAT_(CTA_DummyType,__LINE__)[(expr) ? 1 : -1]
typedef int32_t CAT_(CTA_DummyType,__LINE__)[(expr) ? 1 : -1]
/* Build time sanity checks to make sure hard-coded offset
* is matching the actual offset!

View File

@@ -70,7 +70,7 @@ static void local_add_timer(struct per_cpu_timers *cpu_timer,
}
}
int add_timer(struct hv_timer *timer)
int32_t add_timer(struct hv_timer *timer)
{
struct per_cpu_timers *cpu_timer;
uint16_t pcpu_id;
@@ -134,7 +134,7 @@ static void timer_softirq(uint16_t pcpu_id)
struct per_cpu_timers *cpu_timer;
struct hv_timer *timer;
struct list_head *pos, *n;
int tries = MAX_TIMER_ACTIONS;
int32_t tries = MAX_TIMER_ACTIONS;
uint64_t current_tsc = rdtsc();
/* handle passed timer */

View File

@@ -43,7 +43,7 @@ static void update_trampoline_code_refs(uint64_t dest_pa)
{
void *ptr;
uint64_t val;
int i;
int32_t i;
/*
* calculate the fixup CS:IP according to fixup target address

View File

@@ -279,7 +279,7 @@ static void copy_smc_param(const struct run_context *prev_ctx,
next_ctx->guest_cpu_regs.regs.rbx = prev_ctx->guest_cpu_regs.regs.rbx;
}
void switch_world(struct acrn_vcpu *vcpu, int next_world)
void switch_world(struct acrn_vcpu *vcpu, int32_t next_world)
{
struct acrn_vcpu_arch *arch = &vcpu->arch;

View File

@@ -74,7 +74,7 @@ static bool vcpu_pending_request(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic;
uint32_t vector = 0U;
int ret = 0;
int32_t ret = 0;
/* Query vLapic to get vector to inject */
vlapic = vcpu_vlapic(vcpu);
@@ -108,11 +108,11 @@ void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
}
}
static int vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
static int32_t vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
uint32_t vector = 0U;
int ret = 0;
int32_t ret = 0;
/*
* This function used for inject virtual interrupt
@@ -148,7 +148,7 @@ static int vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
return 0;
}
static int vcpu_do_pending_extint(const struct acrn_vcpu *vcpu)
static int32_t vcpu_do_pending_extint(const struct acrn_vcpu *vcpu)
{
struct acrn_vm *vm;
struct acrn_vcpu *primary;
@@ -176,7 +176,7 @@ static int vcpu_do_pending_extint(const struct acrn_vcpu *vcpu)
}
/* SDM Vol3 -6.15, Table 6-4 - interrupt and exception classes */
static int get_excep_class(uint32_t vector)
static int32_t get_excep_class(uint32_t vector)
{
if ((vector == IDT_DE) || (vector == IDT_TS) || (vector == IDT_NP) ||
(vector == IDT_SS) || (vector == IDT_GP)) {
@@ -188,7 +188,7 @@ static int get_excep_class(uint32_t vector)
}
}
int vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector, uint32_t err_code)
int32_t vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector, uint32_t err_code)
{
struct acrn_vcpu_arch *arch = &vcpu->arch;
/* VECTOR_INVALID is also greater than 32 */
@@ -328,7 +328,7 @@ void vcpu_inject_ss(struct acrn_vcpu *vcpu)
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}
int interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint32_t value32;
@@ -380,9 +380,9 @@ int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
return ret;
}
int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
{
int ret = 0;
int32_t ret = 0;
uint32_t tmp;
uint32_t intr_info;
uint32_t error_code;
@@ -563,12 +563,12 @@ void cancel_event_injection(struct acrn_vcpu *vcpu)
/*
* @pre vcpu != NULL
*/
int exception_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t exception_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint32_t intinfo, int_err_code = 0U;
uint32_t exception_vector = VECTOR_INVALID;
uint32_t cpl;
int status = 0;
int32_t status = 0;
pr_dbg(" Handling guest exception");

View File

@@ -12,9 +12,9 @@
*/
#define NR_VMX_EXIT_REASONS 65U
static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
static int wbinvd_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu);
/* VM Dispatch table for Exit condition handling */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -152,11 +152,11 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
.handler = unhandled_vmexit_handler}
};
int vmexit_handler(struct acrn_vcpu *vcpu)
int32_t vmexit_handler(struct acrn_vcpu *vcpu)
{
struct vm_exit_dispatch *dispatch = NULL;
uint16_t basic_exit_reason;
int ret;
int32_t ret;
if (get_cpu_id() != vcpu->pcpu_id) {
pr_fatal("vcpu is not running on its pcpu!");
@@ -228,7 +228,7 @@ int vmexit_handler(struct acrn_vcpu *vcpu)
return ret;
}
static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
{
pr_fatal("Error: Unhandled VM exit condition from guest at 0x%016llx ",
exec_vmread(VMX_GUEST_RIP));
@@ -243,7 +243,7 @@ static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
return 0;
}
int cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t rax, rbx, rcx, rdx;
@@ -263,7 +263,7 @@ int cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
return 0;
}
int cr_access_vmexit_handler(struct acrn_vcpu *vcpu)
int32_t cr_access_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t reg;
uint32_t idx;
@@ -318,9 +318,9 @@ int cr_access_vmexit_handler(struct acrn_vcpu *vcpu)
* XSETBV instruction set's the XCR0 that is used to tell for which
* components states can be saved on a context switch using xsave.
*/
static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
{
int idx;
int32_t idx;
uint64_t val64;
val64 = exec_vmread(VMX_GUEST_CR4);
@@ -362,7 +362,7 @@ static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
return 0;
}
static int wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
{
if (!iommu_snoop_supported(vcpu->vm)) {
cache_flush_invalidate_all();

View File

@@ -267,7 +267,7 @@ uint64_t vmx_rdmsr_pat(const struct acrn_vcpu *vcpu)
return vcpu_get_guest_msr(vcpu, MSR_IA32_PAT);
}
int vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value)
int32_t vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value)
{
uint32_t i;
uint64_t field;

View File

@@ -30,7 +30,7 @@
.text
/*int vmx_vmrun(struct run_context *context, int launch, int ibrs_type) */
/*int vmx_vmrun(struct run_context *context, int32_t launch, int32_t ibrs_type) */
.code64
.align 8
.global vmx_vmrun

View File

@@ -168,14 +168,14 @@ static inline uint16_t vmid_to_domainid(uint16_t vm_id)
return vm_id + 1U;
}
static int dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit);
static int32_t dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit);
static struct dmar_drhd_rt *device_to_dmaru(uint16_t segment, uint8_t bus, uint8_t devfun);
static int register_hrhd_units(void)
static int32_t register_hrhd_units(void)
{
struct dmar_info *info = get_dmar_info();
struct dmar_drhd_rt *drhd_rt;
uint32_t i;
int ret = 0;
int32_t ret = 0;
if (info == NULL || info->drhd_count == 0U) {
pr_fatal("%s: can't find dmar info\n", __func__);
@@ -392,7 +392,7 @@ static void dmar_disable_translation(struct dmar_drhd_rt *dmar_unit)
spinlock_release(&(dmar_unit->lock));
}
static int dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit)
static int32_t dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit)
{
dev_dbg(ACRN_DBG_IOMMU, "Register dmar uint [%d] @0x%llx", dmar_unit->index, dmar_unit->drhd->reg_base_addr);
@@ -707,7 +707,7 @@ static void dmar_fault_handler(uint32_t irq, void *data)
uint32_t index;
uint32_t record_reg_offset;
uint64_t record[2];
int loop = 0;
int32_t loop = 0;
dev_dbg(ACRN_DBG_IOMMU, "%s: irq = %d", __func__, irq);
@@ -824,7 +824,7 @@ static void dmar_resume(struct dmar_drhd_rt *dmar_unit)
dmar_enable(dmar_unit);
}
static int add_iommu_device(struct iommu_domain *domain, uint16_t segment, uint8_t bus, uint8_t devfun)
static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, uint8_t bus, uint8_t devfun)
{
struct dmar_drhd_rt *dmar_unit;
struct dmar_root_entry *root_table;
@@ -937,7 +937,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment, uint8
return 0;
}
static int remove_iommu_device(const struct iommu_domain *domain, uint16_t segment, uint8_t bus, uint8_t devfun)
static int32_t remove_iommu_device(const struct iommu_domain *domain, uint16_t segment, uint8_t bus, uint8_t devfun)
{
struct dmar_drhd_rt *dmar_unit;
struct dmar_root_entry *root_table;
@@ -1045,9 +1045,9 @@ void destroy_iommu_domain(struct iommu_domain *domain)
(void)memset(domain, 0U, sizeof(*domain));
}
int assign_iommu_device(struct iommu_domain *domain, uint8_t bus, uint8_t devfun)
int32_t assign_iommu_device(struct iommu_domain *domain, uint8_t bus, uint8_t devfun)
{
int status = 0;
int32_t status = 0;
/* TODO: check if the device assigned */
@@ -1061,9 +1061,9 @@ int assign_iommu_device(struct iommu_domain *domain, uint8_t bus, uint8_t devfun
return add_iommu_device(domain, 0U, bus, devfun);
}
int unassign_iommu_device(const struct iommu_domain *domain, uint8_t bus, uint8_t devfun)
int32_t unassign_iommu_device(const struct iommu_domain *domain, uint8_t bus, uint8_t devfun)
{
int status = 0;
int32_t status = 0;
/* TODO: check if the device assigned */
status = remove_iommu_device(domain, 0U, bus, devfun);
@@ -1101,9 +1101,9 @@ void resume_iommu(void)
do_action_for_iommus(dmar_resume);
}
int init_iommu(void)
int32_t init_iommu(void)
{
int ret;
int32_t ret;
ret = register_hrhd_units();
if (ret != 0) {