HV:common:transfer local variable type

Local variables should be converted from basic types to the
length-prefixed fixed-width types (uint32_t, int32_t, ...).

Char * and char array types that are used to point to strings
are kept unchanged.

V1->V2: add extra comments.

Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Huihuang Shi 2018-06-28 16:27:12 +08:00 committed by lijinxia
parent 4ec690fde3
commit b8384ea0dd
8 changed files with 30 additions and 28 deletions
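A minimal sketch of the convention this commit applies, using an illustrative helper (count_bits_old/count_bits_new are not ACRN functions): width-ambiguous basic types such as int and unsigned long are replaced by the fixed-width <stdint.h> types, and printf-style format specifiers are matched to the new type (e.g. %hu for a uint16_t, as in get_vmexit_profile below).

#include <stdint.h>
#include <stdio.h>

/* Illustrative only -- not ACRN code. Before: the width of the loop
 * counter and return value depends on the ABI. */
static int count_bits_old(unsigned long bits)
{
    int n = 0;

    while (bits != 0UL) {
        bits &= bits - 1UL;   /* clear the lowest set bit */
        n++;
    }
    return n;
}

/* After: fixed-width types state the width explicitly, and callers
 * match the printf length modifier to the type. */
static int32_t count_bits_new(uint64_t bits)
{
    int32_t n = 0;

    while (bits != 0UL) {
        bits &= bits - 1UL;
        n++;
    }
    return n;
}

int main(void)
{
    uint16_t cpu = 3U;   /* mirrors the uint16_t cpu variables below */

    /* %hu pairs with uint16_t, as in the snprintf change below. */
    printf("CPU%hu: %d -> %d\n", cpu,
           count_bits_old(0xf0UL), (int)count_bits_new(0xf0UL));
    return 0;
}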

View File

@@ -11,7 +11,7 @@ bool x2apic_enabled;
static void run_vcpu_pre_work(struct vcpu *vcpu)
{
- unsigned long *pending_pre_work = &vcpu->pending_pre_work;
+ uint64_t *pending_pre_work = &vcpu->pending_pre_work;
if (bitmap_test_and_clear(ACRN_VCPU_MMIO_COMPLETE, pending_pre_work))
dm_emulate_mmio_post(vcpu);
@@ -22,7 +22,7 @@ void vcpu_thread(struct vcpu *vcpu)
uint64_t vmexit_begin = 0, vmexit_end = 0;
uint16_t basic_exit_reason = 0;
uint64_t tsc_aux_hyp_cpu = vcpu->pcpu_id;
- int ret = 0;
+ int32_t ret = 0;
/* If vcpu is not launched, we need to do init_vmcs first */
if (!vcpu->launched)
@@ -104,9 +104,9 @@ static bool is_vm0_bsp(uint16_t pcpu_id)
return pcpu_id == vm0_desc.vm_hw_logical_core_ids[0];
}
- int hv_main(uint16_t cpu_id)
+ int32_t hv_main(uint16_t cpu_id)
{
- int ret;
+ int32_t ret;
pr_info("%s, Starting common entry point for CPU %d",
__func__, cpu_id);
@@ -143,7 +143,8 @@ int hv_main(uint16_t cpu_id)
void get_vmexit_profile(char *str, int str_max)
{
- int cpu, i, len, size = str_max;
+ uint16_t cpu, i;
+ int len, size = str_max;
len = snprintf(str, size, "\r\nNow(us) = %16lld\r\n",
TICKS_TO_US(rdtsc()));
@@ -155,7 +156,7 @@ void get_vmexit_profile(char *str, int str_max)
str += len;
for (cpu = 0; cpu < phys_cpu_num; cpu++) {
- len = snprintf(str, size, "\t CPU%d\t US", cpu);
+ len = snprintf(str, size, "\t CPU%hu\t US", cpu);
size -= len;
str += len;
}

View File

@@ -43,7 +43,7 @@ int64_t hcall_get_api_version(struct vm *vm, uint64_t param)
static int handle_vpic_irqline(struct vm *vm, int irq, enum irq_mode mode)
{
- int ret = -1;
+ int32_t ret = -1;
if (vm == NULL)
return ret;
@@ -67,7 +67,7 @@ static int handle_vpic_irqline(struct vm *vm, int irq, enum irq_mode mode)
static int
handle_vioapic_irqline(struct vm *vm, int irq, enum irq_mode mode)
{
- int ret = -1;
+ int32_t ret = -1;
if (vm == NULL)
return ret;
@@ -91,8 +91,8 @@ handle_vioapic_irqline(struct vm *vm, int irq, enum irq_mode mode)
static int handle_virt_irqline(struct vm *vm, uint64_t target_vmid,
struct acrn_irqline *param, enum irq_mode mode)
{
- int ret = 0;
- long intr_type;
+ int32_t ret = 0;
+ uint32_t intr_type;
struct vm *target_vm = get_vm_from_vmid(target_vmid);
if ((vm == NULL) || (param == NULL))
@@ -205,7 +205,7 @@ int64_t hcall_pause_vm(uint64_t vmid)
int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
{
- int ret;
+ int32_t ret;
uint16_t pcpu_id;
struct acrn_create_vcpu cv;
@@ -274,7 +274,7 @@ int64_t hcall_pulse_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
int64_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param)
{
- int ret = 0;
+ int32_t ret = 0;
struct acrn_msi_entry msi;
struct vm *target_vm = get_vm_from_vmid(vmid);
@@ -474,7 +474,7 @@ int64_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param)
struct set_memmaps set_memmaps;
struct memory_map *regions;
struct vm *target_vm;
- unsigned int idx;
+ uint32_t idx;
if (!is_vm0(vm)) {
pr_err("%s: ERROR! Not coming from service vm",
@@ -496,7 +496,7 @@ int64_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param)
return -1;
}
- idx = 0;
+ idx = 0U;
/*TODO: use copy_from_gpa for this buffer page */
regions = GPA2HVA(vm, set_memmaps.memmaps_gpa);
while (idx < set_memmaps.memmaps_num) {
@@ -711,7 +711,7 @@ int64_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
int64_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
{
- int target_vm_id;
+ int32_t target_vm_id;
struct vm *target_vm;
target_vm_id = (cmd & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT;
@@ -736,7 +736,7 @@ int64_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
return 0;
}
case PMCMD_GET_PX_DATA: {
- int pn;
+ int32_t pn;
struct cpu_px_data *px_data;
/* For now we put px data as per-vm,

View File

@@ -55,10 +55,10 @@ static void acrn_print_request(int vcpu_id, struct vhm_request *req)
}
}
- int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
+ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
{
union vhm_request_buffer *req_buf = NULL;
- long cur;
+ uint16_t cur;
ASSERT(sizeof(*req) == (4096/VHM_REQUEST_MAX),
"vhm_request page broken!");
@@ -148,11 +148,12 @@ static void _get_req_info_(struct vhm_request *req, int *id, char *type,
void get_req_info(char *str, int str_max)
{
- int i, len, size = str_max, client_id;
+ uint32_t i;
+ int32_t len, size = str_max, client_id;
union vhm_request_buffer *req_buf;
struct vhm_request *req;
char type[16], state[16], dir[16];
- long addr, val;
+ int64_t addr, val;
struct list_head *pos;
struct vm *vm;

View File

@@ -11,7 +11,7 @@ static unsigned long pcpu_used_bitmap;
void init_scheduler(void)
{
- int i;
+ uint32_t i;
for (i = 0; i < phys_cpu_num; i++) {
spinlock_init(&per_cpu(sched_ctx, i).runqueue_lock);

View File

@@ -12,7 +12,7 @@
*/
int64_t hcall_world_switch(struct vcpu *vcpu)
{
- int next_world_id = !(vcpu->arch_vcpu.cur_context);
+ int32_t next_world_id = !(vcpu->arch_vcpu.cur_context);
if (next_world_id >= NR_WORLD) {
pr_err("%s world_id %d exceed max number of Worlds\n",

View File

@@ -71,7 +71,7 @@ static uint64_t create_zero_page(struct vm *vm)
int load_guest(struct vm *vm, struct vcpu *vcpu)
{
- int ret = 0;
+ int32_t ret = 0;
void *hva;
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
@@ -102,7 +102,7 @@ int load_guest(struct vm *vm, struct vcpu *vcpu)
int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
{
- int ret = 0;
+ int32_t ret = 0;
void *hva;
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
@@ -127,7 +127,7 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
kernel_entry_offset += 512;
vm->sw.kernel_info.kernel_entry_addr =
- (void *)((unsigned long)vm->sw.kernel_info.kernel_load_addr
+ (void *)((uint64_t)vm->sw.kernel_info.kernel_load_addr
+ kernel_entry_offset);
if (is_vcpu_bsp(vcpu)) {
/* Set VCPU entry point to kernel entry */
@@ -176,7 +176,7 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
* remained 1G pages" for reserving.
*/
if (is_vm0(vm) && check_mmu_1gb_support(PTT_HOST)) {
- int reserving_1g_pages;
+ int32_t reserving_1g_pages;
#ifdef CONFIG_REMAIN_1G_PAGES
reserving_1g_pages = (e820_mem.total_mem_size >> 30) -

View File

@@ -257,7 +257,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
/* Function prototypes */
void cpu_dead(uint32_t logical_id);
void trampoline_start16(void);
- int hv_main(uint16_t cpu_id);
+ int32_t hv_main(uint16_t cpu_id);
bool is_vapic_supported(void);
bool is_vapic_intr_delivery_supported(void);
bool is_vapic_virt_reg_supported(void);

View File

@@ -34,7 +34,7 @@ enum {
struct vhm_request;
- int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req);
+ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req);
void get_req_info(char *str, int str_max);
/*