HV: treewide: enforce unsignedness of pcpu_id
In the hypervisor, the physical CPU id is currently defined as either "int" or "uint32_t", so the static analysis tool reports sign conversion issues for the physical CPU id (pcpu_id). Such sign conversions violate the MISRA C:2012 rules.

This patch defines the physical CPU id as "uint16_t" for all modules in the hypervisor and updates the related code. The valid range of pcpu_id is 0~65534; INVALID_PCPU_ID is defined as the invalid pcpu_id for error detection, and BROADCAST_PCPU_ID is the broadcast pcpu_id used to notify all valid pcpus. The pcpu_id and vcpu_id fields in struct vcpu are still of "int" type; this will be fixed in another patch.

V1-->V2:
* Change the type of pcpu_id from uint32_t to uint16_t;
* Define INVALID_PCPU_ID for error detection;
* Define BROADCAST_PCPU_ID to notify all valid pcpus.

V2-->V3:
* Update comments for INVALID_PCPU_ID and BROADCAST_PCPU_ID;
* Update additional pcpu_id uses;
* Convert hexadecimal literals to unsigned to match the type of pcpu_id;
* Clean up MIN_PCPU_ID and MAX_PCPU_ID; they will be defined by configuration.

Note: also fix a bug in init_lapic(): pcpu_id shall be less than 8, as constrained by the implementation of init_lapic().

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
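The definitions of the two sentinel ids are not visible in the hunks below. As a rough sketch of what the commit message describes (the numeric values, header, and comments here are illustrative assumptions, with the sentinels placed at the top of the uint16_t range, not the actual ACRN sources):

	/* Sketch only: illustrative definitions, not the actual ACRN header. */
	#include <stdint.h>

	/* pcpu_id is a 16-bit unsigned value; ids 0~65534 are treated as valid. */
	#define INVALID_PCPU_ID		0xFFFFU	/* no valid pcpu; returned on allocation failure */
	#define BROADCAST_PCPU_ID	0xFFFEU	/* assumed value; addresses all valid pcpus */

	uint16_t pcpu_id;	/* physical cpu id type used throughout the hypervisor */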
@@ -99,12 +99,12 @@ void vcpu_thread(struct vcpu *vcpu)
 	} while (1);
 }
 
-static bool is_vm0_bsp(int pcpu_id)
+static bool is_vm0_bsp(uint16_t pcpu_id)
 {
 	return pcpu_id == vm0_desc.vm_hw_logical_core_ids[0];
 }
 
-int hv_main(int cpu_id)
+int hv_main(uint16_t cpu_id)
 {
 	int ret;
 
@@ -117,7 +117,7 @@ int hv_main(int cpu_id)
 		return -EINVAL;
 	}
 
-	if ((uint32_t) cpu_id != get_cpu_id()) {
+	if (cpu_id != get_cpu_id()) {
 		pr_err("%s, cpu_id %d mismatch\n", __func__, cpu_id);
 		return -EINVAL;
 	}
 
@@ -205,7 +205,8 @@ int64_t hcall_pause_vm(uint64_t vmid)
 
 int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
 {
-	int ret, pcpu_id;
+	int ret;
+	uint16_t pcpu_id;
 	struct acrn_create_vcpu cv;
 
 	struct vm *target_vm = get_vm_from_vmid(vmid);
@@ -219,7 +220,7 @@ int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
 	}
 
 	pcpu_id = allocate_pcpu();
-	if (-1 == pcpu_id) {
+	if (INVALID_PCPU_ID == pcpu_id) {
 		pr_err("%s: No physical available\n", __func__);
 		return -1;
 	}
 
@@ -22,12 +22,12 @@ void init_scheduler(void)
 	}
 }
 
-void get_schedule_lock(int pcpu_id)
+void get_schedule_lock(uint16_t pcpu_id)
 {
 	spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).scheduler_lock);
 }
 
-void release_schedule_lock(int pcpu_id)
+void release_schedule_lock(uint16_t pcpu_id)
 {
 	spinlock_release(&per_cpu(sched_ctx, pcpu_id).scheduler_lock);
 }
@@ -41,15 +41,15 @@ int allocate_pcpu(void)
 		return i;
 	}
 
-	return -1;
+	return INVALID_PCPU_ID;
 }
 
-void set_pcpu_used(int pcpu_id)
+void set_pcpu_used(uint16_t pcpu_id)
 {
 	bitmap_set(pcpu_id, &pcpu_used_bitmap);
 }
 
-void free_pcpu(int pcpu_id)
+void free_pcpu(uint16_t pcpu_id)
 {
 	bitmap_clear(pcpu_id, &pcpu_used_bitmap);
 }
@@ -74,7 +74,7 @@ void remove_vcpu_from_runqueue(struct vcpu *vcpu)
 	spinlock_release(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
 }
 
-static struct vcpu *select_next_vcpu(int pcpu_id)
+static struct vcpu *select_next_vcpu(uint16_t pcpu_id)
 {
 	struct vcpu *vcpu = NULL;
 
@@ -95,7 +95,7 @@ void make_reschedule_request(struct vcpu *vcpu)
 	send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
 }
 
-int need_reschedule(int pcpu_id)
+int need_reschedule(uint16_t pcpu_id)
 {
 	return bitmap_test_and_clear(NEED_RESCHEDULE,
 		&per_cpu(sched_ctx, pcpu_id).flags);
@@ -136,14 +136,14 @@ static void context_switch_in(struct vcpu *vcpu)
 	 */
 }
 
-void make_pcpu_offline(int pcpu_id)
+void make_pcpu_offline(uint16_t pcpu_id)
 {
 	bitmap_set(NEED_OFFLINE,
 		&per_cpu(sched_ctx, pcpu_id).flags);
 	send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
 }
 
-int need_offline(int pcpu_id)
+int need_offline(uint16_t pcpu_id)
 {
 	return bitmap_test_and_clear(NEED_OFFLINE,
 		&per_cpu(sched_ctx, pcpu_id).flags);
@@ -151,7 +151,7 @@ int need_offline(int pcpu_id)
 
 void default_idle(void)
 {
-	int pcpu_id = get_cpu_id();
+	uint16_t pcpu_id = get_cpu_id();
 
 	while (1) {
 		if (need_reschedule(pcpu_id) != 0)
@@ -202,7 +202,7 @@ static void switch_to(struct vcpu *curr)
 
 void schedule(void)
 {
-	int pcpu_id = get_cpu_id();
+	uint16_t pcpu_id = get_cpu_id();
 	struct vcpu *next = NULL;
 	struct vcpu *prev = per_cpu(sched_ctx, pcpu_id).curr_vcpu;
 
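For reference, a minimal, self-contained sketch of the allocate/check pattern these hunks converge on. The uint16_t return type of the allocator, PHYS_CPU_NUM, pcpu_used_bitmap, and bitmap_test_and_set() are stand-ins for the hypervisor's own definitions, and the INVALID_PCPU_ID value is assumed; the caller-side check mirrors hcall_create_vcpu() above.

	/* Sketch only: stand-in helpers, not the ACRN implementation. */
	#include <stdint.h>
	#include <stdio.h>

	#define PHYS_CPU_NUM      8U        /* assumed platform cpu count */
	#define INVALID_PCPU_ID   0xFFFFU   /* assumed value, per the commit message */

	static uint64_t pcpu_used_bitmap;   /* bit n set => pcpu n is allocated */

	/* Minimal stand-in for the hypervisor's bitmap helper:
	 * set bit n and return its previous value. */
	static int bitmap_test_and_set(uint16_t n, uint64_t *map)
	{
		int old = (int)((*map >> n) & 1UL);

		*map |= (1UL << n);
		return old;
	}

	static uint16_t allocate_pcpu(void)
	{
		uint16_t i;

		for (i = 0U; i < PHYS_CPU_NUM; i++) {
			if (bitmap_test_and_set(i, &pcpu_used_bitmap) == 0) {
				return i;                 /* first free pcpu */
			}
		}
		return INVALID_PCPU_ID;           /* nothing free: callers must check */
	}

	int main(void)
	{
		uint16_t pcpu_id = allocate_pcpu();

		if (pcpu_id == INVALID_PCPU_ID) { /* error check, as in hcall_create_vcpu() */
			printf("no physical cpu available\n");
		} else {
			printf("allocated pcpu %u\n", pcpu_id);
		}
		return 0;
	}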