HV: treewide: enforce unsignedness of pcpu_id
In the hypervisor, the physical cpu id (pcpu_id) is defined as "int" in some modules and as "uint32_t" in others, so the static analysis tool reports a number of sign-conversion issues for it; sign conversion violates the rules of MISRA C:2012.

This patch defines the physical cpu id as "uint16_t" for all modules in the hypervisor and updates the related code. The valid range of pcpu_id is 0~65534. INVALID_PCPU_ID is the invalid pcpu_id defined for error detection, and BROADCAST_PCPU_ID is the broadcast pcpu_id used to notify all valid pcpus. The pcpu_id and vcpu_id fields in struct vcpu are still of "int" type; they will be fixed in another patch.

V1-->V2:
*  Change the type of pcpu_id from uint32_t to uint16_t;
*  Define INVALID_PCPU_ID for error detection;
*  Define BROADCAST_PCPU_ID to notify all valid pcpus.

V2-->V3:
*  Update comments for INVALID_PCPU_ID and BROADCAST_PCPU_ID;
*  Update the remaining pcpu_id uses;
*  Convert hexadecimal literals to unsigned to match the type of pcpu_id;
*  Clean up MIN_PCPU_ID and MAX_PCPU_ID; they will be defined by configuration.

Note: this patch also fixes a bug in init_lapic(): the pcpu_id shall be less than 8, a constraint imposed by the implementation of init_lapic().

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 3027bfab10
parent 2922a657c9
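The convention the patch introduces can be summarized in a small, standalone sketch. The allocator below is a hypothetical stand-in for the hypervisor's allocate_pcpu(), only to show how the uint16_t type and the INVALID_PCPU_ID sentinel (the top of the uint16_t range is reserved for the two sentinels) are meant to work together:

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_PCPU_ID   0xffffU   /* error detection, from this patch */
    #define BROADCAST_PCPU_ID 0xfffeU   /* notify all valid pcpus */

    /* hypothetical stand-in for the hypervisor's allocate_pcpu() */
    static uint16_t alloc_pcpu(uint64_t *used_bitmap, uint16_t phy_cpu_num)
    {
        uint16_t i;

        for (i = 0U; i < phy_cpu_num; i++) {
            if ((*used_bitmap & (1UL << i)) == 0UL) {
                *used_bitmap |= (1UL << i);
                return i;
            }
        }
        return INVALID_PCPU_ID;   /* no free pcpu left */
    }

    int main(void)
    {
        uint64_t bitmap = 0UL;
        uint16_t id;

        while ((id = alloc_pcpu(&bitmap, 2U)) != INVALID_PCPU_ID)
            printf("allocated pcpu %u\n", id);
        printf("allocation failed: INVALID_PCPU_ID\n");
        return 0;
    }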
@@ -167,10 +167,10 @@ struct acrn_create_vm {
  */
 struct acrn_create_vcpu {
     /** the virtual CPU ID for the VCPU created */
-    uint32_t vcpu_id;
+    uint16_t vcpu_id;
 
     /** the physical CPU ID for the VCPU created */
-    uint32_t pcpu_id;
+    uint16_t pcpu_id;
 } __aligned(8);
 
 /**
@@ -506,7 +506,7 @@ static void ptdev_intr_handle_irq(struct vm *vm,
     }
 }
 
-void ptdev_softirq(__unused int cpu)
+void ptdev_softirq(__unused uint16_t cpu_id)
 {
     while (1) {
         struct ptdev_remapping_info *entry = ptdev_dequeue_softirq();
@@ -23,7 +23,7 @@ spinlock_t up_count_spinlock = {
 };
 
 struct per_cpu_region *per_cpu_data_base_ptr;
-int phy_cpu_num = 0;
+uint16_t phy_cpu_num = 0U;
 unsigned long pcpu_sync = 0;
 volatile uint32_t up_count = 0;
 
@@ -224,7 +224,7 @@ static int hardware_detect_support(void)
     return 0;
 }
 
-static void alloc_phy_cpu_data(int pcpu_num)
+static void alloc_phy_cpu_data(uint16_t pcpu_num)
 {
     phy_cpu_num = pcpu_num;
 
@@ -245,7 +245,8 @@ int __attribute__((weak)) parse_madt(uint8_t *lapic_id_base)
 
 static int init_phy_cpu_storage(void)
 {
-    int i, pcpu_num = 0;
+    int i;
+    uint16_t pcpu_num = 0U;
     int bsp_cpu_id;
     uint8_t bsp_lapic_id = 0;
     uint8_t *lapic_id_base;
@@ -47,7 +47,7 @@ inline struct vcpu *vcpu_from_vid(struct vm *vm, int vcpu_id)
     return NULL;
 }
 
-inline struct vcpu *vcpu_from_pid(struct vm *vm, int pcpu_id)
+inline struct vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id)
 {
     int i;
     struct vcpu *vcpu;
@@ -14,7 +14,7 @@ extern struct efi_ctx* efi_ctx;
 
 vm_sw_loader_t vm_sw_loader;
 
-struct vcpu *get_ever_run_vcpu(int pcpu_id)
+struct vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
 {
     return per_cpu(ever_run_vcpu, pcpu_id);
 }
@@ -31,7 +31,7 @@ struct vcpu *get_ever_run_vcpu(int pcpu_id)
 * for physical CPU 1 : vcpu->pcpu_id = 1, vcpu->vcpu_id = 1, vmid = 1;
 *
 ***********************************************************************/
-int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
+int create_vcpu(uint16_t cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 {
     struct vcpu *vcpu;
 
@@ -284,7 +284,7 @@ void reset_vcpu(struct vcpu *vcpu)
 
 void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
 {
-    int pcpu_id = get_cpu_id();
+    uint16_t pcpu_id = get_cpu_id();
 
     pr_dbg("vcpu%d paused, new state: %d",
         vcpu->vcpu_id, new_state);
@@ -334,7 +334,7 @@ void schedule_vcpu(struct vcpu *vcpu)
 }
 
 /* help function for vcpu create */
-int prepare_vcpu(struct vm *vm, int pcpu_id)
+int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
 {
     int ret = 0;
     struct vcpu *vcpu = NULL;
@@ -115,7 +115,7 @@ vm_lapic_from_vcpu_id(struct vm *vm, int vcpu_id)
 }
 
 struct vlapic *
-vm_lapic_from_pcpuid(struct vm *vm, int pcpu_id)
+vm_lapic_from_pcpuid(struct vm *vm, uint16_t pcpu_id)
 {
     struct vcpu *vcpu;
 
@@ -125,7 +125,7 @@ vm_lapic_from_pcpuid(struct vm *vm, int pcpu_id)
     return vcpu->arch_vcpu.vlapic;
 }
 
-static int vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
+static uint16_t vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
 {
     int i;
     struct vcpu *vcpu;
@@ -232,7 +232,7 @@ static void vpic_notify_intr(struct vpic *vpic)
         ASSERT(vcpu != NULL, "vm%d, vcpu0", vpic->vm->attr.id);
         vcpu_inject_extint(vcpu);
     } else {
-        vlapic_set_local_intr(vpic->vm, -1, APIC_LVT_LINT0);
+        vlapic_set_local_intr(vpic->vm, BROADCAST_PCPU_ID, APIC_LVT_LINT0);
         /* notify vioapic pin0 if existing
          * For vPIC + vIOAPIC mode, vpic master irq connected
          * to vioapic pin0 (irq2)
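The vpic hunk above replaces the magic "-1 means every CPU" argument with the named broadcast id. A minimal sketch of the dispatch convention (the notify() helper is hypothetical, for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    #define BROADCAST_PCPU_ID 0xfffeU

    /* hypothetical helper showing how a callee can branch on the id */
    static void notify(uint16_t pcpu_id)
    {
        if (pcpu_id == BROADCAST_PCPU_ID)
            printf("notify all valid pcpus\n");
        else
            printf("notify pcpu %u\n", pcpu_id);
    }

    int main(void)
    {
        notify(1U);
        notify(BROADCAST_PCPU_ID);   /* replaces the old magic -1 */
        return 0;
    }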
@@ -211,11 +211,11 @@ int early_init_lapic(void)
     return 0;
 }
 
-int init_lapic(uint32_t cpu_id)
+int init_lapic(uint16_t cpu_id)
 {
     /* Set the Logical Destination Register */
     write_lapic_reg32(LAPIC_LOGICAL_DESTINATION_REGISTER,
-        (1 << cpu_id) << 24);
+        ((1U << cpu_id) << 24));
 
     /* Set the Destination Format Register */
     write_lapic_reg32(LAPIC_DESTINATION_FORMAT_REGISTER, 0xf << 28);
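The note in the commit message about pcpu_id needing to be less than 8 follows directly from this hunk: in xAPIC flat logical mode, the Logical Destination Register keeps a one-bit-per-CPU mask in bits 31:24, so `(1U << cpu_id) << 24` yields a valid mask only for cpu_id 0..7. A hedged sketch of the constraint (the helper name is made up):

    #include <assert.h>
    #include <stdint.h>

    /* hypothetical helper: why init_lapic() implies cpu_id < 8 */
    static uint32_t flat_ldr_value(uint16_t cpu_id)
    {
        /* the flat-model LDR has only 8 destination bits (31:24) */
        assert(cpu_id < 8U);
        return (1U << cpu_id) << 24;
    }

    int main(void)
    {
        return flat_ldr_value(3U) == 0x08000000U ? 0 : 1;
    }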
@@ -408,7 +408,7 @@ send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand,
     return status;
 }
 
-void send_single_ipi(uint32_t pcpu_id, uint32_t vector)
+void send_single_ipi(uint16_t pcpu_id, uint32_t vector)
 {
     uint32_t dest_lapic_id, hi_32, lo_32;
 
@@ -385,7 +385,7 @@ uint32_t dev_to_vector(struct dev_handler_node *node)
     return node->desc->vector;
 }
 
-int init_default_irqs(unsigned int cpu_id)
+int init_default_irqs(uint16_t cpu_id)
 {
     if (cpu_id > 0)
         return 0;
@@ -692,7 +692,7 @@ pri_register_handler(uint32_t irq,
 
 void get_cpu_interrupt_info(char *str, int str_max)
 {
-    int pcpu_id;
+    uint16_t pcpu_id;
     uint32_t irq, vector, len, size = str_max;
     struct irq_desc *desc;
 
@@ -42,7 +42,7 @@ static int request_notification_irq(dev_handler_t func, void *data,
 
 void setup_notification(void)
 {
-    int cpu;
+    uint16_t cpu;
     char name[32] = {0};
 
     cpu = get_cpu_id();
@@ -6,19 +6,19 @@
 
 #include <hypervisor.h>
 
-void disable_softirq(int cpu_id)
+void disable_softirq(uint16_t cpu_id)
 {
     bitmap_clear(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
 }
 
-void enable_softirq(int cpu_id)
+void enable_softirq(uint16_t cpu_id)
 {
     bitmap_set(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
 }
 
 void init_softirq(void)
 {
-    int cpu_id;
+    uint16_t cpu_id;
 
     for (cpu_id = 0; cpu_id < phy_cpu_num; cpu_id++) {
         per_cpu(softirq_pending, cpu_id) = 0;
@@ -28,7 +28,7 @@ void init_softirq(void)
 
 void raise_softirq(int softirq_id)
 {
-    int cpu_id = get_cpu_id();
+    uint16_t cpu_id = get_cpu_id();
     uint64_t *bitmap = &per_cpu(softirq_pending, cpu_id);
 
     if (cpu_id >= phy_cpu_num)
@@ -39,7 +39,7 @@ void raise_softirq(int softirq_id)
 
 void exec_softirq(void)
 {
-    int cpu_id = get_cpu_id();
+    uint16_t cpu_id = get_cpu_id();
     volatile uint64_t *bitmap = &per_cpu(softirq_pending, cpu_id);
 
     int softirq_id;
@@ -70,7 +70,7 @@ static void __add_timer(struct per_cpu_timers *cpu_timer,
 int add_timer(struct timer *timer)
 {
     struct per_cpu_timers *cpu_timer;
-    int pcpu_id;
+    uint16_t pcpu_id;
     bool need_update;
 
     if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0)
@@ -99,7 +99,7 @@ void del_timer(struct timer *timer)
     list_del_init(&timer->node);
 }
 
-static int request_timer_irq(int pcpu_id,
+static int request_timer_irq(uint16_t pcpu_id,
         dev_handler_t func, void *data,
         const char *name)
 {
@@ -125,7 +125,7 @@ static int request_timer_irq(int pcpu_id,
     return 0;
 }
 
-static void init_percpu_timer(int pcpu_id)
+static void init_percpu_timer(uint16_t pcpu_id)
 {
     struct per_cpu_timers *cpu_timer;
 
@@ -149,7 +149,7 @@ static void init_tsc_deadline_timer(void)
 void timer_init(void)
 {
     char name[32] = {0};
-    int pcpu_id = get_cpu_id();
+    uint16_t pcpu_id = get_cpu_id();
 
     snprintf(name, 32, "timer_tick[%d]", pcpu_id);
     if (request_timer_irq(pcpu_id, tsc_deadline_handler, NULL, name) < 0) {
@@ -163,7 +163,7 @@ void timer_init(void)
 
 void timer_cleanup(void)
 {
-    int pcpu_id = get_cpu_id();
+    uint16_t pcpu_id = get_cpu_id();
 
     if (per_cpu(timer_node, pcpu_id) != NULL)
         unregister_handler_common(per_cpu(timer_node, pcpu_id));
@@ -171,7 +171,7 @@ void timer_cleanup(void)
     per_cpu(timer_node, pcpu_id) = NULL;
 }
 
-void timer_softirq(int pcpu_id)
+void timer_softirq(uint16_t pcpu_id)
 {
     struct per_cpu_timers *cpu_timer;
     struct timer *timer;
@@ -86,7 +86,7 @@ static inline int exec_vmxon(void *addr)
  * It will be used again when we start a pcpu after the pcpu was down.
  * S3 enter/exit will use it.
  */
-int exec_vmxon_instr(uint32_t pcpu_id)
+int exec_vmxon_instr(uint16_t pcpu_id)
 {
     uint64_t tmp64, vmcs_pa;
     uint32_t tmp32;
@@ -128,7 +128,7 @@ int exec_vmxon_instr(uint32_t pcpu_id)
     return ret;
 }
 
-int vmx_off(int pcpu_id)
+int vmx_off(uint16_t pcpu_id)
 {
     int ret = 0;
 
@@ -215,9 +215,9 @@ void *get_acpi_tbl(char *sig)
     return HPA2HVA(addr);
 }
 
-static int _parse_madt(void *madt, uint8_t *lapic_id_base)
+static uint16_t _parse_madt(void *madt, uint8_t *lapic_id_base)
 {
-    int pcpu_id = 0;
+    uint16_t pcpu_id = 0;
     struct acpi_madt_local_apic *processor;
     struct acpi_table_madt *madt_ptr;
     void *first;
@@ -249,7 +249,7 @@ static int _parse_madt(void *madt, uint8_t *lapic_id_base)
 }
 
 /* The lapic_id info gotten from madt will be returned in lapic_id_base */
-int parse_madt(uint8_t *lapic_id_base)
+uint16_t parse_madt(uint8_t *lapic_id_base)
 {
     void *madt;
 
@@ -28,7 +28,7 @@ struct acpi_table_header {
     uint32_t asl_compiler_revision;
 };
 
-int parse_madt(uint8_t *lapic_id_base);
+uint16_t parse_madt(uint8_t *lapic_id_base);
 
 void *get_dmar_table(void);
 #endif /* !ACPI_H */
@@ -99,12 +99,12 @@ void vcpu_thread(struct vcpu *vcpu)
     } while (1);
 }
 
-static bool is_vm0_bsp(int pcpu_id)
+static bool is_vm0_bsp(uint16_t pcpu_id)
 {
     return pcpu_id == vm0_desc.vm_hw_logical_core_ids[0];
 }
 
-int hv_main(int cpu_id)
+int hv_main(uint16_t cpu_id)
 {
     int ret;
 
@@ -117,7 +117,7 @@ int hv_main(int cpu_id)
         return -EINVAL;
     }
 
-    if ((uint32_t) cpu_id != get_cpu_id()) {
+    if (cpu_id != get_cpu_id()) {
         pr_err("%s, cpu_id %d mismatch\n", __func__, cpu_id);
         return -EINVAL;
     }
@@ -205,7 +205,8 @@ int64_t hcall_pause_vm(uint64_t vmid)
 
 int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
 {
-    int ret, pcpu_id;
+    int ret;
+    uint16_t pcpu_id;
     struct acrn_create_vcpu cv;
 
     struct vm *target_vm = get_vm_from_vmid(vmid);
@@ -219,7 +220,7 @@ int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
     }
 
     pcpu_id = allocate_pcpu();
-    if (-1 == pcpu_id) {
+    if (INVALID_PCPU_ID == pcpu_id) {
         pr_err("%s: No physical available\n", __func__);
         return -1;
     }
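The change from `-1 == pcpu_id` to the named sentinel is not only cosmetic. Once pcpu_id is uint16_t, it promotes to a non-negative int (0..65535) in comparisons, so `-1 == pcpu_id` could never be true and the error path would silently become dead code. A small demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t pcpu_id = 0xffffU;            /* i.e. INVALID_PCPU_ID */

        /* integer promotion turns pcpu_id into int 65535, never -1 */
        printf("%d\n", -1 == pcpu_id);         /* prints 0 */
        printf("%d\n", 0xffffU == pcpu_id);    /* prints 1 */
        return 0;
    }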
@@ -22,12 +22,12 @@ void init_scheduler(void)
     }
 }
 
-void get_schedule_lock(int pcpu_id)
+void get_schedule_lock(uint16_t pcpu_id)
 {
     spinlock_obtain(&per_cpu(sched_ctx, pcpu_id).scheduler_lock);
 }
 
-void release_schedule_lock(int pcpu_id)
+void release_schedule_lock(uint16_t pcpu_id)
 {
     spinlock_release(&per_cpu(sched_ctx, pcpu_id).scheduler_lock);
 }
@@ -41,15 +41,15 @@ int allocate_pcpu(void)
         return i;
     }
 
-    return -1;
+    return INVALID_PCPU_ID;
 }
 
-void set_pcpu_used(int pcpu_id)
+void set_pcpu_used(uint16_t pcpu_id)
 {
     bitmap_set(pcpu_id, &pcpu_used_bitmap);
 }
 
-void free_pcpu(int pcpu_id)
+void free_pcpu(uint16_t pcpu_id)
 {
     bitmap_clear(pcpu_id, &pcpu_used_bitmap);
 }
@@ -74,7 +74,7 @@ void remove_vcpu_from_runqueue(struct vcpu *vcpu)
     spinlock_release(&per_cpu(sched_ctx, pcpu_id).runqueue_lock);
 }
 
-static struct vcpu *select_next_vcpu(int pcpu_id)
+static struct vcpu *select_next_vcpu(uint16_t pcpu_id)
 {
     struct vcpu *vcpu = NULL;
 
@@ -95,7 +95,7 @@ void make_reschedule_request(struct vcpu *vcpu)
     send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
 }
 
-int need_reschedule(int pcpu_id)
+int need_reschedule(uint16_t pcpu_id)
 {
     return bitmap_test_and_clear(NEED_RESCHEDULE,
         &per_cpu(sched_ctx, pcpu_id).flags);
@@ -136,14 +136,14 @@ static void context_switch_in(struct vcpu *vcpu)
      */
 }
 
-void make_pcpu_offline(int pcpu_id)
+void make_pcpu_offline(uint16_t pcpu_id)
 {
     bitmap_set(NEED_OFFLINE,
         &per_cpu(sched_ctx, pcpu_id).flags);
     send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
 }
 
-int need_offline(int pcpu_id)
+int need_offline(uint16_t pcpu_id)
 {
     return bitmap_test_and_clear(NEED_OFFLINE,
         &per_cpu(sched_ctx, pcpu_id).flags);
@@ -151,7 +151,7 @@ int need_offline(int pcpu_id)
 
 void default_idle(void)
 {
-    int pcpu_id = get_cpu_id();
+    uint16_t pcpu_id = get_cpu_id();
 
     while (1) {
         if (need_reschedule(pcpu_id) != 0)
@@ -202,7 +202,7 @@ static void switch_to(struct vcpu *curr)
 
 void schedule(void)
 {
-    int pcpu_id = get_cpu_id();
+    uint16_t pcpu_id = get_cpu_id();
     struct vcpu *next = NULL;
     struct vcpu *prev = per_cpu(sched_ctx, pcpu_id).curr_vcpu;
 
@@ -153,9 +153,9 @@ int sbuf_put(struct shared_buf *sbuf, uint8_t *data)
     return sbuf->ele_size;
 }
 
-int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, uint64_t *hva)
+int sbuf_share_setup(uint16_t pcpu_id, uint32_t sbuf_id, uint64_t *hva)
 {
-    if (pcpu_id >= (uint32_t) phy_cpu_num ||
+    if (pcpu_id >= phy_cpu_num ||
         sbuf_id >= ACRN_SBUF_ID_MAX)
         return -EINVAL;
 
@@ -978,11 +978,15 @@ int shell_show_vmexit_profile(struct shell *p_shell,
 int shell_dump_logbuf(__unused struct shell *p_shell,
         int argc, char **argv)
 {
-    uint32_t pcpu_id;
+    uint16_t pcpu_id;
+    int val;
     int status = -EINVAL;
 
     if (argc == 2) {
-        pcpu_id = atoi(argv[1]);
+        val = atoi(argv[1]);
+        if (val < 0)
+            return status;
+        pcpu_id = (uint16_t)val;
         print_logmsg_buffer(pcpu_id);
         return 0;
     }
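The shell change shows the validate-then-narrow pattern this patch uses wherever an int (here from atoi()) must become a uint16_t: reject negative values first, then cast. Without the check, a negative argument would wrap modulo 2^16. A quick illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int val = atoi("-3");

        /* unchecked narrowing wraps: (uint16_t)-3 == 65533 */
        printf("%u\n", (unsigned int)(uint16_t)val);

        /* the patch's pattern: validate first, then cast */
        if (val < 0)
            return 1;
        printf("pcpu_id = %u\n", (unsigned int)(uint16_t)val);
        return 0;
    }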
@@ -253,7 +253,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
 /* Function prototypes */
 void cpu_dead(uint32_t logical_id);
 void trampoline_start16(void);
-int hv_main(int cpu_id);
+int hv_main(uint16_t cpu_id);
 bool is_vapic_supported(void);
 bool is_vapic_intr_delivery_supported(void);
 bool is_vapic_virt_reg_supported(void);
@@ -398,12 +398,12 @@ void start_cpus();
 }
 
 /* Macro to get CPU ID */
-static inline uint32_t get_cpu_id(void)
+static inline uint16_t get_cpu_id(void)
 {
     uint32_t tsl, tsh, cpu_id;
 
     asm volatile ("rdtscp":"=a" (tsl), "=d"(tsh), "=c"(cpu_id)::);
-    return cpu_id;
+    return (uint16_t)cpu_id;
 }
 
 static inline uint64_t cpu_rsp_get(void)
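get_cpu_id() works because RDTSCP returns the contents of the IA32_TSC_AUX MSR in ECX, and the hypervisor is assumed to program that MSR with the logical cpu id during boot, so truncating to uint16_t is lossless for ids in the valid range. A standalone sketch of the same pattern (x86-64, GCC/Clang inline asm; assumes IA32_TSC_AUX holds the cpu id on the machine it runs on):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint16_t cpu_id_from_rdtscp(void)
    {
        uint32_t tsl, tsh, cpu_id;

        /* RDTSCP: TSC in EDX:EAX, IA32_TSC_AUX in ECX */
        asm volatile ("rdtscp" : "=a"(tsl), "=d"(tsh), "=c"(cpu_id));
        (void)tsl;
        (void)tsh;
        return (uint16_t)cpu_id;
    }

    int main(void)
    {
        printf("running on cpu %u\n", cpu_id_from_rdtscp());
        return 0;
    }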
@@ -96,6 +96,16 @@
 #define CPUID_EXTEND_FUNCTION_4 0x80000004
 #define CPUID_EXTEND_ADDRESS_SIZE 0x80000008
 
+/* The pcpu id type is uint16_t.
+ * The broadcast id (BROADCAST_PCPU_ID)
+ * is used to notify all valid pcpus,
+ * and the invalid pcpu id (INVALID_PCPU_ID)
+ * is an error code for error handling.
+ */
+#define INVALID_PCPU_ID 0xffffU
+#define BROADCAST_PCPU_ID 0xfffeU
+
+
 static inline void __cpuid(uint32_t *eax, uint32_t *ebx,
     uint32_t *ecx, uint32_t *edx)
 {
@@ -97,7 +97,7 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa, uint32_t *err_code);
 
 struct vcpu *get_primary_vcpu(struct vm *vm);
 struct vcpu *vcpu_from_vid(struct vm *vm, int vcpu_id);
-struct vcpu *vcpu_from_pid(struct vm *vm, int pcpu_id);
+struct vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id);
 
 enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu);
 
@@ -259,8 +259,8 @@ struct vcpu {
 #define VCPU_RETAIN_RIP(vcpu) ((vcpu)->arch_vcpu.inst_len = 0)
 
 /* External Interfaces */
-struct vcpu* get_ever_run_vcpu(int pcpu_id);
-int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle);
+struct vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
+int create_vcpu(uint16_t cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle);
 int start_vcpu(struct vcpu *vcpu);
 int shutdown_vcpu(struct vcpu *vcpu);
 void destroy_vcpu(struct vcpu *vcpu);
@@ -269,7 +269,7 @@ void reset_vcpu(struct vcpu *vcpu);
 void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state);
 void resume_vcpu(struct vcpu *vcpu);
 void schedule_vcpu(struct vcpu *vcpu);
-int prepare_vcpu(struct vm *vm, int pcpu_id);
+int prepare_vcpu(struct vm *vm, uint16_t pcpu_id);
 
 void request_vcpu_pre_work(struct vcpu *vcpu, int pre_work_id);
 
@@ -57,7 +57,7 @@ int vlapic_pending_intr(struct vlapic *vlapic, uint32_t *vecptr);
 void vlapic_intr_accepted(struct vlapic *vlapic, uint32_t vector);
 
 struct vlapic *vm_lapic_from_vcpuid(struct vm *vm, int vcpu_id);
-struct vlapic *vm_lapic_from_pcpuid(struct vm *vm, int pcpu_id);
+struct vlapic *vm_lapic_from_pcpuid(struct vm *vm, uint16_t pcpu_id);
 bool vlapic_msr(uint32_t num);
 int vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval);
 int vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval);
@@ -115,7 +115,7 @@ int quick_handler_nolock(struct irq_desc *desc, void *handler_data);
 typedef int (*irq_handler_t)(struct irq_desc*, void*);
 void update_irq_handler(uint32_t irq, irq_handler_t func);
 
-int init_default_irqs(unsigned int cpu);
+int init_default_irqs(uint16_t cpu);
 
 void dispatch_interrupt(struct intr_excp_ctx *ctx);
 
@@ -158,14 +158,14 @@ struct lapic_regs {
 void write_lapic_reg32(uint32_t offset, uint32_t value);
 void save_lapic(struct lapic_regs *regs);
 int early_init_lapic(void);
-int init_lapic(uint32_t cpu_id);
+int init_lapic(uint16_t cpu_id);
 void send_lapic_eoi(void);
 uint32_t get_cur_lapic_id(void);
 int send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand,
     uint32_t cpu_startup_dest,
     uint64_t cpu_startup_start_address);
 /* API to send an IPI to a single guest */
-void send_single_ipi(uint32_t pcpu_id, uint32_t vector);
+void send_single_ipi(uint16_t pcpu_id, uint32_t vector);
 
 void suspend_lapic(void);
 void resume_lapic(void);
@@ -41,7 +41,7 @@ struct per_cpu_region {
 } __aligned(CPU_PAGE_SIZE); //per_cpu_region size aligned with CPU_PAGE_SIZE
 
 extern struct per_cpu_region *per_cpu_data_base_ptr;
-extern int phy_cpu_num;
+extern uint16_t phy_cpu_num;
 extern uint64_t pcpu_active_bitmap;
 /*
 * get percpu data for pcpu_id.
@@ -15,8 +15,8 @@
 /* used for atomic value for prevent recursive */
 #define SOFTIRQ_ATOMIC 63
 
-void enable_softirq(int cpu_id);
-void disable_softirq(int cpu_id);
+void enable_softirq(uint16_t cpu_id);
+void disable_softirq(uint16_t cpu_id);
 void init_softirq(void);
 void raise_softirq(int softirq_id);
 void exec_softirq(void);
@@ -54,7 +54,7 @@ static inline void initialize_timer(struct timer *timer,
 int add_timer(struct timer *timer);
 void del_timer(struct timer *timer);
 
-void timer_softirq(int pcpu_id);
+void timer_softirq(uint16_t pcpu_id);
 void timer_init(void);
 void timer_cleanup(void);
 void check_tsc(void);
@@ -401,15 +401,15 @@
 #define VMX_SUPPORT_UNRESTRICTED_GUEST (1U<<5)
 
 /* External Interfaces */
-int exec_vmxon_instr(uint32_t pcpu_id);
+int exec_vmxon_instr(uint16_t pcpu_id);
 uint64_t exec_vmread(uint32_t field);
 uint64_t exec_vmread64(uint32_t field_full);
 void exec_vmwrite(uint32_t field, uint64_t value);
 void exec_vmwrite64(uint32_t field_full, uint64_t value);
 int init_vmcs(struct vcpu *vcpu);
 
-int vmx_off(int pcpu_id);
-int vmx_restart(int pcpu_id);
+int vmx_off(uint16_t pcpu_id);
+int vmx_restart(uint16_t pcpu_id);
 
 int exec_vmclear(void *addr);
 int exec_vmptrld(void *addr);
@@ -67,7 +67,7 @@ extern spinlock_t ptdev_lock;
 extern struct ptdev_remapping_info invalid_entry;
 extern spinlock_t softirq_dev_lock;
 
-void ptdev_softirq(int cpu);
+void ptdev_softirq(__unused uint16_t cpu);
 void ptdev_init(void);
 void ptdev_release_all_entries(struct vm *vm);
 void get_ptdev_info(char *str, int str_max);
@@ -19,12 +19,12 @@ struct sched_context {
 };
 
 void init_scheduler(void);
-void get_schedule_lock(int pcpu_id);
-void release_schedule_lock(int pcpu_id);
+void get_schedule_lock(uint16_t pcpu_id);
+void release_schedule_lock(uint16_t pcpu_id);
 
-void set_pcpu_used(int pcpu_id);
+void set_pcpu_used(uint16_t pcpu_id);
 int allocate_pcpu(void);
-void free_pcpu(int pcpu_id);
+void free_pcpu(uint16_t pcpu_id);
 
 void add_vcpu_to_runqueue(struct vcpu *vcpu);
 void remove_vcpu_from_runqueue(struct vcpu *vcpu);
@@ -32,9 +32,9 @@ void remove_vcpu_from_runqueue(struct vcpu *vcpu);
 void default_idle(void);
 
 void make_reschedule_request(struct vcpu *vcpu);
-int need_reschedule(int pcpu_id);
-void make_pcpu_offline(int pcpu_id);
-int need_offline(int pcpu_id);
+int need_reschedule(uint16_t pcpu_id);
+void make_pcpu_offline(uint16_t pcpu_id);
+int need_offline(uint16_t pcpu_id);
 
 void schedule(void);
 
@@ -76,7 +76,7 @@ struct shared_buf *sbuf_allocate(uint32_t ele_num, uint32_t ele_size);
 void sbuf_free(struct shared_buf *sbuf);
 int sbuf_get(struct shared_buf *sbuf, uint8_t *data);
 int sbuf_put(struct shared_buf *sbuf, uint8_t *data);
-int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, uint64_t *hva);
+int sbuf_share_setup(uint16_t pcpu_id, uint32_t sbuf_id, uint64_t *hva);
 
 #else /* HV_DEBUG */
 
@@ -125,7 +125,7 @@ static inline int sbuf_put(
 }
 
 static inline int sbuf_share_setup(
-    __unused uint32_t pcpu_id,
+    __unused uint16_t pcpu_id,
     __unused uint32_t sbuf_id,
     __unused uint64_t *hva)
 {
@@ -73,7 +73,7 @@ struct trace_entry {
 } __attribute__((aligned(8)));
 
 static inline bool
-trace_check(int cpu_id, __unused int evid)
+trace_check(uint16_t cpu_id, __unused int evid)
 {
     if (cpu_id >= phy_cpu_num)
         return false;
@@ -85,7 +85,7 @@ trace_check(int cpu_id, __unused int evid)
 }
 
 static inline void
-_trace_put(int cpu_id, int evid, struct trace_entry *entry)
+_trace_put(uint16_t cpu_id, int evid, struct trace_entry *entry)
 {
     struct shared_buf *sbuf = (struct shared_buf *)
         per_cpu(sbuf, cpu_id)[ACRN_TRACE];
@@ -99,7 +99,7 @@ static inline void
 TRACE_2L(int evid, uint64_t e, uint64_t f)
 {
     struct trace_entry entry;
-    int cpu_id = get_cpu_id();
+    uint16_t cpu_id = get_cpu_id();
 
     if (!trace_check(cpu_id, evid))
         return;
@@ -114,7 +114,7 @@ TRACE_4I(int evid, uint32_t a, uint32_t b, uint32_t c,
     uint32_t d)
 {
     struct trace_entry entry;
-    int cpu_id = get_cpu_id();
+    uint16_t cpu_id = get_cpu_id();
 
     if (!trace_check(cpu_id, evid))
         return;
@@ -131,7 +131,7 @@ TRACE_6C(int evid, uint8_t a1, uint8_t a2, uint8_t a3,
     uint8_t a4, uint8_t b1, uint8_t b2)
 {
     struct trace_entry entry;
-    int cpu_id = get_cpu_id();
+    uint16_t cpu_id = get_cpu_id();
 
     if (!trace_check(cpu_id, evid))
         return;
@@ -152,7 +152,7 @@ static inline void
 TRACE_16STR(int evid, const char name[])
 {
     struct trace_entry entry;
-    int cpu_id = get_cpu_id();
+    uint16_t cpu_id = get_cpu_id();
     int len;
     int i;
 
@@ -147,10 +147,10 @@ struct acrn_create_vm {
 */
 struct acrn_create_vcpu {
     /** the virtual CPU ID for the VCPU created */
-    uint32_t vcpu_id;
+    uint16_t vcpu_id;
 
     /** the physical CPU ID for the VCPU created */
-    uint32_t pcpu_id;
+    uint16_t pcpu_id;
 } __aligned(8);
 
 /**
@@ -169,7 +169,7 @@ struct set_memmaps {
 */
 struct sbuf_setup_param {
     /** sbuf physical cpu id */
-    uint32_t pcpu_id;
+    uint16_t pcpu_id;
 
     /** sbuf id */
     uint32_t sbuf_id;