Mirror of https://github.com/projectacrn/acrn-hypervisor.git, synced 2025-06-23 14:07:42 +00:00
HV:misc:fix "signed/unsigned conversion without cast"
MISRA C requires that signed/unsigned conversions be made with an explicit cast; this patch fixes the reported violations by using unsigned types and U/UL-suffixed integer constants.

V1->V2:
a. split the patch into a patch series

V2->V3:
a. change the suffix of constants used with uint64_t operands from U to UL

Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent 95736e659f
commit 2ffa69cb9a
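Every hunk below follows the same pattern: an object that holds an unsigned value gets an unsigned type, and integer literals compared against or assigned to unsigned objects get a U suffix (or UL when the other operand is uint64_t, per the V2->V3 note above). A minimal, self-contained C sketch of that idea follows; the helper and variable names are hypothetical illustrations, not taken from the ACRN sources.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not part of ACRN: pretend it reports an IRQ number. */
static uint32_t dev_to_irq_example(void)
{
	return 4U;
}

int main(void)
{
	/* Before the fix this would be "int phys_irq", forcing an implicit
	 * unsigned-to-signed conversion of the return value. */
	uint32_t phys_irq = dev_to_irq_example();
	uint64_t fire_tsc = 0UL;	/* 64-bit objects pair with UL literals */

	/* "phys_irq == 4" would mix an unsigned operand with a signed literal;
	 * the U suffix keeps both sides of the comparison unsigned. */
	if (phys_irq == 4U)
		fire_tsc = 1UL;

	/* Same rule for the 64-bit comparison: 0UL rather than 0. */
	if (fire_tsc != 0UL)
		printf("irq %u armed\n", phys_irq);

	return 0;
}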
@@ -136,7 +136,7 @@ lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
 static void
 ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
 {
-	int phys_irq = dev_to_irq(entry->node);
+	uint32_t phys_irq = dev_to_irq(entry->node);
 
 	if (entry->type == PTDEV_INTR_MSI) {
 		/* all other MSI and normal maskable */
@@ -177,7 +177,7 @@ ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
 static bool ptdev_hv_owned_intx(struct vm *vm, struct ptdev_intx_info *info)
 {
 	/* vm0 pin 4 (uart) is owned by hypervisor under debug version */
-	if (is_vm0(vm) && (vm->vuart != NULL) && info->virt_pin == 4)
+	if (is_vm0(vm) && (vm->vuart != NULL) && info->virt_pin == 4U)
 		return true;
 	else
 		return false;
@@ -224,7 +224,7 @@ static uint64_t ptdev_build_physical_rte(struct vm *vm,
 {
 	uint64_t rte;
 	int phys_irq = dev_to_irq(entry->node);
-	int vector = dev_to_vector(entry->node);
+	uint32_t vector = dev_to_vector(entry->node);
 
 	if (entry->ptdev_intr_info.intx.vpin_src == PTDEV_VPIN_IOAPIC) {
 		uint64_t vdmask, pdmask;
@@ -413,7 +413,7 @@ add_intx_remapping(struct vm *vm, uint8_t virt_pin,
 /* deactive & remove mapping entry of vpin for vm */
 static void remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin)
 {
-	int phys_irq;
+	uint32_t phys_irq;
 	struct ptdev_remapping_info *entry;
 	enum ptdev_vpin_source vpin_src =
 		pic_pin ? PTDEV_VPIN_PIC : PTDEV_VPIN_IOAPIC;
@@ -537,7 +537,7 @@ void ptdev_softirq(__unused uint16_t cpu_id)
 void ptdev_intx_ack(struct vm *vm, int virt_pin,
 		enum ptdev_vpin_source vpin_src)
 {
-	int phys_irq;
+	uint32_t phys_irq;
 	struct ptdev_remapping_info *entry;
 	int phys_pin;
 
@@ -612,7 +612,7 @@ int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
 
 	/* handle destroy case */
 	if (is_entry_active(entry) && info->vmsi_data == 0) {
-		info->pmsi_data = 0;
+		info->pmsi_data = 0U;
 		ptdev_deactivate_entry(entry);
 		goto END;
 	}
@@ -663,7 +663,7 @@ static void activate_physical_ioapic(struct vm *vm,
 		struct ptdev_remapping_info *entry)
 {
 	uint64_t rte;
-	int phys_irq = dev_to_irq(entry->node);
+	uint32_t phys_irq = dev_to_irq(entry->node);
 
 	/* disable interrupt */
 	GSI_MASK_IRQ(phys_irq);
@@ -9,12 +9,12 @@
 static inline struct vcpuid_entry *find_vcpuid_entry(struct vcpu *vcpu,
 					uint32_t leaf, uint32_t subleaf)
 {
-	int i = 0, nr, half;
+	uint32_t i = 0U, nr, half;
 	struct vcpuid_entry *entry = NULL;
 	struct vm *vm = vcpu->vm;
 
 	nr = vm->vcpuid_entry_nr;
-	half = nr / 2;
+	half = nr / 2U;
 	if (vm->vcpuid_entries[half].leaf < leaf)
 		i = half;
 
@@ -85,7 +85,7 @@ static void init_vcpuid_entry(__unused struct vm *vm,
 	entry->flags = flags;
 
 	switch (leaf) {
-	case 0x07:
+	case 0x07U:
 		if (subleaf == 0U) {
 			cpuid(leaf,
 				&entry->eax, &entry->ebx,
@@ -93,19 +93,19 @@ static void init_vcpuid_entry(__unused struct vm *vm,
 			/* mask invpcid */
 			entry->ebx &= ~CPUID_EBX_INVPCID;
 		} else {
-			entry->eax = 0;
-			entry->ebx = 0;
-			entry->ecx = 0;
-			entry->edx = 0;
+			entry->eax = 0U;
+			entry->ebx = 0U;
+			entry->ecx = 0U;
+			entry->edx = 0U;
 		}
 		break;
 
-	case 0x0a:
+	case 0x0aU:
 		/* not support pmu */
-		entry->eax = 0;
-		entry->ebx = 0;
-		entry->ecx = 0;
-		entry->edx = 0;
+		entry->eax = 0U;
+		entry->ebx = 0U;
+		entry->ecx = 0U;
+		entry->edx = 0U;
 		break;
 
 	/*
@@ -117,12 +117,12 @@ static void init_vcpuid_entry(__unused struct vm *vm,
 	 * hypervisor.
	 * EBX, ECX, EDX: Hypervisor vendor ID signature.
	 */
-	case 0x40000000:
+	case 0x40000000U:
	{
		static const char sig[12] = "ACRNACRNACRN";
		const uint32_t *sigptr = (const uint32_t *)sig;

-		entry->eax = 0x40000010;
+		entry->eax = 0x40000010U;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
@@ -138,11 +138,11 @@ static void init_vcpuid_entry(__unused struct vm *vm,
	 * TSC frequency is calculated from PIT in ACRN
	 * EBX, ECX, EDX: RESERVED (reserved fields are set to zero).
	 */
-	case 0x40000010:
+	case 0x40000010U:
		entry->eax = tsc_khz;
-		entry->ebx = 0;
-		entry->ecx = 0;
-		entry->edx = 0;
+		entry->ebx = 0U;
+		entry->ecx = 0U;
+		entry->edx = 0U;
		break;

	default:
@@ -166,13 +166,13 @@ int set_vcpuid_entries(struct vm *vm)
		return result;
	vm->vcpuid_level = limit = entry.eax;

-	for (i = 1; i <= limit; i++) {
+	for (i = 1U; i <= limit; i++) {
		/* cpuid 1/0xb is percpu related */
-		if (i == 1 || i == 0xb)
+		if (i == 1U || i == 0xbU)
			continue;

		switch (i) {
-		case 0x02:
+		case 0x02U:
		{
			uint32_t times;

@@ -183,7 +183,7 @@ int set_vcpuid_entries(struct vm *vm)
				return result;

			times = entry.eax & 0xffUL;
-			for (j = 1; j < times; j++) {
+			for (j = 1U; j < times; j++) {
				init_vcpuid_entry(vm, i, j,
					CPUID_CHECK_SUBLEAF, &entry);
				result = set_vcpuid_entry(vm, &entry);
@@ -193,17 +193,17 @@ int set_vcpuid_entries(struct vm *vm)
			break;
		}

-		case 0x04:
-		case 0x0d:
-			for (j = 0; ; j++) {
-				if (i == 0x0d && j == 64)
+		case 0x04U:
+		case 0x0dU:
+			for (j = 0U; ; j++) {
+				if (i == 0x0dU && j == 64U)
					break;

				init_vcpuid_entry(vm, i, j,
					CPUID_CHECK_SUBLEAF, &entry);
-				if (i == 0x04 && entry.eax == 0)
+				if (i == 0x04U && entry.eax == 0U)
					break;
-				if (i == 0x0d && entry.eax == 0)
+				if (i == 0x0dU && entry.eax == 0U)
					continue;
				result = set_vcpuid_entry(vm, &entry);
				if (result != 0)
@@ -236,7 +236,7 @@ int set_vcpuid_entries(struct vm *vm)
		return result;

	vm->vcpuid_xlevel = limit = entry.eax;
-	for (i = 0x80000001; i <= limit; i++) {
+	for (i = 0x80000001U; i <= limit; i++) {
		init_vcpuid_entry(vm, i, 0, 0, &entry);
		result = set_vcpuid_entry(vm, &entry);
		if (result != 0)
@@ -254,7 +254,7 @@ void guest_cpuid(struct vcpu *vcpu,
	uint32_t subleaf = *ecx;

	/* vm related */
-	if (leaf != 0x1 && leaf != 0xb && leaf != 0xd) {
+	if (leaf != 0x1U && leaf != 0xbU && leaf != 0xdU) {
		struct vcpuid_entry *entry =
			find_vcpuid_entry(vcpu, leaf, subleaf);

@@ -264,10 +264,10 @@ void guest_cpuid(struct vcpu *vcpu,
			*ecx = entry->ecx;
			*edx = entry->edx;
		} else {
-			*eax = 0;
-			*ebx = 0;
-			*ecx = 0;
-			*edx = 0;
+			*eax = 0U;
+			*ebx = 0U;
+			*ecx = 0U;
+			*edx = 0U;
		}

		return;
@@ -275,7 +275,7 @@ void guest_cpuid(struct vcpu *vcpu,

	/* percpu related */
	switch (leaf) {
-	case 0x01:
+	case 0x01U:
	{
		cpuid(leaf, eax, ebx, ecx, edx);
		uint32_t apicid = vlapic_get_id(vcpu->arch_vcpu.vlapic);
@@ -315,23 +315,23 @@ void guest_cpuid(struct vcpu *vcpu,
		break;
	}

-	case 0x0b:
+	case 0x0bU:
		/* Patching X2APIC */
		if (!x2apic_enabled) {
-			*eax = 0;
-			*ebx = 0;
-			*ecx = 0;
-			*edx = 0;
+			*eax = 0U;
+			*ebx = 0U;
+			*ecx = 0U;
+			*edx = 0U;
		} else
			cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
		break;

-	case 0x0d:
+	case 0x0dU:
		if (!cpu_has_cap(X86_FEATURE_OSXSAVE)) {
-			*eax = 0;
-			*ebx = 0;
-			*ecx = 0;
-			*edx = 0;
+			*eax = 0U;
+			*ebx = 0U;
+			*ecx = 0U;
+			*edx = 0U;
		} else
			cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
		break;

@@ -7,7 +7,7 @@
 #include <hypervisor.h>

 static void set_tss_desc(union tss_64_descriptor *desc,
-		void *tss, int tss_limit, int type)
+		void *tss, size_t tss_limit, int type)
 {
	uint32_t u1, u2, u3;

@@ -7,7 +7,7 @@
 struct run_context cpu_ctx;

 /* whether the host enter s3 success */
-uint8_t host_enter_s3_success = 1;
+uint8_t host_enter_s3_success = 1U;

 void restore_msrs(void)
 {
@@ -73,7 +73,7 @@ int add_timer(struct timer *timer)
	uint16_t pcpu_id;
	bool need_update;

-	if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0)
+	if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0UL)
		return -EINVAL;

	/* limit minimal periodic timer cycle period */
@@ -222,7 +222,7 @@ static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
 {
 #define PIT_TICK_RATE	1193182UL
 #define PIT_TARGET	0x3FFFU
-#define PIT_MAX_COUNT	0xFFFF
+#define PIT_MAX_COUNT	0xFFFFU

	uint16_t initial_pit;
	uint16_t current_pit;
@@ -269,13 +269,13 @@ static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
  */
 static uint64_t native_calibrate_tsc(void)
 {
-	if (boot_cpu_data.cpuid_level >= 0x15) {
+	if (boot_cpu_data.cpuid_level >= 0x15U) {
		uint32_t eax_denominator, ebx_numerator, ecx_hz, reserved;

		cpuid(0x15, &eax_denominator, &ebx_numerator,
			&ecx_hz, &reserved);

-		if (eax_denominator != 0 && ebx_numerator != 0)
+		if (eax_denominator != 0U && ebx_numerator != 0U)
			return (uint64_t) ecx_hz *
				ebx_numerator / eax_denominator;
	}
@@ -153,10 +153,10 @@ int vmexit_handler(struct vcpu *vcpu)
			if ((vector_info & VMX_INT_INFO_ERR_CODE_VALID) != 0U)
				err_code = exec_vmread(VMX_IDT_VEC_ERROR_CODE);
			vcpu_queue_exception(vcpu, vector, err_code);
-			vcpu->arch_vcpu.idt_vectoring_info = 0;
+			vcpu->arch_vcpu.idt_vectoring_info = 0U;
		} else if (type == VMX_INT_TYPE_NMI) {
			vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
-			vcpu->arch_vcpu.idt_vectoring_info = 0;
+			vcpu->arch_vcpu.idt_vectoring_info = 0U;
		}
	}

@@ -229,7 +229,7 @@ int cpuid_vmexit_handler(struct vcpu *vcpu)
		(uint32_t *)&cur_context->guest_cpu_regs.regs.rcx,
		(uint32_t *)&cur_context->guest_cpu_regs.regs.rdx);

-	TRACE_2L(TRACE_VMEXIT_CPUID, vcpu->vcpu_id, 0);
+	TRACE_2L(TRACE_VMEXIT_CPUID, vcpu->vcpu_id, 0UL);

	return 0;
 }
@@ -265,19 +265,19 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
	switch ((VM_EXIT_CR_ACCESS_ACCESS_TYPE
			(vcpu->arch_vcpu.exit_qualification) << 4) |
		VM_EXIT_CR_ACCESS_CR_NUM(vcpu->arch_vcpu.exit_qualification)) {
-	case 0x00:
+	case 0x00U:
		/* mov to cr0 */
		vmx_write_cr0(vcpu, *regptr);
		break;
-	case 0x04:
+	case 0x04U:
		/* mov to cr4 */
		vmx_write_cr4(vcpu, *regptr);
		break;
-	case 0x08:
+	case 0x08U:
		/* mov to cr8 */
		vlapic_set_cr8(vcpu->arch_vcpu.vlapic, *regptr);
		break;
-	case 0x18:
+	case 0x18U:
		/* mov from cr8 */
		*regptr = vlapic_get_cr8(vcpu->arch_vcpu.vlapic);
		break;
@@ -318,17 +318,17 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
	ctx_ptr = &(vcpu->arch_vcpu.contexts[idx]);

	/*to access XCR0,'rcx' should be 0*/
-	if (ctx_ptr->guest_cpu_regs.regs.rcx != 0) {
-		vcpu_inject_gp(vcpu, 0);
+	if (ctx_ptr->guest_cpu_regs.regs.rcx != 0UL) {
+		vcpu_inject_gp(vcpu, 0U);
		return -1;
	}

-	val64 = ((ctx_ptr->guest_cpu_regs.regs.rax) & 0xffffffff) |
-			(ctx_ptr->guest_cpu_regs.regs.rdx << 32);
+	val64 = ((ctx_ptr->guest_cpu_regs.regs.rax) & 0xffffffffUL) |
+			(ctx_ptr->guest_cpu_regs.regs.rdx << 32UL);

	/*bit 0(x87 state) of XCR0 can't be cleared*/
-	if ((val64 & 0x01UL) == 0U) {
-		vcpu_inject_gp(vcpu, 0);
+	if ((val64 & 0x01UL) == 0UL) {
+		vcpu_inject_gp(vcpu, 0U);
		return -1;
	}

@@ -336,8 +336,8 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
	 *set to 10b as it is necessary to set both bits
	 *to use AVX instructions.
	 **/
-	if (((val64 >> 1) & 0x3UL) == 0x2UL) {
-		vcpu_inject_gp(vcpu, 0);
+	if (((val64 >> 1UL) & 0x3UL) == 0x2UL) {
+		vcpu_inject_gp(vcpu, 0U);
		return -1;
	}

@@ -26,10 +26,10 @@ static struct dmar_dev_scope default_drhd_unit_dev_scope0[] = {

 static struct dmar_drhd drhd_info_array[] = {
	{
-		.dev_cnt = 1,
-		.segment = 0,
-		.flags = 0,
-		.reg_base_addr = 0xFED64000,
+		.dev_cnt = 1U,
+		.segment = 0U,
+		.flags = 0U,
+		.reg_base_addr = 0xFED64000UL,
		/* Ignore the iommu for intel graphic device since GVT-g needs
		 * vtd disabled for gpu
		 */
@@ -40,10 +40,10 @@ static struct dmar_drhd drhd_info_array[] = {
		/* No need to specify devices since
		 * DRHD_FLAG_INCLUDE_PCI_ALL_MASK set
		 */
-		.dev_cnt = 0,
-		.segment = 0,
+		.dev_cnt = 0U,
+		.segment = 0U,
		.flags = DRHD_FLAG_INCLUDE_PCI_ALL_MASK,
-		.reg_base_addr = 0xFED65000,
+		.reg_base_addr = 0xFED65000UL,
		.ignore = false,
		.devices = NULL,
	},
@@ -262,13 +262,13 @@ struct stack_canary {

 extern struct cpuinfo_x86 boot_cpu_data;

-#define MAX_PSTATE	20	/* max num of supported Px count */
-#define MAX_CSTATE	8	/* max num of supported Cx count */
+#define MAX_PSTATE	20U	/* max num of supported Px count */
+#define MAX_CSTATE	8U	/* max num of supported Cx count */

 /* We support MAX_CSTATE num of Cx, means have (MAX_CSTATE - 1) Cx entries,
  * i.e. supported Cx entry index range from 1 to MAX_CX_ENTRY.
  */
-#define MAX_CX_ENTRY	(MAX_CSTATE - 1)
+#define MAX_CX_ENTRY	(MAX_CSTATE - 1U)

 /* Function prototypes */
 void cpu_dead(uint32_t logical_id);
@@ -210,7 +210,7 @@
 #define MMU_MEM_ATTR_TYPE_MASK \
		(IA32E_PDPTE_PAT_BIT | IA32E_COMM_PCD_BIT | IA32E_COMM_PWT_BIT)

-#define ROUND_PAGE_UP(addr)	(((addr) + CPU_PAGE_SIZE - 1) & CPU_PAGE_MASK)
+#define ROUND_PAGE_UP(addr)	(((addr) + CPU_PAGE_SIZE - 1U) & CPU_PAGE_MASK)
 #define ROUND_PAGE_DOWN(addr)	((addr) & CPU_PAGE_MASK)

 enum _page_table_type {
@@ -329,12 +329,12 @@ struct e820_entry {
 #pragma pack()

 /* E820 memory types */
-#define E820_TYPE_RAM		1	/* EFI 1, 2, 3, 4, 5, 6, 7 */
-#define E820_TYPE_RESERVED	2
+#define E820_TYPE_RAM		1U	/* EFI 1, 2, 3, 4, 5, 6, 7 */
+#define E820_TYPE_RESERVED	2U
 /* EFI 0, 11, 12, 13 (everything not used elsewhere) */
-#define E820_TYPE_ACPI_RECLAIM	3	/* EFI 9 */
-#define E820_TYPE_ACPI_NVS	4	/* EFI 10 */
-#define E820_TYPE_UNUSABLE	5	/* EFI 8 */
+#define E820_TYPE_ACPI_RECLAIM	3U	/* EFI 9 */
+#define E820_TYPE_ACPI_NVS	4U	/* EFI 10 */
+#define E820_TYPE_UNUSABLE	5U	/* EFI 8 */

 /** Calculates the page table address for a given address.
  *