HV: treewide: fix violations of coding guideline C-TY-27 & C-TY-28

The coding guideline rules C-TY-27 and C-TY-28, combined, require that
assignment and arithmetic operations shall be applied only on operands of the
same kind. This patch either adds explicit type casts or adjusts the types of
variables to align the types of operands.

The only semantic change introduced by this patch is the promotion of the
second argument of set_vmcs_bit() and clear_vmcs_bit() to
uint64_t (formerly uint32_t). This prevents clear_vmcs_bit() from accidentally
clearing the upper 32 bits of the requested VMCS field.

Other than that, this patch has no semantic change. Specifically, this patch
is not meant to fix buggy narrowing operations, only to make these
operations explicit.

Tracked-On: #6776
Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Junjie Mao 2021-10-29 16:02:10 +08:00 committed by wenlingz
parent 2c86795fa8
commit 83a938bae6
27 changed files with 51 additions and 50 deletions

View File

@ -37,12 +37,12 @@ static uint8_t get_secondary_bus(uint8_t bus, uint8_t dev, uint8_t func)
return (data >> 8U) & 0xffU;
}
static union pci_bdf dmar_path_bdf(int32_t path_len, int32_t busno, const struct acpi_dmar_pci_path *path)
static union pci_bdf dmar_path_bdf(int32_t path_len, uint8_t busno, const struct acpi_dmar_pci_path *path)
{
int32_t i;
union pci_bdf dmar_bdf;
dmar_bdf.bits.b = (uint8_t)busno;
dmar_bdf.bits.b = busno;
dmar_bdf.bits.d = path->device;
dmar_bdf.bits.f = path->function;

View File

@ -16,7 +16,7 @@ static bool is_allocated_to_prelaunched_vm(struct pci_pdev *pdev)
{
bool found = false;
uint16_t vmid;
uint32_t pci_idx;
uint16_t pci_idx;
struct acrn_vm_config *vm_config;
struct acrn_vm_pci_dev_config *dev_config;

View File

@ -833,7 +833,7 @@ void ptirq_remove_msix_remapping(const struct acrn_vm *vm, uint16_t phys_bdf,
void ptirq_remove_configured_intx_remappings(const struct acrn_vm *vm)
{
const struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
uint32_t i;
uint16_t i;
for (i = 0; i < vm_config->pt_intx_num; i++) {
ptirq_remove_intx_remapping(vm, vm_config->pt_intx[i].virt_gsi, false);

View File

@ -207,7 +207,7 @@ static int32_t local_gva2gpa_pae(struct acrn_vcpu *vcpu, struct page_walk_info *
addr = get_pae_pdpt_addr(pw_info->top_entry);
base = (uint64_t *)gpa2hva(vcpu->vm, addr);
if (base != NULL) {
index = (gva >> 30U) & 0x3UL;
index = (uint32_t)gva >> 30U;
stac();
entry = base[index];
clac();

View File

@ -342,7 +342,7 @@ static bool prelaunched_vm_sleep_io_write(struct acrn_vcpu *vcpu, uint16_t addr,
* SLP_TYPx fields programmed with the values from the \_S5 object
*/
slp_type = (v >> 2U) & 0x7U;
slp_en = (v >> 5U) & 0x1U;
slp_en = ((v >> 5U) & 0x1U) != 0U;
if (slp_en && (slp_type == 5U)) {
get_vm_lock(vm);

View File

@ -37,7 +37,7 @@ uint64_t find_space_from_ve820(struct acrn_vm *vm, uint32_t size, uint64_t min_a
end = round_page_down(entry->baseaddr + entry->length);
length = (end > start) ? (end - start) : 0UL;
if ((entry->type == E820_TYPE_RAM) && (length >= round_size)
if ((entry->type == E820_TYPE_RAM) && (length >= (uint64_t)round_size)
&& (end > round_min_addr) && (start < round_max_addr)) {
if (((start >= min_addr) && ((start + round_size) <= min(end, round_max_addr)))
|| ((start < min_addr) && ((min_addr + round_size) <= min(end, round_max_addr)))) {
@ -137,7 +137,8 @@ static void filter_mem_from_service_vm_e820(struct acrn_vm *vm, uint64_t start_p
*/
void create_service_vm_e820(struct acrn_vm *vm)
{
uint16_t vm_id, i;
uint16_t vm_id;
uint32_t i;
uint64_t hv_start_pa = hva2hpa((void *)(get_hv_image_base()));
uint64_t hv_end_pa = hv_start_pa + get_hv_ram_size();
uint32_t entries_count = get_e820_entries_count();

View File

@ -671,8 +671,8 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
}
if (status == 0) {
uint32_t i;
for (i = 0; i < vm_config->pt_intx_num; i++) {
uint16_t i;
for (i = 0U; i < vm_config->pt_intx_num; i++) {
status = ptirq_add_intx_remapping(vm, vm_config->pt_intx[i].virt_gsi,
vm_config->pt_intx[i].phys_gsi, false);
if (status != 0) {

View File

@ -291,7 +291,7 @@ static void enable_msr_interception(uint8_t *bitmap, uint32_t msr_arg, uint32_t
}
msr &= 0x1FFFU;
msr_bit = 1U << (msr & 0x7U);
msr_bit = (uint8_t)(1U << (msr & 0x7U));
msr_index = msr >> 3U;
if ((mode & INTERCEPT_READ) == INTERCEPT_READ) {

View File

@ -275,8 +275,8 @@ static inline void fixup_idt(const struct host_idt_descriptor *idtd)
entry_hi_32 = idt_desc[i].rsvd;
idt_desc[i].rsvd = 0U;
idt_desc[i].offset_63_32 = entry_hi_32;
idt_desc[i].high32.bits.offset_31_16 = entry_lo_32 >> 16U;
idt_desc[i].low32.bits.offset_15_0 = entry_lo_32 & 0xffffUL;
idt_desc[i].high32.bits.offset_31_16 = (uint16_t)(entry_lo_32 >> 16U);
idt_desc[i].low32.bits.offset_15_0 = (uint16_t)entry_lo_32;
}
}

View File

@ -40,7 +40,7 @@
#include <logmsg.h>
#include <misc_cfg.h>
static uint32_t hv_ram_size;
static uint64_t hv_ram_size;
static void *ppt_mmu_pml4_addr;
static uint8_t sanitized_page[PAGE_SIZE] __aligned(PAGE_SIZE);
@ -152,7 +152,7 @@ void invept(const void *eptp)
}
}
uint32_t get_hv_ram_size(void)
uint64_t get_hv_ram_size(void)
{
return hv_ram_size;
}
@ -255,7 +255,7 @@ void init_paging(void)
const struct abi_mmap *p_mmap = abi->mmap_entry;
pr_dbg("HV MMU Initialization");
hv_ram_size = (uint32_t)(uint64_t)&ld_ram_size;
hv_ram_size = (uint64_t)&ld_ram_size;
init_sanitized_page((uint64_t *)sanitized_page, hva2hpa_early(sanitized_page));

View File

@ -500,7 +500,7 @@ static struct dmar_drhd_rt *ioapic_to_dmaru(uint16_t ioapic_id, union pci_bdf *s
dmar_unit = &dmar_drhd_units[j];
for (i = 0U; i < dmar_unit->drhd->dev_cnt; i++) {
if ((dmar_unit->drhd->devices[i].type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) &&
(dmar_unit->drhd->devices[i].id == ioapic_id)) {
((uint16_t)dmar_unit->drhd->devices[i].id == ioapic_id)) {
sid->fields.devfun = dmar_unit->drhd->devices[i].devfun;
sid->fields.bus = dmar_unit->drhd->devices[i].bus;
found = true;
@ -815,7 +815,7 @@ static void dmar_fault_handler(uint32_t irq, void *data)
{
struct dmar_drhd_rt *dmar_unit = (struct dmar_drhd_rt *)data;
uint32_t fsr;
uint32_t index;
uint16_t index;
uint32_t record_reg_offset;
struct dmar_entry fault_record;
int32_t loop = 0;

View File

@ -41,7 +41,7 @@
* but this should be configurable for different OS. */
#define DEFAULT_RAMDISK_GPA_MAX 0x37ffffffUL
#define PRE_VM_MAX_RAM_ADDR_BELOW_4GB (VIRT_ACPI_DATA_ADDR - 1U)
#define PRE_VM_MAX_RAM_ADDR_BELOW_4GB (VIRT_ACPI_DATA_ADDR - 1UL)
static void *get_initrd_load_addr(struct acrn_vm *vm, uint64_t kernel_start)
{
@ -190,7 +190,7 @@ static uint16_t create_service_vm_efi_mmap_desc(struct acrn_vm *vm, struct efi_m
uint16_t i, desc_idx = 0U;
const struct efi_memory_desc *hv_efi_mmap_desc = get_efi_mmap_entry();
for (i = 0U; i < get_efi_mmap_entries_count(); i++) {
for (i = 0U; i < (uint16_t)get_efi_mmap_entries_count(); i++) {
/* Below efi mmap desc types in native should be kept as original for Service VM */
if ((hv_efi_mmap_desc[i].type == EFI_RESERVED_MEMORYTYPE)
|| (hv_efi_mmap_desc[i].type == EFI_UNUSABLE_MEMORY)
@ -210,7 +210,7 @@ static uint16_t create_service_vm_efi_mmap_desc(struct acrn_vm *vm, struct efi_m
}
}
for (i = 0U; i < vm->e820_entry_num; i++) {
for (i = 0U; i < (uint16_t)vm->e820_entry_num; i++) {
/* The memory region with e820 type of RAM could be acted as EFI_CONVENTIONAL_MEMORY
* for Service VM, the region which occupied by HV and pre-launched VM has been filtered
* already, so it is safe for Service VM.

View File

@ -126,7 +126,7 @@ static void init_vm_bootargs_info(struct acrn_vm *vm, const struct acrn_boot_inf
*/
struct abi_module *get_mod_by_tag(const struct acrn_boot_info *abi, const char *tag)
{
uint8_t i;
uint32_t i;
struct abi_module *mod = NULL;
struct abi_module *mods = (struct abi_module *)(&abi->mods[0]);
uint32_t tag_len = strnlen_s(tag, MAX_MOD_TAG_LEN);

View File

@ -10,12 +10,12 @@
#include <efi_mmap.h>
#include <logmsg.h>
static uint16_t hv_memdesc_nr;
static uint32_t hv_memdesc_nr;
static struct efi_memory_desc hv_memdesc[CONFIG_MAX_EFI_MMAP_ENTRIES];
static void sort_efi_mmap_entries(void)
{
uint32_t i,j;
uint32_t i, j;
struct efi_memory_desc tmp_memdesc;
/* Bubble sort */

View File

@ -91,7 +91,7 @@ int32_t hcall_service_vm_offline_cpu(struct acrn_vcpu *vcpu, __unused struct acr
struct acrn_vcpu *target_vcpu;
uint16_t i;
int32_t ret = 0;
uint64_t lapicid = param1;
uint32_t lapicid = (uint32_t)param1;
pr_info("Service VM offline cpu with lapicid %ld", lapicid);
@ -208,7 +208,7 @@ int32_t hcall_get_platform_info(struct acrn_vcpu *vcpu, __unused struct acrn_vm
get_cache_shift(&pi.hw.l2_cat_shift, &pi.hw.l3_cat_shift);
for (i = 0U; i < min(pcpu_nums, ACRN_PLATFORM_LAPIC_IDS_MAX); i++) {
pi.hw.lapic_ids[i] = per_cpu(lapic_id, i);
pi.hw.lapic_ids[i] = (uint8_t)per_cpu(lapic_id, i);
}
pi.hw.cpu_num = pcpu_nums;

View File

@ -274,7 +274,7 @@ static inline bool vioapic_need_intr(const struct acrn_single_vioapic *vioapic,
union ioapic_rte rte;
bool ret = false;
if (pin < vioapic->chipinfo.nr_pins) {
if ((uint32_t)pin < vioapic->chipinfo.nr_pins) {
rte = vioapic->rtbl[pin];
lvl = (uint32_t)bitmap_test(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
ret = !!(((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_ALO) && (lvl == 0U)) ||

View File

@ -378,7 +378,7 @@ static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar)
pci_pdev_write_cfg(pbdf, offset, 4U, lo);
vbar->mask = size32 & mask;
vbar->bar_type.bits &= (~mask);
vbar->bar_type.bits &= (uint32_t)(~mask);
vbar->size = (uint64_t)size32 & mask;
if (is_prelaunched_vm(vpci2vm(vdev->vpci))) {

View File

@ -140,7 +140,7 @@ static void pci_vdev_update_vbar_base(struct pci_vdev *vdev, uint32_t idx)
* Currently, we don't support the reprogram of PIO bar of pass-thru devs,
* If guest tries to reprogram, hv will inject #GP to guest.
*/
if ((vdev->pdev != NULL) && ((lo & PCI_BASE_ADDRESS_IO_MASK) != vbar->base_hpa)) {
if ((vdev->pdev != NULL) && ((lo & PCI_BASE_ADDRESS_IO_MASK) != (uint32_t)vbar->base_hpa)) {
struct acrn_vcpu *vcpu = vcpu_from_pid(vpci2vm(vdev->vpci), get_pcpu_id());
if (vcpu != NULL) {
vcpu_inject_gp(vcpu, 0U);

View File

@ -49,7 +49,7 @@ static int32_t vmcs9900_mmio_handler(struct io_request *io_req, void *data)
struct pci_vbar *vbar = &vdev->vbars[MCS9900_MMIO_BAR];
uint16_t offset;
offset = mmio->address - vbar->base_gpa;
offset = (uint16_t)(mmio->address - vbar->base_gpa);
if (mmio->direction == ACRN_IOREQ_DIR_READ) {
mmio->value = vuart_read_reg(vu, offset);
@ -165,7 +165,7 @@ const struct pci_vdev_ops vmcs9900_ops = {
int32_t create_vmcs9900_vdev(struct acrn_vm *vm, struct acrn_vdev *dev)
{
uint32_t i;
uint16_t i;
struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
struct acrn_vm_pci_dev_config *dev_config = NULL;
int32_t ret = -EINVAL;

View File

@ -137,7 +137,7 @@ int32_t add_vmsix_capability(struct pci_vdev *vdev, uint32_t entry_num, uint8_t
(void)memset(&msixcap, 0U, sizeof(struct msixcap));
msixcap.capid = PCIY_MSIX;
msixcap.msgctrl = entry_num - 1U;
msixcap.msgctrl = (uint16_t)entry_num - 1U;
/* - MSI-X table start at offset 0 */
msixcap.table_info = bar_num;

View File

@ -119,15 +119,15 @@ static bool vpci_pio_cfgdata_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t
struct acrn_vpci *vpci = &vm->vpci;
union pci_cfg_addr_reg cfg_addr;
union pci_bdf bdf;
uint16_t offset = addr - PCI_CONFIG_DATA;
uint32_t val = ~0U;
struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
cfg_addr.value = atomic_readandclear32(&vpci->addr.value);
if (cfg_addr.bits.enable != 0U) {
if (pci_is_valid_access(cfg_addr.bits.reg_num + offset, bytes)) {
uint32_t offset = (uint16_t)cfg_addr.bits.reg_num + (addr - PCI_CONFIG_DATA);
if (pci_is_valid_access(offset, bytes)) {
bdf.value = cfg_addr.bits.bdf;
ret = vpci_read_cfg(vpci, bdf, cfg_addr.bits.reg_num + offset, bytes, &val);
ret = vpci_read_cfg(vpci, bdf, offset, bytes, &val);
}
}
@ -152,13 +152,13 @@ static bool vpci_pio_cfgdata_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t
struct acrn_vpci *vpci = &vm->vpci;
union pci_cfg_addr_reg cfg_addr;
union pci_bdf bdf;
uint16_t offset = addr - PCI_CONFIG_DATA;
cfg_addr.value = atomic_readandclear32(&vpci->addr.value);
if (cfg_addr.bits.enable != 0U) {
if (pci_is_valid_access(cfg_addr.bits.reg_num + offset, bytes)) {
uint32_t offset = (uint16_t)cfg_addr.bits.reg_num + (addr - PCI_CONFIG_DATA);
if (pci_is_valid_access(offset, bytes)) {
bdf.value = cfg_addr.bits.bdf;
ret = vpci_write_cfg(vpci, bdf, cfg_addr.bits.reg_num + offset, bytes, val);
ret = vpci_write_cfg(vpci, bdf, offset, bytes, val);
}
}
@ -672,7 +672,7 @@ struct pci_vdev *vpci_init_vdev(struct acrn_vpci *vpci, struct acrn_vm_pci_dev_c
*/
static int32_t vpci_init_vdevs(struct acrn_vm *vm)
{
uint32_t idx;
uint16_t idx;
struct acrn_vpci *vpci = &(vm->vpci);
const struct acrn_vm_config *vm_config = get_vm_config(vpci2vm(vpci)->vm_id);
int32_t ret = 0;
@ -843,7 +843,7 @@ uint32_t vpci_add_capability(struct pci_vdev *vdev, uint8_t *capdata, uint8_t ca
#define CAP_START_OFFSET PCI_CFG_HEADER_LENGTH
uint8_t capoff, reallen;
uint16_t sts;
uint32_t sts;
uint32_t ret = 0U;
reallen = roundup(caplen, 4U); /* dword aligned */

View File

@ -121,7 +121,7 @@ int32_t create_vrp(struct acrn_vm *vm, struct acrn_vdev *dev)
struct pci_vdev *vdev;
struct vrp_config *vrp_config;
int i;
uint16_t i;
vrp_config = (struct vrp_config*)dev->args;
@ -134,7 +134,7 @@ int32_t create_vrp(struct acrn_vm *vm, struct acrn_vdev *dev)
for (i = 0U; i < vm_config->pci_dev_num; i++) {
dev_config = &vm_config->pci_devs[i];
if (dev_config->vrp_sec_bus == vrp_config->secondary_bus) {
dev_config->vbdf.value = dev->slot;
dev_config->vbdf.value = (uint16_t)dev->slot;
dev_config->pbdf.value = vrp_config->phy_bdf;
dev_config->vrp_max_payload = vrp_config->max_payload;
dev_config->vdev_ops = &vrp_ops;

View File

@ -114,7 +114,7 @@ static void create_vf(struct pci_vdev *pf_vdev, union pci_bdf vf_bdf, uint16_t v
pr_err("PF %x:%x.%x can't creat VF, unset VF_ENABLE",
pf_vdev->bdf.bits.b, pf_vdev->bdf.bits.d, pf_vdev->bdf.bits.f);
} else {
uint16_t bar_idx;
uint32_t bar_idx;
struct pci_vbar *vf_vbar;
/* VF bars information from its PF SRIOV capability, no need to access physical device */

View File

@ -414,7 +414,7 @@ static uint32_t pci_check_override_drhd_index(union pci_bdf pbdf,
const struct pci_bdf_mapping_group *const bdfs_from_drhds,
uint32_t current_drhd_index)
{
uint16_t bdfi;
uint32_t bdfi;
uint32_t bdf_drhd_index = current_drhd_index;
for (bdfi = 0U; bdfi < bdfs_from_drhds->pci_bdf_map_count; bdfi++) {

View File

@ -41,7 +41,7 @@ static inline uint64_t apic_access_offset(uint64_t qual)
return (qual & APIC_ACCESS_OFFSET);
}
static inline void clear_vmcs_bit(uint32_t vmcs_field, uint32_t bit)
static inline void clear_vmcs_bit(uint32_t vmcs_field, uint64_t bit)
{
uint64_t val64;
@ -50,7 +50,7 @@ static inline void clear_vmcs_bit(uint32_t vmcs_field, uint32_t bit)
exec_vmwrite(vmcs_field, val64);
}
static inline void set_vmcs_bit(uint32_t vmcs_field, uint32_t bit)
static inline void set_vmcs_bit(uint32_t vmcs_field, uint64_t bit)
{
uint64_t val64;

View File

@ -183,7 +183,7 @@ void flush_vpid_global(void);
*/
void invept(const void *eptp);
uint32_t get_hv_ram_size(void);
uint64_t get_hv_ram_size(void);
/* get PDPT address from CR3 vaule in PAE mode */
static inline uint64_t get_pae_pdpt_addr(uint64_t cr3)

View File

@ -596,9 +596,9 @@
/* 5 high-order bits in every field are reserved */
#define PAT_FIELD_RSV_BITS (0xF8UL)
/* MSR_TEST_CTL bits */
#define MSR_TEST_CTL_GP_UCLOCK (1U << 28U)
#define MSR_TEST_CTL_AC_SPLITLOCK (1U << 29U)
#define MSR_TEST_CTL_DISABLE_LOCK_ASSERTION (1U << 31U)
#define MSR_TEST_CTL_GP_UCLOCK (1UL << 28U)
#define MSR_TEST_CTL_AC_SPLITLOCK (1UL << 29U)
#define MSR_TEST_CTL_DISABLE_LOCK_ASSERTION (1UL << 31U)
#ifndef ASSEMBLER
static inline bool is_pat_mem_type_invalid(uint64_t x)