HV:treewide:Update return type of function ffs64 and ffz64

To reduce type conversions in HV:
Update the return type of ffs64 and ffz64 to uint16_t;
For ffs64, when the input is zero, INVALID_BIT_INDEX is returned;
Update the temporary variable types and return value checks of
callers when they call ffs64 or ffz64.

Note: In allocate_mem, there is no return value check on the
ffz64 call; this will be updated later.
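
As a sketch of the updated caller pattern (illustrative only; the
mask value below is hypothetical, the helpers are the existing HV
ones):

	uint64_t mask = 0x15UL;	/* example bitmap: bits 0, 2 and 4 set */
	uint16_t id;

	while ((id = ffs64(mask)) != INVALID_BIT_INDEX) {
		bitmap_clear(id, &mask);	/* clear the bit just found */
		/* ... handle id ... */
	}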

V1-->V2:
        Use INVALID_BIT_INDEX instead of INVALID_NUMBER;
        Coding style fixes;
        Use INVALID_CPU_ID instead of INVALID_PCPU_ID or INVALID_VCPU_ID;
        Use "%hu" to print vcpu id (uint16_t);
        Add "U"/"UL" suffixes to constants as needed.
V2-->V3:
        ffs64 returns INVALID_BIT_INDEX directly when
        the input value is zero;
        Remove excess "%hu" updates.
V3-->V4:
        Clean up the comments of ffs64;
        Add "U" suffixes to constants as needed.

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Xiangyang Wu 2018-07-02 14:05:58 +08:00 committed by lijinxia
parent db01efa047
commit a97593e7db
14 changed files with 68 additions and 57 deletions


@@ -75,11 +75,11 @@ inline struct vcpu *get_primary_vcpu(struct vm *vm)
 inline uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
 {
-	int vcpu_id;
+	uint16_t vcpu_id;
 	uint64_t dmask = 0;
 	struct vcpu *vcpu;
 
-	while ((vcpu_id = ffs64(vdmask)) >= 0) {
+	while ((vcpu_id = ffs64(vdmask)) != INVALID_BIT_INDEX) {
 		bitmap_clear(vcpu_id, &vdmask);
 		vcpu = vcpu_from_vid(vm, vcpu_id);
 		ASSERT(vcpu != NULL, "vcpu_from_vid failed");


@@ -880,7 +880,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
 	uint32_t dfr, ldr, ldest, cluster;
 	uint32_t mda_flat_ldest, mda_cluster_ldest, mda_ldest, mda_cluster_id;
 	uint64_t amask;
-	int vcpu_id;
+	uint16_t vcpu_id;
 
 	if (dest == 0xff) {
 		/*
@@ -918,7 +918,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
 	 */
 	*dmask = 0;
 	amask = vm_active_cpus(vm);
-	while ((vcpu_id = ffs64(amask)) >= 0) {
+	while ((vcpu_id = ffs64(amask)) != INVALID_BIT_INDEX) {
 		bitmap_clear(vcpu_id, &amask);
 		vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
@@ -1021,7 +1021,7 @@ vlapic_get_cr8(struct vlapic *vlapic)
 static int
 vlapic_icrlo_write_handler(struct vlapic *vlapic)
 {
-	int i;
+	uint16_t vcpu_id;
 	bool phys;
 	uint64_t dmask = 0;
 	uint64_t icrval;
@@ -1071,9 +1071,9 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic)
 		break;
 	}
 
-	while ((i = ffs64(dmask)) >= 0) {
-		bitmap_clear(i, &dmask);
-		target_vcpu = vcpu_from_vid(vlapic->vm, i);
+	while ((vcpu_id = ffs64(dmask)) != INVALID_BIT_INDEX) {
+		bitmap_clear(vcpu_id, &dmask);
+		target_vcpu = vcpu_from_vid(vlapic->vm, vcpu_id);
 		if (target_vcpu == NULL)
 			continue;
@@ -1081,19 +1081,19 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic)
 			vlapic_set_intr(target_vcpu, vec,
 				LAPIC_TRIG_EDGE);
 			dev_dbg(ACRN_DBG_LAPIC,
-				"vlapic sending ipi %d to vcpu_id %d",
-				vec, i);
+				"vlapic sending ipi %d to vcpu_id %hu",
+				vec, vcpu_id);
 		} else if (mode == APIC_DELMODE_NMI){
 			vcpu_inject_nmi(target_vcpu);
 			dev_dbg(ACRN_DBG_LAPIC,
-				"vlapic send ipi nmi to vcpu_id %d", i);
+				"vlapic send ipi nmi to vcpu_id %hu", vcpu_id);
 		} else if (mode == APIC_DELMODE_INIT) {
 			if ((icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT)
 				continue;
 
 			dev_dbg(ACRN_DBG_LAPIC,
-				"Sending INIT from VCPU %d to %d",
-				vlapic->vcpu->vcpu_id, i);
+				"Sending INIT from VCPU %d to %hu",
+				vlapic->vcpu->vcpu_id, vcpu_id);
 
 			/* put target vcpu to INIT state and wait for SIPI */
 			pause_vcpu(target_vcpu, VCPU_PAUSED);
@@ -1110,8 +1110,8 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic)
 				continue;
 
 			dev_dbg(ACRN_DBG_LAPIC,
-				"Sending SIPI from VCPU %d to %d with vector %d",
-				vlapic->vcpu->vcpu_id, i, vec);
+				"Sending SIPI from VCPU %d to %hu with vector %d",
+				vlapic->vcpu->vcpu_id, vcpu_id, vec);
 
 			if (--target_vcpu->arch_vcpu.nr_sipi > 0)
 				continue;
@@ -1586,7 +1586,7 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
 	int delmode, int vec)
 {
 	bool lowprio;
-	int vcpu_id;
+	uint16_t vcpu_id;
 	uint64_t dmask;
 	struct vcpu *target_vcpu;
@@ -1606,7 +1606,7 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
 	 */
 	vlapic_calcdest(vm, &dmask, dest, phys, lowprio);
 
-	while ((vcpu_id = ffs64(dmask)) >= 0) {
+	while ((vcpu_id = ffs64(dmask)) != INVALID_BIT_INDEX) {
 		bitmap_clear(vcpu_id, &dmask);
 		target_vcpu = vcpu_from_vid(vm, vcpu_id);
 		if (target_vcpu == NULL)
@@ -1738,21 +1738,21 @@ vlapic_set_intr(struct vcpu *vcpu, uint32_t vector, bool level)
 }
 
 int
-vlapic_set_local_intr(struct vm *vm, int vcpu_id, uint32_t vector)
+vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id, uint32_t vector)
 {
 	struct vlapic *vlapic;
 	uint64_t dmask = 0;
 	int error;
 
-	if (vcpu_id < -1 || vcpu_id >= phys_cpu_num)
+	if ((vcpu_id != BROADCAST_CPU_ID) && (vcpu_id >= phys_cpu_num))
 		return -EINVAL;
 
-	if (vcpu_id == -1)
+	if (vcpu_id == BROADCAST_CPU_ID)
 		dmask = vm_active_cpus(vm);
 	else
 		bitmap_set(vcpu_id, &dmask);
 
 	error = 0;
-	while ((vcpu_id = ffs64(dmask)) >= 0) {
+	while ((vcpu_id = ffs64(dmask)) != INVALID_BIT_INDEX) {
 		bitmap_clear(vcpu_id, &dmask);
 		vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
 		error = vlapic_trigger_lvt(vlapic, vector);


@@ -231,7 +231,7 @@ static void vpic_notify_intr(struct vpic *vpic)
 		ASSERT(vcpu != NULL, "vm%d, vcpu0", vpic->vm->attr.id);
 		vcpu_inject_extint(vcpu);
 	} else {
-		vlapic_set_local_intr(vpic->vm, BROADCAST_PCPU_ID, APIC_LVT_LINT0);
+		vlapic_set_local_intr(vpic->vm, BROADCAST_CPU_ID, APIC_LVT_LINT0);
 		/* notify vioapic pin0 if existing
 		 * For vPIC + vIOAPIC mode, vpic master irq connected
 		 * to vioapic pin0 (irq2)


@@ -42,7 +42,7 @@ void exec_softirq(void)
 	uint16_t cpu_id = get_cpu_id();
 	volatile uint64_t *bitmap = &per_cpu(softirq_pending, cpu_id);
-	int softirq_id;
+	uint16_t softirq_id;
 
 	if (cpu_id >= phys_cpu_num)
 		return;
@@ -61,7 +61,7 @@ again:
 	while (1) {
 		softirq_id = ffs64(*bitmap);
-		if ((softirq_id < 0) || (softirq_id >= SOFTIRQ_MAX))
+		if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX))
 			break;
 
 		bitmap_clear(softirq_id, bitmap);


@@ -220,7 +220,7 @@ int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
 	}
 
 	pcpu_id = allocate_pcpu();
-	if (INVALID_PCPU_ID == pcpu_id) {
+	if (pcpu_id == INVALID_CPU_ID) {
 		pr_err("%s: No physical available\n", __func__);
 		return -1;
 	}


@@ -41,7 +41,7 @@ uint16_t allocate_pcpu(void)
 			return i;
 	}
 
-	return INVALID_PCPU_ID;
+	return INVALID_CPU_ID;
 }
 
 void set_pcpu_used(uint16_t pcpu_id)


@@ -211,6 +211,20 @@ enum feature_word {
 	FEATURE_WORDS,
 };
 
+/**
+ * The invalid cpu id (INVALID_CPU_ID) is an error
+ * code for error handling; it means that the
+ * caller can't find a valid physical cpu
+ * or virtual cpu.
+ */
+#define INVALID_CPU_ID 0xffffU
+/**
+ * The broadcast id (BROADCAST_CPU_ID) is
+ * used to notify all valid physical cpus
+ * or virtual cpus.
+ */
+#define BROADCAST_CPU_ID 0xfffeU
+
 /* CPU states defined */
 enum cpu_state {
 	CPU_STATE_RESET = 0,
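
A minimal sketch of how callers are expected to use the two ids
(the calls mirror the hunks above; vm and the surrounding function
context are assumed):

	uint16_t pcpu_id = allocate_pcpu();

	if (pcpu_id == INVALID_CPU_ID) {
		/* no free physical cpu, report the error */
		pr_err("%s: No physical available\n", __func__);
		return -1;
	}

	/* BROADCAST_CPU_ID targets every active cpu instead of one */
	vlapic_set_local_intr(vm, BROADCAST_CPU_ID, APIC_LVT_LINT0);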


@@ -96,15 +96,6 @@
 #define CPUID_EXTEND_FUNCTION_4 0x80000004
 #define CPUID_EXTEND_ADDRESS_SIZE 0x80000008
 
-/**pcpu id type is uint16_t,
- *The broadcast id (BROADCAST_PCPU_ID)
- * used to notify all valid pcpu,
- *the invalid pcpu id (INVALID_PCPU_ID) is error
- *code for error handling.
- */
-#define INVALID_PCPU_ID 0xffffU
-#define BROADCAST_PCPU_ID 0xfffeU
-
 static inline void __cpuid(uint32_t *eax, uint32_t *ebx,
 			uint32_t *ecx, uint32_t *edx)


@@ -89,7 +89,7 @@ vlapic_intr_edge(struct vcpu *vcpu, uint32_t vector)
  * Triggers the LAPIC local interrupt (LVT) 'vector' on 'cpu'. 'cpu' can
  * be set to -1 to trigger the interrupt on all CPUs.
  */
-int vlapic_set_local_intr(struct vm *vm, int vcpu_id, uint32_t vector);
+int vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id, uint32_t vector);
 
 int vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg);


@@ -7,9 +7,9 @@
 #ifndef SOFTIRQ_H
 #define SOFTIRQ_H
 
-#define SOFTIRQ_TIMER 0
-#define SOFTIRQ_DEV_ASSIGN 1
-#define SOFTIRQ_MAX 2
+#define SOFTIRQ_TIMER 0U
+#define SOFTIRQ_DEV_ASSIGN 1U
+#define SOFTIRQ_MAX 2U
 #define SOFTIRQ_MASK ((1UL<<SOFTIRQ_MAX)-1)
 
 /* used for atomic value for prevent recursive */
/* used for atomic value for prevent recursive */


@@ -91,10 +91,11 @@ static inline uint16_t fls64(uint64_t value)
  * and return the index of that bit.
  *
  * Bits are numbered starting at 0, the least significant bit.
- * A return value of -1 means that the argument was zero.
+ * A return value of INVALID_BIT_INDEX means that the input
+ * argument was zero, so no set bit could be found.
 *
  * Examples:
- *	ffs64 (0x0) = -1
+ *	ffs64 (0x0) = INVALID_BIT_INDEX
 *	ffs64 (0x01) = 0
 *	ffs64 (0xf0) = 4
 *	ffs64 (0xf00) = 8
@@ -104,20 +105,24 @@ static inline uint16_t fls64(uint64_t value)
 *
 * @param value: 'unsigned long' type value
 *
- * @return value: zero-based bit index, -1 means 'value' was zero.
+ * @return value: zero-based bit index; INVALID_BIT_INDEX means
+ * 'value' was zero, so the bit scan could not find a set bit
+ * and the invalid bit index is returned directly.
 *
 **/
-static inline int ffs64(unsigned long value)
+static inline uint16_t ffs64(uint64_t value)
 {
-	int ret;
-	asm volatile("bsfq %1,%q0"
+	uint64_t ret = 0UL;
+	if (value == 0UL)
+		return (INVALID_BIT_INDEX);
+	asm volatile("bsfq %1,%0"
 		: "=r" (ret)
-		: "rm" (value), "0" (-1));
-	return ret;
+		: "rm" (value));
+	return (uint16_t)ret;
 }
 
 /*bit scan forward for the least significant bit '0'*/
-static inline int ffz64(unsigned long value)
+static inline uint16_t ffz64(uint64_t value)
 {
 	return ffs64(~value);
 }
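
A quick sanity sketch of the new semantics (illustrative values,
not part of the patch):

	ffs64(0x0UL);	/* INVALID_BIT_INDEX: no bit is set */
	ffs64(0xf0UL);	/* 4: index of the lowest set bit */
	ffz64(0x3UL);	/* 2: index of the lowest clear bit */
	ffz64(~0x0UL);	/* INVALID_BIT_INDEX: no bit is clear */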


@@ -8,7 +8,7 @@
 #define __MEM_MGT_H__
 
 /* Macros */
-#define BITMAP_WORD_SIZE 32
+#define BITMAP_WORD_SIZE 32U
 
 struct mem_pool {
 	void *start_addr; /* Start Address of Memory Pool */


@@ -105,7 +105,7 @@ int udiv64(uint64_t dividend, uint64_t divisor, struct udiv_result *res)
 		}
 		divisor >>= 1;
 		mask >>= 1;
-	} while ((bits-- != 0) && (dividend != 0));
+	} while ((bits-- != 0UL) && (dividend != 0));
 
 	res->r.qword = dividend;
 	return 0;


@@ -58,9 +58,10 @@ struct mem_pool Paging_Memory_Pool = {
 static void *allocate_mem(struct mem_pool *pool, unsigned int num_bytes)
 {
-	void *memory = NULL;
-	uint32_t idx, bit_idx;
-	uint32_t requested_buffs;
+	void *memory = NULL;
+	uint32_t idx;
+	uint16_t bit_idx;
+	uint32_t requested_buffs;
 
 	/* Check if provided memory pool exists */
 	if (pool == NULL)
@@ -85,9 +86,9 @@ static void *allocate_mem(struct mem_pool *pool, unsigned int num_bytes)
 			/* Declare temporary variables to be used locally in
 			 * this block
 			 */
-			uint32_t i;
-			uint32_t tmp_bit_idx = bit_idx;
-			uint32_t tmp_idx = idx;
+			uint32_t i;
+			uint16_t tmp_bit_idx = bit_idx;
+			uint32_t tmp_idx = idx;
 
 			/* Check requested_buffs number of buffers availability
 			 * in memory-pool right after selected buffer
@@ -101,7 +102,7 @@ static void *allocate_mem(struct mem_pool *pool, unsigned int num_bytes)
 				if (++tmp_idx == pool->bmp_size)
 					break;
 
 				/* Reset tmp_bit_idx */
-				tmp_bit_idx = 0;
+				tmp_bit_idx = 0U;
 			}
 
 			/* Break if selected buffer is not free */
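
For reference, the missing ffz64 check called out in the commit
message would look roughly like this (a sketch of the later fix;
the bitmap field name is an assumption, not taken from this patch):

	bit_idx = ffz64(pool->bitmap[idx]);
	if (bit_idx == INVALID_BIT_INDEX) {
		/* no clear bit in this word, try the next one */
		continue;
	}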