HV: Rename functions, variables starting with "_"

In order to comply with MISRA C rules, renamed variables
and function names starting with "_".
The major changes involve mostly static function
names, as they are being called inside the same file
by a wrapper function.

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
This commit is contained in:
Arindam Roy
2018-08-01 13:42:40 -07:00
committed by lijinxia
parent a71dedecd4
commit 37026590c9
14 changed files with 51 additions and 51 deletions

View File

@@ -56,7 +56,7 @@ is_entry_active(struct ptdev_remapping_info *entry)
/* require ptdev_lock protect */
static inline struct ptdev_remapping_info *
_lookup_entry_by_id(uint32_t id)
local_lookup_entry_by_id(uint32_t id)
{
struct ptdev_remapping_info *entry;
struct list_head *pos;
@@ -74,7 +74,7 @@ _lookup_entry_by_id(uint32_t id)
/* require ptdev_lock protect */
static inline struct ptdev_remapping_info *
_lookup_entry_by_vmsi(struct vm *vm, uint16_t vbdf, uint32_t index)
local_lookup_entry_by_vmsi(struct vm *vm, uint16_t vbdf, uint32_t index)
{
struct ptdev_remapping_info *entry;
struct list_head *pos;
@@ -100,14 +100,14 @@ lookup_entry_by_vmsi(struct vm *vm, uint16_t vbdf, uint32_t index)
struct ptdev_remapping_info *entry;
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_vmsi(vm, vbdf, index);
entry = local_lookup_entry_by_vmsi(vm, vbdf, index);
spinlock_release(&ptdev_lock);
return entry;
}
/* require ptdev_lock protect */
static inline struct ptdev_remapping_info *
_lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
local_lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
enum ptdev_vpin_source vpin_src)
{
struct ptdev_remapping_info *entry;
@@ -134,7 +134,7 @@ lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
struct ptdev_remapping_info *entry;
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_vintx(vm, vpin, vpin_src);
entry = local_lookup_entry_by_vintx(vm, vpin, vpin_src);
spinlock_release(&ptdev_lock);
return entry;
}
@@ -307,10 +307,10 @@ add_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
struct ptdev_remapping_info *entry;
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_id(
entry = local_lookup_entry_by_id(
entry_id_from_msix(phys_bdf, msix_entry_index));
if (entry == NULL) {
if (_lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index) != NULL) {
if (local_lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index) != NULL) {
pr_err("MSIX re-add vbdf%x", virt_bdf);
spinlock_release(&ptdev_lock);
@@ -357,7 +357,7 @@ remove_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint32_t msix_entry_inde
struct ptdev_remapping_info *entry;
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index);
entry = local_lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index);
if (entry == NULL) {
goto END;
}
@@ -393,9 +393,9 @@ add_intx_remapping(struct vm *vm, uint8_t virt_pin,
pic_pin ? PTDEV_VPIN_PIC : PTDEV_VPIN_IOAPIC;
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_id(entry_id_from_intx(phys_pin));
entry = local_lookup_entry_by_id(entry_id_from_intx(phys_pin));
if (entry == NULL) {
if (_lookup_entry_by_vintx(vm, virt_pin, vpin_src) != NULL) {
if (local_lookup_entry_by_vintx(vm, virt_pin, vpin_src) != NULL) {
pr_err("INTX re-add vpin %d", virt_pin);
spinlock_release(&ptdev_lock);
return &invalid_entry;
@@ -445,7 +445,7 @@ static void remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin)
pic_pin ? PTDEV_VPIN_PIC : PTDEV_VPIN_IOAPIC;
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_vintx(vm, virt_pin, vpin_src);
entry = local_lookup_entry_by_vintx(vm, virt_pin, vpin_src);
if (entry == NULL) {
goto END;
}

View File

@@ -116,7 +116,7 @@ void destroy_ept(struct vm *vm)
}
}
uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
{
uint64_t hpa = 0UL;
uint64_t *pgentry, pg_size = 0UL;
@@ -142,7 +142,7 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
/* using return value 0 as failure, make sure guest will not use hpa 0 */
uint64_t gpa2hpa(struct vm *vm, uint64_t gpa)
{
return _gpa2hpa(vm, gpa, NULL);
return local_gpa2hpa(vm, gpa, NULL);
}
uint64_t hpa2gpa(struct vm *vm, uint64_t hpa)

View File

@@ -134,7 +134,7 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
/* TODO: Add code to check for Revserved bits, SMAP and PKE when do translation
* during page walk */
static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
static int local_gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
uint64_t gva, uint64_t *gpa, uint32_t *err_code)
{
uint32_t i;
@@ -220,7 +220,7 @@ out:
return ret;
}
static int _gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
static int local_gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
uint64_t gva, uint64_t *gpa, uint32_t *err_code)
{
int index;
@@ -246,7 +246,7 @@ static int _gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
pw_info->level = 2U;
pw_info->top_entry = entry;
ret = _gva2gpa_common(vcpu, pw_info, gva, gpa, err_code);
ret = local_gva2gpa_common(vcpu, pw_info, gva, gpa, err_code);
out:
return ret;
@@ -298,15 +298,15 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
if (pm == PAGING_MODE_4_LEVEL) {
pw_info.width = 9U;
ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
ret = local_gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
} else if(pm == PAGING_MODE_3_LEVEL) {
pw_info.width = 9U;
ret = _gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);
ret = local_gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);
} else if (pm == PAGING_MODE_2_LEVEL) {
pw_info.width = 10U;
pw_info.pse = ((cur_context->cr4 & CR4_PSE) != 0UL);
pw_info.nxe = false;
ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
ret = local_gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
} else {
*gpa = gva;
}
@@ -320,14 +320,14 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
return ret;
}
static inline uint32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
static inline uint32_t local_copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
uint32_t size, uint32_t fix_pg_size, bool cp_from_vm)
{
uint64_t hpa;
uint32_t offset_in_pg, len, pg_size;
void *g_ptr;
hpa = _gpa2hpa(vm, gpa, &pg_size);
hpa = local_gpa2hpa(vm, gpa, &pg_size);
if (pg_size == 0U) {
pr_err("GPA2HPA not found");
return 0;
@@ -366,7 +366,7 @@ static inline int copy_gpa(struct vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
}
while (size > 0U) {
len = _copy_gpa(vm, h_ptr, gpa, size, 0U, cp_from_vm);
len = local_copy_gpa(vm, h_ptr, gpa, size, 0U, cp_from_vm);
if (len == 0U) {
return -EINVAL;
}
@@ -406,7 +406,7 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
return ret;
}
len = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
len = local_copy_gpa(vcpu->vm, h_ptr, gpa, size,
PAGE_SIZE_4K, cp_from_vm);
if (len == 0U) {

View File

@@ -2151,7 +2151,7 @@ decode_moffset(struct instr_emul_vie *vie)
}
int
__decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct instr_emul_vie *vie)
local_decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct instr_emul_vie *vie)
{
if (decode_prefixes(vie, cpu_mode, cs_d) != 0) {
return -1;

View File

@@ -89,7 +89,7 @@ int vie_init(struct instr_emul_vie *vie, struct vcpu *vcpu);
*/
#define VIE_INVALID_GLA (1UL << 63) /* a non-canonical address */
int
__decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct instr_emul_vie *vie);
local_decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct instr_emul_vie *vie);
int emulate_instruction(struct vcpu *vcpu);
int decode_instruction(struct vcpu *vcpu);

View File

@@ -358,7 +358,7 @@ int decode_instruction(struct vcpu *vcpu)
get_guest_paging_info(vcpu, emul_ctxt, csar);
cpu_mode = get_vcpu_mode(vcpu);
retval = __decode_instruction(cpu_mode, SEG_DESC_DEF32(csar),
retval = local_decode_instruction(cpu_mode, SEG_DESC_DEF32(csar),
&emul_ctxt->vie);
if (retval != 0) {

View File

@@ -104,7 +104,7 @@ static uint32_t alloc_irq(void)
}
/* need irq_lock protection before use */
static void _irq_desc_set_vector(uint32_t irq, uint32_t vr)
static void local_irq_desc_set_vector(uint32_t irq, uint32_t vr)
{
struct irq_desc *desc;
@@ -321,7 +321,7 @@ uint32_t irq_desc_alloc_vector(uint32_t irq, bool lowpri)
pr_err("no vector found for irq[%d]", irq);
goto OUT;
}
_irq_desc_set_vector(irq, vr);
local_irq_desc_set_vector(irq, vr);
OUT:
spinlock_irqrestore_release(&desc->irq_lock);
return vr;

View File

@@ -62,7 +62,7 @@ struct invept_desc {
uint64_t _res;
};
static inline void _invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
static inline void local_invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
{
int error = 0;
@@ -82,7 +82,7 @@ static inline void _invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
ASSERT(error == 0, "invvpid error");
}
static inline void _invept(uint64_t type, struct invept_desc desc)
static inline void local_invept(uint64_t type, struct invept_desc desc)
{
int error = 0;
@@ -159,12 +159,12 @@ void flush_vpid_single(uint16_t vpid)
return;
}
_invvpid(VMX_VPID_TYPE_SINGLE_CONTEXT, vpid, 0UL);
local_invvpid(VMX_VPID_TYPE_SINGLE_CONTEXT, vpid, 0UL);
}
void flush_vpid_global(void)
{
_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0U, 0UL);
local_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0U, 0UL);
}
void invept(struct vcpu *vcpu)
@@ -174,15 +174,15 @@ void invept(struct vcpu *vcpu)
if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
desc.eptp = HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) |
(3UL << 3U) | 6UL;
_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
local_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
if (vcpu->vm->sworld_control.sworld_enabled &&
vcpu->vm->arch_vm.sworld_eptp != NULL) {
desc.eptp = HVA2HPA(vcpu->vm->arch_vm.sworld_eptp)
| (3UL << 3U) | 6UL;
_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
local_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
}
} else if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT)) {
_invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
local_invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
} else {
/* Neither type of INVEPT is supported. Skip. */
}

View File

@@ -45,7 +45,7 @@ static inline void update_physical_timer(struct per_cpu_timers *cpu_timer)
}
}
static void __add_timer(struct per_cpu_timers *cpu_timer,
static void local_add_timer(struct per_cpu_timers *cpu_timer,
struct hv_timer *timer,
bool *need_update)
{
@@ -90,7 +90,7 @@ int add_timer(struct hv_timer *timer)
pcpu_id = get_cpu_id();
cpu_timer = &per_cpu(cpu_timers, pcpu_id);
__add_timer(cpu_timer, timer, &need_update);
local_add_timer(cpu_timer, timer, &need_update);
if (need_update) {
update_physical_timer(cpu_timer);
@@ -165,7 +165,7 @@ static void timer_softirq(uint16_t pcpu_id)
/* This is to make sure we are not blocked due to delay inside func()
* force to exit irq handler after we serviced >31 timers
* caller used to __add_timer() for periodic timer, if there is a delay
* caller used to local_add_timer() for periodic timer, if there is a delay
* inside func(), it will infinitely loop here, because new added timer
* already passed due to previously func()'s delay.
*/
@@ -181,7 +181,7 @@ static void timer_softirq(uint16_t pcpu_id)
if (timer->mode == TICK_MODE_PERIODIC) {
/* update periodic timer fire tsc */
timer->fire_tsc += timer->period_in_cycle;
__add_timer(cpu_timer, timer, NULL);
local_add_timer(cpu_timer, timer, NULL);
}
} else {
break;