HV: Rename functions, variables starting with "_"

To comply with MISRA C rules, rename variables and
function names that start with "_".
The changes mostly involve static function names,
as those functions are called inside the same file
by a wrapper function.
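
To illustrate the pattern, here is a minimal, self-contained sketch. The names
are hypothetical (not taken from this commit), and a pthread mutex stands in
for the hypervisor's spinlocks such as ptdev_lock:

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define FOO_TABLE_SIZE 16U

struct foo {
	uint32_t id;
};

static pthread_mutex_t foo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct foo foo_table[FOO_TABLE_SIZE];

/* Requires foo_lock to be held by the caller; before this change the
 * helper would have been named _lookup_foo_by_id().
 */
static struct foo *local_lookup_foo_by_id(uint32_t id)
{
	size_t i;

	for (i = 0U; i < FOO_TABLE_SIZE; i++) {
		if (foo_table[i].id == id) {
			return &foo_table[i];
		}
	}
	return NULL;
}

/* Public wrapper in the same file: takes the lock, then delegates to
 * the local_ helper.
 */
struct foo *lookup_foo_by_id(uint32_t id)
{
	struct foo *entry;

	pthread_mutex_lock(&foo_lock);
	entry = local_lookup_foo_by_id(id);
	pthread_mutex_unlock(&foo_lock);
	return entry;
}

The hunks below follow this scheme: the leading "_" or "__" prefix becomes
"local_", while the locking wrapper keeps its original name.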

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
Author: Arindam Roy <arindam.roy@intel.com>
Date: 2018-08-01 13:42:40 -07:00
Committed by: lijinxia
Commit: 37026590c9 (parent: a71dedecd4)
14 changed files with 51 additions and 51 deletions

@@ -56,7 +56,7 @@ is_entry_active(struct ptdev_remapping_info *entry)
 /* require ptdev_lock protect */
 static inline struct ptdev_remapping_info *
-_lookup_entry_by_id(uint32_t id)
+local_lookup_entry_by_id(uint32_t id)
 {
 struct ptdev_remapping_info *entry;
 struct list_head *pos;
@@ -74,7 +74,7 @@ _lookup_entry_by_id(uint32_t id)
 /* require ptdev_lock protect */
 static inline struct ptdev_remapping_info *
-_lookup_entry_by_vmsi(struct vm *vm, uint16_t vbdf, uint32_t index)
+local_lookup_entry_by_vmsi(struct vm *vm, uint16_t vbdf, uint32_t index)
 {
 struct ptdev_remapping_info *entry;
 struct list_head *pos;
@@ -100,14 +100,14 @@ lookup_entry_by_vmsi(struct vm *vm, uint16_t vbdf, uint32_t index)
 struct ptdev_remapping_info *entry;
 spinlock_obtain(&ptdev_lock);
-entry = _lookup_entry_by_vmsi(vm, vbdf, index);
+entry = local_lookup_entry_by_vmsi(vm, vbdf, index);
 spinlock_release(&ptdev_lock);
 return entry;
 }
 /* require ptdev_lock protect */
 static inline struct ptdev_remapping_info *
-_lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
+local_lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
 enum ptdev_vpin_source vpin_src)
 {
 struct ptdev_remapping_info *entry;
@@ -134,7 +134,7 @@ lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
 struct ptdev_remapping_info *entry;
 spinlock_obtain(&ptdev_lock);
-entry = _lookup_entry_by_vintx(vm, vpin, vpin_src);
+entry = local_lookup_entry_by_vintx(vm, vpin, vpin_src);
 spinlock_release(&ptdev_lock);
 return entry;
 }
@@ -307,10 +307,10 @@ add_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
 struct ptdev_remapping_info *entry;
 spinlock_obtain(&ptdev_lock);
-entry = _lookup_entry_by_id(
+entry = local_lookup_entry_by_id(
 entry_id_from_msix(phys_bdf, msix_entry_index));
 if (entry == NULL) {
-if (_lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index) != NULL) {
+if (local_lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index) != NULL) {
 pr_err("MSIX re-add vbdf%x", virt_bdf);
 spinlock_release(&ptdev_lock);
@@ -357,7 +357,7 @@ remove_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint32_t msix_entry_inde
 struct ptdev_remapping_info *entry;
 spinlock_obtain(&ptdev_lock);
-entry = _lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index);
+entry = local_lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index);
 if (entry == NULL) {
 goto END;
 }
@@ -393,9 +393,9 @@ add_intx_remapping(struct vm *vm, uint8_t virt_pin,
 pic_pin ? PTDEV_VPIN_PIC : PTDEV_VPIN_IOAPIC;
 spinlock_obtain(&ptdev_lock);
-entry = _lookup_entry_by_id(entry_id_from_intx(phys_pin));
+entry = local_lookup_entry_by_id(entry_id_from_intx(phys_pin));
 if (entry == NULL) {
-if (_lookup_entry_by_vintx(vm, virt_pin, vpin_src) != NULL) {
+if (local_lookup_entry_by_vintx(vm, virt_pin, vpin_src) != NULL) {
 pr_err("INTX re-add vpin %d", virt_pin);
 spinlock_release(&ptdev_lock);
 return &invalid_entry;
@@ -445,7 +445,7 @@ static void remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin)
 pic_pin ? PTDEV_VPIN_PIC : PTDEV_VPIN_IOAPIC;
 spinlock_obtain(&ptdev_lock);
-entry = _lookup_entry_by_vintx(vm, virt_pin, vpin_src);
+entry = local_lookup_entry_by_vintx(vm, virt_pin, vpin_src);
 if (entry == NULL) {
 goto END;
 }

@@ -116,7 +116,7 @@ void destroy_ept(struct vm *vm)
 }
 }
-uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
+uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 {
 uint64_t hpa = 0UL;
 uint64_t *pgentry, pg_size = 0UL;
@@ -142,7 +142,7 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 /* using return value 0 as failure, make sure guest will not use hpa 0 */
 uint64_t gpa2hpa(struct vm *vm, uint64_t gpa)
 {
-return _gpa2hpa(vm, gpa, NULL);
+return local_gpa2hpa(vm, gpa, NULL);
 }
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa)

@@ -134,7 +134,7 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
 /* TODO: Add code to check for Revserved bits, SMAP and PKE when do translation
 * during page walk */
-static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
+static int local_gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
 uint64_t gva, uint64_t *gpa, uint32_t *err_code)
 {
 uint32_t i;
@@ -220,7 +220,7 @@ out:
 return ret;
 }
-static int _gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
+static int local_gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
 uint64_t gva, uint64_t *gpa, uint32_t *err_code)
 {
 int index;
@@ -246,7 +246,7 @@ static int _gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
 pw_info->level = 2U;
 pw_info->top_entry = entry;
-ret = _gva2gpa_common(vcpu, pw_info, gva, gpa, err_code);
+ret = local_gva2gpa_common(vcpu, pw_info, gva, gpa, err_code);
 out:
 return ret;
@@ -298,15 +298,15 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
 if (pm == PAGING_MODE_4_LEVEL) {
 pw_info.width = 9U;
-ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
+ret = local_gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
 } else if(pm == PAGING_MODE_3_LEVEL) {
 pw_info.width = 9U;
-ret = _gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);
+ret = local_gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);
 } else if (pm == PAGING_MODE_2_LEVEL) {
 pw_info.width = 10U;
 pw_info.pse = ((cur_context->cr4 & CR4_PSE) != 0UL);
 pw_info.nxe = false;
-ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
+ret = local_gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
 } else {
 *gpa = gva;
 }
@@ -320,14 +320,14 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
 return ret;
 }
-static inline uint32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
+static inline uint32_t local_copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
 uint32_t size, uint32_t fix_pg_size, bool cp_from_vm)
 {
 uint64_t hpa;
 uint32_t offset_in_pg, len, pg_size;
 void *g_ptr;
-hpa = _gpa2hpa(vm, gpa, &pg_size);
+hpa = local_gpa2hpa(vm, gpa, &pg_size);
 if (pg_size == 0U) {
 pr_err("GPA2HPA not found");
 return 0;
@@ -366,7 +366,7 @@ static inline int copy_gpa(struct vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
 }
 while (size > 0U) {
-len = _copy_gpa(vm, h_ptr, gpa, size, 0U, cp_from_vm);
+len = local_copy_gpa(vm, h_ptr, gpa, size, 0U, cp_from_vm);
 if (len == 0U) {
 return -EINVAL;
 }
@@ -406,7 +406,7 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
 return ret;
 }
-len = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
+len = local_copy_gpa(vcpu->vm, h_ptr, gpa, size,
 PAGE_SIZE_4K, cp_from_vm);
 if (len == 0U) {

@@ -2151,7 +2151,7 @@ decode_moffset(struct instr_emul_vie *vie)
 }
 int
-__decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct instr_emul_vie *vie)
+local_decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct instr_emul_vie *vie)
 {
 if (decode_prefixes(vie, cpu_mode, cs_d) != 0) {
 return -1;

@@ -89,7 +89,7 @@ int vie_init(struct instr_emul_vie *vie, struct vcpu *vcpu);
 */
 #define VIE_INVALID_GLA (1UL << 63) /* a non-canonical address */
 int
-__decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct instr_emul_vie *vie);
+local_decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct instr_emul_vie *vie);
 int emulate_instruction(struct vcpu *vcpu);
 int decode_instruction(struct vcpu *vcpu);

@@ -358,7 +358,7 @@ int decode_instruction(struct vcpu *vcpu)
 get_guest_paging_info(vcpu, emul_ctxt, csar);
 cpu_mode = get_vcpu_mode(vcpu);
-retval = __decode_instruction(cpu_mode, SEG_DESC_DEF32(csar),
+retval = local_decode_instruction(cpu_mode, SEG_DESC_DEF32(csar),
 &emul_ctxt->vie);
 if (retval != 0) {

@@ -104,7 +104,7 @@ static uint32_t alloc_irq(void)
 }
 /* need irq_lock protection before use */
-static void _irq_desc_set_vector(uint32_t irq, uint32_t vr)
+static void local_irq_desc_set_vector(uint32_t irq, uint32_t vr)
 {
 struct irq_desc *desc;
@@ -321,7 +321,7 @@ uint32_t irq_desc_alloc_vector(uint32_t irq, bool lowpri)
 pr_err("no vector found for irq[%d]", irq);
 goto OUT;
 }
-_irq_desc_set_vector(irq, vr);
+local_irq_desc_set_vector(irq, vr);
 OUT:
 spinlock_irqrestore_release(&desc->irq_lock);
 return vr;

@@ -62,7 +62,7 @@ struct invept_desc {
 uint64_t _res;
 };
-static inline void _invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
+static inline void local_invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
 {
 int error = 0;
@@ -82,7 +82,7 @@ static inline void _invvpid(uint64_t type, uint16_t vpid, uint64_t gva)
 ASSERT(error == 0, "invvpid error");
 }
-static inline void _invept(uint64_t type, struct invept_desc desc)
+static inline void local_invept(uint64_t type, struct invept_desc desc)
 {
 int error = 0;
@@ -159,12 +159,12 @@ void flush_vpid_single(uint16_t vpid)
 return;
 }
-_invvpid(VMX_VPID_TYPE_SINGLE_CONTEXT, vpid, 0UL);
+local_invvpid(VMX_VPID_TYPE_SINGLE_CONTEXT, vpid, 0UL);
 }
 void flush_vpid_global(void)
 {
-_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0U, 0UL);
+local_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0U, 0UL);
 }
 void invept(struct vcpu *vcpu)
@@ -174,15 +174,15 @@ void invept(struct vcpu *vcpu)
 if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
 desc.eptp = HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) |
 (3UL << 3U) | 6UL;
-_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
+local_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
 if (vcpu->vm->sworld_control.sworld_enabled &&
 vcpu->vm->arch_vm.sworld_eptp != NULL) {
 desc.eptp = HVA2HPA(vcpu->vm->arch_vm.sworld_eptp)
 | (3UL << 3U) | 6UL;
-_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
+local_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
 }
 } else if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT)) {
-_invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
+local_invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
 } else {
 /* Neither type of INVEPT is supported. Skip. */
 }

@@ -45,7 +45,7 @@ static inline void update_physical_timer(struct per_cpu_timers *cpu_timer)
 }
 }
-static void __add_timer(struct per_cpu_timers *cpu_timer,
+static void local_add_timer(struct per_cpu_timers *cpu_timer,
 struct hv_timer *timer,
 bool *need_update)
 {
@@ -90,7 +90,7 @@ int add_timer(struct hv_timer *timer)
 pcpu_id = get_cpu_id();
 cpu_timer = &per_cpu(cpu_timers, pcpu_id);
-__add_timer(cpu_timer, timer, &need_update);
+local_add_timer(cpu_timer, timer, &need_update);
 if (need_update) {
 update_physical_timer(cpu_timer);
@@ -165,7 +165,7 @@ static void timer_softirq(uint16_t pcpu_id)
 /* This is to make sure we are not blocked due to delay inside func()
 * force to exit irq handler after we serviced >31 timers
-* caller used to __add_timer() for periodic timer, if there is a delay
+* caller used to local_add_timer() for periodic timer, if there is a delay
 * inside func(), it will infinitely loop here, because new added timer
 * already passed due to previously func()'s delay.
 */
@@ -181,7 +181,7 @@ static void timer_softirq(uint16_t pcpu_id)
 if (timer->mode == TICK_MODE_PERIODIC) {
 /* update periodic timer fire tsc */
 timer->fire_tsc += timer->period_in_cycle;
-__add_timer(cpu_timer, timer, NULL);
+local_add_timer(cpu_timer, timer, NULL);
 }
 } else {
 break;

@@ -221,7 +221,7 @@ static void *get_acpi_tbl(const char *sig)
 return HPA2HVA(addr);
 }
-static uint16_t _parse_madt(void *madt, uint8_t lapic_id_array[MAX_PCPU_NUM])
+static uint16_t local_parse_madt(void *madt, uint8_t lapic_id_array[MAX_PCPU_NUM])
 {
 uint16_t pcpu_id = 0;
 struct acpi_madt_local_apic *processor;
@@ -274,7 +274,7 @@ uint16_t parse_madt(uint8_t lapic_id_array[MAX_PCPU_NUM])
 madt = get_acpi_tbl(ACPI_SIG_MADT);
 ASSERT(madt != NULL, "fail to get madt");
-return _parse_madt(madt, lapic_id_array);
+return local_parse_madt(madt, lapic_id_array);
 }
 void *get_dmar_table(void)

@@ -405,7 +405,7 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
 return 0;
 }
-static int32_t _set_vm_memory_region(struct vm *vm,
+static int32_t local_set_vm_memory_region(struct vm *vm,
 struct vm *target_vm, struct vm_memory_region *region)
 {
 uint64_t hpa, base_paddr;
@@ -491,7 +491,7 @@ int32_t hcall_set_vm_memory_region(struct vm *vm, uint16_t vmid, uint64_t param)
 return -EPERM;
 }
-return _set_vm_memory_region(vm, target_vm, &region);
+return local_set_vm_memory_region(vm, target_vm, &region);
 }
 int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param)
@@ -526,7 +526,7 @@ int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param)
 /* the force pointer change below is for back compatible
 * to struct vm_memory_region, it will be removed in the future
 */
-int ret = _set_vm_memory_region(vm, target_vm, &regions[idx]);
+int ret = local_set_vm_memory_region(vm, target_vm, &regions[idx]);
 if (ret < 0) {
 return ret;
 }

@@ -106,7 +106,7 @@ acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *io_req)
 }
 #ifdef HV_DEBUG
-static void _get_req_info_(struct vhm_request *req, int *id, char *type,
+static void local_get_req_info_(struct vhm_request *req, int *id, char *type,
 char *state, char *dir, uint64_t *addr, uint64_t *val)
 {
 (void)strcpy_s(dir, 16U, "NONE");
@@ -184,7 +184,7 @@ void get_req_info(char *str_arg, int str_max)
 for (i = 0U; i < VHM_REQUEST_MAX; i++) {
 req = req_buf->req_queue + i;
 if (req->valid != 0) {
-_get_req_info_(req, &client_id, type,
+local_get_req_info_(req, &client_id, type,
 state, dir, &addr, &val);
 len = snprintf(str, size,
 "\r\n%d\t%d\t%d\t%s\t%s\t%s",

@@ -7,7 +7,7 @@
 #include <hypervisor.h>
 #include <zeropage.h>
-static uint32_t create_e820_table(struct e820_entry *_e820)
+static uint32_t create_e820_table(struct e820_entry *param_e820)
 {
 uint32_t i;
@@ -15,9 +15,9 @@ static uint32_t create_e820_table(struct e820_entry *_e820)
 "e820 should be inited");
 for (i = 0U; i < e820_entries; i++) {
-_e820[i].baseaddr = e820[i].baseaddr;
-_e820[i].length = e820[i].length;
-_e820[i].type = e820[i].type;
+param_e820[i].baseaddr = e820[i].baseaddr;
+param_e820[i].length = e820[i].length;
+param_e820[i].type = e820[i].type;
 }
 return e820_entries;

@@ -380,7 +380,7 @@ bool is_ept_supported(void);
 uint64_t create_guest_initial_paging(struct vm *vm);
 void destroy_ept(struct vm *vm);
 uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
-uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
+uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
 int ept_mr_add(struct vm *vm, uint64_t hpa_arg,
 uint64_t gpa_arg, uint64_t size, uint32_t prot_arg);