HV: treewide: rename vm data structure

The name of the data structure type "struct vm" is identical to the
variable name used in the same scope. This is a MISRA C violation.

Naming convention rule: if a data structure type is used by multiple
modules, its corresponding logical resource is exposed to external
components (such as SOS, UOS), and its name is overly generic
(such as vcpu, vm), the type name needs the prefix "acrn_".

The following update is made:
struct vm *vm --> struct acrn_vm *vm
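
To make the rule concrete, here is a drastically simplified sketch of the
pattern before and after the rename (the struct body and the helper below
are illustrative only, not the real ACRN definitions):

#include <stdint.h>

/* Before the rename the tag and the parameter both used the identifier
 * "vm" (struct vm *vm). That is legal C, since tags live in a separate
 * name space, but MISRA C flags reusing one identifier for a tag and an
 * object in the same scope. */
struct acrn_vm {
	uint16_t vm_id;	/* the real struct carries many more fields */
};

/* After the rename only the variable keeps the short name "vm". */
static inline uint16_t get_vmid(const struct acrn_vm *vm)
{
	return vm->vm_id;
}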

Tracked-On: #861

Signed-off-by: Xiangyang Wu <xiangyang.wu@linux.intel.com>
Author: Xiangyang Wu
Date: 2018-11-05 13:28:23 +08:00
Committed by: lijinxia
Parent: ace4f48c9a
Commit: 99586e32cc
66 changed files with 349 additions and 349 deletions


@@ -17,7 +17,7 @@
*/
static inline struct ptdev_remapping_info *
ptdev_lookup_entry_by_sid(uint32_t intr_type,
const union source_id *sid,const struct vm *vm)
const union source_id *sid,const struct acrn_vm *vm)
{
struct ptdev_remapping_info *entry;
struct list_head *pos;
@@ -43,7 +43,7 @@ is_entry_active(const struct ptdev_remapping_info *entry)
return atomic_load32(&entry->active) == ACTIVE_FLAG;
}
static bool ptdev_hv_owned_intx(const struct vm *vm, const union source_id *virt_sid)
static bool ptdev_hv_owned_intx(const struct acrn_vm *vm, const union source_id *virt_sid)
{
/* vm0 vuart pin is owned by hypervisor under debug version */
if (is_vm0(vm) && (virt_sid->intx_id.pin == COM1_IRQ)) {
@@ -68,7 +68,7 @@ static uint64_t calculate_logical_dest_mask(uint64_t pdmask)
return dest_mask;
}
static void ptdev_build_physical_msi(struct vm *vm, struct ptdev_msi_info *info,
static void ptdev_build_physical_msi(struct acrn_vm *vm, struct ptdev_msi_info *info,
uint32_t vector)
{
uint64_t vdmask, pdmask, dest_mask;
@@ -105,7 +105,7 @@ static void ptdev_build_physical_msi(struct vm *vm, struct ptdev_msi_info *info,
}
static union ioapic_rte
ptdev_build_physical_rte(struct vm *vm,
ptdev_build_physical_rte(struct acrn_vm *vm,
struct ptdev_remapping_info *entry)
{
union ioapic_rte rte;
@@ -191,7 +191,7 @@ ptdev_build_physical_rte(struct vm *vm,
* - if the entry already be added by other vm, return NULL
*/
static struct ptdev_remapping_info *
add_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
uint32_t entry_nr)
{
struct ptdev_remapping_info *entry;
@@ -246,7 +246,7 @@ add_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
/* deactive & remove mapping entry of vbdf:entry_nr for vm */
static void
remove_msix_remapping(const struct vm *vm, uint16_t virt_bdf, uint32_t entry_nr)
remove_msix_remapping(const struct acrn_vm *vm, uint16_t virt_bdf, uint32_t entry_nr)
{
struct ptdev_remapping_info *entry;
DEFINE_MSI_SID(virt_sid, virt_bdf, entry_nr);
@@ -280,7 +280,7 @@ END:
* - if the entry already be added by other vm, return NULL
*/
static struct ptdev_remapping_info *
add_intx_remapping(struct vm *vm, uint8_t virt_pin,
add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin,
uint8_t phys_pin, bool pic_pin)
{
struct ptdev_remapping_info *entry;
@@ -340,7 +340,7 @@ add_intx_remapping(struct vm *vm, uint8_t virt_pin,
}
/* deactive & remove mapping entry of vpin for vm */
static void remove_intx_remapping(const struct vm *vm, uint8_t virt_pin, bool pic_pin)
static void remove_intx_remapping(const struct acrn_vm *vm, uint8_t virt_pin, bool pic_pin)
{
uint32_t phys_irq;
struct ptdev_remapping_info *entry;
@@ -375,7 +375,7 @@ END:
spinlock_release(&ptdev_lock);
}
static void ptdev_intr_handle_irq(struct vm *vm,
static void ptdev_intr_handle_irq(struct acrn_vm *vm,
const struct ptdev_remapping_info *entry)
{
const union source_id *virt_sid = &entry->virt_sid;
@@ -443,7 +443,7 @@ static void ptdev_intr_handle_irq(struct vm *vm,
void ptdev_softirq(uint16_t pcpu_id)
{
struct acrn_vcpu *vcpu = (struct acrn_vcpu *)per_cpu(vcpu, pcpu_id);
struct vm *vm = vcpu->vm;
struct acrn_vm *vm = vcpu->vm;
while (1) {
struct ptdev_remapping_info *entry = ptdev_dequeue_softirq(vm);
@@ -482,7 +482,7 @@ void ptdev_softirq(uint16_t pcpu_id)
}
}
void ptdev_intx_ack(struct vm *vm, uint8_t virt_pin,
void ptdev_intx_ack(struct acrn_vm *vm, uint8_t virt_pin,
enum ptdev_vpin_source vpin_src)
{
uint32_t phys_irq;
@@ -533,7 +533,7 @@ void ptdev_intx_ack(struct vm *vm, uint8_t virt_pin,
*
* This function is called by SOS pci MSI config routine through hcall
*/
int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
int ptdev_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf,
uint16_t entry_nr, struct ptdev_msi_info *info)
{
struct ptdev_remapping_info *entry;
@@ -589,7 +589,7 @@ END:
return 0;
}
static void activate_physical_ioapic(struct vm *vm,
static void activate_physical_ioapic(struct acrn_vm *vm,
struct ptdev_remapping_info *entry)
{
union ioapic_rte rte;
@@ -622,7 +622,7 @@ static void activate_physical_ioapic(struct vm *vm,
/* Main entry for PCI/Legacy device assignment with INTx, calling from vIOAPIC
* or vPIC
*/
int ptdev_intx_pin_remap(struct vm *vm, uint8_t virt_pin,
int ptdev_intx_pin_remap(struct acrn_vm *vm, uint8_t virt_pin,
enum ptdev_vpin_source vpin_src)
{
struct ptdev_remapping_info *entry;
@@ -729,7 +729,7 @@ END:
* - currently, one phys_pin can only be held by one pin source (vPIC or
* vIOAPIC)
*/
int ptdev_add_intx_remapping(struct vm *vm, uint8_t virt_pin, uint8_t phys_pin,
int ptdev_add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin, uint8_t phys_pin,
bool pic_pin)
{
struct ptdev_remapping_info *entry;
@@ -748,7 +748,7 @@ int ptdev_add_intx_remapping(struct vm *vm, uint8_t virt_pin, uint8_t phys_pin,
/*
* @pre vm != NULL
*/
void ptdev_remove_intx_remapping(const struct vm *vm, uint8_t virt_pin, bool pic_pin)
void ptdev_remove_intx_remapping(const struct acrn_vm *vm, uint8_t virt_pin, bool pic_pin)
{
remove_intx_remapping(vm, virt_pin, pic_pin);
}
@@ -758,7 +758,7 @@ void ptdev_remove_intx_remapping(const struct vm *vm, uint8_t virt_pin, bool pic
* - the entry is identified by phys_bdf:msi_idx:
* one entry vs. one phys_bdf:msi_idx
*/
int ptdev_add_msix_remapping(struct vm *vm, uint16_t virt_bdf,
int ptdev_add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf,
uint16_t phys_bdf, uint32_t vector_count)
{
struct ptdev_remapping_info *entry;
@@ -777,7 +777,7 @@ int ptdev_add_msix_remapping(struct vm *vm, uint16_t virt_bdf,
/*
* @pre vm != NULL
*/
void ptdev_remove_msix_remapping(const struct vm *vm, uint16_t virt_bdf,
void ptdev_remove_msix_remapping(const struct acrn_vm *vm, uint16_t virt_bdf,
uint32_t vector_count)
{
uint32_t i;


@@ -11,7 +11,7 @@ static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcp
{
uint32_t i = 0U, nr, half;
struct vcpuid_entry *entry = NULL;
struct vm *vm = vcpu->vm;
struct acrn_vm *vm = vcpu->vm;
uint32_t leaf = leaf_arg;
nr = vm->vcpuid_entry_nr;
@@ -63,7 +63,7 @@ static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcp
return entry;
}
static inline int set_vcpuid_entry(struct vm *vm,
static inline int set_vcpuid_entry(struct acrn_vm *vm,
const struct vcpuid_entry *entry)
{
struct vcpuid_entry *tmp;
@@ -170,7 +170,7 @@ static void init_vcpuid_entry(uint32_t leaf, uint32_t subleaf,
}
}
int set_vcpuid_entries(struct vm *vm)
int set_vcpuid_entries(struct acrn_vm *vm)
{
int result;
struct vcpuid_entry entry;


@@ -10,7 +10,7 @@
#define ACRN_DBG_EPT 6U
void destroy_ept(struct vm *vm)
void destroy_ept(struct acrn_vm *vm)
{
/* Destroy secure world */
if (vm->sworld_control.flag.active != 0UL) {
@@ -23,7 +23,7 @@ void destroy_ept(struct vm *vm)
}
/* using return value INVALID_HPA as error code */
uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size)
{
uint64_t hpa = INVALID_HPA;
uint64_t *pgentry, pg_size = 0UL;
@@ -58,7 +58,7 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
}
/* using return value INVALID_HPA as error code */
uint64_t gpa2hpa(struct vm *vm, uint64_t gpa)
uint64_t gpa2hpa(struct acrn_vm *vm, uint64_t gpa)
{
return local_gpa2hpa(vm, gpa, NULL);
}
@@ -178,7 +178,7 @@ int ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu)
return status;
}
void ept_mr_add(struct vm *vm, uint64_t *pml4_page,
void ept_mr_add(struct acrn_vm *vm, uint64_t *pml4_page,
uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
{
uint16_t i;
@@ -203,7 +203,7 @@ void ept_mr_add(struct vm *vm, uint64_t *pml4_page,
}
}
void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
void ept_mr_modify(struct acrn_vm *vm, uint64_t *pml4_page,
uint64_t gpa, uint64_t size,
uint64_t prot_set, uint64_t prot_clr)
{
@@ -221,7 +221,7 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
/**
* @pre [gpa,gpa+size) has been mapped into host physical memory region
*/
void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
void ept_mr_del(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
{
struct acrn_vcpu *vcpu;
uint16_t i;


@@ -32,7 +32,7 @@ struct page_walk_info {
bool is_smep_on;
};
uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask)
{
uint16_t vcpu_id;
uint64_t dmask = 0UL;
@@ -327,7 +327,7 @@ int gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa,
return ret;
}
static inline uint32_t local_copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
static inline uint32_t local_copy_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa,
uint32_t size, uint32_t fix_pg_size, bool cp_from_vm)
{
uint64_t hpa;
@@ -360,7 +360,7 @@ static inline uint32_t local_copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
return len;
}
static inline int copy_gpa(struct vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
static inline int copy_gpa(struct acrn_vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
uint32_t size_arg, bool cp_from_vm)
{
void *h_ptr = h_ptr_arg;
@@ -427,7 +427,7 @@ static inline int copy_gva(struct acrn_vcpu *vcpu, void *h_ptr_arg, uint64_t gva
* continuous
* @pre Pointer vm is non-NULL
*/
int copy_from_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
int copy_from_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
{
return copy_gpa(vm, h_ptr, gpa, size, 1);
}
@@ -438,7 +438,7 @@ int copy_from_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
* continuous
* @pre Pointer vm is non-NULL
*/
int copy_to_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
int copy_to_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
{
return copy_gpa(vm, h_ptr, gpa, size, 0);
}
@@ -606,7 +606,7 @@ static void rebuild_vm0_e820(void)
* @pre vm != NULL
* @pre is_vm0(vm) == true
*/
int prepare_vm0_memmap_and_e820(struct vm *vm)
int prepare_vm0_memmap_and_e820(struct acrn_vm *vm)
{
uint32_t i;
uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);


@@ -288,7 +288,7 @@ static uint8_t mpt_compute_checksum(void *base, size_t len)
return (256U - sum);
}
int mptable_build(struct vm *vm)
int mptable_build(struct acrn_vm *vm)
{
char *startaddr;
char *curraddr;


@@ -6,7 +6,7 @@
#include <hypervisor.h>
int validate_pstate(const struct vm *vm, uint64_t perf_ctl)
int validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl)
{
const struct cpu_px_data *px_data;
int i, px_cnt;
@@ -31,7 +31,7 @@ int validate_pstate(const struct vm *vm, uint64_t perf_ctl)
return -1;
}
static void vm_setup_cpu_px(struct vm *vm)
static void vm_setup_cpu_px(struct acrn_vm *vm)
{
uint32_t px_data_size;
@@ -56,7 +56,7 @@ static void vm_setup_cpu_px(struct vm *vm)
}
static void vm_setup_cpu_cx(struct vm *vm)
static void vm_setup_cpu_cx(struct acrn_vm *vm)
{
uint32_t cx_data_size;
@@ -84,7 +84,7 @@ static void vm_setup_cpu_cx(struct vm *vm)
}
static inline void init_cx_port(struct vm *vm)
static inline void init_cx_port(struct acrn_vm *vm)
{
uint8_t cx_idx;
@@ -99,7 +99,7 @@ static inline void init_cx_port(struct vm *vm)
}
}
void vm_setup_cpu_state(struct vm *vm)
void vm_setup_cpu_state(struct acrn_vm *vm)
{
vm_setup_cpu_px(vm);
vm_setup_cpu_cx(vm);
@@ -109,7 +109,7 @@ void vm_setup_cpu_state(struct vm *vm)
/* This function is for power management Sx state implementation,
* VM need to load the Sx state data to implement S3/S5.
*/
int vm_load_pm_s_state(struct vm *vm)
int vm_load_pm_s_state(struct acrn_vm *vm)
{
#ifdef ACPI_INFO_VALIDATED
vm->pm.sx_state_data = (struct pm_s_state_data *)&host_pm_s_state;
@@ -132,7 +132,7 @@ static inline uint8_t get_slp_typx(uint32_t pm1_cnt)
return (uint8_t)((pm1_cnt & 0x1fffU) >> BIT_SLP_TYPx);
}
static uint32_t pm1ab_io_read(__unused struct vm *vm, uint16_t addr,
static uint32_t pm1ab_io_read(__unused struct acrn_vm *vm, uint16_t addr,
size_t width)
{
uint32_t val = pio_read(addr, width);
@@ -148,7 +148,7 @@ static uint32_t pm1ab_io_read(__unused struct vm *vm, uint16_t addr,
return val;
}
static void pm1ab_io_write(__unused struct vm *vm, uint16_t addr, size_t width,
static void pm1ab_io_write(__unused struct acrn_vm *vm, uint16_t addr, size_t width,
uint32_t v)
{
static uint32_t pm1a_cnt_ready = 0U;
@@ -187,7 +187,7 @@ static void pm1ab_io_write(__unused struct vm *vm, uint16_t addr, size_t width,
}
static void
register_gas_io_handler(struct vm *vm, const struct acpi_generic_address *gas)
register_gas_io_handler(struct acrn_vm *vm, const struct acpi_generic_address *gas)
{
uint8_t io_len[5] = {0, 1, 2, 4, 8};
struct vm_io_range gas_io;
@@ -210,7 +210,7 @@ register_gas_io_handler(struct vm *vm, const struct acpi_generic_address *gas)
vm->vm_id, gas_io.base, gas_io.len);
}
void register_pm1ab_handler(struct vm *vm)
void register_pm1ab_handler(struct acrn_vm *vm)
{
struct pm_s_state_data *sx_data = vm->pm.sx_state_data;


@@ -311,7 +311,7 @@ void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry)
* for physical CPU 1 : vcpu->pcpu_id = 1, vcpu->vcpu_id = 1, vmid = 1;
*
***********************************************************************/
int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
int create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
{
struct acrn_vcpu *vcpu;
uint16_t vcpu_id;
@@ -598,7 +598,7 @@ void schedule_vcpu(struct acrn_vcpu *vcpu)
}
/* help function for vcpu create */
int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
int prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
int ret = 0;
struct acrn_vcpu *vcpu = NULL;


@@ -107,7 +107,7 @@ static void vlapic_timer_expired(void *data);
static inline bool is_x2apic_enabled(const struct acrn_vlapic *vlapic);
static struct acrn_vlapic *
vm_lapic_from_vcpu_id(struct vm *vm, uint16_t vcpu_id)
vm_lapic_from_vcpu_id(struct acrn_vm *vm, uint16_t vcpu_id)
{
struct acrn_vcpu *vcpu;
@@ -116,7 +116,7 @@ vm_lapic_from_vcpu_id(struct vm *vm, uint16_t vcpu_id)
return vcpu_vlapic(vcpu);
}
static uint16_t vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
static uint16_t vm_apicid2vcpu_id(struct acrn_vm *vm, uint8_t lapicid)
{
uint16_t i;
struct acrn_vcpu *vcpu;
@@ -134,7 +134,7 @@ static uint16_t vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
}
static uint64_t
vm_active_cpus(const struct vm *vm)
vm_active_cpus(const struct acrn_vm *vm)
{
uint64_t dmask = 0UL;
uint16_t i;
@@ -1004,7 +1004,7 @@ vlapic_trigger_lvt(struct acrn_vlapic *vlapic, uint32_t vector)
* addressing specified by the (dest, phys, lowprio) tuple.
*/
static void
vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
vlapic_calcdest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest,
bool phys, bool lowprio)
{
struct acrn_vlapic *vlapic;
@@ -1118,7 +1118,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
}
void
calcvdest(struct vm *vm, uint64_t *dmask, uint32_t dest, bool phys)
calcvdest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys)
{
vlapic_calcdest(vm, dmask, dest, phys, false);
}
@@ -1797,7 +1797,7 @@ vlapic_set_apicbase(struct acrn_vlapic *vlapic, uint64_t new)
}
void
vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
vlapic_deliver_intr(struct acrn_vm *vm, bool level, uint32_t dest, bool phys,
uint32_t delmode, uint32_t vec, bool rh)
{
bool lowprio;
@@ -1968,7 +1968,7 @@ vlapic_set_intr(struct acrn_vcpu *vcpu, uint32_t vector, bool level)
* @pre vm != NULL
*/
int
vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id_arg, uint32_t vector)
vlapic_set_local_intr(struct acrn_vm *vm, uint16_t vcpu_id_arg, uint32_t vector)
{
struct acrn_vlapic *vlapic;
uint64_t dmask = 0UL;
@@ -2011,7 +2011,7 @@ vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id_arg, uint32_t vector)
* @pre vm != NULL
*/
int
vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
vlapic_intr_msi(struct acrn_vm *vm, uint64_t addr, uint64_t msg)
{
uint32_t delmode, vec;
uint32_t dest;
@@ -2095,7 +2095,7 @@ static inline uint32_t x2apic_msr_to_regoff(uint32_t msr)
*/
static int
vlapic_x2apic_pt_icr_access(struct vm *vm, uint64_t val)
vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
{
uint64_t apic_id = (uint32_t) (val >> 32U);
uint32_t icr_low = val;


@@ -11,7 +11,7 @@
/* Local variables */
static struct vm vm_array[CONFIG_MAX_VM_NUM] __aligned(CPU_PAGE_SIZE);
static struct acrn_vm vm_array[CONFIG_MAX_VM_NUM] __aligned(CPU_PAGE_SIZE);
static uint64_t vmid_bitmap;
@@ -30,7 +30,7 @@ static inline uint16_t alloc_vm_id(void)
return INVALID_VM_ID;
}
static inline void free_vm_id(const struct vm *vm)
static inline void free_vm_id(const struct acrn_vm *vm)
{
bitmap_clear_lock(vm->vm_id, &vmid_bitmap);
}
@@ -43,7 +43,7 @@ static inline bool is_vm_valid(uint16_t vm_id)
/* return a pointer to the virtual machine structure associated with
* this VM ID
*/
struct vm *get_vm_from_vmid(uint16_t vm_id)
struct acrn_vm *get_vm_from_vmid(uint16_t vm_id)
{
if (is_vm_valid(vm_id)) {
return &vm_array[vm_id];
@@ -55,9 +55,9 @@ struct vm *get_vm_from_vmid(uint16_t vm_id)
/**
* @pre vm_desc != NULL && rtn_vm != NULL
*/
int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
int create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
{
struct vm *vm;
struct acrn_vm *vm;
int status;
uint16_t vm_id;
@@ -74,7 +74,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
/* Allocate memory for virtual machine */
vm = &vm_array[vm_id];
(void)memset((void *)vm, 0U, sizeof(struct vm));
(void)memset((void *)vm, 0U, sizeof(struct acrn_vm));
vm->vm_id = vm_id;
#ifdef CONFIG_PARTITION_MODE
/* Map Virtual Machine to its VM Description */
@@ -185,7 +185,7 @@ err:
/*
* @pre vm != NULL
*/
int shutdown_vm(struct vm *vm)
int shutdown_vm(struct acrn_vm *vm)
{
int status = 0;
uint16_t i;
@@ -233,7 +233,7 @@ int shutdown_vm(struct vm *vm)
/**
* * @pre vm != NULL
*/
int start_vm(struct vm *vm)
int start_vm(struct acrn_vm *vm)
{
struct acrn_vcpu *vcpu = NULL;
@@ -249,7 +249,7 @@ int start_vm(struct vm *vm)
/**
* * @pre vm != NULL
*/
int reset_vm(struct vm *vm)
int reset_vm(struct acrn_vm *vm)
{
int i;
struct acrn_vcpu *vcpu = NULL;
@@ -276,7 +276,7 @@ int reset_vm(struct vm *vm)
/**
* * @pre vm != NULL
*/
void pause_vm(struct vm *vm)
void pause_vm(struct acrn_vm *vm)
{
uint16_t i;
struct acrn_vcpu *vcpu = NULL;
@@ -295,7 +295,7 @@ void pause_vm(struct vm *vm)
/**
* * @pre vm != NULL
*/
void resume_vm(struct vm *vm)
void resume_vm(struct acrn_vm *vm)
{
uint16_t i;
struct acrn_vcpu *vcpu = NULL;
@@ -321,7 +321,7 @@ void resume_vm(struct vm *vm)
*
* @pre vm != NULL
*/
void resume_vm_from_s3(struct vm *vm, uint32_t wakeup_vec)
void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec)
{
struct acrn_vcpu *bsp = vcpu_from_vid(vm, 0U);
@@ -344,7 +344,7 @@ int prepare_vm(uint16_t pcpu_id)
{
int ret = 0;
uint16_t i;
struct vm *vm = NULL;
struct acrn_vm *vm = NULL;
struct vm_description *vm_desc = NULL;
bool is_vm_bsp;
@@ -385,7 +385,7 @@ int prepare_vm0(void)
{
int err;
uint16_t i;
struct vm *vm = NULL;
struct acrn_vm *vm = NULL;
struct vm_description vm0_desc;
(void)memset((void *)&vm0_desc, 0U, sizeof(vm0_desc));


@@ -15,7 +15,7 @@
int vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
{
int32_t ret = -EACCES;
struct vm *vm = vcpu->vm;
struct acrn_vm *vm = vcpu->vm;
/* hypercall ID from guest*/
uint64_t hypcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8);
/* hypercall param1 from guest*/


@@ -196,7 +196,7 @@ hv_emulate_pio(const struct acrn_vcpu *vcpu, struct io_request *io_req)
int32_t status = -ENODEV;
uint16_t port, size;
uint32_t mask;
struct vm *vm = vcpu->vm;
struct acrn_vm *vm = vcpu->vm;
struct pio_request *pio_req = &io_req->reqs.pio;
struct vm_io_handler *handler;
@@ -394,7 +394,7 @@ int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu)
return status;
}
static void register_io_handler(struct vm *vm, struct vm_io_handler *hdlr)
static void register_io_handler(struct acrn_vm *vm, struct vm_io_handler *hdlr)
{
if (vm->arch_vm.io_handler != NULL) {
hdlr->next = vm->arch_vm.io_handler;
@@ -403,7 +403,7 @@ static void register_io_handler(struct vm *vm, struct vm_io_handler *hdlr)
vm->arch_vm.io_handler = hdlr;
}
static void empty_io_handler_list(struct vm *vm)
static void empty_io_handler_list(struct acrn_vm *vm)
{
struct vm_io_handler *handler = vm->arch_vm.io_handler;
struct vm_io_handler *tmp;
@@ -421,7 +421,7 @@ static void empty_io_handler_list(struct vm *vm)
*
* @param vm The VM whose I/O bitmaps and handlers are to be freed
*/
void free_io_emulation_resource(struct vm *vm)
void free_io_emulation_resource(struct acrn_vm *vm)
{
empty_io_handler_list(vm);
}
@@ -436,7 +436,7 @@ void free_io_emulation_resource(struct vm *vm)
* @param port_address The start address of the port I/O range
* @param nbytes The size of the range, in bytes
*/
void allow_guest_pio_access(struct vm *vm, uint16_t port_address,
void allow_guest_pio_access(struct acrn_vm *vm, uint16_t port_address,
uint32_t nbytes)
{
uint16_t address = port_address;
@@ -450,7 +450,7 @@ void allow_guest_pio_access(struct vm *vm, uint16_t port_address,
}
}
static void deny_guest_pio_access(struct vm *vm, uint16_t port_address,
static void deny_guest_pio_access(struct acrn_vm *vm, uint16_t port_address,
uint32_t nbytes)
{
uint16_t address = port_address;
@@ -490,7 +490,7 @@ static struct vm_io_handler *create_io_handler(uint32_t port, uint32_t len,
*
* @param vm The VM whose I/O bitmap is to be initialized
*/
void setup_io_bitmap(struct vm *vm)
void setup_io_bitmap(struct acrn_vm *vm)
{
if (is_vm0(vm)) {
(void)memset(vm->arch_vm.io_bitmap, 0x00U, CPU_PAGE_SIZE * 2);
@@ -508,7 +508,7 @@ void setup_io_bitmap(struct vm *vm)
* @param io_read_fn_ptr The handler for emulating reads from the given range
* @param io_write_fn_ptr The handler for emulating writes to the given range
*/
void register_io_emulation_handler(struct vm *vm, const struct vm_io_range *range,
void register_io_emulation_handler(struct acrn_vm *vm, const struct vm_io_range *range,
io_read_fn_t io_read_fn_ptr,
io_write_fn_t io_write_fn_ptr)
{
@@ -543,7 +543,7 @@ void register_io_emulation_handler(struct vm *vm, const struct vm_io_range *rang
* @return 0 - Registration succeeds
* @return -EINVAL - \p read_write is NULL, \p end is not larger than \p start or \p vm has been launched
*/
int register_mmio_emulation_handler(struct vm *vm,
int register_mmio_emulation_handler(struct acrn_vm *vm,
hv_mem_io_handler_t read_write, uint64_t start,
uint64_t end, void *handler_private_data)
{
@@ -600,7 +600,7 @@ int register_mmio_emulation_handler(struct vm *vm,
* @param start The base address of the range the to-be-unregistered handler is for
* @param end The end of the range (exclusive) the to-be-unregistered handler is for
*/
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
void unregister_mmio_emulation_handler(struct acrn_vm *vm, uint64_t start,
uint64_t end)
{
struct list_head *pos, *tmp;


@@ -291,7 +291,7 @@ void init_paging(void)
sanitize_pte((uint64_t *)sanitized_page);
}
bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg)
bool check_continuous_hpa(struct acrn_vm *vm, uint64_t gpa_arg, uint64_t size_arg)
{
uint64_t curr_hpa;
uint64_t next_hpa;


@@ -125,7 +125,7 @@ void init_mtrr(struct acrn_vcpu *vcpu)
}
}
static uint32_t update_ept(struct vm *vm, uint64_t start,
static uint32_t update_ept(struct acrn_vm *vm, uint64_t start,
uint64_t size, uint8_t type)
{
uint64_t attr;


@@ -135,7 +135,7 @@ static inline struct page *ept_get_pt_page(const union pgtable_pages_info *info,
return page;
}
void init_ept_mem_ops(struct vm *vm)
void init_ept_mem_ops(struct acrn_vm *vm)
{
uint16_t vm_id = vm->vm_id;
if (vm_id != 0U) {


@@ -87,7 +87,7 @@ static uint32_t acpi_gas_read(const struct acpi_generic_address *gas)
return ret;
}
void do_acpi_s3(struct vm *vm, uint32_t pm1a_cnt_val,
void do_acpi_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val,
uint32_t pm1b_cnt_val)
{
uint32_t s1, s2;
@@ -123,7 +123,7 @@ void do_acpi_s3(struct vm *vm, uint32_t pm1a_cnt_val,
}
}
int enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
int enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val,
uint32_t pm1b_cnt_val)
{
uint64_t pmain_entry_saved;


@@ -57,7 +57,7 @@ static struct trusty_key_info g_key_info = {
* @param gpa_rebased gpa rebased to offset xxx (511G_OFFSET)
*
*/
static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
uint64_t size, uint64_t gpa_rebased)
{
uint64_t nworld_pml4e;
@@ -68,7 +68,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
uint64_t table_present = EPT_RWX;
uint64_t pdpte, *dest_pdpte_p, *src_pdpte_p;
void *sub_table_addr, *pml4_base;
struct vm *vm0 = get_vm_from_vmid(0U);
struct acrn_vm *vm0 = get_vm_from_vmid(0U);
uint16_t i;
if ((vm->sworld_control.flag.supported == 0UL)
@@ -148,9 +148,9 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
vm->sworld_control.sworld_memory.length = size;
}
void destroy_secure_world(struct vm *vm, bool need_clr_mem)
void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
{
struct vm *vm0 = get_vm_from_vmid(0U);
struct acrn_vm *vm0 = get_vm_from_vmid(0U);
uint64_t hpa = vm->sworld_control.sworld_memory.base_hpa;
uint64_t gpa_sos = vm->sworld_control.sworld_memory.base_gpa_in_sos;
uint64_t gpa_uos = vm->sworld_control.sworld_memory.base_gpa_in_uos;
@@ -402,7 +402,7 @@ bool initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param)
{
uint64_t trusty_entry_gpa, trusty_base_gpa, trusty_base_hpa;
uint32_t trusty_mem_size;
struct vm *vm = vcpu->vm;
struct acrn_vm *vm = vcpu->vm;
struct trusty_boot_param boot_param;
(void)memset(&boot_param, 0U, sizeof(boot_param));


@@ -150,7 +150,7 @@ static int vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
static int vcpu_do_pending_extint(const struct acrn_vcpu *vcpu)
{
struct vm *vm;
struct acrn_vm *vm;
struct acrn_vcpu *primary;
uint32_t vector;


@@ -762,7 +762,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
{
uint32_t value32;
uint64_t value64;
struct vm *vm = vcpu->vm;
struct acrn_vm *vm = vcpu->vm;
/* Log messages to show initializing VMX execution controls */
pr_dbg("*****************************");


@@ -1240,7 +1240,7 @@ int init_iommu(void)
return ret;
}
void init_iommu_vm0_domain(struct vm *vm0)
void init_iommu_vm0_domain(struct acrn_vm *vm0)
{
uint16_t bus;
uint16_t devfun;


@@ -105,7 +105,7 @@ fail:
* return value:
* true if parse successfully, otherwise false.
*/
bool abl_seed_parse(struct vm *vm, char *cmdline, char *out_arg, uint32_t out_len)
bool abl_seed_parse(struct acrn_vm *vm, char *cmdline, char *out_arg, uint32_t out_len)
{
char *arg, *arg_end;
char *param;


@@ -17,7 +17,7 @@
#define MAX_BOOT_PARAMS_LEN 64U
#ifdef CONFIG_PARTITION_MODE
int init_vm_boot_info(struct vm *vm)
int init_vm_boot_info(struct acrn_vm *vm)
{
struct multiboot_module *mods = NULL;
struct multiboot_info *mbi = NULL;
@@ -72,7 +72,7 @@ int init_vm_boot_info(struct vm *vm)
static char kernel_cmdline[MEM_2K];
/* now modules support: FIRMWARE & RAMDISK & SeedList */
static void parse_other_modules(struct vm *vm,
static void parse_other_modules(struct acrn_vm *vm,
const struct multiboot_module *mods, uint32_t mods_count)
{
uint32_t i;
@@ -164,7 +164,7 @@ static void *get_kernel_load_addr(void *kernel_src_addr)
* @pre vm != NULL
* @pre is_vm0(vm) == true
*/
int init_vm_boot_info(struct vm *vm)
int init_vm_boot_info(struct acrn_vm *vm)
{
struct multiboot_module *mods = NULL;
struct multiboot_info *mbi = NULL;


@@ -132,7 +132,7 @@ fail:
* return value:
* true if parse successfully, otherwise false.
*/
bool sbl_seed_parse(struct vm *vm, char *cmdline, char *out_arg, uint32_t out_len)
bool sbl_seed_parse(struct acrn_vm *vm, char *cmdline, char *out_arg, uint32_t out_len)
{
char *arg, *arg_end;
char *param;


@@ -36,7 +36,7 @@ void efi_spurious_handler(int vector)
return;
}
int uefi_sw_loader(struct vm *vm)
int uefi_sw_loader(struct acrn_vm *vm)
{
int ret = 0;
struct acrn_vcpu *vcpu = get_primary_vcpu(vm);


@@ -28,7 +28,7 @@ bool is_hypercall_from_ring0(void)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_sos_offline_cpu(struct vm *vm, uint64_t lapicid)
int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid)
{
struct acrn_vcpu *vcpu;
int i;
@@ -53,7 +53,7 @@ int32_t hcall_sos_offline_cpu(struct vm *vm, uint64_t lapicid)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_get_api_version(struct vm *vm, uint64_t param)
int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param)
{
struct hc_api_version version;
@@ -71,10 +71,10 @@ int32_t hcall_get_api_version(struct vm *vm, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_create_vm(struct vm *vm, uint64_t param)
int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
{
int32_t ret;
struct vm *target_vm = NULL;
struct acrn_vm *target_vm = NULL;
struct acrn_create_vm cv;
struct vm_description vm_desc;
@@ -110,7 +110,7 @@ int32_t hcall_create_vm(struct vm *vm, uint64_t param)
int32_t hcall_destroy_vm(uint16_t vmid)
{
int32_t ret;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -123,7 +123,7 @@ int32_t hcall_destroy_vm(uint16_t vmid)
int32_t hcall_start_vm(uint16_t vmid)
{
int32_t ret;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -139,7 +139,7 @@ int32_t hcall_start_vm(uint16_t vmid)
int32_t hcall_pause_vm(uint16_t vmid)
{
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -153,12 +153,12 @@ int32_t hcall_pause_vm(uint16_t vmid)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_create_vcpu(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_create_vcpu(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret;
uint16_t pcpu_id;
struct acrn_create_vcpu cv;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if ((target_vm == NULL) || (param == 0U)) {
return -1;
@@ -182,7 +182,7 @@ int32_t hcall_create_vcpu(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_reset_vm(uint16_t vmid)
{
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if ((target_vm == NULL) || is_vm0(target_vm)) {
return -1;
@@ -194,9 +194,9 @@ int32_t hcall_reset_vm(uint16_t vmid)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_set_vcpu_regs(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_set_vcpu_regs vcpu_regs;
struct acrn_vcpu *vcpu;
@@ -228,11 +228,11 @@ int32_t hcall_set_vcpu_regs(struct vm *vm, uint16_t vmid, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_set_irqline(const struct vm *vm, uint16_t vmid,
int32_t hcall_set_irqline(const struct acrn_vm *vm, uint16_t vmid,
const struct acrn_irqline_ops *ops)
{
uint32_t irq_pic;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -EINVAL;
@@ -261,11 +261,11 @@ int32_t hcall_set_irqline(const struct vm *vm, uint16_t vmid,
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_inject_msi(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret;
struct acrn_msi_entry msi;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -284,11 +284,11 @@ int32_t hcall_inject_msi(struct vm *vm, uint16_t vmid, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
uint64_t hpa;
struct acrn_set_ioreq_buffer iobuf;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
union vhm_request_buffer *req_buf;
uint16_t i;
@@ -327,7 +327,7 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
{
struct acrn_vcpu *vcpu;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
/* make sure we have set req_buf */
if ((target_vm == NULL) || (target_vm->sw.io_shared_page == NULL)) {
@@ -353,8 +353,8 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
/**
*@pre Pointer vm shall point to VM0
*/
static int32_t local_set_vm_memory_region(struct vm *vm,
struct vm *target_vm, const struct vm_memory_region *region)
static int32_t local_set_vm_memory_region(struct acrn_vm *vm,
struct acrn_vm *target_vm, const struct vm_memory_region *region)
{
uint64_t hpa, base_paddr, gpa_end;
uint64_t prot;
@@ -433,10 +433,10 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_set_vm_memory_region(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_set_vm_memory_region(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
struct vm_memory_region region;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -EINVAL;
@@ -461,11 +461,11 @@ int32_t hcall_set_vm_memory_region(struct vm *vm, uint16_t vmid, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param)
int32_t hcall_set_vm_memory_regions(struct acrn_vm *vm, uint64_t param)
{
struct set_regions set_regions;
struct vm_memory_region *regions;
struct vm *target_vm;
struct acrn_vm *target_vm;
uint32_t idx;
@@ -505,7 +505,7 @@ int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
static int32_t write_protect_page(struct vm *vm,const struct wp_data *wp)
static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
{
uint64_t hpa, base_paddr;
uint64_t prot_set;
@@ -540,10 +540,10 @@ static int32_t write_protect_page(struct vm *vm,const struct wp_data *wp)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_write_protect_page(struct vm *vm, uint16_t vmid, uint64_t wp_gpa)
int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_gpa)
{
struct wp_data wp;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -EINVAL;
@@ -567,11 +567,11 @@ int32_t hcall_write_protect_page(struct vm *vm, uint16_t vmid, uint64_t wp_gpa)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_gpa_to_hpa(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct vm_gpa2hpa v_gpa2hpa;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -600,11 +600,11 @@ int32_t hcall_gpa_to_hpa(struct vm *vm, uint16_t vmid, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_assign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret;
uint16_t bdf;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
pr_err("%s, vm is null\n", __func__);
@@ -641,11 +641,11 @@ int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_deassign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_deassign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
uint16_t bdf;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -664,11 +664,11 @@ int32_t hcall_deassign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret;
struct hc_ptdev_irq irq;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -705,11 +705,11 @@ int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
*@pre Pointer vm shall point to VM0
*/
int32_t
hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
struct hc_ptdev_irq irq;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -751,7 +751,7 @@ hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
int32_t hcall_setup_sbuf(struct acrn_vm *vm, uint64_t param)
{
struct sbuf_setup_param ssp;
uint64_t *hva;
@@ -772,7 +772,7 @@ int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
return sbuf_share_setup(ssp.pcpu_id, ssp.sbuf_id, hva);
}
#else
int32_t hcall_setup_sbuf(__unused struct vm *vm, __unused uint64_t param)
int32_t hcall_setup_sbuf(__unused struct acrn_vm *vm, __unused uint64_t param)
{
return -ENODEV;
}
@@ -782,7 +782,7 @@ int32_t hcall_setup_sbuf(__unused struct vm *vm, __unused uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_setup_hv_npk_log(struct vm *vm, uint64_t param)
int32_t hcall_setup_hv_npk_log(struct acrn_vm *vm, uint64_t param)
{
struct hv_npk_log_param npk_param;
@@ -803,7 +803,7 @@ int32_t hcall_setup_hv_npk_log(struct vm *vm, uint64_t param)
return 0;
}
#else
int32_t hcall_setup_hv_npk_log(__unused struct vm *vm, __unused uint64_t param)
int32_t hcall_setup_hv_npk_log(__unused struct acrn_vm *vm, __unused uint64_t param)
{
return -ENODEV;
}
@@ -812,10 +812,10 @@ int32_t hcall_setup_hv_npk_log(__unused struct vm *vm, __unused uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param)
{
uint16_t target_vm_id;
struct vm *target_vm;
struct acrn_vm *target_vm;
target_vm_id = (uint16_t)((cmd & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT);
target_vm = get_vm_from_vmid(target_vm_id);
@@ -910,11 +910,11 @@ int32_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_vm_intr_monitor(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
struct acrn_intr_monitor *intr_hdr;
uint64_t hpa;
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
@@ -955,7 +955,7 @@ int32_t hcall_vm_intr_monitor(struct vm *vm, uint16_t vmid, uint64_t param)
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_set_callback_vector(const struct vm *vm, uint64_t param)
int32_t hcall_set_callback_vector(const struct acrn_vm *vm, uint64_t param)
{
if (!is_vm0(vm)) {
pr_err("%s: Targeting to service vm", __func__);


@@ -15,7 +15,7 @@ static void fire_vhm_interrupt(void)
* use vLAPIC to inject vector to SOS vcpu 0 if vlapic is enabled
* otherwise, send IPI hardcoded to BOOT_CPU_ID
*/
struct vm *vm0;
struct acrn_vm *vm0;
struct acrn_vcpu *vcpu;
vm0 = get_vm_from_vmid(0U);


@@ -45,7 +45,7 @@ static void ptdev_intr_delay_callback(void *data)
}
struct ptdev_remapping_info*
ptdev_dequeue_softirq(struct vm *vm)
ptdev_dequeue_softirq(struct acrn_vm *vm)
{
uint64_t rflags;
struct ptdev_remapping_info *entry = NULL;
@@ -75,7 +75,7 @@ ptdev_dequeue_softirq(struct vm *vm)
/* require ptdev_lock protect */
struct ptdev_remapping_info *
alloc_entry(struct vm *vm, uint32_t intr_type)
alloc_entry(struct acrn_vm *vm, uint32_t intr_type)
{
struct ptdev_remapping_info *entry;
@@ -120,7 +120,7 @@ release_entry(struct ptdev_remapping_info *entry)
/* require ptdev_lock protect */
static void
release_all_entries(const struct vm *vm)
release_all_entries(const struct acrn_vm *vm)
{
struct ptdev_remapping_info *entry;
struct list_head *pos, *tmp;
@@ -204,7 +204,7 @@ void ptdev_init(void)
register_softirq(SOFTIRQ_PTDEV, ptdev_softirq);
}
void ptdev_release_all_entries(const struct vm *vm)
void ptdev_release_all_entries(const struct acrn_vm *vm)
{
/* VM already down */
spinlock_obtain(&ptdev_lock);
@@ -212,7 +212,7 @@ void ptdev_release_all_entries(const struct vm *vm)
spinlock_release(&ptdev_lock);
}
uint32_t get_vm_ptdev_intr_data(const struct vm *target_vm, uint64_t *buffer,
uint32_t get_vm_ptdev_intr_data(const struct acrn_vm *target_vm, uint64_t *buffer,
uint32_t buffer_cnt)
{
uint32_t index = 0U;


@@ -74,7 +74,7 @@ int32_t hcall_initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param)
int64_t hcall_save_restore_sworld_ctx(struct acrn_vcpu *vcpu)
{
struct vm *vm = vcpu->vm;
struct acrn_vm *vm = vcpu->vm;
if (vm->sworld_control.flag.supported == 0UL) {
dev_dbg(ACRN_DBG_TRUSTY_HYCALL,


@@ -38,7 +38,7 @@ static uint32_t create_e820_table(struct e820_entry *param_e820)
}
#endif
static void prepare_bsp_gdt(struct vm *vm)
static void prepare_bsp_gdt(struct acrn_vm *vm)
{
size_t gdt_len;
uint64_t gdt_base_hpa;
@@ -57,7 +57,7 @@ static void prepare_bsp_gdt(struct vm *vm)
return;
}
static uint64_t create_zero_page(struct vm *vm)
static uint64_t create_zero_page(struct acrn_vm *vm)
{
struct zero_page *zeropage;
struct sw_linux *sw_linux = &(vm->sw.linux_info);
@@ -102,7 +102,7 @@ static uint64_t create_zero_page(struct vm *vm)
return gpa;
}
int general_sw_loader(struct vm *vm)
int general_sw_loader(struct acrn_vm *vm)
{
int32_t ret = 0;
void *hva;


@@ -14,7 +14,7 @@
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_profiling_ops(struct vm *vm, uint64_t cmd, uint64_t param)
int32_t hcall_profiling_ops(struct acrn_vm *vm, uint64_t cmd, uint64_t param)
{
int32_t ret;
switch (cmd) {


@@ -806,7 +806,7 @@ void profiling_stop_pmu(void)
/*
* Performs MSR operations on all the CPU's
*/
int32_t profiling_msr_ops_all_cpus(struct vm *vm, uint64_t addr)
int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
{
uint16_t i;
struct profiling_msr_ops_list msr_list[phys_cpu_num];
@@ -839,9 +839,9 @@ int32_t profiling_msr_ops_all_cpus(struct vm *vm, uint64_t addr)
/*
* Generate VM info list
*/
int32_t profiling_vm_list_info(struct vm *vm, uint64_t addr)
int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
{
struct vm *tmp_vm;
struct acrn_vm *tmp_vm;
struct acrn_vcpu *vcpu;
int32_t vm_idx;
uint16_t i, j;
@@ -905,7 +905,7 @@ int32_t profiling_vm_list_info(struct vm *vm, uint64_t addr)
/*
* Sep/socwatch profiling version
*/
int32_t profiling_get_version_info(struct vm *vm, uint64_t addr)
int32_t profiling_get_version_info(struct acrn_vm *vm, uint64_t addr)
{
struct profiling_version_info ver_info;
@@ -939,7 +939,7 @@ int32_t profiling_get_version_info(struct vm *vm, uint64_t addr)
/*
* Gets type of profiling - sep/socwatch
*/
int32_t profiling_get_control(struct vm *vm, uint64_t addr)
int32_t profiling_get_control(struct acrn_vm *vm, uint64_t addr)
{
struct profiling_control prof_control;
@@ -977,7 +977,7 @@ int32_t profiling_get_control(struct vm *vm, uint64_t addr)
/*
* Update the profiling type based on control switch
*/
int32_t profiling_set_control(struct vm *vm, uint64_t addr)
int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
{
uint64_t old_switch;
uint64_t new_switch;
@@ -1092,7 +1092,7 @@ int32_t profiling_set_control(struct vm *vm, uint64_t addr)
/*
* Configure PMI on all cpus
*/
int32_t profiling_configure_pmi(struct vm *vm, uint64_t addr)
int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
{
uint16_t i;
struct profiling_pmi_config pmi_config;
@@ -1169,7 +1169,7 @@ int32_t profiling_configure_pmi(struct vm *vm, uint64_t addr)
/*
* Configure for VM-switch data on all cpus
*/
int32_t profiling_configure_vmsw(struct vm *vm, uint64_t addr)
int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
{
uint16_t i;
int32_t ret = 0;
@@ -1233,7 +1233,7 @@ int32_t profiling_configure_vmsw(struct vm *vm, uint64_t addr)
/*
* Get the physical cpu id
*/
int32_t profiling_get_pcpu_id(struct vm *vm, uint64_t addr)
int32_t profiling_get_pcpu_id(struct acrn_vm *vm, uint64_t addr)
{
struct profiling_pcpuid pcpuid;


@@ -526,7 +526,7 @@ static int shell_cmd_help(__unused int argc, __unused char **argv)
static int shell_list_vm(__unused int argc, __unused char **argv)
{
char temp_str[MAX_STR_SIZE];
struct vm *vm;
struct acrn_vm *vm;
uint16_t idx;
char state[32];
@@ -568,7 +568,7 @@ static int shell_list_vm(__unused int argc, __unused char **argv)
static int shell_list_vcpu(__unused int argc, __unused char **argv)
{
char temp_str[MAX_STR_SIZE];
struct vm *vm;
struct acrn_vm *vm;
struct acrn_vcpu *vcpu;
char state[32];
uint16_t i;
@@ -623,7 +623,7 @@ static int shell_vcpu_dumpreg(int argc, char **argv)
int status = 0;
uint16_t vm_id;
uint16_t vcpu_id;
struct vm *vm;
struct acrn_vm *vm;
struct acrn_vcpu *vcpu;
uint64_t mask = 0UL;
struct vcpu_dump dump;
@@ -722,7 +722,7 @@ static int shell_to_sos_console(__unused int argc, __unused char **argv)
char temp_str[TEMP_STR_SIZE];
uint16_t guest_no = 0U;
struct vm *vm;
struct acrn_vm *vm;
struct acrn_vuart *vu;
#ifdef CONFIG_PARTITION_MODE
struct vm_description *vm_desc;


@@ -154,7 +154,7 @@ static void vuart_toggle_intr(const struct acrn_vuart *vu)
vioapic_set_irq(vu->vm, COM1_IRQ, operation);
}
static void vuart_write(struct vm *vm, uint16_t offset_arg,
static void vuart_write(struct acrn_vm *vm, uint16_t offset_arg,
__unused size_t width, uint32_t value)
{
uint16_t offset = offset_arg;
@@ -240,7 +240,7 @@ done:
vuart_unlock(vu);
}
static uint32_t vuart_read(struct vm *vm, uint16_t offset_arg,
static uint32_t vuart_read(struct acrn_vm *vm, uint16_t offset_arg,
__unused size_t width)
{
uint16_t offset = offset_arg;
@@ -319,7 +319,7 @@ done:
return (uint32_t)reg;
}
static void vuart_register_io_handler(struct vm *vm)
static void vuart_register_io_handler(struct acrn_vm *vm)
{
struct vm_io_range range = {
.flags = IO_ATTR_RW,
@@ -370,7 +370,7 @@ void vuart_console_rx_chars(struct acrn_vuart *vu)
struct acrn_vuart *vuart_console_active(void)
{
#ifdef CONFIG_PARTITION_MODE
struct vm *vm;
struct acrn_vm *vm;
if (vuart_vmid == -1) {
return NULL;
@@ -378,7 +378,7 @@ struct acrn_vuart *vuart_console_active(void)
vm = get_vm_from_vmid(vuart_vmid);
#else
struct vm *vm = get_vm_from_vmid(0U);
struct acrn_vm *vm = get_vm_from_vmid(0U);
#endif
if (vm != NULL) {
@@ -391,7 +391,7 @@ struct acrn_vuart *vuart_console_active(void)
return NULL;
}
void vuart_init(struct vm *vm)
void vuart_init(struct acrn_vm *vm)
{
uint32_t divisor;
struct acrn_vuart *vu = vm_vuart(vm);


@@ -126,7 +126,7 @@ vioapic_set_pinstate(struct acrn_vioapic *vioapic, uint16_t pin, uint32_t level)
* @return void
*/
void
vioapic_set_irq_nolock(struct vm *vm, uint32_t irq, uint32_t operation)
vioapic_set_irq_nolock(struct acrn_vm *vm, uint32_t irq, uint32_t operation)
{
struct acrn_vioapic *vioapic;
uint16_t pin = (uint16_t)irq;
@@ -169,7 +169,7 @@ vioapic_set_irq_nolock(struct vm *vm, uint32_t irq, uint32_t operation)
* @return void
*/
void
vioapic_set_irq(struct vm *vm, uint32_t irq, uint32_t operation)
vioapic_set_irq(struct acrn_vm *vm, uint32_t irq, uint32_t operation)
{
struct acrn_vioapic *vioapic = vm_ioapic(vm);
@@ -448,7 +448,7 @@ vioapic_mmio_rw(struct acrn_vioapic *vioapic, uint64_t gpa,
}
void
vioapic_process_eoi(struct vm *vm, uint32_t vector)
vioapic_process_eoi(struct acrn_vm *vm, uint32_t vector)
{
struct acrn_vioapic *vioapic;
uint32_t pin, pincount = vioapic_pincount(vm);
@@ -509,7 +509,7 @@ vioapic_reset(struct acrn_vioapic *vioapic)
}
void
vioapic_init(struct vm *vm)
vioapic_init(struct acrn_vm *vm)
{
vm->arch_vm.vioapic.vm = vm;
spinlock_init(&(vm->arch_vm.vioapic.mtx));
@@ -532,7 +532,7 @@ vioapic_cleanup(const struct acrn_vioapic *vioapic)
}
uint32_t
vioapic_pincount(const struct vm *vm)
vioapic_pincount(const struct acrn_vm *vm)
{
if (is_vm0(vm)) {
return REDIR_ENTRIES_HW;
@@ -543,7 +543,7 @@ vioapic_pincount(const struct vm *vm)
int vioapic_mmio_access_handler(struct io_request *io_req, void *handler_private_data)
{
struct vm *vm = (struct vm *)handler_private_data;
struct acrn_vm *vm = (struct acrn_vm *)handler_private_data;
struct acrn_vioapic *vioapic;
struct mmio_request *mmio = &io_req->reqs.mmio;
uint64_t gpa = mmio->address;
@@ -575,7 +575,7 @@ int vioapic_mmio_access_handler(struct io_request *io_req, void *handler_private
* @pre vm->arch_vm.vioapic != NULL
* @pre rte != NULL
*/
void vioapic_get_rte(struct vm *vm, uint32_t pin, union ioapic_rte *rte)
void vioapic_get_rte(struct acrn_vm *vm, uint32_t pin, union ioapic_rte *rte)
{
struct acrn_vioapic *vioapic;
@@ -591,7 +591,7 @@ void get_vioapic_info(char *str_arg, size_t str_max, uint16_t vmid)
union ioapic_rte rte;
uint32_t delmode, vector, dest;
bool level, phys, remote_irr, mask;
struct vm *vm = get_vm_from_vmid(vmid);
struct acrn_vm *vm = get_vm_from_vmid(vmid);
uint32_t pin, pincount;
if (vm == NULL) {


@@ -43,7 +43,7 @@ static int vmsi_remap(struct pci_vdev *vdev, bool enable)
{
struct ptdev_msi_info info;
union pci_bdf pbdf = vdev->pdev.bdf;
struct vm *vm = vdev->vpci->vm;
struct acrn_vm *vm = vdev->vpci->vm;
uint32_t capoff = vdev->msi.capoff;
uint32_t msgctrl, msgdata;
uint32_t addrlo, addrhi;


@@ -49,7 +49,7 @@ static struct pci_vdev *partition_mode_find_vdev(struct vpci *vpci, union pci_bd
return NULL;
}
static int partition_mode_vpci_init(struct vm *vm)
static int partition_mode_vpci_init(struct acrn_vm *vm)
{
struct vpci_vdev_array *vdev_array;
struct vpci *vpci = &vm->vpci;
@@ -73,7 +73,7 @@ static int partition_mode_vpci_init(struct vm *vm)
return 0;
}
static void partition_mode_vpci_deinit(struct vm *vm)
static void partition_mode_vpci_deinit(struct acrn_vm *vm)
{
struct vpci_vdev_array *vdev_array;
struct pci_vdev *vdev;


@@ -56,7 +56,7 @@ static int vdev_pt_init_validate(struct pci_vdev *vdev)
static int vdev_pt_init(struct pci_vdev *vdev)
{
int ret;
struct vm *vm = vdev->vpci->vm;
struct acrn_vm *vm = vdev->vpci->vm;
uint16_t pci_command;
ret = vdev_pt_init_validate(vdev);
@@ -89,7 +89,7 @@ static int vdev_pt_init(struct pci_vdev *vdev)
static int vdev_pt_deinit(struct pci_vdev *vdev)
{
int ret;
struct vm *vm = vdev->vpci->vm;
struct acrn_vm *vm = vdev->vpci->vm;
ret = unassign_iommu_device(vm->iommu, vdev->pdev.bdf.bits.b,
(uint8_t)(vdev->pdev.bdf.value & 0xFFU));
@@ -119,7 +119,7 @@ static int vdev_pt_cfgread(struct pci_vdev *vdev, uint32_t offset,
static void vdev_pt_remap_bar(struct pci_vdev *vdev, uint32_t idx,
uint32_t new_base)
{
struct vm *vm = vdev->vpci->vm;
struct acrn_vm *vm = vdev->vpci->vm;
if (vdev->bar[idx].base != 0UL) {
ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,


@@ -104,7 +104,7 @@ static void sharing_mode_cfgwrite(__unused struct vpci *vpci, union pci_bdf bdf,
}
}
static struct pci_vdev *alloc_pci_vdev(struct vm *vm, union pci_bdf bdf)
static struct pci_vdev *alloc_pci_vdev(struct acrn_vm *vm, union pci_bdf bdf)
{
struct pci_vdev *vdev;
@@ -124,7 +124,7 @@ static struct pci_vdev *alloc_pci_vdev(struct vm *vm, union pci_bdf bdf)
static void enumerate_pci_dev(uint16_t pbdf, void *cb_data)
{
struct vm *vm = (struct vm *)cb_data;
struct acrn_vm *vm = (struct acrn_vm *)cb_data;
struct pci_vdev *vdev;
vdev = alloc_pci_vdev(vm, (union pci_bdf)pbdf);
@@ -133,7 +133,7 @@ static void enumerate_pci_dev(uint16_t pbdf, void *cb_data)
}
}
static int sharing_mode_vpci_init(struct vm *vm)
static int sharing_mode_vpci_init(struct acrn_vm *vm)
{
struct pci_vdev *vdev;
uint32_t i, j;
@@ -165,7 +165,7 @@ static int sharing_mode_vpci_init(struct vm *vm)
return 0;
}
static void sharing_mode_vpci_deinit(__unused struct vm *vm)
static void sharing_mode_vpci_deinit(__unused struct acrn_vm *vm)
{
struct pci_vdev *vdev;
uint32_t i, j;
@@ -201,7 +201,7 @@ struct vpci_ops sharing_mode_vpci_ops = {
.cfgwrite = sharing_mode_cfgwrite,
};
void vpci_set_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf)
void vpci_set_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf)
{
struct pci_vdev *vdev;
@@ -218,10 +218,10 @@ void vpci_set_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf
vdev->pdev.bdf.value = pbdf;
}
void vpci_reset_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf)
void vpci_reset_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf)
{
struct pci_vdev *vdev;
struct vm *vm;
struct acrn_vm *vm;
vdev = sharing_mode_find_vdev((union pci_bdf)pbdf);
if (vdev == NULL) {


@@ -48,7 +48,7 @@ static void pci_cfg_clear_cache(struct pci_addr_info *pi)
pi->cached_enable = 0U;
}
static uint32_t pci_cfg_io_read(struct vm *vm, uint16_t addr, size_t bytes)
static uint32_t pci_cfg_io_read(struct acrn_vm *vm, uint16_t addr, size_t bytes)
{
uint32_t val = 0xFFFFFFFFU;
struct vpci *vpci = &vm->vpci;
@@ -82,7 +82,7 @@ static uint32_t pci_cfg_io_read(struct vm *vm, uint16_t addr, size_t bytes)
return val;
}
static void pci_cfg_io_write(struct vm *vm, uint16_t addr, size_t bytes,
static void pci_cfg_io_write(struct acrn_vm *vm, uint16_t addr, size_t bytes,
uint32_t val)
{
struct vpci *vpci = &vm->vpci;
@@ -114,7 +114,7 @@ static void pci_cfg_io_write(struct vm *vm, uint16_t addr, size_t bytes,
}
}
void vpci_init(struct vm *vm)
void vpci_init(struct acrn_vm *vm)
{
struct vpci *vpci = &vm->vpci;
struct vm_io_range pci_cfg_range = {
@@ -143,7 +143,7 @@ void vpci_init(struct vm *vm)
}
}
void vpci_cleanup(struct vm *vm)
void vpci_cleanup(struct acrn_vm *vm)
{
struct vpci *vpci = &vm->vpci;


@@ -448,7 +448,7 @@ static void vpic_set_pinstate(struct acrn_vpic *vpic, uint8_t pin,
*
* @return void
*/
void vpic_set_irq(struct vm *vm, uint32_t irq, uint32_t operation)
void vpic_set_irq(struct acrn_vm *vm, uint32_t irq, uint32_t operation)
{
struct acrn_vpic *vpic;
struct i8259_reg_state *i8259;
@@ -501,7 +501,7 @@ vpic_pincount(void)
* @pre vm->vpic != NULL
* @pre irq < NR_VPIC_PINS_TOTAL
*/
void vpic_get_irq_trigger(struct vm *vm, uint32_t irq,
void vpic_get_irq_trigger(struct acrn_vm *vm, uint32_t irq,
enum vpic_trigger *trigger)
{
struct acrn_vpic *vpic;
@@ -524,7 +524,7 @@ void vpic_get_irq_trigger(struct vm *vm, uint32_t irq,
*
* @return void.
*/
void vpic_pending_intr(struct vm *vm, uint32_t *vecptr)
void vpic_pending_intr(struct acrn_vm *vm, uint32_t *vecptr)
{
struct acrn_vpic *vpic;
struct i8259_reg_state *i8259;
@@ -587,7 +587,7 @@ static void vpic_pin_accepted(struct i8259_reg_state *i8259, uint8_t pin)
*
* @pre vm != NULL
*/
void vpic_intr_accepted(struct vm *vm, uint32_t vector)
void vpic_intr_accepted(struct acrn_vm *vm, uint32_t vector)
{
struct acrn_vpic *vpic;
uint8_t pin;
@@ -699,7 +699,7 @@ static int vpic_write(struct acrn_vpic *vpic, struct i8259_reg_state *i8259,
return error;
}
static int vpic_master_handler(struct vm *vm, bool in, uint16_t port,
static int vpic_master_handler(struct acrn_vm *vm, bool in, uint16_t port,
size_t bytes, uint32_t *eax)
{
struct acrn_vpic *vpic;
@@ -719,7 +719,7 @@ static int vpic_master_handler(struct vm *vm, bool in, uint16_t port,
return vpic_write(vpic, i8259, port, eax);
}
static uint32_t vpic_master_io_read(struct vm *vm, uint16_t addr, size_t width)
static uint32_t vpic_master_io_read(struct acrn_vm *vm, uint16_t addr, size_t width)
{
uint32_t val = 0U;
@@ -730,7 +730,7 @@ static uint32_t vpic_master_io_read(struct vm *vm, uint16_t addr, size_t width)
return val;
}
static void vpic_master_io_write(struct vm *vm, uint16_t addr, size_t width,
static void vpic_master_io_write(struct acrn_vm *vm, uint16_t addr, size_t width,
uint32_t v)
{
uint32_t val = v;
@@ -741,7 +741,7 @@ static void vpic_master_io_write(struct vm *vm, uint16_t addr, size_t width,
}
}
static int vpic_slave_handler(struct vm *vm, bool in, uint16_t port,
static int vpic_slave_handler(struct acrn_vm *vm, bool in, uint16_t port,
size_t bytes, uint32_t *eax)
{
struct acrn_vpic *vpic;
@@ -761,7 +761,7 @@ static int vpic_slave_handler(struct vm *vm, bool in, uint16_t port,
return vpic_write(vpic, i8259, port, eax);
}
static uint32_t vpic_slave_io_read(struct vm *vm, uint16_t addr, size_t width)
static uint32_t vpic_slave_io_read(struct acrn_vm *vm, uint16_t addr, size_t width)
{
uint32_t val = 0U;
@@ -772,7 +772,7 @@ static uint32_t vpic_slave_io_read(struct vm *vm, uint16_t addr, size_t width)
return val;
}
static void vpic_slave_io_write(struct vm *vm, uint16_t addr, size_t width,
static void vpic_slave_io_write(struct acrn_vm *vm, uint16_t addr, size_t width,
uint32_t v)
{
uint32_t val = v;
@@ -783,7 +783,7 @@ static void vpic_slave_io_write(struct vm *vm, uint16_t addr, size_t width,
}
}
static int vpic_elc_handler(struct vm *vm, bool in, uint16_t port, size_t bytes,
static int vpic_elc_handler(struct acrn_vm *vm, bool in, uint16_t port, size_t bytes,
uint32_t *eax)
{
struct acrn_vpic *vpic;
@@ -827,7 +827,7 @@ static int vpic_elc_handler(struct vm *vm, bool in, uint16_t port, size_t bytes,
return 0;
}
static uint32_t vpic_elc_io_read(struct vm *vm, uint16_t addr, size_t width)
static uint32_t vpic_elc_io_read(struct acrn_vm *vm, uint16_t addr, size_t width)
{
uint32_t val = 0U;
@@ -837,7 +837,7 @@ static uint32_t vpic_elc_io_read(struct vm *vm, uint16_t addr, size_t width)
return val;
}
static void vpic_elc_io_write(struct vm *vm, uint16_t addr, size_t width,
static void vpic_elc_io_write(struct acrn_vm *vm, uint16_t addr, size_t width,
uint32_t v)
{
uint32_t val = v;
@@ -848,7 +848,7 @@ static void vpic_elc_io_write(struct vm *vm, uint16_t addr, size_t width,
}
}
static void vpic_register_io_handler(struct vm *vm)
static void vpic_register_io_handler(struct acrn_vm *vm)
{
struct vm_io_range master_range = {
.flags = IO_ATTR_RW,
@@ -874,7 +874,7 @@ static void vpic_register_io_handler(struct vm *vm)
&vpic_elc_io_read, &vpic_elc_io_write);
}
void vpic_init(struct vm *vm)
void vpic_init(struct acrn_vm *vm)
{
struct acrn_vpic *vpic = vm_pic(vm);
vpic_register_io_handler(vm);

View File

@@ -42,7 +42,7 @@ static uint8_t cmos_get_reg_val(uint8_t addr)
return reg;
}
static uint32_t vrtc_read(struct vm *vm, uint16_t addr, __unused size_t width)
static uint32_t vrtc_read(struct acrn_vm *vm, uint16_t addr, __unused size_t width)
{
uint8_t reg;
uint8_t offset;
@@ -57,7 +57,7 @@ static uint32_t vrtc_read(struct vm *vm, uint16_t addr, __unused size_t width)
return reg;
}
static void vrtc_write(struct vm *vm, uint16_t addr, size_t width,
static void vrtc_write(struct acrn_vm *vm, uint16_t addr, size_t width,
uint32_t value)
{
@@ -69,7 +69,7 @@ static void vrtc_write(struct vm *vm, uint16_t addr, size_t width,
}
}
void vrtc_init(struct vm *vm)
void vrtc_init(struct acrn_vm *vm)
{
struct vm_io_range range = {
.flags = IO_ATTR_RW, .base = CMOS_ADDR_PORT, .len = 2U};

View File

@@ -7,6 +7,6 @@
#ifndef ABL_SEED_PARSE_H_
#define ABL_SEED_PARSE_H_
bool abl_seed_parse(struct vm *vm, char *cmdline, char *out_arg, uint32_t out_len);
bool abl_seed_parse(struct acrn_vm *vm, char *cmdline, char *out_arg, uint32_t out_len);
#endif /* ABL_SEED_PARSE_H_ */

View File

@@ -9,18 +9,18 @@
#include <ptdev.h>
void ptdev_intx_ack(struct vm *vm, uint8_t virt_pin,
void ptdev_intx_ack(struct acrn_vm *vm, uint8_t virt_pin,
enum ptdev_vpin_source vpin_src);
int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
int ptdev_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf,
uint16_t entry_nr, struct ptdev_msi_info *info);
int ptdev_intx_pin_remap(struct vm *vm, uint8_t virt_pin,
int ptdev_intx_pin_remap(struct acrn_vm *vm, uint8_t virt_pin,
enum ptdev_vpin_source vpin_src);
int ptdev_add_intx_remapping(struct vm *vm, uint8_t virt_pin, uint8_t phys_pin,
int ptdev_add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin, uint8_t phys_pin,
bool pic_pin);
void ptdev_remove_intx_remapping(const struct vm *vm, uint8_t virt_pin, bool pic_pin);
int ptdev_add_msix_remapping(struct vm *vm, uint16_t virt_bdf,
void ptdev_remove_intx_remapping(const struct acrn_vm *vm, uint8_t virt_pin, bool pic_pin);
int ptdev_add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf,
uint16_t phys_bdf, uint32_t vector_count);
void ptdev_remove_msix_remapping(const struct vm *vm, uint16_t virt_bdf,
void ptdev_remove_msix_remapping(const struct acrn_vm *vm, uint16_t virt_bdf,
uint32_t vector_count);
#endif /* ASSIGN_H */

View File

@@ -131,7 +131,7 @@ static inline void cpuid_subleaf(uint32_t leaf, uint32_t subleaf,
asm_cpuid(eax, ebx, ecx, edx);
}
int set_vcpuid_entries(struct vm *vm);
int set_vcpuid_entries(struct acrn_vm *vm);
void guest_cpuid(struct acrn_vcpu *vcpu,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);

View File

@@ -87,7 +87,7 @@ struct e820_mem_params {
uint64_t max_ram_blk_size;
};
int prepare_vm0_memmap_and_e820(struct vm *vm);
int prepare_vm0_memmap_and_e820(struct acrn_vm *vm);
uint64_t e820_alloc_low_memory(uint32_t size_arg);
/* Definition for a mem map lookup */
@@ -110,7 +110,7 @@ enum vm_paging_mode {
/*
* VM related APIs
*/
uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask);
uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask);
int gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa, uint32_t *err_code);
@@ -142,9 +142,9 @@ void init_msr_emulation(struct acrn_vcpu *vcpu);
struct run_context;
int vmx_vmrun(struct run_context *context, int ops, int ibrs);
int general_sw_loader(struct vm *vm);
int general_sw_loader(struct acrn_vm *vm);
typedef int (*vm_sw_loader_t)(struct vm *vm);
typedef int (*vm_sw_loader_t)(struct acrn_vm *vm);
extern vm_sw_loader_t vm_sw_loader;
/**
* @brief Data transferring between hypervisor and VM
@@ -170,7 +170,7 @@ extern vm_sw_loader_t vm_sw_loader;
* continuous
* @pre Pointer vm is non-NULL
*/
int copy_from_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
int copy_from_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
/**
* @brief Copy data from HV address space to VM GPA space
*
@@ -189,7 +189,7 @@ int copy_from_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
* continuous
* @pre Pointer vm is non-NULL
*/
int copy_to_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
int copy_to_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
/**
* @brief Copy data from VM GVA space to HV address space
*

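For orientation, a minimal sketch of how a caller uses the renamed copy helpers declared above; example_patch_guest_struct() and the example_blob layout are invented for illustration, and gpa is assumed to satisfy the @pre conditions (mapped, physically continuous):

/* Illustrative only: read a guest structure, set one flag, write it back. */
static int example_patch_guest_struct(struct acrn_vm *vm, uint64_t gpa)
{
	struct example_blob {            /* hypothetical guest-visible layout */
		uint32_t magic;
		uint32_t flags;
	} blob;

	if (copy_from_gpa(vm, &blob, gpa, (uint32_t)sizeof(blob)) != 0) {
		return -1;
	}
	blob.flags |= 1U;
	return copy_to_gpa(vm, &blob, gpa, (uint32_t)sizeof(blob));
}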
View File

@@ -7,9 +7,9 @@
#ifndef GUEST_PM_H
#define GUEST_PM_H
void vm_setup_cpu_state(struct vm *vm);
int vm_load_pm_s_state(struct vm *vm);
int validate_pstate(const struct vm *vm, uint64_t perf_ctl);
void register_pm1ab_handler(struct vm *vm);
void vm_setup_cpu_state(struct acrn_vm *vm);
int vm_load_pm_s_state(struct acrn_vm *vm);
int validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl);
void register_pm1ab_handler(struct acrn_vm *vm);
#endif /* PM_H */

View File

@@ -26,6 +26,6 @@ struct mptable_info;
extern struct mptable_info mptable_vm1;
extern struct mptable_info mptable_vm2;
int mptable_build(struct vm *vm);
int mptable_build(struct acrn_vm *vm);
#endif /* MPTABLE_H */

View File

@@ -219,13 +219,13 @@ struct acrn_vcpu_arch {
} __aligned(CPU_PAGE_SIZE);
struct vm;
struct acrn_vm;
struct acrn_vcpu {
/* Architecture specific definitions for this VCPU */
struct acrn_vcpu_arch arch;
uint16_t pcpu_id; /* Physical CPU ID of this VCPU */
uint16_t vcpu_id; /* virtual identifier for VCPU */
struct vm *vm; /* Reference to the VM this VCPU belongs to */
struct acrn_vm *vm; /* Reference to the VM this VCPU belongs to */
/* State of this VCPU before suspend */
volatile enum vcpu_state prev_state;
@@ -514,7 +514,7 @@ struct acrn_vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
*
* @return 0: vcpu created successfully, other values failed.
*/
int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_handle);
int create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle);
/**
* @brief run into non-root mode based on vcpu setting
@@ -586,7 +586,7 @@ void schedule_vcpu(struct acrn_vcpu *vcpu);
* @param[inout] vm pointer to vm data structure
* @param[in] pcpu_id which the vcpu will be mapped
*/
int prepare_vcpu(struct vm *vm, uint16_t pcpu_id);
int prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id);
void request_vcpu_pre_work(struct acrn_vcpu *vcpu, uint16_t pre_work_id);

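As a hedged sketch of the caller side, the loop below brings up one vCPU per bit of a hypothetical pcpu_bitmap; only the prepare_vcpu() prototype above is taken from the header, the bitmap and the error value are placeholders:

/* Sketch only: create one vCPU per selected physical CPU. */
static int example_prepare_vcpus(struct acrn_vm *vm, uint64_t pcpu_bitmap)
{
	uint16_t pcpu_id;

	for (pcpu_id = 0U; pcpu_id < 64U; pcpu_id++) {
		if (((pcpu_bitmap >> pcpu_id) & 1UL) != 0UL) {
			if (prepare_vcpu(vm, pcpu_id) != 0) {
				return -1;
			}
		}
	}
	return 0;
}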
View File

@@ -46,7 +46,7 @@
#define STATE_BITMAP_SIZE INT_DIV_ROUNDUP(REDIR_ENTRIES_HW, 64U)
struct acrn_vioapic {
struct vm *vm;
struct acrn_vm *vm;
spinlock_t mtx;
uint32_t id;
uint32_t ioregsel;
@@ -55,7 +55,7 @@ struct acrn_vioapic {
uint64_t pin_state[STATE_BITMAP_SIZE];
};
void vioapic_init(struct vm *vm);
void vioapic_init(struct acrn_vm *vm);
void vioapic_cleanup(const struct acrn_vioapic *vioapic);
void vioapic_reset(struct acrn_vioapic *vioapic);
@@ -79,7 +79,7 @@ void vioapic_reset(struct acrn_vioapic *vioapic);
*
* @return void
*/
void vioapic_set_irq(struct vm *vm, uint32_t irq, uint32_t operation);
void vioapic_set_irq(struct acrn_vm *vm, uint32_t irq, uint32_t operation);
/**
* @brief Set vIOAPIC IRQ line status.
@@ -95,12 +95,12 @@ void vioapic_set_irq(struct vm *vm, uint32_t irq, uint32_t operation);
* @pre irq < vioapic_pincount(vm)
* @return void
*/
void vioapic_set_irq_nolock(struct vm *vm, uint32_t irq, uint32_t operation);
void vioapic_set_irq_nolock(struct acrn_vm *vm, uint32_t irq, uint32_t operation);
void vioapic_update_tmr(struct acrn_vcpu *vcpu);
uint32_t vioapic_pincount(const struct vm *vm);
void vioapic_process_eoi(struct vm *vm, uint32_t vector);
void vioapic_get_rte(struct vm *vm, uint32_t pin, union ioapic_rte *rte);
uint32_t vioapic_pincount(const struct acrn_vm *vm);
void vioapic_process_eoi(struct acrn_vm *vm, uint32_t vector);
void vioapic_get_rte(struct acrn_vm *vm, uint32_t pin, union ioapic_rte *rte);
int vioapic_mmio_access_handler(struct io_request *io_req, void *handler_private_data);
#ifdef HV_DEBUG

View File

@@ -69,7 +69,7 @@ struct acrn_vlapic {
struct lapic_regs apic_page;
struct vlapic_pir_desc pir_desc;
struct vm *vm;
struct acrn_vm *vm;
struct acrn_vcpu *vcpu;
uint32_t esr_pending;
@@ -233,7 +233,7 @@ vlapic_intr_edge(struct acrn_vcpu *vcpu, uint32_t vector)
*
* @pre vm != NULL
*/
int vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id_arg, uint32_t vector);
int vlapic_set_local_intr(struct acrn_vm *vm, uint16_t vcpu_id_arg, uint32_t vector);
/**
* @brief Inject MSI to target VM.
@@ -247,9 +247,9 @@ int vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id_arg, uint32_t vector);
*
* @pre vm != NULL
*/
int vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg);
int vlapic_intr_msi(struct acrn_vm *vm, uint64_t addr, uint64_t msg);
void vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest,
void vlapic_deliver_intr(struct acrn_vm *vm, bool level, uint32_t dest,
bool phys, uint32_t delmode, uint32_t vec, bool rh);
/* Reset the trigger-mode bits for all vectors to be edge-triggered */
@@ -281,7 +281,7 @@ int apic_access_vmexit_handler(struct acrn_vcpu *vcpu);
int apic_write_vmexit_handler(struct acrn_vcpu *vcpu);
int veoi_vmexit_handler(struct acrn_vcpu *vcpu);
int tpr_below_threshold_vmexit_handler(__unused struct acrn_vcpu *vcpu);
void calcvdest(struct vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
void calcvdest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
/**
* @}

View File

@@ -130,7 +130,7 @@ struct vcpuid_entry {
uint32_t padding;
};
struct vm {
struct acrn_vm {
struct vm_arch arch_vm; /* Reference to this VM's arch information */
struct vm_hw_info hw; /* Reference to this VM's HW information */
struct vm_sw_info sw; /* Reference to SW associated with this VM */
@@ -197,7 +197,7 @@ struct vm_description {
#endif
};
static inline bool is_vm0(const struct vm *vm)
static inline bool is_vm0(const struct acrn_vm *vm)
{
return (vm->vm_id) == 0U;
}
@@ -205,7 +205,7 @@ static inline bool is_vm0(const struct vm *vm)
/*
* @pre vcpu_id < CONFIG_MAX_VCPUS_PER_VM
*/
static inline struct acrn_vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
static inline struct acrn_vcpu *vcpu_from_vid(struct acrn_vm *vm, uint16_t vcpu_id)
{
uint16_t i;
struct acrn_vcpu *vcpu;
@@ -218,7 +218,7 @@ static inline struct acrn_vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
return vcpu;
}
static inline struct acrn_vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id)
static inline struct acrn_vcpu *vcpu_from_pid(struct acrn_vm *vm, uint16_t pcpu_id)
{
uint16_t i;
struct acrn_vcpu *vcpu;
@@ -232,7 +232,7 @@ static inline struct acrn_vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id)
return NULL;
}
static inline struct acrn_vcpu *get_primary_vcpu(struct vm *vm)
static inline struct acrn_vcpu *get_primary_vcpu(struct acrn_vm *vm)
{
uint16_t i;
struct acrn_vcpu *vcpu;
@@ -247,37 +247,37 @@ static inline struct acrn_vcpu *get_primary_vcpu(struct vm *vm)
}
static inline struct acrn_vuart*
vm_vuart(struct vm *vm)
vm_vuart(struct acrn_vm *vm)
{
return &(vm->vuart);
}
static inline struct acrn_vpic *
vm_pic(struct vm *vm)
vm_pic(struct acrn_vm *vm)
{
return (struct acrn_vpic *)&(vm->arch_vm.vpic);
}
static inline struct acrn_vioapic *
vm_ioapic(struct vm *vm)
vm_ioapic(struct acrn_vm *vm)
{
return (struct acrn_vioapic *)&(vm->arch_vm.vioapic);
}
int shutdown_vm(struct vm *vm);
void pause_vm(struct vm *vm);
void resume_vm(struct vm *vm);
void resume_vm_from_s3(struct vm *vm, uint32_t wakeup_vec);
int start_vm(struct vm *vm);
int reset_vm(struct vm *vm);
int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm);
int shutdown_vm(struct acrn_vm *vm);
void pause_vm(struct acrn_vm *vm);
void resume_vm(struct acrn_vm *vm);
void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec);
int start_vm(struct acrn_vm *vm);
int reset_vm(struct acrn_vm *vm);
int create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm);
int prepare_vm(uint16_t pcpu_id);
#ifdef CONFIG_PARTITION_MODE
const struct vm_description_array *get_vm_desc_base(void);
#endif
struct vm *get_vm_from_vmid(uint16_t vm_id);
struct acrn_vm *get_vm_from_vmid(uint16_t vm_id);
#ifdef CONFIG_PARTITION_MODE
struct vm_description_array {
@@ -291,6 +291,6 @@ struct pcpu_vm_desc_mapping {
};
extern const struct pcpu_vm_desc_mapping pcpu_vm_desc_map[];
void vrtc_init(struct vm *vm);
void vrtc_init(struct acrn_vm *vm);
#endif
#endif /* VM_H_ */

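Call sites only see the type rename; the accessors keep their behavior. A minimal illustrative sketch using only the declarations above (example_pause_guest() itself is not part of this change):

/* Illustrative only: look up a VM by id and pause it unless it is VM0. */
static void example_pause_guest(uint16_t vm_id)
{
	struct acrn_vm *vm = get_vm_from_vmid(vm_id);

	if ((vm != NULL) && !is_vm0(vm) && (get_primary_vcpu(vm) != NULL)) {
		pause_vm(vm);
	}
}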
View File

@@ -121,12 +121,12 @@ struct i8259_reg_state {
};
struct acrn_vpic {
struct vm *vm;
struct acrn_vm *vm;
spinlock_t lock;
struct i8259_reg_state i8259[2];
};
void vpic_init(struct vm *vm);
void vpic_init(struct acrn_vm *vm);
/**
* @brief virtual PIC
@@ -145,7 +145,7 @@ void vpic_init(struct vm *vm);
*
* @return void
*/
void vpic_set_irq(struct vm *vm, uint32_t irq, uint32_t operation);
void vpic_set_irq(struct acrn_vm *vm, uint32_t irq, uint32_t operation);
/**
* @brief Get pending virtual interrupts for vPIC.
@@ -156,7 +156,7 @@ void vpic_set_irq(struct vm *vm, uint32_t irq, uint32_t operation);
*
* @return void.
*/
void vpic_pending_intr(struct vm *vm, uint32_t *vecptr);
void vpic_pending_intr(struct acrn_vm *vm, uint32_t *vecptr);
/**
* @brief Accept virtual interrupt for vPIC.
@@ -168,8 +168,8 @@ void vpic_pending_intr(struct vm *vm, uint32_t *vecptr);
*
* @pre vm != NULL
*/
void vpic_intr_accepted(struct vm *vm, uint32_t vector);
void vpic_get_irq_trigger(struct vm *vm, uint32_t irq,
void vpic_intr_accepted(struct acrn_vm *vm, uint32_t vector);
void vpic_get_irq_trigger(struct acrn_vm *vm, uint32_t irq,
enum vpic_trigger *trigger);
uint32_t vpic_pincount(void);

View File

@@ -13,8 +13,8 @@ extern struct pm_s_state_data host_pm_s_state;
extern uint8_t host_enter_s3_success;
int enter_s3(struct vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val);
extern void asm_enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
int enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val);
extern void asm_enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val,
uint32_t pm1b_cnt_val);
extern void restore_s3_context(void);

View File

@@ -48,14 +48,14 @@ struct vm_io_range {
};
struct vm_io_handler;
struct vm;
struct acrn_vm;
struct acrn_vcpu;
typedef
uint32_t (*io_read_fn_t)(struct vm *vm, uint16_t port, size_t size);
uint32_t (*io_read_fn_t)(struct acrn_vm *vm, uint16_t port, size_t size);
typedef
void (*io_write_fn_t)(struct vm *vm, uint16_t port, size_t size, uint32_t val);
void (*io_write_fn_t)(struct acrn_vm *vm, uint16_t port, size_t size, uint32_t val);
/**
* @brief Describes a single IO handler description entry.
@@ -185,14 +185,14 @@ int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu);
*
* @param vm The VM whose I/O bitmaps are to be initialized
*/
void setup_io_bitmap(struct vm *vm);
void setup_io_bitmap(struct acrn_vm *vm);
/**
* @brief Free I/O bitmaps and port I/O handlers of \p vm
*
* @param vm The VM whose I/O bitmaps and handlers are to be freed
*/
void free_io_emulation_resource(struct vm *vm);
void free_io_emulation_resource(struct acrn_vm *vm);
/**
* @brief Allow a VM to access a port I/O range
@@ -204,7 +204,7 @@ void free_io_emulation_resource(struct vm *vm);
* @param port_address The start address of the port I/O range
* @param nbytes The size of the range, in bytes
*/
void allow_guest_pio_access(struct vm *vm, uint16_t port_address,
void allow_guest_pio_access(struct acrn_vm *vm, uint16_t port_address,
uint32_t nbytes);
/**
@@ -215,7 +215,7 @@ void allow_guest_pio_access(struct vm *vm, uint16_t port_address,
* @param io_read_fn_ptr The handler for emulating reads from the given range
* @param io_write_fn_ptr The handler for emulating writes to the given range
*/
void register_io_emulation_handler(struct vm *vm, const struct vm_io_range *range,
void register_io_emulation_handler(struct acrn_vm *vm, const struct vm_io_range *range,
io_read_fn_t io_read_fn_ptr,
io_write_fn_t io_write_fn_ptr);
@@ -233,7 +233,7 @@ void register_io_emulation_handler(struct vm *vm, const struct vm_io_range *ra
* @return 0 - Registration succeeds
* @return -EINVAL - \p read_write is NULL, \p end is not larger than \p start or \p vm has been launched
*/
int register_mmio_emulation_handler(struct vm *vm,
int register_mmio_emulation_handler(struct acrn_vm *vm,
hv_mem_io_handler_t read_write, uint64_t start,
uint64_t end, void *handler_private_data);
@@ -244,7 +244,7 @@ int register_mmio_emulation_handler(struct vm *vm,
* @param start The base address of the range the to-be-unregistered handler is for
* @param end The end of the range (exclusive) the to-be-unregistered handler is for
*/
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
void unregister_mmio_emulation_handler(struct acrn_vm *vm, uint64_t start,
uint64_t end);
/**

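To make the new handler signatures concrete, a hedged sketch of a port I/O handler pair and its registration; the 0xF4U port, the stub bodies and the example_* names are assumptions, while io_read_fn_t, io_write_fn_t, struct vm_io_range (with IO_ATTR_RW, as in the vrtc hunk above) and register_io_emulation_handler() come from this header:

static uint32_t example_port_read(struct acrn_vm *vm, uint16_t port, size_t size)
{
	(void)vm; (void)port; (void)size;
	return 0U;                           /* placeholder value */
}

static void example_port_write(struct acrn_vm *vm, uint16_t port, size_t size,
		uint32_t val)
{
	(void)vm; (void)port; (void)size; (void)val;
}

static void example_register_port(struct acrn_vm *vm)
{
	struct vm_io_range range = {
		.flags = IO_ATTR_RW, .base = 0xF4U, .len = 1U };

	register_io_emulation_handler(vm, &range,
		&example_port_read, &example_port_write);
}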
View File

@@ -160,7 +160,7 @@ void invept(const struct acrn_vcpu *vcpu);
* @return true - The HPA of the guest memory region is continuous
* @return false - The HPA of the guest memory region is non-continuous
*/
bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg);
bool check_continuous_hpa(struct acrn_vm *vm, uint64_t gpa_arg, uint64_t size_arg);
/**
*@pre (pml4_page != NULL) && (pg_size != NULL)
*/
@@ -214,7 +214,7 @@ static inline void clflush(volatile void *p)
*
* @return None
*/
void destroy_ept(struct vm *vm);
void destroy_ept(struct acrn_vm *vm);
/**
* @brief Translating from guest-physical address to host-physical address
*
@@ -224,7 +224,7 @@ void destroy_ept(struct vm *vm);
* @return INVALID_HPA - parameter gpa is not mapped to any host-physical address
* @return hpa - the host-physical address that parameter gpa maps to
*/
uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
uint64_t gpa2hpa(struct acrn_vm *vm, uint64_t gpa);
/**
* @brief Translating from guest-physical address to host-physical address
*
@@ -236,7 +236,7 @@ uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
* @return INVALID_HPA - parameter gpa is not mapped to any host-physical address
* @return hpa - the host-physical address that parameter gpa maps to
*/
uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size);
/**
* @brief Translating from host-physical address to guest-physical address for VM0
*
@@ -260,7 +260,7 @@ uint64_t vm0_hpa2gpa(uint64_t hpa);
*
* @return None
*/
void ept_mr_add(struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
void ept_mr_add(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t hpa,
uint64_t gpa, uint64_t size, uint64_t prot_orig);
/**
* @brief Guest-physical memory page access right or memory type updating
@@ -277,7 +277,7 @@ void ept_mr_add(struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
*
* @return None
*/
void ept_mr_modify(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
void ept_mr_modify(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
uint64_t size, uint64_t prot_set, uint64_t prot_clr);
/**
* @brief Guest-physical memory region unmapping
@@ -292,7 +292,7 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
*
* @pre [gpa,gpa+size) has been mapped into host physical memory region
*/
void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
void ept_mr_del(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
uint64_t size);
/**
* @brief EPT violation handling

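A hedged sketch of the EPT mapping flow with the renamed type; pml4_page and prot are supplied by the caller, and the permission bits are defined elsewhere in mmu.h and not spelled out here:

/* Sketch only: map a guest region, verify the translation, unmap it. */
static void example_remap(struct acrn_vm *vm, uint64_t *pml4_page,
		uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot)
{
	ept_mr_add(vm, pml4_page, hpa, gpa, size, prot);

	if (gpa2hpa(vm, gpa) != INVALID_HPA) {
		ept_mr_del(vm, pml4_page, gpa, size);
	}
}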
View File

@@ -79,5 +79,5 @@ struct multiboot_module {
};
int parse_hv_cmdline(void);
int init_vm_boot_info(struct vm *vm);
int init_vm_boot_info(struct acrn_vm *vm);
#endif

View File

@@ -60,6 +60,6 @@ struct memory_ops {
};
extern const struct memory_ops ppt_mem_ops;
void init_ept_mem_ops(struct vm *vm);
void init_ept_mem_ops(struct acrn_vm *vm);
#endif /* PAGE_H */

View File

@@ -7,6 +7,6 @@
#ifndef SBL_SEED_PARSE_H_
#define SBL_SEED_PARSE_H_
bool sbl_seed_parse(struct vm *vm, char *cmdline, char *out_arg, uint32_t out_len);
bool sbl_seed_parse(struct acrn_vm *vm, char *cmdline, char *out_arg, uint32_t out_len);
#endif /* SBL_SEED_PARSE_H_ */

View File

@@ -130,7 +130,7 @@ struct trusty_startup_param {
void switch_world(struct acrn_vcpu *vcpu, int next_world);
bool initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param);
void destroy_secure_world(struct vm *vm, bool need_clr_mem);
void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem);
void save_sworld_context(struct acrn_vcpu *vcpu);
void restore_sworld_context(struct acrn_vcpu *vcpu);
void trusty_set_dseed(const void *dseed, uint8_t dseed_num);

View File

@@ -610,7 +610,7 @@ int init_iommu(void);
* @remark To reduce boot time & memory cost, the config option IOMMU_INIT_BUS_LIMIT limits the bus number.
*
*/
void init_iommu_vm0_domain(struct vm *vm0);
void init_iommu_vm0_domain(struct acrn_vm *vm0);
/**
* @}

View File

@@ -35,7 +35,7 @@ bool is_hypercall_from_ring0(void);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_sos_offline_cpu(struct vm *vm, uint64_t lapicid);
int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid);
/**
* @brief Get hypervisor api version
@@ -49,7 +49,7 @@ int32_t hcall_sos_offline_cpu(struct vm *vm, uint64_t lapicid);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_get_api_version(struct vm *vm, uint64_t param);
int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param);
/**
* @brief create virtual machine
@@ -65,7 +65,7 @@ int32_t hcall_get_api_version(struct vm *vm, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_create_vm(struct vm *vm, uint64_t param);
int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param);
/**
* @brief destroy virtual machine
@@ -134,7 +134,7 @@ int32_t hcall_pause_vm(uint16_t vmid);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_create_vcpu(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_create_vcpu(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief set vcpu regs
@@ -150,7 +150,7 @@ int32_t hcall_create_vcpu(struct vm *vm, uint16_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_vcpu_regs(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief set or clear IRQ line
@@ -166,7 +166,7 @@ int32_t hcall_set_vcpu_regs(struct vm *vm, uint16_t vmid, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_irqline(const struct vm *vm, uint16_t vmid,
int32_t hcall_set_irqline(const struct acrn_vm *vm, uint16_t vmid,
const struct acrn_irqline_ops *ops);
/**
* @brief inject MSI interrupt
@@ -181,7 +181,7 @@ int32_t hcall_set_irqline(const struct vm *vm, uint16_t vmid,
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_inject_msi(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief set ioreq shared buffer
@@ -197,7 +197,7 @@ int32_t hcall_inject_msi(struct vm *vm, uint16_t vmid, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief notify request done
@@ -223,7 +223,7 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_vm_memory_region(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_set_vm_memory_region(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief setup ept memory mapping for multi regions
@@ -235,7 +235,7 @@ int32_t hcall_set_vm_memory_region(struct vm *vm, uint16_t vmid, uint64_t param)
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param);
int32_t hcall_set_vm_memory_regions(struct acrn_vm *vm, uint64_t param);
/**
* @brief change guest memory page write permission
@@ -248,7 +248,7 @@ int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_write_protect_page(struct vm *vm, uint16_t vmid, uint64_t wp_gpa);
int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_gpa);
/**
* @brief translate guest physical address to host physical address
@@ -263,7 +263,7 @@ int32_t hcall_write_protect_page(struct vm *vm, uint16_t vmid, uint64_t wp_gpa);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_gpa_to_hpa(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Assign one passthrough dev to VM.
@@ -276,7 +276,7 @@ int32_t hcall_gpa_to_hpa(struct vm *vm, uint16_t vmid, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_assign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Deassign one passthrough dev from VM.
@@ -289,7 +289,7 @@ int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_deassign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Set interrupt mapping info of ptdev.
@@ -302,7 +302,7 @@ int32_t hcall_deassign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Clear interrupt mapping info of ptdev.
@@ -315,7 +315,7 @@ int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid,
int32_t hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid,
uint64_t param);
/**
@@ -328,7 +328,7 @@ int32_t hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid,
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param);
int32_t hcall_setup_sbuf(struct acrn_vm *vm, uint64_t param);
/**
* @brief Setup the hypervisor NPK log.
@@ -340,7 +340,7 @@ int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_setup_hv_npk_log(struct vm *vm, uint64_t param);
int32_t hcall_setup_hv_npk_log(struct acrn_vm *vm, uint64_t param);
/**
* @brief Execute profiling operation
@@ -353,7 +353,7 @@ int32_t hcall_setup_hv_npk_log(struct vm *vm, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_profiling_ops(struct vm *vm, uint64_t cmd, uint64_t param);
int32_t hcall_profiling_ops(struct acrn_vm *vm, uint64_t cmd, uint64_t param);
/**
* @brief Get VCPU Power state.
@@ -366,7 +366,7 @@ int32_t hcall_profiling_ops(struct vm *vm, uint64_t cmd, uint64_t param);
* @return 0 on success, non-zero on error.
*/
int32_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param);
int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param);
/**
* @brief Get VCPU a VM's interrupt count data.
@@ -379,7 +379,7 @@ int32_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_vm_intr_monitor(struct vm *vm, uint16_t vmid, uint64_t param);
int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @defgroup trusty_hypercall Trusty Hypercalls
@@ -450,7 +450,7 @@ int64_t hcall_save_restore_sworld_ctx(struct acrn_vcpu *vcpu);
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_set_callback_vector(const struct vm *vm, uint64_t param);
int32_t hcall_set_callback_vector(const struct acrn_vm *vm, uint64_t param);
/**
* @}

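For orientation, a hedged sketch of the calling convention these prototypes share: the first argument is the VM0 pointer and param is usually a guest physical address whose contents are fetched with copy_from_gpa() (see the guest.h hunk above). The hc_example_arg layout and handler name are invented:

struct hc_example_arg {              /* hypothetical parameter block */
	uint16_t vmid;
	uint16_t reserved[3];
	uint64_t gpa;
};

static int32_t example_hcall_handler(struct acrn_vm *vm, uint64_t param)
{
	struct hc_example_arg arg;

	if (copy_from_gpa(vm, &arg, param, (uint32_t)sizeof(arg)) != 0) {
		return -1;
	}
	/* ... validate arg.vmid and operate on the target VM ... */
	return 0;
}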
View File

@@ -55,7 +55,7 @@ struct ptdev_remapping_info {
uint32_t intr_type;
union source_id phys_sid;
union source_id virt_sid;
struct vm *vm;
struct acrn_vm *vm;
uint32_t active; /* 1=active, 0=inactive and to free*/
uint32_t allocated_pirq;
uint32_t polarity; /* 0=active high, 1=active low*/
@@ -72,10 +72,10 @@ extern spinlock_t ptdev_lock;
void ptdev_softirq(uint16_t pcpu_id);
void ptdev_init(void);
void ptdev_release_all_entries(const struct vm *vm);
void ptdev_release_all_entries(const struct acrn_vm *vm);
struct ptdev_remapping_info *ptdev_dequeue_softirq(struct vm *vm);
struct ptdev_remapping_info *alloc_entry(struct vm *vm,
struct ptdev_remapping_info *ptdev_dequeue_softirq(struct acrn_vm *vm);
struct ptdev_remapping_info *alloc_entry(struct acrn_vm *vm,
uint32_t intr_type);
void release_entry(struct ptdev_remapping_info *entry);
void ptdev_activate_entry(
@@ -87,7 +87,7 @@ void ptdev_deactivate_entry(struct ptdev_remapping_info *entry);
void get_ptdev_info(char *str_arg, size_t str_max);
#endif /* HV_DEBUG */
uint32_t get_vm_ptdev_intr_data(const struct vm *target_vm, uint64_t *buffer,
uint32_t get_vm_ptdev_intr_data(const struct acrn_vm *target_vm, uint64_t *buffer,
uint32_t buffer_cnt);
#endif /* PTDEV_H */

View File

@@ -22,7 +22,7 @@ static inline void profiling_vmexit_handler(__unused struct acrn_vcpu *vcpu,
__unused uint64_t exit_reason) {}
static inline void profiling_setup(void) {}
static inline int32_t hcall_profiling_ops(__unused struct vm *vm,
static inline int32_t hcall_profiling_ops(__unused struct acrn_vm *vm,
__unused uint64_t cmd, __unused uint64_t param)
{
return -ENODEV;

View File

@@ -292,14 +292,14 @@ struct profiling_info_wrapper {
struct sw_msr_op_info sw_msr_op_info;
} __aligned(8);
int32_t profiling_get_version_info(struct vm *vm, uint64_t addr);
int32_t profiling_get_pcpu_id(struct vm *vm, uint64_t addr);
int32_t profiling_msr_ops_all_cpus(struct vm *vm, uint64_t addr);
int32_t profiling_vm_list_info(struct vm *vm, uint64_t addr);
int32_t profiling_get_control(struct vm *vm, uint64_t addr);
int32_t profiling_set_control(struct vm *vm, uint64_t addr);
int32_t profiling_configure_pmi(struct vm *vm, uint64_t addr);
int32_t profiling_configure_vmsw(struct vm *vm, uint64_t addr);
int32_t profiling_get_version_info(struct acrn_vm *vm, uint64_t addr);
int32_t profiling_get_pcpu_id(struct acrn_vm *vm, uint64_t addr);
int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr);
int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr);
int32_t profiling_get_control(struct acrn_vm *vm, uint64_t addr);
int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr);
int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr);
int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr);
void profiling_ipi_handler(void *data);
#endif

View File

@@ -62,7 +62,7 @@ struct acrn_vuart {
#endif
bool thre_int_pending; /* THRE interrupt pending */
bool active;
struct vm *vm;
struct acrn_vm *vm;
spinlock_t lock; /* protects all softc elements */
};
#ifdef CONFIG_PARTITION_MODE
@@ -70,13 +70,13 @@ extern int8_t vuart_vmid;
#endif
#ifdef HV_DEBUG
#define COM1_IRQ 6U
void vuart_init(struct vm *vm);
void vuart_init(struct acrn_vm *vm);
struct acrn_vuart *vuart_console_active(void);
void vuart_console_tx_chars(struct acrn_vuart *vu);
void vuart_console_rx_chars(struct acrn_vuart *vu);
#else
#define COM1_IRQ 0xFFU
static inline void vuart_init(__unused struct vm *vm)
static inline void vuart_init(__unused struct acrn_vm *vm)
{
}
static inline struct acrn_vuart *vuart_console_active(void)

View File

@@ -124,8 +124,8 @@ struct pci_addr_info {
};
struct vpci_ops {
int (*init)(struct vm *vm);
void (*deinit)(struct vm *vm);
int (*init)(struct acrn_vm *vm);
void (*deinit)(struct acrn_vm *vm);
void (*cfgread)(struct vpci *vpci, union pci_bdf vbdf, uint32_t offset,
uint32_t bytes, uint32_t *val);
void (*cfgwrite)(struct vpci *vpci, union pci_bdf vbdf, uint32_t offset,
@@ -134,7 +134,7 @@ struct vpci_ops {
struct vpci {
struct vm *vm;
struct acrn_vm *vm;
struct pci_addr_info addr_info;
struct vpci_ops *ops;
};
@@ -142,9 +142,9 @@ struct vpci {
extern struct pci_vdev_ops pci_ops_vdev_hostbridge;
extern struct pci_vdev_ops pci_ops_vdev_pt;
void vpci_init(struct vm *vm);
void vpci_cleanup(struct vm *vm);
void vpci_set_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf);
void vpci_reset_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf);
void vpci_init(struct acrn_vm *vm);
void vpci_cleanup(struct acrn_vm *vm);
void vpci_set_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf);
void vpci_reset_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf);
#endif /* VPCI_H_ */

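A hedged sketch of how a vpci_ops table is filled in with the renamed init/deinit signatures; the example_* stubs are placeholders, and the real table populated this way is sharing_mode_vpci_ops in the vpci.c hunk near the top of this diff:

static int example_vpci_init(struct acrn_vm *vm)
{
	(void)vm;
	return 0;
}

static void example_vpci_deinit(struct acrn_vm *vm)
{
	(void)vm;
}

static struct vpci_ops example_vpci_ops = {
	.init   = example_vpci_init,
	.deinit = example_vpci_deinit,
	/* .cfgread / .cfgwrite take struct vpci and are untouched by the rename */
};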
View File

@@ -38,12 +38,12 @@
#ifndef ASSEMBLER
/* gpa --> hpa --> hva */
static inline void *gpa2hva(struct vm *vm, uint64_t x)
static inline void *gpa2hva(struct acrn_vm *vm, uint64_t x)
{
return hpa2hva(gpa2hpa(vm, x));
}
static inline uint64_t hva2gpa(struct vm *vm, void *x)
static inline uint64_t hva2gpa(struct acrn_vm *vm, void *x)
{
return (is_vm0(vm)) ? vm0_hpa2gpa(hva2hpa(x)) : INVALID_GPA;
}
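Finally, a small illustrative use of the renamed inline helpers; the caller is assumed to know that gpa is mapped (otherwise gpa2hpa() reports INVALID_HPA, see mmu.h above), and the 32-bit access is only an example:

/* Sketch only: peek a 32-bit value in guest memory via the direct map. */
static inline uint32_t example_peek_guest_u32(struct acrn_vm *vm, uint64_t gpa)
{
	const uint32_t *hva = (const uint32_t *)gpa2hva(vm, gpa);

	return *hva;
}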