Mirror of https://github.com/projectacrn/acrn-hypervisor.git
HV:vcpu fix "Pointer param should be declared pointer to const"
Fix violations where the pointer parameter can be read-only. This patch only fixes parameters named vcpu.

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
parent ea32c34ae1
commit 46d198244f
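The rule behind the change is that a pointer parameter which is only read through should be declared pointer-to-const (the static-analysis finding quoted in the commit title). The sketch below is not taken from the ACRN tree; demo_vcpu and the demo_* functions are hypothetical stand-ins that only illustrate the before/after shape of the const-qualification applied throughout this commit.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the hypervisor's struct vcpu. */
struct demo_vcpu {
	uint16_t vcpu_id;
	uint16_t pcpu_id;
};

/* Before: the parameter is writable even though the function only reads it. */
uint16_t demo_vcpu_id_old(struct demo_vcpu *vcpu)
{
	return vcpu->vcpu_id;
}

/* After: pointer-to-const documents the read-only contract and makes the
 * compiler reject any accidental write through the parameter. */
uint16_t demo_vcpu_id_new(const struct demo_vcpu *vcpu)
{
	return vcpu->vcpu_id;
}

int main(void)
{
	struct demo_vcpu v = { .vcpu_id = 1U, .pcpu_id = 0U };

	printf("%u %u\n", (unsigned int)demo_vcpu_id_old(&v),
	       (unsigned int)demo_vcpu_id_new(&v));
	return 0;
}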
@@ -322,7 +322,7 @@ static uint32_t get_vmcs_field(enum cpu_reg_name ident)
  * @pre ((reg <= CPU_REG_LAST) && (reg >= CPU_REG_FIRST))
  * @pre ((reg != CPU_REG_CR2) && (reg != CPU_REG_IDTR) && (reg != CPU_REG_GDTR))
  */
-static uint64_t vm_get_register(struct vcpu *vcpu, enum cpu_reg_name reg)
+static uint64_t vm_get_register(const struct vcpu *vcpu, enum cpu_reg_name reg)
 {
 	uint64_t reg_val = 0UL;
 
@@ -492,7 +492,7 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 	return 0;
 }
 
-static int mmio_read(struct vcpu *vcpu, uint64_t *rval)
+static int mmio_read(const struct vcpu *vcpu, uint64_t *rval)
 {
 	if (vcpu == NULL) {
 		return -EINVAL;
@@ -538,7 +538,7 @@ static void vie_calc_bytereg(const struct instr_emul_vie *vie,
 	}
 }
 
-static uint8_t vie_read_bytereg(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static uint8_t vie_read_bytereg(const struct vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int lhbr;
 	uint64_t val;
@@ -883,7 +883,7 @@ static int emulate_movx(struct vcpu *vcpu, const struct instr_emul_vie *vie)
  *
  * It's only used by MOVS/STO
  */
-static void get_gva_si_nocheck(struct vcpu *vcpu, uint8_t addrsize,
+static void get_gva_si_nocheck(const struct vcpu *vcpu, uint8_t addrsize,
 		enum cpu_reg_name seg, uint64_t *gva)
 {
 	uint64_t val;
@@ -2343,7 +2343,7 @@ int decode_instruction(struct vcpu *vcpu)
 	return (int)(emul_ctxt->vie.opsize);
 }
 
-int emulate_instruction(struct vcpu *vcpu)
+int emulate_instruction(const struct vcpu *vcpu)
 {
 	struct instr_emul_ctxt *ctxt = &per_cpu(g_inst_ctxt, vcpu->pcpu_id);
 
@@ -193,7 +193,7 @@ struct instr_emul_ctxt {
 	struct vcpu *vcpu;
 };
 
-int emulate_instruction(struct vcpu *vcpu);
+int emulate_instruction(const struct vcpu *vcpu);
 int decode_instruction(struct vcpu *vcpu);
 
 #endif
@@ -10,9 +10,9 @@
 
 vm_sw_loader_t vm_sw_loader;
 
-inline uint64_t vcpu_get_gpreg(struct vcpu *vcpu, uint32_t reg)
+inline uint64_t vcpu_get_gpreg(const struct vcpu *vcpu, uint32_t reg)
 {
-	struct run_context *ctx =
+	const struct run_context *ctx =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
 
 	return ctx->guest_cpu_regs.longs[reg];
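Note that in the @@ -10,9 +10,9 @@ hunk above (vcpu_get_gpreg) the change does not stop at the signature: the local ctx pointer becomes const struct run_context * as well, because a pointer taken through a const struct vcpu * is itself pointer-to-const, and assigning it to a non-const pointer would discard the qualifier, which the compiler diagnoses. A minimal, self-contained sketch of that propagation; demo_ctx and demo_vcpu are hypothetical simplifications, not ACRN types.

#include <stdint.h>

struct demo_ctx {
	uint64_t regs[16];
};

struct demo_vcpu {
	struct demo_ctx run_ctx;
};

uint64_t demo_get_reg(const struct demo_vcpu *vcpu, uint32_t reg)
{
	/* &vcpu->run_ctx has type const struct demo_ctx * here; declaring
	 * ctx without const would discard the qualifier and is diagnosed. */
	const struct demo_ctx *ctx = &vcpu->run_ctx;

	return ctx->regs[reg];
}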
@@ -146,7 +146,7 @@ inline void vcpu_set_cr4(struct vcpu *vcpu, uint64_t val)
 	vmx_write_cr4(vcpu, val);
 }
 
-inline uint64_t vcpu_get_pat_ext(struct vcpu *vcpu)
+inline uint64_t vcpu_get_pat_ext(const struct vcpu *vcpu)
 {
 	return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].
 		ext_ctx.ia32_pat;
@@ -68,7 +68,7 @@ void dm_emulate_pio_post(struct vcpu *vcpu)
  * either a previous call to emulate_io() returning 0 or the corresponding VHM
  * request having transferred to the COMPLETE state.
  */
-void emulate_mmio_post(struct vcpu *vcpu, const struct io_request *io_req)
+void emulate_mmio_post(const struct vcpu *vcpu, const struct io_request *io_req)
 {
 	const struct mmio_request *mmio_req = &io_req->reqs.mmio;
 
@@ -173,7 +173,7 @@ void emulate_io_post(struct vcpu *vcpu)
  * @return -EIO - The request spans multiple devices and cannot be emulated.
  */
 int32_t
-hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
+hv_emulate_pio(const struct vcpu *vcpu, struct io_request *io_req)
 {
 	int32_t status = -ENODEV;
 	uint16_t port, size;
@@ -167,7 +167,7 @@ void flush_vpid_global(void)
 	local_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0U, 0UL);
 }
 
-void invept(struct vcpu *vcpu)
+void invept(const struct vcpu *vcpu)
 {
 	struct invept_desc desc = {0};
 
@@ -63,18 +63,18 @@ get_subrange_start_of_fixed_mtrr(uint32_t index, uint32_t subrange_id)
 		get_subrange_size_of_fixed_mtrr(index));
 }
 
-static inline bool is_mtrr_enabled(struct vcpu *vcpu)
+static inline bool is_mtrr_enabled(const struct vcpu *vcpu)
 {
 	return (vcpu->mtrr.def_type.bits.enable != 0U);
 }
 
-static inline bool is_fixed_range_mtrr_enabled(struct vcpu *vcpu)
+static inline bool is_fixed_range_mtrr_enabled(const struct vcpu *vcpu)
 {
 	return ((vcpu->mtrr.cap.bits.fix != 0U) &&
 		(vcpu->mtrr.def_type.bits.fixed_enable != 0U));
 }
 
-static inline uint8_t get_default_memory_type(struct vcpu *vcpu)
+static inline uint8_t get_default_memory_type(const struct vcpu *vcpu)
 {
 	return (uint8_t)(vcpu->mtrr.def_type.bits.type);
 }
@@ -154,7 +154,7 @@ static uint32_t update_ept(struct vm *vm, uint64_t start,
 	return attr;
 }
 
-static void update_ept_mem_type(struct vcpu *vcpu)
+static void update_ept_mem_type(const struct vcpu *vcpu)
 {
 	uint8_t type;
 	uint64_t start, size;
@@ -236,9 +236,9 @@ void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value)
 	}
 }
 
-uint64_t mtrr_rdmsr(struct vcpu *vcpu, uint32_t msr)
+uint64_t mtrr_rdmsr(const struct vcpu *vcpu, uint32_t msr)
 {
-	struct mtrr_state *mtrr = &vcpu->mtrr;
+	const struct mtrr_state *mtrr = &vcpu->mtrr;
 	uint64_t ret = 0UL;
 	uint32_t index;
 
@@ -148,7 +148,7 @@ static int vcpu_inject_vlapic_int(struct vcpu *vcpu)
 	return 0;
 }
 
-static int vcpu_do_pending_extint(struct vcpu *vcpu)
+static int vcpu_do_pending_extint(const struct vcpu *vcpu)
 {
 	struct vm *vm;
 	struct vcpu *primary;
@@ -250,7 +250,7 @@ static void init_cr0_cr4_host_mask(void)
 	pr_dbg("CR4 mask value: 0x%016llx", cr4_host_mask);
 }
 
-uint64_t vmx_rdmsr_pat(struct vcpu *vcpu)
+uint64_t vmx_rdmsr_pat(const struct vcpu *vcpu)
 {
 	/*
 	 * note: if context->cr0.CD is set, the actual value in guest's
@@ -288,7 +288,7 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
 	return 0;
 }
 
-static void load_pdptrs(struct vcpu *vcpu)
+static void load_pdptrs(const struct vcpu *vcpu)
 {
 	uint64_t guest_cr3 = exec_vmread(VMX_GUEST_CR3);
 	/* TODO: check whether guest cr3 is valid */
@@ -945,7 +945,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
 }
 
-static void init_entry_ctrl(__unused struct vcpu *vcpu)
+static void init_entry_ctrl(__unused const struct vcpu *vcpu)
 {
 	uint32_t value32;
 
@@ -96,7 +96,7 @@ static struct vcpu *select_next_vcpu(uint16_t pcpu_id)
 	return vcpu;
 }
 
-void make_reschedule_request(struct vcpu *vcpu)
+void make_reschedule_request(const struct vcpu *vcpu)
 {
 	struct sched_context *ctx = &per_cpu(sched_ctx, vcpu->pcpu_id);
 
@@ -248,7 +248,7 @@ struct vcpu_dump {
 	int str_max;
 };
 
-static inline bool is_vcpu_bsp(struct vcpu *vcpu)
+static inline bool is_vcpu_bsp(const struct vcpu *vcpu)
 {
 	return (vcpu->vcpu_id == BOOT_CPU_ID);
 }
@@ -266,7 +266,7 @@ vcpu_vlapic(struct vcpu *vcpu)
 }
 
 /* External Interfaces */
-uint64_t vcpu_get_gpreg(struct vcpu *vcpu, uint32_t reg);
+uint64_t vcpu_get_gpreg(const struct vcpu *vcpu, uint32_t reg);
 void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val);
 uint64_t vcpu_get_rip(struct vcpu *vcpu);
 void vcpu_set_rip(struct vcpu *vcpu, uint64_t val);
@@ -282,7 +282,7 @@ uint64_t vcpu_get_cr2(struct vcpu *vcpu);
 void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val);
 uint64_t vcpu_get_cr4(struct vcpu *vcpu);
 void vcpu_set_cr4(struct vcpu *vcpu, uint64_t val);
-uint64_t vcpu_get_pat_ext(struct vcpu *vcpu);
+uint64_t vcpu_get_pat_ext(const struct vcpu *vcpu);
 void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val);
 void set_vcpu_regs(struct vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs);
 void reset_vcpu_regs(struct vcpu *vcpu);
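Two details in the header hunks above are worth calling out. Only the read-only accessors (vcpu_get_gpreg, vcpu_get_pat_ext) gain the const qualifier; the vcpu_set_* counterparts keep a writable pointer because they store into the vcpu. And each prototype has to be updated together with its definition, since a const-qualified and an unqualified pointer parameter give the function incompatible types and the build would fail with conflicting declarations. A minimal hypothetical header sketch of the resulting pattern (the demo_* names are not ACRN identifiers):

/* demo_vcpu.h: a hypothetical header mirroring the pattern in vcpu.h. */
#include <stdint.h>

struct demo_vcpu {
	uint64_t pat;
};

/* Read-only accessor: can take pointer-to-const. */
uint64_t demo_get_pat(const struct demo_vcpu *vcpu);

/* Mutator: keeps a writable pointer because it stores into *vcpu. */
void demo_set_pat(struct demo_vcpu *vcpu, uint64_t val);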
@@ -121,7 +121,7 @@ int register_mmio_emulation_handler(struct vm *vm,
 		uint64_t end, void *handler_private_data);
 void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
 		uint64_t end);
-void emulate_mmio_post(struct vcpu *vcpu, const struct io_request *io_req);
+void emulate_mmio_post(const struct vcpu *vcpu, const struct io_request *io_req);
 void dm_emulate_mmio_post(struct vcpu *vcpu);
 
 int32_t emulate_io(struct vcpu *vcpu, struct io_request *io_req);
@@ -88,7 +88,7 @@ int check_vmx_mmu_cap(void);
 uint16_t allocate_vpid(void);
 void flush_vpid_single(uint16_t vpid);
 void flush_vpid_global(void);
-void invept(struct vcpu *vcpu);
+void invept(const struct vcpu *vcpu);
 bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg);
 /**
  *@pre (pml4_page != NULL) && (pg_size != NULL)
@@ -45,7 +45,7 @@ struct mtrr_state {
 };
 
 void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value);
-uint64_t mtrr_rdmsr(struct vcpu *vcpu, uint32_t msr);
+uint64_t mtrr_rdmsr(const struct vcpu *vcpu, uint32_t msr);
 void init_mtrr(struct vcpu *vcpu);
 
 #endif /* MTRR_H */
@@ -458,7 +458,7 @@ void vmx_off(uint16_t pcpu_id);
 void exec_vmclear(void *addr);
 void exec_vmptrld(void *addr);
 
-uint64_t vmx_rdmsr_pat(struct vcpu *vcpu);
+uint64_t vmx_rdmsr_pat(const struct vcpu *vcpu);
 int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value);
 
 void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0);
@@ -31,7 +31,7 @@ void remove_vcpu_from_runqueue(struct vcpu *vcpu);
 
 void default_idle(void);
 
-void make_reschedule_request(struct vcpu *vcpu);
+void make_reschedule_request(const struct vcpu *vcpu);
 int need_reschedule(uint16_t pcpu_id);
 void make_pcpu_offline(uint16_t pcpu_id);
 int need_offline(uint16_t pcpu_id);
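One more point about call sites: none of the callers of these functions need to change, because a writable struct vcpu * converts implicitly to const struct vcpu * when passed as an argument; only the opposite direction (dropping const) is rejected. A hypothetical illustration, not ACRN code:

#include <stdint.h>

struct demo_vcpu {
	uint16_t pcpu_id;
};

/* Callee whose parameter was just const-qualified. */
uint16_t demo_pcpu_of(const struct demo_vcpu *vcpu)
{
	return vcpu->pcpu_id;
}

/* Pre-existing caller that still holds a writable pointer. */
uint16_t demo_caller(struct demo_vcpu *vcpu)
{
	/* Passing struct demo_vcpu * where const struct demo_vcpu * is
	 * expected only adds a qualifier, so the call compiles unchanged. */
	return demo_pcpu_of(vcpu);
}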