Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: vmcs: fix MISRA-C violations related to variable scope

This patch fixes the MISRA-C violations in arch/x86/vmcs.c.
* add a local variable if an input parameter passed by value is changed in the function definition
* move a variable declaration inside function scope if the variable is only used in one function

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 8e58a686f1
parent 9a051e7ab2
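The two bullets correspond to two advisory MISRA C:2012 rules: Rule 17.8 (a function parameter should not be modified) and Rule 8.9 (an object should be defined at block scope if its identifier only appears in a single function). A minimal standalone sketch of both patterns, using hypothetical names rather than ACRN code:

	#include <stdint.h>

	/* Rule 8.9: 'call_count' is used only by count_calls(), so it is
	 * declared at block scope instead of at file scope. */
	static uint32_t count_calls(void)
	{
		static uint32_t call_count = 0U;

		call_count++;
		return call_count;
	}

	/* Rule 17.8: the by-value parameter 'flags' is never written;
	 * all modifications go to the local copy 'flags_mask'. */
	static uint64_t strip_low_byte(uint64_t flags)
	{
		uint64_t flags_mask = flags;

		flags_mask &= ~0xFFUL;
		return flags_mask;
	}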
arch/x86/vmcs.c
@@ -9,10 +9,8 @@
 #include <hypervisor.h>
 #include <cpu.h>
 
-static uint64_t cr0_host_mask;
 static uint64_t cr0_always_on_mask;
 static uint64_t cr0_always_off_mask;
-static uint64_t cr4_host_mask;
 static uint64_t cr4_always_on_mask;
 static uint64_t cr4_always_off_mask;
 
@@ -34,7 +32,9 @@ bool is_vmx_disabled(void)
 static void init_cr0_cr4_host_mask(void)
 {
 	static bool inited = false;
+	static uint64_t cr0_host_mask, cr4_host_mask;
 	uint64_t fixed0, fixed1;
+
 	if (!inited) {
 		/* Read the CR0 fixed0 / fixed1 MSR registers */
 		fixed0 = msr_read(MSR_IA32_VMX_CR0_FIXED0);
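The hunk above also relies on the one-time-initialization idiom that makes the block-scope statics safe to keep in init_cr0_cr4_host_mask(): a static bool guard ensures the MSR-derived masks are computed only on the first call. A minimal sketch of the idiom with placeholder names (not ACRN code); note it is not inherently thread-safe and assumes a controlled init path:

	#include <stdbool.h>
	#include <stdint.h>

	static uint64_t get_cached_mask(void)
	{
		static bool inited = false;
		static uint64_t cached_mask;

		if (!inited) {
			/* stand-in for the expensive MSR reads; runs once,
			 * later calls return the cached result */
			cached_mask = 0xFFFFFFFFUL;
			inited = true;
		}
		return cached_mask;
	}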
@@ -194,6 +194,7 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
 	uint32_t entry_ctrls;
 	bool old_paging_enabled = is_paging_enabled(vcpu);
 	uint64_t cr0_changed_bits = vcpu_get_cr0(vcpu) ^ cr0;
+	uint64_t cr0_mask = cr0;
 
 	if (!is_cr0_write_valid(vcpu, cr0)) {
 		pr_dbg("Invalid cr0 write operation from guest");
@@ -205,9 +206,9 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
 	 * When loading a control register, reserved bit should always set
 	 * to the value previously read.
 	 */
-	cr0 &= ~CR0_RESERVED_MASK;
+	cr0_mask &= ~CR0_RESERVED_MASK;
 
-	if (!old_paging_enabled && ((cr0 & CR0_PG) != 0UL)) {
+	if (!old_paging_enabled && ((cr0_mask & CR0_PG) != 0UL)) {
 		if ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL) {
 			/* Enable long mode */
 			pr_dbg("VMM: Enable long mode");
@@ -223,7 +224,7 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
 		} else {
 			/* do nothing */
 		}
-	} else if (old_paging_enabled && ((cr0 & CR0_PG) == 0UL)) {
+	} else if (old_paging_enabled && ((cr0_mask & CR0_PG) == 0UL)) {
 		if ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL) {
 			/* Disable long mode */
 			pr_dbg("VMM: Disable long mode");
@@ -242,7 +243,7 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
 	if ((cr0_changed_bits & (CR0_CD | CR0_NW)) != 0UL) {
 		/* No action if only CR0.NW is cr0_changed_bits */
 		if ((cr0_changed_bits & CR0_CD) != 0UL) {
-			if ((cr0 & CR0_CD) != 0UL) {
+			if ((cr0_mask & CR0_CD) != 0UL) {
 				/*
 				 * When the guest requests to set CR0.CD, we don't allow
 				 * guest's CR0.CD to be actually set, instead, we write guest
@@ -269,18 +270,17 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
 	/* CR0 has no always off bits, except the always on bits, and reserved
 	 * bits, allow to set according to guest.
 	 */
-	cr0_vmx = cr0_always_on_mask | cr0;
+	cr0_vmx = cr0_always_on_mask | cr0_mask;
 
 	/* Don't set CD or NW bit to guest */
 	cr0_vmx &= ~(CR0_CD | CR0_NW);
 	exec_vmwrite(VMX_GUEST_CR0, cr0_vmx & 0xFFFFFFFFUL);
-	exec_vmwrite(VMX_CR0_READ_SHADOW, cr0 & 0xFFFFFFFFUL);
+	exec_vmwrite(VMX_CR0_READ_SHADOW, cr0_mask & 0xFFFFFFFFUL);
 
 	/* clear read cache, next time read should from VMCS */
 	bitmap_clear_lock(CPU_REG_CR0, &vcpu->reg_cached);
 
-	pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0",
-		cr0, cr0_vmx);
+	pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0", cr0_mask, cr0_vmx);
 }
 
 static bool is_cr4_write_valid(struct acrn_vcpu *vcpu, uint64_t cr4)
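Taken together, the vmx_write_cr0() hunks leave the incoming parameter untouched: validation (is_cr0_write_valid) and change detection (cr0_changed_bits) read the original cr0, while every mutation targets the local copy cr0_mask. A condensed sketch of the resulting flow, abridged from the diff context above (error handling and the paging and cache-disable branches are elided):

	void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
	{
		uint64_t cr0_vmx;
		uint64_t cr0_changed_bits = vcpu_get_cr0(vcpu) ^ cr0;
		uint64_t cr0_mask = cr0;	/* 'cr0' itself is never written */

		if (!is_cr0_write_valid(vcpu, cr0)) {
			pr_dbg("Invalid cr0 write operation from guest");
			return;	/* abridged; the real function handles the error */
		}

		cr0_mask &= ~CR0_RESERVED_MASK;

		if ((cr0_changed_bits & (CR0_CD | CR0_NW)) != 0UL) {
			/* cache-disable / not-write-through handling elided */
		}

		cr0_vmx = cr0_always_on_mask | cr0_mask;
		cr0_vmx &= ~(CR0_CD | CR0_NW);
		exec_vmwrite(VMX_GUEST_CR0, cr0_vmx & 0xFFFFFFFFUL);
		exec_vmwrite(VMX_CR0_READ_SHADOW, cr0_mask & 0xFFFFFFFFUL);
		bitmap_clear_lock(CPU_REG_CR0, &vcpu->reg_cached);
	}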