Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2026-01-05 07:35:31 +00:00)
hv: multi-arch reconstruct bits library
Extract a common interface into include/lib/bits.h that dispatches to each
arch's implementation variant, and re-implement the unlocked functions in C
in the common library. Rename bitmap*_lock() to bitmap*(), and
bitmap*_nolock() to bitmap*_non_atomic().

Tracked-On: #8803
Signed-off-by: Haoyu Tang <haoyu.tang@intel.com>
Reviewed-by: Yifan Liu <yifan1.liu@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
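After this commit, the atomic (bus-locked) bit operations keep the short names
while the explicitly non-atomic variants carry the _non_atomic suffix. A minimal
caller-side sketch of the convention (bitmap_set/bitmap_set_non_atomic are the
in-tree API from this commit; the surrounding function and variables are
hypothetical):

	#include <bits.h>

	static volatile uint64_t shared_flags;	/* hypothetical cross-CPU bitmap */

	void mark_pcpu(uint16_t pcpu_id)
	{
		uint64_t local_mask = 0UL;

		/* shared across CPUs: atomic form (was bitmap_set_lock()) */
		bitmap_set(pcpu_id, &shared_flags);

		/* private to this context: cheap form (was bitmap_set_nolock()) */
		bitmap_set_non_atomic(pcpu_id, &local_mask);
	}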
@@ -465,7 +465,7 @@ running). See :ref:`vcpu-request-interrupt-injection` for details.
 {
 	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);

-	bitmap_set_lock(eventid, &vcpu->arch_vcpu.pending_req);
+	bitmap_set(eventid, &vcpu->arch_vcpu.pending_req);
 	/*
 	 * if current hostcpu is not the target vcpu's hostcpu, we need
 	 * to invoke IPI to wake up target vcpu
@@ -149,9 +149,11 @@ endif
 # all the work.
 #
 COMMON_C_SRCS += common/notify.c
-COMMON_C_SRCS += lib/memory.c
 COMMON_C_SRCS += common/percpu.c
 COMMON_C_SRCS += common/cpu.c
+COMMON_C_SRCS += lib/memory.c
+COMMON_C_SRCS += lib/bits.c

 ifeq ($(ARCH),x86)
 COMMON_C_SRCS += common/ticks.c
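The Makefile change above adds lib/bits.c to the common sources. Its contents
are not shown in this diff beyond the ffz64_ex() prototype at the end of the
new bits.h, but the inline helpers removed from the x86 header later in this
commit suggest implementations along these lines (a sketch under that
assumption, not the committed file; the bitmap_set_non_atomic signature is
likewise assumed):

	#include <types.h>
	#include <bits.h>

	/*
	 * Find the first zero bit in a uint64_t array.
	 * @pre: the size must be a multiple of 64.
	 * (Same logic as the inline version removed from asm/lib/bits.h.)
	 */
	uint64_t ffz64_ex(const uint64_t *addr, uint64_t size)
	{
		uint64_t ret = size;
		uint64_t idx;

		for (idx = 0UL; (idx << 6U) < size; idx++) {
			if (addr[idx] != ~0UL) {
				ret = (idx << 6U) + ffz64(addr[idx]);
				break;
			}
		}

		return ret;
	}

	/*
	 * One of the "unlocked functions re-implemented as C" named by the
	 * commit message; the committed body and file layout may differ.
	 */
	void bitmap_set_non_atomic(uint32_t nr_arg, volatile uint64_t *addr)
	{
		*addr |= (1UL << (nr_arg & 63U));
	}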
@@ -1,11 +1,11 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <types.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/page.h>
 #include <asm/e820.h>
 #include <asm/mmu.h>
@@ -321,7 +321,7 @@ void init_pcpu_post(uint16_t pcpu_id)

 		init_keylocker();

-		bitmap_clear_lock(pcpu_id, &pcpu_sync);
+		bitmap_clear(pcpu_id, &pcpu_sync);
 		/* Waiting for each pCPU has done its initialization before to continue */
 		wait_sync_change(&pcpu_sync, 0UL);
 	}
@@ -392,7 +392,7 @@ bool start_pcpus(uint64_t mask)

 	i = ffs64(expected_start_mask);
 	while (i != INVALID_BIT_INDEX) {
-		bitmap_clear_nolock(i, &expected_start_mask);
+		bitmap_clear_non_atomic(i, &expected_start_mask);

 		if (pcpu_id == i) {
 			continue; /* Avoid start itself */
@@ -407,7 +407,7 @@ bool start_pcpus(uint64_t mask)

 void make_pcpu_offline(uint16_t pcpu_id)
 {
-	bitmap_set_lock(NEED_OFFLINE, &per_cpu(pcpu_flag, pcpu_id));
+	bitmap_set(NEED_OFFLINE, &per_cpu(pcpu_flag, pcpu_id));
 	if (get_pcpu_id() != pcpu_id) {
 		kick_pcpu(pcpu_id);
 	}
@@ -415,7 +415,7 @@ void make_pcpu_offline(uint16_t pcpu_id)

 bool need_offline(uint16_t pcpu_id)
 {
-	return bitmap_test_and_clear_lock(NEED_OFFLINE, &per_cpu(pcpu_flag, pcpu_id));
+	return bitmap_test_and_clear(NEED_OFFLINE, &per_cpu(pcpu_flag, pcpu_id));
 }

 void wait_pcpus_offline(uint64_t mask)
@@ -439,7 +439,7 @@ void stop_pcpus(void)
 			continue;
 		}

-		bitmap_set_nolock(pcpu_id, &mask);
+		bitmap_set_non_atomic(pcpu_id, &mask);
 		make_pcpu_offline(pcpu_id);
 	}
@@ -496,6 +496,7 @@ void cpu_dead(void)

 		/* Set state to show CPU is dead */
 		pcpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
+		clear_pcpu_active(pcpu_id);

 		/* Halt the CPU */
@@ -624,7 +625,7 @@ void msr_write_pcpu(uint32_t msr_index, uint64_t value64, uint16_t pcpu_id)
 	} else {
 		msr.msr_index = msr_index;
 		msr.write_val = value64;
-		bitmap_set_nolock(pcpu_id, &mask);
+		bitmap_set_non_atomic(pcpu_id, &mask);
 		smp_call_function(mask, smpcall_write_msr_func, &msr);
 	}
 }
@@ -646,7 +647,7 @@ uint64_t msr_read_pcpu(uint32_t msr_index, uint16_t pcpu_id)
 		ret = msr_read(msr_index);
 	} else {
 		msr.msr_index = msr_index;
-		bitmap_set_nolock(pcpu_id, &mask);
+		bitmap_set_non_atomic(pcpu_id, &mask);
 		smp_call_function(mask, smpcall_read_msr_func, &msr);
 		ret = msr.read_val;
 	}
@@ -1,12 +1,12 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <types.h>
 #include <errno.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/guest/vm.h>
 #include <asm/vtd.h>
 #include <ptdev.h>
@@ -67,7 +67,7 @@ static uint32_t calculate_logical_dest_mask(uint64_t pdmask)
 	 */
 	dest_cluster_id = per_cpu(arch.lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK;
 	do {
-		bitmap_clear_nolock(pcpu_id, &pcpu_mask);
+		bitmap_clear_non_atomic(pcpu_id, &pcpu_mask);
 		cluster_id = per_cpu(arch.lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK;
 		if (cluster_id == dest_cluster_id) {
 			logical_id_mask |= (per_cpu(arch.lapic_ldr, pcpu_id) & X2APIC_LDR_LOGICAL_ID_MASK);
@@ -398,7 +398,7 @@ static void setup_vmcs_shadowing_bitmap(void)
 	for (field_index = 0U; field_index < MAX_SHADOW_VMCS_FIELDS; field_index++) {
 		bit_pos = vmcs_shadowing_fields[field_index] % 64U;
 		array_index = vmcs_shadowing_fields[field_index] / 64U;
-		bitmap_clear_nolock(bit_pos, &vmcs_shadowing_bitmap[array_index]);
+		bitmap_clear_non_atomic(bit_pos, &vmcs_shadowing_bitmap[array_index]);
 	}
 }
@@ -1338,8 +1338,8 @@ static void set_vmcs01_guest_state(struct acrn_vcpu *vcpu)
 	 */
 	exec_vmwrite(VMX_GUEST_CR0, vmcs12->host_cr0);
 	exec_vmwrite(VMX_GUEST_CR4, vmcs12->host_cr4);
-	bitmap_clear_nolock(CPU_REG_CR0, &vcpu->reg_cached);
-	bitmap_clear_nolock(CPU_REG_CR4, &vcpu->reg_cached);
+	bitmap_clear_non_atomic(CPU_REG_CR0, &vcpu->reg_cached);
+	bitmap_clear_non_atomic(CPU_REG_CR4, &vcpu->reg_cached);

 	exec_vmwrite(VMX_GUEST_CR3, vmcs12->host_cr3);
 	exec_vmwrite(VMX_GUEST_DR7, DR7_INIT_VALUE);
@@ -167,7 +167,7 @@ static inline void enter_s5(struct acrn_vcpu *vcpu, uint32_t pm1a_cnt_val, uint3
 	pause_vm(vm);
 	put_vm_lock(vm);

-	bitmap_set_nolock(vm->vm_id, &per_cpu(shutdown_vm_bitmap, pcpu_id));
+	bitmap_set_non_atomic(vm->vm_id, &per_cpu(shutdown_vm_bitmap, pcpu_id));
 	make_shutdown_vm_request(pcpu_id);
 }
@@ -352,7 +352,7 @@ static bool prelaunched_vm_sleep_io_write(struct acrn_vcpu *vcpu, uint16_t addr,
 		pause_vm(vm);
 		put_vm_lock(vm);

-		bitmap_set_nolock(vm->vm_id, &per_cpu(shutdown_vm_bitmap, pcpuid_from_vcpu(vcpu)));
+		bitmap_set_non_atomic(vm->vm_id, &per_cpu(shutdown_vm_bitmap, pcpuid_from_vcpu(vcpu)));
 		make_shutdown_vm_request(pcpuid_from_vcpu(vcpu));
 	}
 }
@@ -1,11 +1,11 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <types.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <crypto_api.h>
 #include <asm/guest/trusty.h>
 #include <asm/page.h>
@@ -165,12 +165,12 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext
 	uint32_t i;

 	/* mark to update on-demand run_context for efer/rflags/rsp/rip/cr0/cr4 */
-	bitmap_set_nolock(CPU_REG_EFER, &vcpu->reg_updated);
-	bitmap_set_nolock(CPU_REG_RFLAGS, &vcpu->reg_updated);
-	bitmap_set_nolock(CPU_REG_RSP, &vcpu->reg_updated);
-	bitmap_set_nolock(CPU_REG_RIP, &vcpu->reg_updated);
-	bitmap_set_nolock(CPU_REG_CR0, &vcpu->reg_updated);
-	bitmap_set_nolock(CPU_REG_CR4, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_EFER, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_RFLAGS, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_RSP, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_RIP, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_CR0, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_CR4, &vcpu->reg_updated);

 	/* VMCS Execution field */
 	exec_vmwrite64(VMX_TSC_OFFSET_FULL, ext_ctx->tsc_offset);
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2021-2022 Intel Corporation.
+ * Copyright (C) 2021-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -10,7 +10,7 @@
 #include <asm/cpufeatures.h>
 #include <asm/cpuid.h>
 #include <asm/rdt.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/board.h>
 #include <asm/vm_config.h>
 #include <asm/msr.h>
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -8,7 +8,7 @@
 #include <errno.h>
 #include <asm/guest/vcpu.h>
 #include <asm/guest/virq.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/vmx.h>
 #include <logmsg.h>
 #include <asm/cpufeatures.h>
@@ -59,7 +59,7 @@ uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	if (!bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) &&
-		!bitmap_test_and_set_nolock(CPU_REG_RIP, &vcpu->reg_cached)) {
+		!bitmap_test_and_set_non_atomic(CPU_REG_RIP, &vcpu->reg_cached)) {
 		ctx->rip = exec_vmread(VMX_GUEST_RIP);
 	}
 	return ctx->rip;
@@ -68,7 +68,7 @@ uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rip = val;
-	bitmap_set_nolock(CPU_REG_RIP, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_RIP, &vcpu->reg_updated);
 }

 uint64_t vcpu_get_rsp(const struct acrn_vcpu *vcpu)
@@ -85,7 +85,7 @@ void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	ctx->cpu_regs.regs.rsp = val;
-	bitmap_set_nolock(CPU_REG_RSP, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_RSP, &vcpu->reg_updated);
 }

 uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
@@ -110,7 +110,7 @@ void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
 	}

 	/* Write the new value to VMCS in either case */
-	bitmap_set_nolock(CPU_REG_EFER, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_EFER, &vcpu->reg_updated);
 }

 uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
@@ -119,7 +119,7 @@ uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	if (!bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) &&
-		!bitmap_test_and_set_nolock(CPU_REG_RFLAGS, &vcpu->reg_cached) && vcpu->launched) {
+		!bitmap_test_and_set_non_atomic(CPU_REG_RFLAGS, &vcpu->reg_cached) && vcpu->launched) {
 		ctx->rflags = exec_vmread(VMX_GUEST_RFLAGS);
 	}
 	return ctx->rflags;
@@ -129,7 +129,7 @@ void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rflags =
 		val;
-	bitmap_set_nolock(CPU_REG_RFLAGS, &vcpu->reg_updated);
+	bitmap_set_non_atomic(CPU_REG_RFLAGS, &vcpu->reg_updated);
 }

 uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr)
@@ -176,7 +176,7 @@ void vcpu_set_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector)
 {
 	pr_dbg("%s", __func__);

-	if (!bitmap_test_and_set_lock((uint16_t)(vector & 0x3fU),
+	if (!bitmap_test_and_set((uint16_t)(vector & 0x3fU),
 		&(vcpu->arch.eoi_exit_bitmap[(vector & 0xffU) >> 6U]))) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE);
 	}
@@ -186,7 +186,7 @@ void vcpu_clear_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector)
 {
 	pr_dbg("%s", __func__);

-	if (bitmap_test_and_clear_lock((uint16_t)(vector & 0x3fU),
+	if (bitmap_test_and_clear((uint16_t)(vector & 0x3fU),
 		&(vcpu->arch.eoi_exit_bitmap[(vector & 0xffU) >> 6U]))) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE);
 	}
@@ -660,16 +660,16 @@ static void write_cached_registers(struct acrn_vcpu *vcpu)
 	struct run_context *ctx =
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	if (bitmap_test_and_clear_nolock(CPU_REG_RIP, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_non_atomic(CPU_REG_RIP, &vcpu->reg_updated)) {
 		exec_vmwrite(VMX_GUEST_RIP, ctx->rip);
 	}
-	if (bitmap_test_and_clear_nolock(CPU_REG_RSP, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_non_atomic(CPU_REG_RSP, &vcpu->reg_updated)) {
 		exec_vmwrite(VMX_GUEST_RSP, ctx->cpu_regs.regs.rsp);
 	}
-	if (bitmap_test_and_clear_nolock(CPU_REG_EFER, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_non_atomic(CPU_REG_EFER, &vcpu->reg_updated)) {
 		exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, ctx->ia32_efer);
 	}
-	if (bitmap_test_and_clear_nolock(CPU_REG_RFLAGS, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_non_atomic(CPU_REG_RFLAGS, &vcpu->reg_updated)) {
 		exec_vmwrite(VMX_GUEST_RFLAGS, ctx->rflags);
 	}
@@ -678,11 +678,11 @@ static void write_cached_registers(struct acrn_vcpu *vcpu)
 	 * switching. There should no other module request updating
 	 * CR0/CR4 here.
 	 */
-	if (bitmap_test_and_clear_nolock(CPU_REG_CR0, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_non_atomic(CPU_REG_CR0, &vcpu->reg_updated)) {
 		vcpu_set_cr0(vcpu, ctx->cr0);
 	}

-	if (bitmap_test_and_clear_nolock(CPU_REG_CR4, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_non_atomic(CPU_REG_CR4, &vcpu->reg_updated)) {
 		vcpu_set_cr4(vcpu, ctx->cr4);
 	}
 }
@@ -1022,7 +1022,7 @@ uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask)
 	for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
 		if ((vdmask & (1UL << vcpu_id)) != 0UL) {
 			vcpu = vcpu_from_vid(vm, vcpu_id);
-			bitmap_set_nolock(pcpuid_from_vcpu(vcpu), &dmask);
+			bitmap_set_non_atomic(pcpuid_from_vcpu(vcpu), &dmask);
 		}
 	}
@@ -1,12 +1,12 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <types.h>
 #include <errno.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/guest/vcpu.h>
 #include <asm/guest/vm.h>
 #include <asm/cpuid.h>
@@ -1,12 +1,12 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <types.h>
 #include <errno.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/guest/virq.h>
 #include <asm/lapic.h>
 #include <asm/mmu.h>
@@ -130,7 +130,7 @@ static inline bool is_nmi_injectable(void)
 }
 void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
 {
-	bitmap_set_lock(eventid, &vcpu->arch.pending_req);
+	bitmap_set(eventid, &vcpu->arch.pending_req);
 	kick_vcpu(vcpu);
 }
@@ -367,38 +367,38 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)

 	if (*pending_req_bits != 0UL) {
 		/* make sure ACRN_REQUEST_INIT_VMCS handler as the first one */
-		if (bitmap_test_and_clear_lock(ACRN_REQUEST_INIT_VMCS, pending_req_bits)) {
+		if (bitmap_test_and_clear(ACRN_REQUEST_INIT_VMCS, pending_req_bits)) {
 			init_vmcs(vcpu);
 		}

-		if (bitmap_test_and_clear_lock(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
+		if (bitmap_test_and_clear(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
 			pr_fatal("Triple fault happen -> shutdown!");
 			ret = -EFAULT;
 		} else {
-			if (bitmap_test_and_clear_lock(ACRN_REQUEST_WAIT_WBINVD, pending_req_bits)) {
+			if (bitmap_test_and_clear(ACRN_REQUEST_WAIT_WBINVD, pending_req_bits)) {
 				wait_event(&vcpu->events[VCPU_EVENT_SYNC_WBINVD]);
 			}

-			if (bitmap_test_and_clear_lock(ACRN_REQUEST_SPLIT_LOCK, pending_req_bits)) {
+			if (bitmap_test_and_clear(ACRN_REQUEST_SPLIT_LOCK, pending_req_bits)) {
 				wait_event(&vcpu->events[VCPU_EVENT_SPLIT_LOCK]);
 			}

-			if (bitmap_test_and_clear_lock(ACRN_REQUEST_EPT_FLUSH, pending_req_bits)) {
+			if (bitmap_test_and_clear(ACRN_REQUEST_EPT_FLUSH, pending_req_bits)) {
 				invept(vcpu->vm->arch_vm.nworld_eptp);
 				if (vcpu->vm->sworld_control.flag.active != 0UL) {
 					invept(vcpu->vm->arch_vm.sworld_eptp);
 				}
 			}

-			if (bitmap_test_and_clear_lock(ACRN_REQUEST_VPID_FLUSH, pending_req_bits)) {
+			if (bitmap_test_and_clear(ACRN_REQUEST_VPID_FLUSH, pending_req_bits)) {
 				flush_vpid_single(arch->vpid);
 			}

-			if (bitmap_test_and_clear_lock(ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE, pending_req_bits)) {
+			if (bitmap_test_and_clear(ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE, pending_req_bits)) {
 				vcpu_set_vmcs_eoi_exit(vcpu);
 			}

-			if (bitmap_test_and_clear_lock(ACRN_REQUEST_SMP_CALL, pending_req_bits)) {
+			if (bitmap_test_and_clear(ACRN_REQUEST_SMP_CALL, pending_req_bits)) {
 				handle_smp_call();
 			}
@@ -409,14 +409,14 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 	/*
 	 * Inject pending exception prior pending interrupt to complete the previous instruction.
 	 */
-	if ((*pending_req_bits != 0UL) && bitmap_test_and_clear_lock(ACRN_REQUEST_EXCP, pending_req_bits)) {
+	if ((*pending_req_bits != 0UL) && bitmap_test_and_clear(ACRN_REQUEST_EXCP, pending_req_bits)) {
 		vcpu_inject_exception(vcpu);
 		injected = true;
 	} else {
 		/* inject NMI before maskable hardware interrupt */

 		if ((*pending_req_bits != 0UL) &&
-			bitmap_test_and_clear_lock(ACRN_REQUEST_NMI, pending_req_bits)) {
+			bitmap_test_and_clear(ACRN_REQUEST_NMI, pending_req_bits)) {
 			if (is_nmi_injectable()) {
 				/* Inject NMI vector = 2 */
 				exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
@@ -424,7 +424,7 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 				injected = true;
 			} else {
 				/* keep the NMI request for next vmexit */
-				bitmap_set_lock(ACRN_REQUEST_NMI, pending_req_bits);
+				bitmap_set(ACRN_REQUEST_NMI, pending_req_bits);
 			}
 		} else {
 			/* handling pending vector injection:
@@ -490,13 +490,13 @@ static inline void acrn_inject_pending_intr(struct acrn_vcpu *vcpu,

 	if (guest_irq_enabled && (!ret)) {
 		/* Inject external interrupt first */
-		if (bitmap_test_and_clear_lock(ACRN_REQUEST_EXTINT, pending_req_bits)) {
+		if (bitmap_test_and_clear(ACRN_REQUEST_EXTINT, pending_req_bits)) {
 			/* has pending external interrupts */
 			ret = vcpu_do_pending_extint(vcpu);
 		}
 	}

-	if (bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT, pending_req_bits)) {
+	if (bitmap_test_and_clear(ACRN_REQUEST_EVENT, pending_req_bits)) {
 		vlapic_inject_intr(vcpu_vlapic(vcpu), guest_irq_enabled, ret);
 	}
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
@@ -8,7 +8,7 @@

 #include <types.h>
 #include <errno.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/guest/virq.h>
 #include <asm/mmu.h>
 #include <asm/guest/vcpu.h>
@@ -318,7 +318,7 @@ static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t value)
 	exec_vmwrite(VMX_CR0_READ_SHADOW, effective_cr0);

 	/* clear read cache, next time read should from VMCS */
-	bitmap_clear_nolock(CPU_REG_CR0, &vcpu->reg_cached);
+	bitmap_clear_non_atomic(CPU_REG_CR0, &vcpu->reg_cached);

 	pr_dbg("VMM: Try to write %016lx, allow to write 0x%016lx to CR0", effective_cr0, tmp);
 }
@@ -420,7 +420,7 @@ static void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
 	exec_vmwrite(VMX_CR4_READ_SHADOW, cr4);

 	/* clear read cache, next time read should from VMCS */
-	bitmap_clear_nolock(CPU_REG_CR4, &vcpu->reg_cached);
+	bitmap_clear_non_atomic(CPU_REG_CR4, &vcpu->reg_cached);

 	pr_dbg("VMM: Try to write %016lx, allow to write 0x%016lx to CR4", cr4, tmp);
 }
@@ -521,7 +521,7 @@ uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	if (bitmap_test_and_set_nolock(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
+	if (bitmap_test_and_set_non_atomic(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
 		ctx->cr0 = (exec_vmread(VMX_CR0_READ_SHADOW) & ~cr0_passthru_mask) |
 			(exec_vmread(VMX_GUEST_CR0) & cr0_passthru_mask);
 	}
@@ -549,7 +549,7 @@ uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	if (bitmap_test_and_set_nolock(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
+	if (bitmap_test_and_set_non_atomic(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
 		ctx->cr4 = (exec_vmread(VMX_CR4_READ_SHADOW) & ~cr4_passthru_mask) |
 			(exec_vmread(VMX_GUEST_CR4) & cr4_passthru_mask);
 	}
@@ -30,7 +30,6 @@

 #include <types.h>
 #include <errno.h>
-#include <asm/lib/bits.h>
 #include <atomic.h>
 #include <per_cpu.h>
 #include <asm/pgtable.h>
@@ -450,11 +449,11 @@ vlapic_set_tmr(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
 {
 	struct lapic_reg *tmrptr = &(vlapic->apic_page.tmr[0]);
 	if (level) {
-		if (!bitmap32_test_and_set_lock((uint16_t)(vector & 0x1fU), &tmrptr[(vector & 0xffU) >> 5U].v)) {
+		if (!bitmap32_test_and_set((uint16_t)(vector & 0x1fU), &tmrptr[(vector & 0xffU) >> 5U].v)) {
 			vcpu_set_eoi_exit_bitmap(vlapic2vcpu(vlapic), vector);
 		}
 	} else {
-		if (bitmap32_test_and_clear_lock((uint16_t)(vector & 0x1fU), &tmrptr[(vector & 0xffU) >> 5U].v)) {
+		if (bitmap32_test_and_clear((uint16_t)(vector & 0x1fU), &tmrptr[(vector & 0xffU) >> 5U].v)) {
 			vcpu_clear_eoi_exit_bitmap(vlapic2vcpu(vlapic), vector);
 		}
 	}
@@ -488,7 +487,7 @@ static void apicv_basic_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector,
 	irrptr = &lapic->irr[0];

 	/* If the interrupt is set, don't try to do it again */
-	if (!bitmap32_test_and_set_lock((uint16_t)(vector & 0x1fU), &irrptr[idx].v)) {
+	if (!bitmap32_test_and_set((uint16_t)(vector & 0x1fU), &irrptr[idx].v)) {
 		/* update TMR if interrupt trigger mode has changed */
 		vlapic_set_tmr(vlapic, vector, level);
 		vcpu_make_request(vlapic2vcpu(vlapic), ACRN_REQUEST_EVENT);
@@ -512,7 +511,7 @@ static void apicv_advanced_accept_intr(struct acrn_vlapic *vlapic, uint32_t vect
 	 * send PI notification to vCPU and hardware will
 	 * sync PIR to vIRR automatically.
 	 */
-	bitmap_set_lock(ACRN_REQUEST_EVENT, &vcpu->arch.pending_req);
+	bitmap_set(ACRN_REQUEST_EVENT, &vcpu->arch.pending_req);

 	if (get_pcpu_id() != pcpuid_from_vcpu(vcpu)) {
 		apicv_trigger_pi_anv(pcpuid_from_vcpu(vcpu), (uint32_t)vcpu->arch.pid.control.bits.nv);
@@ -808,7 +807,7 @@ vlapic_process_eoi(struct acrn_vlapic *vlapic)
 		vector = vlapic->isrv;
 		i = (vector >> 5U);
 		bitpos = (vector & 0x1fU);
-		bitmap32_clear_nolock((uint16_t)bitpos, &isrptr[i].v);
+		bitmap32_clear_non_atomic((uint16_t)bitpos, &isrptr[i].v);

 		dev_dbg(DBG_LEVEL_VLAPIC, "EOI vector %u", vector);
 		vlapic_dump_isr(vlapic, "vlapic_process_eoi");
@@ -933,7 +932,7 @@ static inline void set_dest_mask_phys(struct acrn_vm *vm, uint64_t *dmask, uint3

 	vcpu_id = vm_apicid2vcpu_id(vm, dest);
 	if (vcpu_id < vm->hw.created_vcpus) {
-		bitmap_set_nolock(vcpu_id, dmask);
+		bitmap_set_non_atomic(vcpu_id, dmask);
 	}
 }
@@ -1033,12 +1032,12 @@ vlapic_calc_dest_noshort(struct acrn_vm *vm, bool is_broadcast,
 				/* No other state currently, do nothing */
 			}
 		} else {
-			bitmap_set_nolock(vcpu_id, &dmask);
+			bitmap_set_non_atomic(vcpu_id, &dmask);
 		}
 	}

 	if (lowprio && (lowprio_dest != NULL)) {
-		bitmap_set_nolock(vlapic2vcpu(lowprio_dest)->vcpu_id, &dmask);
+		bitmap_set_non_atomic(vlapic2vcpu(lowprio_dest)->vcpu_id, &dmask);
 	}
 }
@@ -1056,14 +1055,14 @@ vlapic_calc_dest(struct acrn_vcpu *vcpu, uint32_t shorthand, bool is_broadcast,
 		dmask = vlapic_calc_dest_noshort(vcpu->vm, is_broadcast, dest, phys, lowprio);
 		break;
 	case APIC_DEST_SELF:
-		bitmap_set_nolock(vcpu->vcpu_id, &dmask);
+		bitmap_set_non_atomic(vcpu->vcpu_id, &dmask);
 		break;
 	case APIC_DEST_ALLISELF:
 		dmask = vm_active_cpus(vcpu->vm);
 		break;
 	case APIC_DEST_ALLESELF:
 		dmask = vm_active_cpus(vcpu->vm);
-		bitmap_clear_nolock(vcpu->vcpu_id, &dmask);
+		bitmap_clear_non_atomic(vcpu->vcpu_id, &dmask);
 		break;
 	default:
 		/*
@@ -1275,12 +1274,12 @@ static void vlapic_get_deliverable_intr(struct acrn_vlapic *vlapic, uint32_t vec
 	idx = vector >> 5U;

 	irrptr = &lapic->irr[0];
-	bitmap32_clear_lock((uint16_t)(vector & 0x1fU), &irrptr[idx].v);
+	bitmap32_clear((uint16_t)(vector & 0x1fU), &irrptr[idx].v);

 	vlapic_dump_irr(vlapic, "vlapic_get_deliverable_intr");

 	isrptr = &lapic->isr[0];
-	bitmap32_set_nolock((uint16_t)(vector & 0x1fU), &isrptr[idx].v);
+	bitmap32_set_non_atomic((uint16_t)(vector & 0x1fU), &isrptr[idx].v);
 	vlapic_dump_isr(vlapic, "vlapic_get_deliverable_intr");

 	vlapic->isrv = vector;
@@ -1808,7 +1807,7 @@ vlapic_set_local_intr(struct acrn_vm *vm, uint16_t vcpu_id_arg, uint32_t lvt_ind
 	if (vcpu_id == BROADCAST_CPU_ID) {
 		dmask = vm_active_cpus(vm);
 	} else {
-		bitmap_set_nolock(vcpu_id, &dmask);
+		bitmap_set_non_atomic(vcpu_id, &dmask);
 	}
 	error = 0;
 	for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
@@ -1915,7 +1914,7 @@ static void inject_msi_for_lapic_pt(struct acrn_vm *vm, uint64_t addr, uint64_t

 		vcpu_id = ffs64(vdmask);
 		while (vcpu_id != INVALID_BIT_INDEX) {
-			bitmap_clear_nolock(vcpu_id, &vdmask);
+			bitmap_clear_non_atomic(vcpu_id, &vdmask);
 			vcpu = vcpu_from_vid(vm, vcpu_id);
 			dest |= per_cpu(arch.lapic_ldr, pcpuid_from_vcpu(vcpu));
 			vcpu_id = ffs64(vdmask);
@@ -2232,8 +2231,8 @@ apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector)

 	pid = get_pi_desc(vlapic2vcpu(vlapic));
 	idx = vector >> 6U;
-	if (!bitmap_test_and_set_lock((uint16_t)(vector & 0x3fU), &pid->pir[idx])) {
-		notify = !bitmap_test_and_set_lock(POSTED_INTR_ON, &pid->control.value);
+	if (!bitmap_test_and_set((uint16_t)(vector & 0x3fU), &pid->pir[idx])) {
+		notify = !bitmap_test_and_set(POSTED_INTR_ON, &pid->control.value);
 	}
 	return notify;
 }
@@ -2365,7 +2364,7 @@ bool vlapic_clear_pending_intr(struct acrn_vcpu *vcpu, uint32_t vector)
 {
 	struct lapic_reg *irrptr = &(vcpu->arch.vlapic.apic_page.irr[0]);
 	uint32_t idx = vector >> 5U;
-	return bitmap32_test_and_clear_lock((uint16_t)(vector & 0x1fU), &irrptr[idx].v);
+	return bitmap32_test_and_clear((uint16_t)(vector & 0x1fU), &irrptr[idx].v);
 }

 bool vlapic_has_pending_intr(struct acrn_vcpu *vcpu)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -12,7 +12,7 @@
 #include <asm/guest/vm.h>
 #include <asm/guest/vm_reset.h>
 #include <asm/guest/virq.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/e820.h>
 #include <boot.h>
 #include <asm/vtd.h>
@@ -625,7 +625,7 @@ static uint64_t lapic_pt_enabled_pcpu_bitmap(struct acrn_vm *vm)
 	if (is_lapic_pt_configured(vm)) {
 		foreach_vcpu(i, vm, vcpu) {
 			if (is_x2apic_enabled(vcpu_vlapic(vcpu))) {
-				bitmap_set_nolock(pcpuid_from_vcpu(vcpu), &bitmap);
+				bitmap_set_non_atomic(pcpuid_from_vcpu(vcpu), &bitmap);
 			}
 		}
 	}
@@ -832,7 +832,7 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
 		uint64_t tmp64 = pcpu_bitmap;
 		while (tmp64 != 0UL) {
 			pcpu_id = ffs64(tmp64);
-			bitmap_clear_nolock(pcpu_id, &tmp64);
+			bitmap_clear_non_atomic(pcpu_id, &tmp64);
 			status = prepare_vcpu(vm, pcpu_id);
 			if (status != 0) {
 				break;
@@ -886,7 +886,7 @@ static int32_t offline_lapic_pt_enabled_pcpus(const struct acrn_vm *vm, uint64_t
 	uint16_t this_pcpu_id = get_pcpu_id();

 	if (bitmap_test(this_pcpu_id, &mask)) {
-		bitmap_clear_nolock(this_pcpu_id, &mask);
+		bitmap_clear_non_atomic(this_pcpu_id, &mask);
 		if (vm->state == VM_POWERED_OFF) {
 			/*
 			 * If the current pcpu needs to offline itself,
@@ -1268,7 +1268,7 @@ bool has_rt_vm(void)

 void make_shutdown_vm_request(uint16_t pcpu_id)
 {
-	bitmap_set_lock(NEED_SHUTDOWN_VM, &per_cpu(pcpu_flag, pcpu_id));
+	bitmap_set(NEED_SHUTDOWN_VM, &per_cpu(pcpu_flag, pcpu_id));
 	if (get_pcpu_id() != pcpu_id) {
 		kick_pcpu(pcpu_id);
 	}
@@ -1276,7 +1276,7 @@ void make_shutdown_vm_request(uint16_t pcpu_id)

 bool need_shutdown_vm(uint16_t pcpu_id)
 {
-	return bitmap_test_and_clear_lock(NEED_SHUTDOWN_VM, &per_cpu(pcpu_flag, pcpu_id));
+	return bitmap_test_and_clear(NEED_SHUTDOWN_VM, &per_cpu(pcpu_flag, pcpu_id));
 }

 /*
@@ -59,7 +59,7 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
 		pause_vm(vm);
 		put_vm_lock(vm);

-		bitmap_set_nolock(vm->vm_id,
+		bitmap_set_non_atomic(vm->vm_id,
 			&per_cpu(shutdown_vm_bitmap, pcpuid_from_vcpu(vcpu)));
 		make_shutdown_vm_request(pcpuid_from_vcpu(vcpu));
 	}
@@ -109,7 +109,7 @@ static bool handle_common_reset_reg_write(struct acrn_vcpu *vcpu, bool reset, bo
 			 * ACRN doesn't support re-launch, just shutdown the guest.
 			 */
 			pause_vm(vm);
-			bitmap_set_nolock(vm->vm_id,
+			bitmap_set_non_atomic(vm->vm_id,
 				&per_cpu(shutdown_vm_bitmap, pcpuid_from_vcpu(vcpu)));
 			make_shutdown_vm_request(pcpuid_from_vcpu(vcpu));
 		}
@@ -250,6 +250,6 @@ void shutdown_vm_from_idle(uint16_t pcpu_id)
 			(void)shutdown_vm(vm);
 		}
 		put_vm_lock(vm);
-		bitmap_clear_nolock(vm_id, vms);
+		bitmap_clear_non_atomic(vm_id, vms);
 	}
 }
@@ -5,7 +5,7 @@
 */

 #include <types.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <spinlock.h>
 #include <per_cpu.h>
 #include <asm/io.h>
@@ -1,11 +1,11 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <types.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/msr.h>
 #include <asm/cpu.h>
 #include <per_cpu.h>
@@ -248,7 +248,7 @@ void send_dest_ipi_mask(uint64_t dest_mask, uint32_t vector)

 	pcpu_id = ffs64(mask);
 	while (pcpu_id < MAX_PCPU_NUM) {
-		bitmap_clear_nolock(pcpu_id, &mask);
+		bitmap_clear_non_atomic(pcpu_id, &mask);
 		send_single_ipi(pcpu_id, vector);
 		pcpu_id = ffs64(mask);
 	}
@@ -6,8 +6,8 @@

 #include <types.h>
 #include <errno.h>
-#include <asm/lib/bits.h>
 #include <atomic.h>
+#include <bits.h>
 #include <asm/irq.h>
 #include <asm/cpu.h>
 #include <asm/per_cpu.h>
@@ -1,10 +1,10 @@
 /*
- * Copyright (C) 2018-2024 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
 #include <types.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/page.h>
 #include <logmsg.h>
@@ -33,7 +33,7 @@ struct page *alloc_page(struct page_pool *pool)
 		idx = loop_idx % pool->bitmap_size;
 		if (*(pool->bitmap + idx) != ~0UL) {
 			bit = ffz64(*(pool->bitmap + idx));
-			bitmap_set_nolock(bit, pool->bitmap + idx);
+			bitmap_set_non_atomic(bit, pool->bitmap + idx);
 			page = pool->start_page + ((idx << 6U) + bit);

 			pool->last_hint_id = idx;
@@ -67,7 +67,7 @@ void free_page(struct page_pool *pool, struct page *page)
 	spinlock_obtain(&pool->lock);
 	idx = (page - pool->start_page) >> 6U;
 	bit = (page - pool->start_page) & 0x3fUL;
-	bitmap_clear_nolock(bit, pool->bitmap + idx);
+	bitmap_clear_non_atomic(bit, pool->bitmap + idx);
 	spinlock_release(&pool->lock);
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020-2022 Intel Corporation.
+ * Copyright (C) 2020-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -12,7 +12,7 @@
 #include <errno.h>
 #include <logmsg.h>
 #include <asm/rdt.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/board.h>
 #include <asm/vm_config.h>
 #include <asm/msr.h>
@@ -1,10 +1,10 @@
 /*
- * Copyright (C) 2020-2022 Intel Corporation.
+ * Copyright (C) 2020-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
 #include <types.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <rtl.h>
 #include <util.h>
 #include <logmsg.h>
@@ -142,7 +142,7 @@ bool init_software_sram(bool is_bsp)
 			/* Clear the NX bit of PTCM area */
 			set_paging_x((uint64_t)hpa2hva(rtcm_binary->address), rtcm_binary->size);
 		}
-		bitmap_clear_lock(get_pcpu_id(), &init_sw_sram_cpus_mask);
+		bitmap_clear(get_pcpu_id(), &init_sw_sram_cpus_mask);
 	}

 	wait_sync_change(&init_sw_sram_cpus_mask, 0UL);
@@ -167,7 +167,7 @@ bool init_software_sram(bool is_bsp)
 		set_paging_nx((uint64_t)hpa2hva(rtcm_binary->address), rtcm_binary->size);
 	}

-	bitmap_set_lock(get_pcpu_id(), &init_sw_sram_cpus_mask);
+	bitmap_set(get_pcpu_id(), &init_sw_sram_cpus_mask);
 	wait_sync_change(&init_sw_sram_cpus_mask, ALL_CPUS_MASK);
 	/* Flush the TLB on BSP and all APs to restore the NX for Software SRAM area */
 	flush_tlb_range((uint64_t)hpa2hva(rtcm_binary->address), rtcm_binary->size);
@@ -8,7 +8,7 @@

 #include <types.h>
 #include <errno.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <spinlock.h>
 #include <asm/cpu_caps.h>
 #include <irq.h>
@@ -1413,7 +1413,7 @@ void dmar_free_irte(const struct intr_source *intr_src, uint16_t index)

 	if (!is_irte_reserved(dmar_unit, index)) {
 		spinlock_obtain(&dmar_unit->lock);
-		bitmap_clear_nolock(index & 0x3FU, &dmar_unit->irte_alloc_bitmap[index >> 6U]);
+		bitmap_clear_non_atomic(index & 0x3FU, &dmar_unit->irte_alloc_bitmap[index >> 6U]);
 		spinlock_release(&dmar_unit->lock);
 	}
 }
@@ -4,7 +4,7 @@
 * SPDX-License-Identifier: BSD-3-Clause
 */
 #include <cpu.h>
-#include <asm/lib/bits.h>
+#include <bits.h>

 static volatile uint64_t pcpu_active_bitmap = 0UL;
@@ -24,13 +24,13 @@ bool is_pcpu_active(uint16_t pcpu_id)

 void set_pcpu_active(uint16_t pcpu_id)
 {
-	bitmap_set_lock(pcpu_id, &pcpu_active_bitmap);
+	bitmap_set(pcpu_id, &pcpu_active_bitmap);
 }

 void clear_pcpu_active(uint16_t pcpu_id)
 {
-	bitmap_clear_lock(pcpu_id, &pcpu_active_bitmap);
+	bitmap_clear(pcpu_id, &pcpu_active_bitmap);
 }

 bool check_pcpus_active(uint64_t mask)
@@ -1,11 +1,11 @@
 /*
- * Copyright (C) 2021-2022 Intel Corporation.
+ * Copyright (C) 2021-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <errno.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <irq.h>
 #include <common/softirq.h>
 #include <asm/irq.h>
@@ -40,10 +40,10 @@ static uint32_t alloc_irq_num(uint32_t req_irq, bool reserve)
 		if (irq >= NR_IRQS) {
 			irq = IRQ_INVALID;
 		} else {
-			bitmap_set_nolock((uint16_t)(irq & 0x3FU),
+			bitmap_set_non_atomic((uint16_t)(irq & 0x3FU),
 				irq_alloc_bitmap + (irq >> 6U));
 			if (reserve) {
-				bitmap_set_nolock((uint16_t)(irq & 0x3FU),
+				bitmap_set_non_atomic((uint16_t)(irq & 0x3FU),
 					irq_rsvd_bitmap + (irq >> 6U));
 			}
 		}
@@ -71,7 +71,7 @@ static void free_irq_num(uint32_t irq)

 	if (bitmap_test((uint16_t)(irq & 0x3FU),
 		irq_rsvd_bitmap + (irq >> 6U)) == false) {
-		bitmap_clear_nolock((uint16_t)(irq & 0x3FU),
+		bitmap_clear_non_atomic((uint16_t)(irq & 0x3FU),
 			irq_alloc_bitmap + (irq >> 6U));
 	}
 	spinlock_irqrestore_release(&irq_alloc_spinlock, rflags);
@@ -6,7 +6,7 @@

 #include <asm/cpu.h>
 #include <atomic.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <per_cpu.h>
 #include <asm/notify.h>
 #include <common/notify.h>
@@ -38,7 +38,7 @@ void kick_notification(__unused uint32_t irq, __unused void *data)
 		if (smp_call->func != NULL) {
 			smp_call->func(smp_call->data);
 		}
-		bitmap_clear_lock(pcpu_id, &smp_call_mask);
+		bitmap_clear(pcpu_id, &smp_call_mask);
 	}
 }
@@ -63,10 +63,10 @@ void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)

 	pcpu_id = ffs64(mask);
 	while (pcpu_id < MAX_PCPU_NUM) {
-		bitmap_clear_nolock(pcpu_id, &mask);
+		bitmap_clear_non_atomic(pcpu_id, &mask);
 		if (pcpu_id == get_pcpu_id()) {
 			func(data);
-			bitmap_clear_nolock(pcpu_id, &smp_call_mask);
+			bitmap_clear_non_atomic(pcpu_id, &smp_call_mask);
 		} else if (is_pcpu_active(pcpu_id)) {
 			smp_call = &per_cpu(smp_call_info, pcpu_id);
@@ -81,8 +81,8 @@ void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
 			arch_smp_call_kick_pcpu(pcpu_id);
 		} else {
 			/* pcpu is not in active, print error */
-			pr_err("pcpu_id %d not in active!", pcpu_id);
-			bitmap_clear_nolock(pcpu_id, &smp_call_mask);
+			//pr_err("pcpu_id %d not in active!", pcpu_id);
+			bitmap_clear_non_atomic(pcpu_id, &smp_call_mask);
 		}
 		pcpu_id = ffs64(mask);
 	}
@@ -32,7 +32,7 @@ static inline uint16_t ptirq_alloc_entry_id(void)
 	uint16_t id = (uint16_t)ffz64_ex(ptirq_entry_bitmaps, CONFIG_MAX_PT_IRQ_ENTRIES);

 	while (id < CONFIG_MAX_PT_IRQ_ENTRIES) {
-		if (!bitmap_test_and_set_lock((id & 0x3FU), &ptirq_entry_bitmaps[id >> 6U])) {
+		if (!bitmap_test_and_set((id & 0x3FU), &ptirq_entry_bitmaps[id >> 6U])) {
 			break;
 		}
 		id = (uint16_t)ffz64_ex(ptirq_entry_bitmaps, CONFIG_MAX_PT_IRQ_ENTRIES);
@@ -177,7 +177,7 @@ void ptirq_release_entry(struct ptirq_remapping_info *entry)
 	del_timer(&entry->intr_delay_timer);
 	CPU_INT_ALL_RESTORE(rflags);

-	bitmap_clear_lock((entry->ptdev_entry_id) & 0x3FU, &ptirq_entry_bitmaps[entry->ptdev_entry_id >> 6U]);
+	bitmap_clear((entry->ptdev_entry_id) & 0x3FU, &ptirq_entry_bitmaps[entry->ptdev_entry_id >> 6U]);

 	(void)memset((void *)entry, 0U, sizeof(struct ptirq_remapping_info));
 }
@@ -6,7 +6,7 @@

 #include <rtl.h>
 #include <list.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/cpu.h>
 #include <per_cpu.h>
 #include <asm/lapic.h>
@@ -153,7 +153,7 @@ void make_reschedule_request(uint16_t pcpu_id)
 {
 	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);

-	bitmap_set_lock(NEED_RESCHEDULE, &ctl->flags);
+	bitmap_set(NEED_RESCHEDULE, &ctl->flags);
 	if (get_pcpu_id() != pcpu_id) {
 		kick_pcpu(pcpu_id);
 	}
@@ -179,7 +179,7 @@ void schedule(void)
 	if (ctl->scheduler->pick_next != NULL) {
 		next = ctl->scheduler->pick_next(ctl);
 	}
-	bitmap_clear_lock(NEED_RESCHEDULE, &ctl->flags);
+	bitmap_clear(NEED_RESCHEDULE, &ctl->flags);

 	/* If we picked different sched object, switch context */
 	if (prev != next) {
@@ -1,11 +1,11 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <types.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/cpu.h>
 #include <per_cpu.h>
 #include <softirq.h>
@@ -29,7 +29,7 @@ void register_softirq(uint16_t nr, softirq_handler handler)
 */
 void fire_softirq(uint16_t nr)
 {
-	bitmap_set_lock(nr, &per_cpu(softirq_pending, get_pcpu_id()));
+	bitmap_set(nr, &per_cpu(softirq_pending, get_pcpu_id()));
 }

 static void do_softirq_internal(uint16_t cpu_id)
@@ -39,7 +39,7 @@ static void do_softirq_internal(uint16_t cpu_id)
 	uint16_t nr = ffs64(*softirq_pending_bitmap);

 	while (nr < NR_SOFTIRQS) {
-		bitmap_clear_lock(nr, softirq_pending_bitmap);
+		bitmap_clear(nr, softirq_pending_bitmap);
 		(*softirq_handlers[nr])(cpu_id);
 		nr = ffs64(*softirq_pending_bitmap);
 	}
@@ -1,12 +1,12 @@
 /*
- * Copyright (C) 2018-2022 Intel Corporation.
+ * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

 #include <types.h>
 #include <errno.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include "shell_priv.h"
 #include <asm/irq.h>
 #include <console.h>
@@ -1000,7 +1000,7 @@ static int32_t shell_vcpu_dumpreg(int32_t argc, char **argv)
 		dump.vcpu = vcpu;
 		dump.str = shell_log_buf;
 		dump.str_max = SHELL_LOG_BUF_SIZE;
-		bitmap_set_nolock(pcpu_id, &mask);
+		bitmap_set_non_atomic(pcpu_id, &mask);
 		smp_call_function(mask, dump_vcpu_reg, &dump);
 		shell_puts(shell_log_buf);
 		status = 0;
@@ -1101,7 +1101,7 @@ static int32_t shell_dump_guest_mem(int32_t argc, char **argv)
 	dump.len = length;

 	pcpu_id = pcpuid_from_vcpu(vcpu);
-	bitmap_set_nolock(pcpu_id, &mask);
+	bitmap_set_non_atomic(pcpu_id, &mask);
 	smp_call_function(mask, dump_guest_mem, &dump);
 	ret = 0;
 }
@@ -96,14 +96,14 @@ vioapic_set_pinstate(struct acrn_single_vioapic *vioapic, uint32_t pin, uint32_t
 	old_lvl = (uint32_t)bitmap_test((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
 	if (level == 0U) {
 		/* clear pin_state and deliver interrupt according to polarity */
-		bitmap_clear_nolock((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
+		bitmap_clear_non_atomic((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
 		if ((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_ALO)
 			&& (old_lvl != level)) {
 			vioapic_generate_intr(vioapic, pin);
 		}
 	} else {
 		/* set pin_state and deliver interrupt according to polarity */
-		bitmap_set_nolock((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
+		bitmap_set_non_atomic((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
 		if ((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_AHI)
 			&& (old_lvl != level)) {
 			vioapic_generate_intr(vioapic, pin);
@@ -719,7 +719,7 @@ struct pci_vdev *vpci_init_vdev(struct acrn_vpci *vpci, struct acrn_vm_pci_dev_c
 	uint32_t id = (uint32_t)ffz64_ex(vpci->vdev_bitmaps, CONFIG_MAX_PCI_DEV_NUM);

 	if (id < CONFIG_MAX_PCI_DEV_NUM) {
-		bitmap_set_nolock((id & 0x3FU), &vpci->vdev_bitmaps[id >> 6U]);
+		bitmap_set_non_atomic((id & 0x3FU), &vpci->vdev_bitmaps[id >> 6U]);

 		vdev = &vpci->pci_vdevs[id];
 		vdev->id = id;
@@ -758,7 +758,7 @@ void vpci_deinit_vdev(struct pci_vdev *vdev)
 	vdev->vdev_ops->deinit_vdev(vdev);

 	hlist_del(&vdev->link);
-	bitmap_clear_nolock((vdev->id & 0x3FU), &vdev->vpci->vdev_bitmaps[vdev->id >> 6U]);
+	bitmap_clear_non_atomic((vdev->id & 0x3FU), &vdev->vpci->vdev_bitmaps[vdev->id >> 6U]);
 	memset(vdev, 0U, sizeof(struct pci_vdev));
 }
@@ -40,7 +40,7 @@
 #include <logmsg.h>
 #include <asm/pci_dev.h>
 #include <asm/vtd.h>
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/board.h>
 #include <platform_acpi_info.h>
 #include <hash.h>
@@ -433,7 +433,7 @@ static void scan_pci_hierarchy(uint8_t bus, uint64_t buses_visited[BUSES_BITMAP_
 			current_drhd_index = bus_map[s].bus_drhd_index;
 			s = s + 1U;

-			bitmap_set_nolock(current_bus_index,
+			bitmap_set_non_atomic(current_bus_index,
 				&buses_visited[current_bus_index >> 6U]);

 			pbdf.bits.b = current_bus_index;
@@ -13,7 +13,7 @@

 #ifndef ASSEMBLER

-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <spinlock.h>
 #include <asm/pgtable.h>
 #include <asm/guest/vcpu.h>
@@ -194,7 +194,7 @@ static inline uint64_t vm_active_cpus(const struct acrn_vm *vm)
 	const struct acrn_vcpu *vcpu;

 	foreach_vcpu(i, vm, vcpu) {
-		bitmap_set_nolock(vcpu->vcpu_id, &dmask);
+		bitmap_set_non_atomic(vcpu->vcpu_id, &dmask);
 	}

 	return dmask;
@@ -25,20 +25,10 @@
 *
 * $FreeBSD$
 */

-#ifndef BITS_H
-#define BITS_H
+#ifndef X86_LIB_BITS_H
+#define X86_LIB_BITS_H
 #include <atomic.h>

-/**
- *
- * INVALID_BIT_INDEX means when input paramter is zero,
- * bit operations function can't find bit set and return
- * the invalid bit index directly.
- *
- **/
-#define INVALID_BIT_INDEX 0xffffU
-
 /*
 *
 * fls32 - Find the Last (most significant) bit Set in value and
@@ -62,7 +52,7 @@
 * set and return the invalid bit index directly.
 *
 */
-static inline uint16_t fls32(uint32_t value)
+static inline int16_t arch_fls32(uint32_t value)
 {
 	uint32_t ret;
 	asm volatile("bsrl %1,%0\n\t"
@@ -73,7 +63,7 @@ static inline uint16_t fls32(uint32_t value)
 	return (uint16_t)ret;
 }

-static inline uint16_t fls64(uint64_t value)
+static inline uint16_t arch_fls64(uint64_t value)
 {
 	uint64_t ret = 0UL;
 	asm volatile("bsrq %1,%0\n\t"
@@ -109,7 +99,7 @@ static inline uint16_t fls64(uint64_t value)
 * set and return the invalid bit index directly.
 *
 */
-static inline uint16_t ffs64(uint64_t value)
+static inline uint16_t arch_ffs64(uint64_t value)
 {
 	uint64_t ret;
 	asm volatile("bsfq %1,%0\n\t"
@@ -120,109 +110,48 @@ static inline uint16_t ffs64(uint64_t value)
 	return (uint16_t)ret;
 }

-/*bit scan forward for the least significant bit '0'*/
-static inline uint16_t ffz64(uint64_t value)
-{
-	return ffs64(~value);
-}
-
-
-/*
- * find the first zero bit in a uint64_t array.
- * @pre: the size must be multiple of 64.
- */
-static inline uint64_t ffz64_ex(const uint64_t *addr, uint64_t size)
-{
-	uint64_t ret = size;
-	uint64_t idx;
-
-	for (idx = 0UL; (idx << 6U) < size; idx++) {
-		if (addr[idx] != ~0UL) {
-			ret = (idx << 6U) + ffz64(addr[idx]);
-			break;
-		}
-	}
-
-	return ret;
-}
-/*
- * Counts leading zeros.
- *
- * The number of leading zeros is defined as the number of
- * most significant bits which are not '1'. E.g.:
- *    clz(0x80000000)==0
- *    clz(0x40000000)==1
- *    ...
- *    clz(0x00000001)==31
- *    clz(0x00000000)==32
- *
- * @param value:The 32 bit value to count the number of leading zeros.
- *
- * @return The number of leading zeros in 'value'.
- */
-static inline uint16_t clz(uint32_t value)
-{
-	return ((value != 0U) ? (31U - fls32(value)) : 32U);
-}
-
-/*
- * Counts leading zeros (64 bit version).
- *
- * @param value:The 64 bit value to count the number of leading zeros.
- *
- * @return The number of leading zeros in 'value'.
- */
-static inline uint16_t clz64(uint64_t value)
-{
-	return ((value != 0UL) ? (63U - fls64(value)) : 64U);
-}
-
 /*
 * (*addr) |= (1UL<<nr);
 * Note:Input parameter nr shall be less than 64.
 * If nr>=64, it will be truncated.
 */
 #define build_bitmap_set(name, op_len, op_type, lock) \
-static inline void name(uint16_t nr_arg, volatile op_type *addr) \
+static inline void name(uint32_t nr_arg, volatile op_type *addr) \
 { \
-	uint16_t nr; \
+	uint32_t nr; \
 	nr = nr_arg & ((8U * sizeof(op_type)) - 1U); \
 	asm volatile(lock "or" op_len " %1,%0" \
 			:  "+m" (*addr) \
 			:  "r" ((op_type)(1UL<<nr)) \
 			:  "cc", "memory"); \
 }
-build_bitmap_set(bitmap_set_nolock, "q", uint64_t, "")
-build_bitmap_set(bitmap_set_lock, "q", uint64_t, BUS_LOCK)
-build_bitmap_set(bitmap32_set_nolock, "l", uint32_t, "")
-build_bitmap_set(bitmap32_set_lock, "l", uint32_t, BUS_LOCK)
+build_bitmap_set(arch_bitmap_set, "q", uint64_t, BUS_LOCK)
+build_bitmap_set(arch_bitmap32_set, "l", uint32_t, BUS_LOCK)

 /*
 * (*addr) &= ~(1UL<<nr);
 * Note:Input parameter nr shall be less than 64.
 * If nr>=64, it will be truncated.
 */
 #define build_bitmap_clear(name, op_len, op_type, lock) \
-static inline void name(uint16_t nr_arg, volatile op_type *addr) \
+static inline void name(uint32_t nr_arg, volatile op_type *addr) \
 { \
-	uint16_t nr; \
+	uint32_t nr; \
 	nr = nr_arg & ((8U * sizeof(op_type)) - 1U); \
 	asm volatile(lock "and" op_len " %1,%0" \
 			:  "+m" (*addr) \
 			:  "r" ((op_type)(~(1UL<<(nr)))) \
 			:  "cc", "memory"); \
 }
-build_bitmap_clear(bitmap_clear_nolock, "q", uint64_t, "")
-build_bitmap_clear(bitmap_clear_lock, "q", uint64_t, BUS_LOCK)
-build_bitmap_clear(bitmap32_clear_nolock, "l", uint32_t, "")
-build_bitmap_clear(bitmap32_clear_lock, "l", uint32_t, BUS_LOCK)
+build_bitmap_clear(arch_bitmap_clear, "q", uint64_t, BUS_LOCK)
+build_bitmap_clear(arch_bitmap32_clear, "l", uint32_t, BUS_LOCK)

 /*
 * return !!((*addr) & (1UL<<nr));
 * Note:Input parameter nr shall be less than 64. If nr>=64, it will
 * be truncated.
 */
-static inline bool bitmap_test(uint16_t nr, const volatile uint64_t *addr)
+static inline bool arch_bitmap_test(uint32_t nr, const volatile uint64_t *addr)
 {
 	int32_t ret = 0;
 	asm volatile("btq %q2,%1\n\tsbbl %0, %0"
@@ -232,7 +161,7 @@ static inline bool bitmap_test(uint16_t nr, const volatile uint64_t *addr)
 	return (ret != 0);
 }

-static inline bool bitmap32_test(uint16_t nr, const volatile uint32_t *addr)
+static inline bool arch_bitmap32_test(uint32_t nr, const volatile uint32_t *addr)
 {
 	int32_t ret = 0;
 	asm volatile("btl %2,%1\n\tsbbl %0, %0"
@@ -250,9 +179,9 @@ static inline bool bitmap32_test(uint16_t nr, const volatile uint32_t *addr)
 * will be truncated.
 */
 #define build_bitmap_testandset(name, op_len, op_type, lock) \
-static inline bool name(uint16_t nr_arg, volatile op_type *addr) \
+static inline bool name(uint32_t nr_arg, volatile op_type *addr) \
 { \
-	uint16_t nr; \
+	uint32_t nr; \
 	int32_t ret=0; \
 	nr = nr_arg & ((8U * sizeof(op_type)) - 1U); \
 	asm volatile(lock "bts" op_len " %2,%1\n\tsbbl %0,%0" \
@@ -261,10 +190,8 @@ static inline bool name(uint16_t nr_arg, volatile op_type *addr) \
 			:  "cc", "memory"); \
 	return (ret != 0); \
 }
-build_bitmap_testandset(bitmap_test_and_set_nolock, "q", uint64_t, "")
-build_bitmap_testandset(bitmap_test_and_set_lock, "q", uint64_t, BUS_LOCK)
-build_bitmap_testandset(bitmap32_test_and_set_nolock, "l", uint32_t, "")
-build_bitmap_testandset(bitmap32_test_and_set_lock, "l", uint32_t, BUS_LOCK)
+build_bitmap_testandset(arch_bitmap_test_and_set, "q", uint64_t, BUS_LOCK)
+build_bitmap_testandset(arch_bitmap32_test_and_set, "l", uint32_t, BUS_LOCK)

 /*
 * bool ret = (*addr) & (1UL<<nr);
@@ -274,9 +201,9 @@ build_bitmap_testandset(bitmap32_test_and_set_lock, "l", uint32_t, BUS_LOCK)
 * it will be truncated.
 */
 #define build_bitmap_testandclear(name, op_len, op_type, lock) \
-static inline bool name(uint16_t nr_arg, volatile op_type *addr) \
+static inline bool name(uint32_t nr_arg, volatile op_type *addr) \
 { \
-	uint16_t nr; \
+	uint32_t nr; \
 	int32_t ret=0; \
 	nr = nr_arg & ((8U * sizeof(op_type)) - 1U); \
 	asm volatile(lock "btr" op_len " %2,%1\n\tsbbl %0,%0" \
@@ -285,14 +212,6 @@ static inline bool name(uint16_t nr_arg, volatile op_type *addr) \
 			:  "cc", "memory"); \
 	return (ret != 0); \
 }
-build_bitmap_testandclear(bitmap_test_and_clear_nolock, "q", uint64_t, "")
-build_bitmap_testandclear(bitmap_test_and_clear_lock, "q", uint64_t, BUS_LOCK)
-build_bitmap_testandclear(bitmap32_test_and_clear_nolock, "l", uint32_t, "")
-build_bitmap_testandclear(bitmap32_test_and_clear_lock, "l", uint32_t, BUS_LOCK)
-
-static inline uint16_t bitmap_weight(uint64_t bits)
-{
-	return (uint16_t)__builtin_popcountl(bits);
-}
-
-#endif /* BITS_H*/
+build_bitmap_testandclear(arch_bitmap_test_and_clear, "q", uint64_t, BUS_LOCK)
+build_bitmap_testandclear(arch_bitmap32_test_and_clear, "l", uint32_t, BUS_LOCK)
+#endif /* X86_LIB_BITS_H */
hypervisor/include/lib/bits.h (new file, 126 lines)
@@ -0,0 +1,126 @@
/*
 * Copyright (C) 2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Authors:
 *	Haoyu Tang <haoyu.tang@intel.com>
 */

#ifndef BITS_H
#define BITS_H
#include <types.h>
#include <asm/lib/bits.h>

/**
 * INVALID_BIT_INDEX is the value returned by the bit-scan functions
 * when the input parameter is zero, i.e. when there is no set bit
 * to find.
 **/
#ifndef INVALID_BIT_INDEX
#define INVALID_BIT_INDEX 0xffffU
#endif
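A minimal sketch of the intended caller pattern for the sentinel, using only names declared in this header (the mask value is illustrative):

	uint64_t mask = 0x15UL;			/* bits 0, 2 and 4 set */
	uint16_t i = ffs64(mask);

	while (i != INVALID_BIT_INDEX) {
		bitmap_clear_non_atomic(i, &mask);	/* consume bit i */
		/* ... handle bit i ... */
		i = ffs64(mask);
	}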

/* These mandatory functions must be implemented by each arch bits library. */
static inline int16_t arch_fls32(uint32_t value);
static inline uint16_t arch_fls64(uint64_t value);
static inline uint16_t arch_ffs64(uint64_t value);
static inline void arch_bitmap_set(uint32_t nr_arg, volatile uint64_t *addr);
static inline void arch_bitmap32_set(uint32_t nr_arg, volatile uint32_t *addr);
static inline void arch_bitmap_clear(uint32_t nr_arg, volatile uint64_t *addr);
static inline void arch_bitmap32_clear(uint32_t nr_arg, volatile uint32_t *addr);
static inline bool arch_bitmap_test_and_set(uint32_t nr_arg, volatile uint64_t *addr);
static inline bool arch_bitmap32_test_and_set(uint32_t nr_arg, volatile uint32_t *addr);
static inline bool arch_bitmap_test_and_clear(uint32_t nr_arg, volatile uint64_t *addr);
static inline bool arch_bitmap32_test_and_clear(uint32_t nr_arg, volatile uint32_t *addr);
static inline bool arch_bitmap_test(uint32_t nr, const volatile uint64_t *addr);
static inline bool arch_bitmap32_test(uint32_t nr, const volatile uint32_t *addr);
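For a sense of what satisfying this contract takes on another architecture, a hypothetical port could back the scan hooks with compiler builtins; this sketch is not part of the commit and the builtin choice is an assumption:

	/* Hypothetical arch_ffs64 built on a GCC/Clang builtin (illustration only):
	 * returns the index of the lowest set bit, or INVALID_BIT_INDEX for 0. */
	static inline uint16_t arch_ffs64(uint64_t value)
	{
		return (value != 0UL) ? (uint16_t)__builtin_ctzl(value) : INVALID_BIT_INDEX;
	}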

/* The common functions map to the arch implementation */
static inline int16_t fls32(uint32_t value)
{
	return arch_fls32(value);
}

static inline uint16_t fls64(uint64_t value)
{
	return arch_fls64(value);
}

static inline uint16_t ffs64(uint64_t value)
{
	return arch_ffs64(value);
}

static inline uint16_t ffz64(uint64_t value)
{
	return arch_ffs64(~value);
}

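Since ffz64() merely scans the complement of its argument, a quick worked example:

	uint64_t v = 0x0FUL;		/* bits 0..3 set, bit 4 clear */
	uint16_t first_zero = ffz64(v);	/* ~v has bit 4 as its lowest set bit */
	/* first_zero == 4 */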
static inline void bitmap_set(uint32_t nr_arg, volatile uint64_t *addr)
{
	arch_bitmap_set(nr_arg, addr);
}

static inline void bitmap32_set(uint32_t nr_arg, volatile uint32_t *addr)
{
	arch_bitmap32_set(nr_arg, addr);
}

static inline void bitmap_clear(uint32_t nr_arg, volatile uint64_t *addr)
{
	arch_bitmap_clear(nr_arg, addr);
}

static inline void bitmap32_clear(uint32_t nr_arg, volatile uint32_t *addr)
{
	arch_bitmap32_clear(nr_arg, addr);
}

static inline bool bitmap_test_and_set(uint32_t nr_arg, volatile uint64_t *addr)
{
	return arch_bitmap_test_and_set(nr_arg, addr);
}

static inline bool bitmap32_test_and_set(uint32_t nr_arg, volatile uint32_t *addr)
{
	return arch_bitmap32_test_and_set(nr_arg, addr);
}

static inline bool bitmap_test_and_clear(uint32_t nr_arg, volatile uint64_t *addr)
{
	return arch_bitmap_test_and_clear(nr_arg, addr);
}

static inline bool bitmap32_test_and_clear(uint32_t nr_arg, volatile uint32_t *addr)
{
	return arch_bitmap32_test_and_clear(nr_arg, addr);
}

static inline bool bitmap_test(uint32_t nr, const volatile uint64_t *addr)
{
	return arch_bitmap_test(nr, addr);
}

static inline bool bitmap32_test(uint32_t nr, const volatile uint32_t *addr)
{
	return arch_bitmap32_test(nr, addr);
}

/* The functions below are implemented in the common bits library. */
uint64_t ffz64_ex(const uint64_t *addr, uint64_t size);
uint16_t clz32(uint32_t value);
uint16_t clz64(uint64_t value);
uint16_t bitmap_weight(uint64_t bits);
void bitmap_set_non_atomic(uint32_t nr_arg, volatile uint64_t *addr);
void bitmap32_set_non_atomic(uint32_t nr_arg, volatile uint32_t *addr);
void bitmap_clear_non_atomic(uint32_t nr_arg, volatile uint64_t *addr);
void bitmap32_clear_non_atomic(uint32_t nr_arg, volatile uint32_t *addr);
bool bitmap_test_and_set_non_atomic(uint32_t nr_arg, volatile uint64_t *addr);
bool bitmap32_test_and_set_non_atomic(uint32_t nr_arg, volatile uint32_t *addr);
bool bitmap_test_and_clear_non_atomic(uint32_t nr_arg, volatile uint64_t *addr);
bool bitmap32_test_and_clear_non_atomic(uint32_t nr_arg, volatile uint32_t *addr);

#endif /* BITS_H */
hypervisor/lib/bits.c  (new file, 97 lines)
@@ -0,0 +1,97 @@
/*
 * Copyright (C) 2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Authors:
 *	Haoyu Tang <haoyu.tang@intel.com>
 */

#include <bits.h>

/* Implementations of the common bits functions */
uint64_t ffz64_ex(const uint64_t *addr, uint64_t size)
{
	uint64_t ret = size;
	uint64_t idx;

	for (idx = 0UL; (idx << 6U) < size; idx++) {
		if (addr[idx] != ~0UL) {
			ret = (idx << 6U) + ffz64(addr[idx]);
			break;
		}
	}

	return ret;
}

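A worked example of the multi-word scan (values illustrative): each array entry covers 64 bits, and fully-set words are skipped.

	uint64_t map[2] = { ~0UL, 0x3UL };	/* bits 64,65 set; bit 66 clear */
	uint64_t first_zero = ffz64_ex(map, 128UL);
	/* word 0 == ~0UL is skipped; (1 << 6) + ffz64(0x3UL) = 64 + 2 = 66 */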
uint16_t clz32(uint32_t value)
{
	return ((value != 0U) ? (31U - fls32(value)) : 32U);
}

uint16_t clz64(uint64_t value)
{
	return ((value != 0UL) ? (63U - fls64(value)) : 64U);
}

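The count-leading-zeros helpers follow directly from find-last-set; for example:

	/* fls32(0x00800000U) == 23, so clz32 == 31 - 23 == 8. */
	uint16_t n = clz32(0x00800000U);	/* n == 8 */
	uint16_t z = clz32(0U);			/* zero input -> full width, 32 */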
void bitmap_set_non_atomic(uint32_t nr_arg, volatile uint64_t *addr)
{
	uint64_t mask = 1UL << (nr_arg & ((8U * sizeof(uint64_t)) - 1U));
	*addr |= mask;
}

void bitmap32_set_non_atomic(uint32_t nr_arg, volatile uint32_t *addr)
{
	uint32_t mask = 1U << (nr_arg & ((8U * sizeof(uint32_t)) - 1U));
	*addr |= mask;
}

void bitmap_clear_non_atomic(uint32_t nr_arg, volatile uint64_t *addr)
{
	uint64_t mask = 1UL << (nr_arg & ((8U * sizeof(uint64_t)) - 1U));
	*addr &= ~mask;
}

void bitmap32_clear_non_atomic(uint32_t nr_arg, volatile uint32_t *addr)
{
	uint32_t mask = 1U << (nr_arg & ((8U * sizeof(uint32_t)) - 1U));
	*addr &= ~mask;
}

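These non-atomic variants are plain C read-modify-write sequences, so they are only safe on data that no other CPU can touch concurrently; a hedged sketch of the intended split (the variable names are illustrative):

	uint64_t local_mask = 0UL;
	bitmap_set_non_atomic(3U, &local_mask);	/* private data: plain RMW */

	volatile uint64_t shared_flags = 0UL;	/* visible to other pCPUs */
	bitmap_set(3U, &shared_flags);		/* locked bts underneath */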
bool bitmap_test_and_set_non_atomic(uint32_t nr_arg, volatile uint64_t *addr)
{
	uint64_t mask = 1UL << (nr_arg & ((8U * sizeof(uint64_t)) - 1U));
	bool old = !!(*addr & mask);
	*addr |= mask;
	return old;
}

bool bitmap32_test_and_set_non_atomic(uint32_t nr_arg, volatile uint32_t *addr)
{
	uint32_t mask = 1U << (nr_arg & ((8U * sizeof(uint32_t)) - 1U));
	bool old = !!(*addr & mask);
	*addr |= mask;
	return old;
}

bool bitmap_test_and_clear_non_atomic(uint32_t nr_arg, volatile uint64_t *addr)
{
	uint64_t mask = 1UL << (nr_arg & ((8U * sizeof(uint64_t)) - 1U));
	bool old = !!(*addr & mask);
	*addr &= ~mask;
	return old;
}

bool bitmap32_test_and_clear_non_atomic(uint32_t nr_arg, volatile uint32_t *addr)
{
	uint32_t mask = 1U << (nr_arg & ((8U * sizeof(uint32_t)) - 1U));
	bool old = !!(*addr & mask);
	*addr &= ~mask;
	return old;
}

uint16_t bitmap_weight(uint64_t bits)
{
	return (uint16_t)__builtin_popcountl(bits);
}
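bitmap_weight() is a straight population count; for example:

	uint16_t w = bitmap_weight(0xF0UL);	/* four set bits -> w == 4 */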
@@ -1,10 +1,10 @@
 /*
- * Copyright (C) 2020-2022 Intel Corporation.
+ * Copyright (C) 2020-2025 Intel Corporation.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <asm/lib/bits.h>
+#include <bits.h>
 #include <asm/page.h>
 #include <asm/vm_config.h>
 #include <asm/rdt.h>