HV: Rename functions beginning with "_"

V1:
To remove MISRA C violations of rule 219S,
rename functions and macros whose names
start with "_".
Also remove some __mmio functions, since they are
unused duplicates.
Rename functions such as __assert to asm_assert,
since they are only wrappers around asm calls.

V2:
On top of the changes in V1, rename the bitmap
functions to use the _lock suffix, matching the
_nolock variants introduced in V1.

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
Arindam Roy 2018-07-24 18:11:26 -07:00 committed by lijinxia
parent d40a6b9e93
commit a2fe964de8
17 changed files with 116 additions and 180 deletions
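
As context for the diff below, here is a minimal sketch of the naming convention this series settles on, modeled with GCC atomics rather than the hypervisor's inline-asm macros in bits.h (the sketch_* names are hypothetical): the *_lock variants are atomic read-modify-write operations carrying the bus-lock prefix, while the *_nolock variants are plain stores for single-writer data.

    #include <stdint.h>

    /* Model of bitmap_set_lock(): atomic OR, safe under concurrent writers. */
    static inline void sketch_bitmap_set_lock(uint16_t nr, volatile uint64_t *addr)
    {
        __atomic_fetch_or(addr, 1UL << (nr & 0x3fU), __ATOMIC_SEQ_CST);
    }

    /* Model of bitmap_set_nolock(): plain OR, for single-writer bitmaps. */
    static inline void sketch_bitmap_set_nolock(uint16_t nr, volatile uint64_t *addr)
    {
        *addr |= 1UL << (nr & 0x3fU);
    }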

View File

@@ -431,7 +431,7 @@ void bsp_boot_init(void)
CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL,
"run_context ia32_spec_ctrl offset not match");
-__bitmap_set(BOOT_CPU_ID, &pcpu_active_bitmap);
+bitmap_set_nolock(BOOT_CPU_ID, &pcpu_active_bitmap);
misc_en = msr_read(MSR_IA32_MISC_ENABLE);
if ((misc_en & TURBO_MODE_DISABLE) == 0UL) {
@@ -540,7 +540,7 @@ static void bsp_boot_post(void)
start_cpus();
/* Trigger event to allow secondary CPUs to continue */
-__bitmap_set(0U, &pcpu_sync);
+bitmap_set_nolock(0U, &pcpu_sync);
ASSERT(get_cpu_id() == BOOT_CPU_ID, "");
@@ -580,7 +580,7 @@ void cpu_secondary_init(void)
cpu_set_current_state(get_cpu_id_from_lapic_id(get_cur_lapic_id()),
CPU_STATE_INITIALIZING);
-__bitmap_set(get_cpu_id(), &pcpu_active_bitmap);
+bitmap_set_nolock(get_cpu_id(), &pcpu_active_bitmap);
misc_en = msr_read(MSR_IA32_MISC_ENABLE);
if ((misc_en & TURBO_MODE_DISABLE) == 0UL) {
@@ -742,7 +742,7 @@ void cpu_dead(uint16_t pcpu_id)
*/
int halt = 1;
-if (bitmap_test_and_clear(pcpu_id, &pcpu_active_bitmap) == false) {
+if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap) == false) {
pr_err("pcpu%hu already dead", pcpu_id);
return;
}
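
A note on the variants chosen above: the boot paths switch to bitmap_set_nolock() because they run before the other CPUs are brought online (or behind a sync point), so no bus lock is needed there, while cpu_dead() keeps the locked test-and-clear so two racing callers cannot both believe they offlined the same pcpu. A minimal model of that guard, using a C11 atomic in place of the hypervisor's inline asm (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Only the caller that actually flipped the bit from 1 to 0 gets true. */
    static bool sketch_test_and_clear(uint16_t nr, _Atomic uint64_t *addr)
    {
        uint64_t mask = 1UL << (nr & 0x3fU);
        return (atomic_fetch_and(addr, ~mask) & mask) != 0UL;
    }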

View File

@@ -85,10 +85,10 @@ inline uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
for (vcpu_id = ffs64(vdmask); vcpu_id != INVALID_BIT_INDEX;
vcpu_id = ffs64(vdmask)) {
-bitmap_clear(vcpu_id, &vdmask);
+bitmap_clear_lock(vcpu_id, &vdmask);
vcpu = vcpu_from_vid(vm, vcpu_id);
ASSERT(vcpu != NULL, "vcpu_from_vid failed");
-bitmap_set(vcpu->pcpu_id, &dmask);
+bitmap_set_lock(vcpu->pcpu_id, &dmask);
}
return dmask;
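
The loop above is the hypervisor's standard bitmask walk: ffs64() finds the lowest set bit, the bit is cleared, and the loop repeats until the INVALID_BIT_INDEX sentinel comes back. A self-contained sketch of the same shape (ffs64_sketch and the sentinel value are stand-ins for the real helpers):

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_BIT_INDEX_SKETCH 0xffffU

    static uint16_t ffs64_sketch(uint64_t value)
    {
        return (value == 0UL) ? INVALID_BIT_INDEX_SKETCH
                : (uint16_t)(__builtin_ffsll((long long)value) - 1);
    }

    int main(void)
    {
        uint64_t mask = 0x29UL;     /* bits 0, 3 and 5 set */
        uint16_t bit;

        while ((bit = ffs64_sketch(mask)) != INVALID_BIT_INDEX_SKETCH) {
            mask &= ~(1UL << bit);  /* bitmap_clear_lock() in the HV */
            printf("vcpu_id %u\n", bit);
        }
        return 0;
    }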

View File

@@ -391,5 +391,5 @@ int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
void request_vcpu_pre_work(struct vcpu *vcpu, uint16_t pre_work_id)
{
-bitmap_set(pre_work_id, &vcpu->pending_pre_work);
+bitmap_set_lock(pre_work_id, &vcpu->pending_pre_work);
}

View File

@@ -146,7 +146,7 @@ vm_active_cpus(struct vm *vm)
struct vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
-bitmap_set(vcpu->vcpu_id, &dmask);
+bitmap_set_lock(vcpu->vcpu_id, &dmask);
}
return dmask;
@@ -485,7 +485,7 @@ vlapic_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
irrptr = &lapic->irr[0];
/* If the interrupt is set, don't try to do it again */
-if (bitmap32_test_and_set((uint16_t)(vector % 32U), &irrptr[idx].val)) {
+if (bitmap32_test_and_set_lock((uint16_t)(vector % 32U), &irrptr[idx].val)) {
return 0;
}
@@ -934,7 +934,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
*dmask = 0UL;
vcpu_id = vm_apicid2vcpu_id(vm, (uint8_t)dest);
if (vcpu_id < phys_cpu_num) {
-bitmap_set(vcpu_id, dmask);
+bitmap_set_lock(vcpu_id, dmask);
}
} else {
/*
@@ -958,7 +958,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
amask = vm_active_cpus(vm);
for (vcpu_id = ffs64(amask); vcpu_id != INVALID_BIT_INDEX;
vcpu_id = ffs64(amask)) {
-bitmap_clear(vcpu_id, &amask);
+bitmap_clear_lock(vcpu_id, &amask);
vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
dfr = vlapic->apic_page->dfr;
@@ -1001,13 +1001,13 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
/* target is the dest */
}
} else {
-bitmap_set(vcpu_id, dmask);
+bitmap_set_lock(vcpu_id, dmask);
}
}
}
if (lowprio && (target != NULL)) {
-bitmap_set(target->vcpu->vcpu_id, dmask);
+bitmap_set_lock(target->vcpu->vcpu_id, dmask);
}
}
}
@@ -1108,19 +1108,19 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
vlapic_calcdest(vlapic->vm, &dmask, dest, phys, false);
break;
case APIC_DEST_SELF:
-bitmap_set(vlapic->vcpu->vcpu_id, &dmask);
+bitmap_set_lock(vlapic->vcpu->vcpu_id, &dmask);
break;
case APIC_DEST_ALLISELF:
dmask = vm_active_cpus(vlapic->vm);
break;
case APIC_DEST_ALLESELF:
dmask = vm_active_cpus(vlapic->vm);
-bitmap_clear(vlapic->vcpu->vcpu_id, &dmask);
+bitmap_clear_lock(vlapic->vcpu->vcpu_id, &dmask);
break;
}
while ((vcpu_id = ffs64(dmask)) != INVALID_BIT_INDEX) {
-bitmap_clear(vcpu_id, &dmask);
+bitmap_clear_lock(vcpu_id, &dmask);
target_vcpu = vcpu_from_vid(vlapic->vm, vcpu_id);
if (target_vcpu == NULL) {
continue;
@@ -1690,7 +1690,7 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
for (vcpu_id = ffs64(dmask); vcpu_id != INVALID_BIT_INDEX;
vcpu_id = ffs64(dmask)) {
struct acrn_vlapic *vlapic;
-bitmap_clear(vcpu_id, &dmask);
+bitmap_clear_lock(vcpu_id, &dmask);
target_vcpu = vcpu_from_vid(vm, vcpu_id);
if (target_vcpu == NULL) {
return;
@@ -1846,12 +1846,12 @@ vlapic_set_local_intr(struct vm *vm, uint16_t vcpu_id_arg, uint32_t vector)
if (vcpu_id == BROADCAST_CPU_ID) {
dmask = vm_active_cpus(vm);
} else {
-bitmap_set(vcpu_id, &dmask);
+bitmap_set_lock(vcpu_id, &dmask);
}
error = 0;
for (vcpu_id = ffs64(dmask); vcpu_id != INVALID_BIT_INDEX;
vcpu_id = ffs64(dmask)) {
-bitmap_clear(vcpu_id, &dmask);
+bitmap_clear_lock(vcpu_id, &dmask);
vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
error = vlapic_trigger_lvt(vlapic, vector);
if (error != 0) {

View File

@@ -103,7 +103,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
}
for (id = 0U; id < (size_t)(sizeof(vmid_bitmap) * 8U); id++) {
-if (!bitmap_test_and_set(id, &vmid_bitmap)) {
+if (!bitmap_test_and_set_lock(id, &vmid_bitmap)) {
break;
}
}
@@ -273,7 +273,7 @@ int shutdown_vm(struct vm *vm)
destroy_iommu_domain(vm->iommu_domain);
}
-bitmap_clear(vm->attr.id, &vmid_bitmap);
+bitmap_clear_lock(vm->attr.id, &vmid_bitmap);
if (vm->vpic != NULL) {
vpic_cleanup(vm);

View File

@@ -8,12 +8,12 @@
void disable_softirq(uint16_t cpu_id)
{
-bitmap_clear(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
+bitmap_clear_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
}
void enable_softirq(uint16_t cpu_id)
{
-bitmap_set(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
+bitmap_set_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
}
void init_softirq(void)
@@ -22,7 +22,7 @@ void init_softirq(void)
for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
per_cpu(softirq_pending, pcpu_id) = 0UL;
-bitmap_set(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, pcpu_id));
+bitmap_set_lock(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, pcpu_id));
}
}
@@ -35,7 +35,7 @@ void raise_softirq(uint16_t softirq_id)
return;
}
-bitmap_set(softirq_id, bitmap);
+bitmap_set_lock(softirq_id, bitmap);
}
void exec_softirq(void)
@@ -56,7 +56,7 @@ void exec_softirq(void)
/* Disable softirq
* SOFTIRQ_ATOMIC bit = 0 means softirq already in execution
*/
-if (!bitmap_test_and_clear(SOFTIRQ_ATOMIC, bitmap)) {
+if (!bitmap_test_and_clear_lock(SOFTIRQ_ATOMIC, bitmap)) {
return;
}
@@ -69,7 +69,7 @@ again:
break;
}
-bitmap_clear(softirq_id, bitmap);
+bitmap_clear_lock(softirq_id, bitmap);
switch (softirq_id) {
case SOFTIRQ_TIMER:
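
The SOFTIRQ_ATOMIC bit above serves as both an enable flag and a reentrancy guard: exec_softirq() proceeds only if it atomically observes the bit set and clears it, so a disabled CPU or a nested invocation bails out early. A compact model of that gate (the names and the re-enable step are illustrative; the real code uses the bitmap_*_lock helpers):

    #include <stdatomic.h>
    #include <stdint.h>

    #define SKETCH_SOFTIRQ_ATOMIC 63U

    static _Atomic uint64_t sketch_softirq_pending;

    static void sketch_exec_softirq(void)
    {
        uint64_t gate = 1UL << SKETCH_SOFTIRQ_ATOMIC;

        /* Bit already clear: softirq disabled or already running on this CPU. */
        if ((atomic_fetch_and(&sketch_softirq_pending, ~gate) & gate) == 0UL) {
            return;
        }
        /* ... drain the remaining pending bits here ... */
        atomic_fetch_or(&sketch_softirq_pending, gate);  /* re-open the gate */
    }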

View File

@@ -96,7 +96,7 @@ static bool vcpu_pending_request(struct vcpu *vcpu)
void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid)
{
-bitmap_set(eventid, &vcpu->arch_vcpu.pending_req);
+bitmap_set_lock(eventid, &vcpu->arch_vcpu.pending_req);
/*
* if current hostcpu is not the target vcpu's hostcpu, we need
* to invoke IPI to wake up target vcpu
@@ -372,18 +372,18 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
struct vcpu_arch * arch_vcpu = &vcpu->arch_vcpu;
uint64_t *pending_req_bits = &arch_vcpu->pending_req;
-if (bitmap_test_and_clear(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
pr_fatal("Triple fault happen -> shutdown!");
return -EFAULT;
}
-if (bitmap_test_and_clear(ACRN_REQUEST_EPT_FLUSH, pending_req_bits))
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_EPT_FLUSH, pending_req_bits))
invept(vcpu);
-if (bitmap_test_and_clear(ACRN_REQUEST_VPID_FLUSH, pending_req_bits))
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_VPID_FLUSH, pending_req_bits))
flush_vpid_single(arch_vcpu->vpid);
-if (bitmap_test_and_clear(ACRN_REQUEST_TMR_UPDATE, pending_req_bits))
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_TMR_UPDATE, pending_req_bits))
vioapic_update_tmr(vcpu);
/* handling cancelled event injection when vcpu is switched out */
@@ -408,7 +408,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
goto INTR_WIN;
/* inject NMI before maskable hardware interrupt */
-if (bitmap_test_and_clear(ACRN_REQUEST_NMI, pending_req_bits)) {
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_NMI, pending_req_bits)) {
/* Inject NMI vector = 2 */
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
VMX_INT_INFO_VALID | (VMX_INT_TYPE_NMI << 8U) | IDT_NMI);
@@ -432,7 +432,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
/* Guest interruptable or not */
if (is_guest_irq_enabled(vcpu)) {
/* Inject external interrupt first */
-if (bitmap_test_and_clear(ACRN_REQUEST_EXTINT,
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_EXTINT,
pending_req_bits)) {
/* has pending external interrupts */
ret = vcpu_do_pending_extint(vcpu);
@@ -440,7 +440,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
}
/* Inject vLAPIC vectors */
-if (bitmap_test_and_clear(ACRN_REQUEST_EVENT,
+if (bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT,
pending_req_bits)) {
/* has pending vLAPIC interrupts */
ret = vcpu_do_pending_event(vcpu);
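
The ordering in vcpu_make_request() matters: the request bit is published with the locked set before the wake-up IPI is sent, so when the target CPU drains pending_req with the locked test-and-clear, each request is observed at most once, by exactly one drain. A minimal producer sketch under those assumptions (the IPI is only indicated in a comment):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t sketch_pending_req;

    static void sketch_make_request(uint16_t eventid)
    {
        /* 1. publish the request bit (bitmap_set_lock in the HV) ... */
        atomic_fetch_or(&sketch_pending_req, 1UL << (eventid & 0x3fU));
        /* 2. ... then kick the remote CPU, e.g. send_single_ipi(...) */
    }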

View File

@@ -13,7 +13,7 @@ static void run_vcpu_pre_work(struct vcpu *vcpu)
{
uint64_t *pending_pre_work = &vcpu->pending_pre_work;
-if (bitmap_test_and_clear(ACRN_VCPU_MMIO_COMPLETE, pending_pre_work)) {
+if (bitmap_test_and_clear_lock(ACRN_VCPU_MMIO_COMPLETE, pending_pre_work)) {
dm_emulate_mmio_post(vcpu);
}
}

View File

@@ -42,7 +42,7 @@ uint16_t allocate_pcpu(void)
uint16_t i;
for (i = 0U; i < phys_cpu_num; i++) {
-if (bitmap_test_and_set(i, &pcpu_used_bitmap) == 0) {
+if (bitmap_test_and_set_lock(i, &pcpu_used_bitmap) == 0) {
return i;
}
}
@@ -52,12 +52,12 @@ uint16_t allocate_pcpu(void)
void set_pcpu_used(uint16_t pcpu_id)
{
-bitmap_set(pcpu_id, &pcpu_used_bitmap);
+bitmap_set_lock(pcpu_id, &pcpu_used_bitmap);
}
void free_pcpu(uint16_t pcpu_id)
{
-bitmap_clear(pcpu_id, &pcpu_used_bitmap);
+bitmap_clear_lock(pcpu_id, &pcpu_used_bitmap);
}
void add_vcpu_to_runqueue(struct vcpu *vcpu)
@@ -100,7 +100,7 @@ void make_reschedule_request(struct vcpu *vcpu)
{
struct sched_context *ctx = &per_cpu(sched_ctx, vcpu->pcpu_id);
-bitmap_set(NEED_RESCHEDULE, &ctx->flags);
+bitmap_set_lock(NEED_RESCHEDULE, &ctx->flags);
send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
}
@@ -108,7 +108,7 @@ int need_reschedule(uint16_t pcpu_id)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
-return bitmap_test_and_clear(NEED_RESCHEDULE, &ctx->flags);
+return bitmap_test_and_clear_lock(NEED_RESCHEDULE, &ctx->flags);
}
static void context_switch_out(struct vcpu *vcpu)
@@ -152,7 +152,7 @@ void make_pcpu_offline(uint16_t pcpu_id)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
-bitmap_set(NEED_OFFLINE, &ctx->flags);
+bitmap_set_lock(NEED_OFFLINE, &ctx->flags);
send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
}
@@ -160,7 +160,7 @@ int need_offline(uint16_t pcpu_id)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
-return bitmap_test_and_clear(NEED_OFFLINE, &ctx->flags);
+return bitmap_test_and_clear_lock(NEED_OFFLINE, &ctx->flags);
}
void default_idle(void)

View File

@@ -231,7 +231,7 @@ static void show_host_call_trace(uint64_t rsp, uint64_t rbp_arg, uint16_t pcpu_id)
printf("\r\n");
}
-void __assert(int32_t line, const char *file, const char *txt)
+void asm_assert(int32_t line, const char *file, const char *txt)
{
uint16_t pcpu_id = get_cpu_id();
uint64_t rsp = cpu_rsp_get();

View File

@@ -101,7 +101,7 @@
#define CPUID_EXTEND_ADDRESS_SIZE 0x80000008U
-static inline void __cpuid(uint32_t *eax, uint32_t *ebx,
+static inline void asm_cpuid(uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
/* Execute CPUID instruction and save results */
@@ -118,7 +118,7 @@ static inline void cpuid(uint32_t leaf,
*eax = leaf;
*ecx = 0U;
-__cpuid(eax, ebx, ecx, edx);
+asm_cpuid(eax, ebx, ecx, edx);
}
static inline void cpuid_subleaf(uint32_t leaf, uint32_t subleaf,
@@ -128,7 +128,7 @@ static inline void cpuid_subleaf(uint32_t leaf, uint32_t subleaf,
*eax = leaf;
*ecx = subleaf;
-__cpuid(eax, ebx, ecx, edx);
+asm_cpuid(eax, ebx, ecx, edx);
}
int set_vcpuid_entries(struct vm *vm);
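
asm_cpuid() is a thin wrapper around the CPUID instruction, with the leaf in EAX and the subleaf in ECX on input. A standalone sketch of such a wrapper (x86-64, GCC-style inline asm; cpuid_sketch is a hypothetical name):

    #include <stdint.h>
    #include <stdio.h>

    static inline void cpuid_sketch(uint32_t *eax, uint32_t *ebx,
                                    uint32_t *ecx, uint32_t *edx)
    {
        __asm__ volatile("cpuid"
                         : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
                         : "a"(*eax), "c"(*ecx));
    }

    int main(void)
    {
        uint32_t a = 0U, b = 0U, c = 0U, d = 0U;   /* leaf 0: vendor string */

        cpuid_sketch(&a, &b, &c, &d);
        /* the vendor bytes live in EBX, EDX, ECX, in that order */
        printf("max leaf %u, vendor %.4s%.4s%.4s\n",
               a, (const char *)&b, (const char *)&d, (const char *)&c);
        return 0;
    }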

View File

@@ -143,73 +143,6 @@ static inline uint8_t mmio_read_byte(void *addr)
}
-/** Writes a 32 bit value to a memory mapped IO device (ROM code version).
-*
-* @param value The 32 bit value to write.
-* @param addr The memory address to write to.
-*/
-static inline void __mmio_write_long(uint32_t value, void *addr)
-{
-volatile uint32_t *addr32 = (volatile uint32_t *)addr;
-*addr32 = value;
-}
-/** Writes a 16 bit value to a memory mapped IO device (ROM code version).
-*
-* @param value The 16 bit value to write.
-* @param addr The memory address to write to.
-*/
-static inline void __mmio_write_word(uint16_t value, void *addr)
-{
-volatile uint16_t *addr16 = (volatile uint16_t *)addr;
-*addr16 = value;
-}
-/** Writes an 8 bit value to a memory mapped IO device (ROM code version).
-*
-* @param value The 8 bit value to write.
-* @param addr The memory address to write to.
-*/
-static inline void __mmio_write_byte(uint8_t value, void *addr)
-{
-volatile uint8_t *addr8 = (volatile uint8_t *)addr;
-*addr8 = value;
-}
-/** Reads a 32 bit value from a memory mapped IO device (ROM code version).
-*
-* @param addr The memory address to read from.
-*
-* @return The 32 bit value read from the given address.
-*/
-static inline uint32_t __mmio_read_long(void *addr)
-{
-return *((volatile uint32_t *)addr);
-}
-/** Reads a 16 bit value from a memory mapped IO device (ROM code version).
-*
-* @param addr The memory address to read from.
-*
-* @return The 16 bit value read from the given address.
-*/
-static inline uint16_t __mmio_read_word(void *addr)
-{
-return *((volatile uint16_t *)addr);
-}
-/** Reads an 8 bit value from a memory mapped IO device (ROM code version).
-*
-* @param addr The memory address to read from.
-*
-* @return The 32 16 value read from the given address.
-*/
-static inline uint8_t __mmio_read_byte(void *addr)
-{
-return *((volatile uint8_t *)addr);
-}
/** Reads a 32 Bit memory mapped IO register, mask it and write it back into
* memory mapped IO register.
*
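
Per the commit message, the deleted __mmio_* helpers were unused duplicates of the mmio_* accessors kept in this header. The surviving pattern is the usual volatile access, which forces the compiler to emit exactly one load or store of the requested width, neither reordered nor elided; a sketch with illustrative names:

    #include <stdint.h>

    static inline void sketch_mmio_write_long(uint32_t value, void *addr)
    {
        *(volatile uint32_t *)addr = value;   /* single 32-bit store */
    }

    static inline uint32_t sketch_mmio_read_long(void *addr)
    {
        return *(volatile uint32_t *)addr;    /* single 32-bit load */
    }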

View File

@@ -8,11 +8,11 @@
#define ASSERT_H
#ifdef HV_DEBUG
-void __assert(int32_t line, const char *file, const char *txt);
+void asm_assert(int32_t line, const char *file, const char *txt);
#define ASSERT(x, ...) \
if (!(x)) {\
-__assert(__LINE__, __FILE__, "fatal error");\
+asm_assert(__LINE__, __FILE__, "fatal error");\
}
#else
#define ASSERT(x, ...) do { } while(0)

View File

@@ -90,7 +90,7 @@ trace_check(uint16_t cpu_id, __unused uint32_t evid)
}
static inline void
-_trace_put(uint16_t cpu_id, uint32_t evid,
+trace_put(uint16_t cpu_id, uint32_t evid,
uint32_t n_data, struct trace_entry *entry)
{
struct shared_buf *sbuf = (struct shared_buf *)
@@ -115,7 +115,7 @@ TRACE_2L(uint32_t evid, uint64_t e, uint64_t f)
entry.payload.fields_64.e = e;
entry.payload.fields_64.f = f;
-_trace_put(cpu_id, evid, 2U, &entry);
+trace_put(cpu_id, evid, 2U, &entry);
}
static inline void
@@ -133,7 +133,7 @@ TRACE_4I(uint32_t evid, uint32_t a, uint32_t b, uint32_t c,
entry.payload.fields_32.b = b;
entry.payload.fields_32.c = c;
entry.payload.fields_32.d = d;
-_trace_put(cpu_id, evid, 4U, &entry);
+trace_put(cpu_id, evid, 4U, &entry);
}
static inline void
@@ -154,7 +154,7 @@ TRACE_6C(uint32_t evid, uint8_t a1, uint8_t a2, uint8_t a3,
entry.payload.fields_8.b1 = b1;
entry.payload.fields_8.b2 = b2;
/* payload.fields_8.b3/b4 not used, but is put in trace buf */
-_trace_put(cpu_id, evid, 8U, &entry);
+trace_put(cpu_id, evid, 8U, &entry);
}
#define TRACE_ENTER TRACE_16STR(TRACE_FUNC_ENTER, __func__)
@@ -181,7 +181,7 @@ TRACE_16STR(uint32_t evid, const char name[])
}
entry.payload.str[15] = 0;
-_trace_put(cpu_id, evid, 16U, &entry);
+trace_put(cpu_id, evid, 16U, &entry);
}
#else /* HV_DEBUG */

View File

@@ -185,10 +185,10 @@ static inline void name(uint16_t nr_arg, volatile op_type *addr) \
: "r" ((op_type)(1UL<<nr)) \
: "cc", "memory"); \
}
-build_bitmap_set(__bitmap_set, "q", uint64_t, "", nr, addr)
-build_bitmap_set(bitmap_set, "q", uint64_t, BUS_LOCK, nr, addr)
-build_bitmap_set(__bitmap32_set, "l", uint32_t, "", nr, addr)
-build_bitmap_set(bitmap32_set, "l", uint32_t, BUS_LOCK, nr, addr)
+build_bitmap_set(bitmap_set_nolock, "q", uint64_t, "", nr, addr)
+build_bitmap_set(bitmap_set_lock, "q", uint64_t, BUS_LOCK, nr, addr)
+build_bitmap_set(bitmap32_set_nolock, "l", uint32_t, "", nr, addr)
+build_bitmap_set(bitmap32_set_lock, "l", uint32_t, BUS_LOCK, nr, addr)
/*
* (*addr) &= ~(1UL<<nr);
@@ -205,10 +205,10 @@ static inline void name(uint16_t nr_arg, volatile op_type *addr) \
: "r" ((op_type)(~(1UL<<(nr)))) \
: "cc", "memory"); \
}
-build_bitmap_clear(__bitmap_clear, "q", uint64_t, "", nr, addr)
-build_bitmap_clear(bitmap_clear, "q", uint64_t, BUS_LOCK, nr, addr)
-build_bitmap_clear(__bitmap32_clear, "l", uint32_t, "", nr, addr)
-build_bitmap_clear(bitmap32_clear, "l", uint32_t, BUS_LOCK, nr, addr)
+build_bitmap_clear(bitmap_clear_nolock, "q", uint64_t, "", nr, addr)
+build_bitmap_clear(bitmap_clear_lock, "q", uint64_t, BUS_LOCK, nr, addr)
+build_bitmap_clear(bitmap32_clear_nolock, "l", uint32_t, "", nr, addr)
+build_bitmap_clear(bitmap32_clear_lock, "l", uint32_t, BUS_LOCK, nr, addr)
/*
* return !!((*addr) & (1UL<<nr));
@@ -258,10 +258,10 @@ static inline bool name(uint16_t nr_arg, volatile op_type *addr) \
: "cc", "memory"); \
return (ret != 0); \
}
-build_bitmap_testandset(__bitmap_test_and_set, "q", uint64_t, "", nr, addr)
-build_bitmap_testandset(bitmap_test_and_set, "q", uint64_t, BUS_LOCK, nr, addr)
-build_bitmap_testandset(__bitmap32_test_and_set, "l", uint32_t, "", nr, addr)
-build_bitmap_testandset(bitmap32_test_and_set, "l", uint32_t, BUS_LOCK, nr, addr)
+build_bitmap_testandset(bitmap_test_and_set_nolock, "q", uint64_t, "", nr, addr)
+build_bitmap_testandset(bitmap_test_and_set_lock, "q", uint64_t, BUS_LOCK, nr, addr)
+build_bitmap_testandset(bitmap32_test_and_set_nolock, "l", uint32_t, "", nr, addr)
+build_bitmap_testandset(bitmap32_test_and_set_lock, "l", uint32_t, BUS_LOCK, nr, addr)
/*
* bool ret = (*addr) & (1UL<<nr);
@@ -282,12 +282,12 @@ static inline bool name(uint16_t nr_arg, volatile op_type *addr) \
: "cc", "memory"); \
return (ret != 0); \
}
-build_bitmap_testandclear(__bitmap_test_and_clear, "q", uint64_t, "", nr, addr)
-build_bitmap_testandclear(bitmap_test_and_clear, "q",
+build_bitmap_testandclear(bitmap_test_and_clear_nolock, "q", uint64_t, "", nr, addr)
+build_bitmap_testandclear(bitmap_test_and_clear_lock, "q",
uint64_t, BUS_LOCK, nr, addr)
-build_bitmap_testandclear(__bitmap32_test_and_clear, "l",
+build_bitmap_testandclear(bitmap32_test_and_clear_nolock, "l",
uint32_t, "", nr, addr)
-build_bitmap_testandclear(bitmap32_test_and_clear, "l",
+build_bitmap_testandclear(bitmap32_test_and_clear_lock, "l",
uint32_t, BUS_LOCK, nr, addr)
#endif /* BITS_H*/
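
For readers tracing the macro, here is roughly what the first renamed instantiation expands to, assuming BUS_LOCK is the x86 "lock " prefix string and "" selects the unlocked variant (a sketch of the expansion, not a verbatim copy of the generated function):

    /* build_bitmap_set(bitmap_set_lock, "q", uint64_t, BUS_LOCK, nr, addr) */
    static inline void sketch_bitmap_set_lock(uint16_t nr_arg, volatile uint64_t *addr)
    {
        uint16_t nr = nr_arg & 0x3fU;   /* keep the bit index within 64 bits */

        __asm__ volatile("lock orq %1, %0"
                         : "+m"(*addr)
                         : "r"((uint64_t)(1UL << nr))
                         : "cc", "memory");
    }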

View File

@@ -36,27 +36,27 @@ struct list_head {
#define INIT_LIST_HEAD(ptr) do { (ptr)->next = (ptr); (ptr)->prev = (ptr); } \
while (0)
-static inline void __list_add(struct list_head *_new, struct list_head *prev,
+static inline void list_add_node(struct list_head *new_node, struct list_head *prev,
struct list_head *next)
{
-next->prev = _new;
-_new->next = next;
-_new->prev = prev;
-prev->next = _new;
+next->prev = new_node;
+new_node->next = next;
+new_node->prev = prev;
+prev->next = new_node;
}
-static inline void list_add(struct list_head *_new, struct list_head *head)
+static inline void list_add(struct list_head *new_node, struct list_head *head)
{
-__list_add(_new, head, head->next);
+list_add_node(new_node, head, head->next);
}
-static inline void list_add_tail(struct list_head *_new,
+static inline void list_add_tail(struct list_head *new_node,
struct list_head *head)
{
-__list_add(_new, head->prev, head);
+list_add_node(new_node, head->prev, head);
}
-static inline void __list_del(struct list_head *prev, struct list_head *next)
+static inline void list_del_node(struct list_head *prev, struct list_head *next)
{
next->prev = prev;
prev->next = next;
@@ -64,12 +64,12 @@ static inline void __list_del(struct list_head *prev, struct list_head *next)
static inline void list_del(struct list_head *entry)
{
-__list_del(entry->prev, entry->next);
+list_del_node(entry->prev, entry->next);
}
static inline void list_del_init(struct list_head *entry)
{
-__list_del(entry->prev, entry->next);
+list_del_node(entry->prev, entry->next);
INIT_LIST_HEAD(entry);
}
@@ -78,7 +78,7 @@ static inline _Bool list_empty(struct list_head *head)
return head->next == head;
}
-static inline void __list_splice(struct list_head *list,
+static inline void list_splice_node(struct list_head *list,
struct list_head *head)
{
struct list_head *first = list->next;
@@ -95,7 +95,7 @@ static inline void __list_splice(struct list_head *list,
static inline void list_splice(struct list_head *list, struct list_head *head)
{
if (!list_empty(list)) {
-__list_splice(list, head);
+list_splice_node(list, head);
}
}
@@ -103,7 +103,7 @@ static inline void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
-__list_splice(list, head);
+list_splice_node(list, head);
INIT_LIST_HEAD(list);
}
}
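
A small self-contained check of the renamed internals, showing that list_add_node() preserves the original __list_add() behavior (only the reserved identifiers changed, not the linkage logic):

    struct list_head { struct list_head *next, *prev; };

    static inline void list_add_node(struct list_head *new_node,
                                     struct list_head *prev, struct list_head *next)
    {
        next->prev = new_node;
        new_node->next = next;
        new_node->prev = prev;
        prev->next = new_node;
    }

    static inline void list_add(struct list_head *new_node, struct list_head *head)
    {
        list_add_node(new_node, head, head->next);
    }

    int main(void)
    {
        struct list_head queue = { &queue, &queue };   /* empty circular list */
        struct list_head a;

        list_add(&a, &queue);   /* queue -> a -> queue */
        return (queue.next == &a && a.next == &queue && a.prev == &queue) ? 0 : 1;
    }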

View File

@@ -19,63 +19,66 @@
* Common structures for HV/VHM
*/
-#define _HC_ID(x, y) (((x)<<24)|(y))
+#define BASE_HC_ID(x, y) (((x)<<24)|(y))
#define HC_ID 0x80UL
/* general */
#define HC_ID_GEN_BASE 0x0UL
-#define HC_GET_API_VERSION _HC_ID(HC_ID, HC_ID_GEN_BASE + 0x00UL)
-#define HC_SOS_OFFLINE_CPU _HC_ID(HC_ID, HC_ID_GEN_BASE + 0x01UL)
+#define HC_GET_API_VERSION BASE_HC_ID(HC_ID, HC_ID_GEN_BASE + 0x00UL)
+#define HC_SOS_OFFLINE_CPU BASE_HC_ID(HC_ID, HC_ID_GEN_BASE + 0x01UL)
/* VM management */
#define HC_ID_VM_BASE 0x10UL
-#define HC_CREATE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x00UL)
-#define HC_DESTROY_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x01UL)
-#define HC_START_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02UL)
-#define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03UL)
-#define HC_CREATE_VCPU _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04UL)
+#define HC_CREATE_VM BASE_HC_ID(HC_ID, HC_ID_VM_BASE + 0x00UL)
+#define HC_DESTROY_VM BASE_HC_ID(HC_ID, HC_ID_VM_BASE + 0x01UL)
+#define HC_START_VM BASE_HC_ID(HC_ID, HC_ID_VM_BASE + 0x02UL)
+#define HC_PAUSE_VM BASE_HC_ID(HC_ID, HC_ID_VM_BASE + 0x03UL)
+#define HC_CREATE_VCPU BASE_HC_ID(HC_ID, HC_ID_VM_BASE + 0x04UL)
/* IRQ and Interrupts */
#define HC_ID_IRQ_BASE 0x20UL
-#define HC_ASSERT_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x00UL)
-#define HC_DEASSERT_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x01UL)
-#define HC_PULSE_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x02UL)
-#define HC_INJECT_MSI _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x03UL)
+#define HC_ASSERT_IRQLINE BASE_HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x00UL)
+#define HC_DEASSERT_IRQLINE BASE_HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x01UL)
+#define HC_PULSE_IRQLINE BASE_HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x02UL)
+#define HC_INJECT_MSI BASE_HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x03UL)
/* DM ioreq management */
#define HC_ID_IOREQ_BASE 0x30UL
-#define HC_SET_IOREQ_BUFFER _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00UL)
-#define HC_NOTIFY_REQUEST_FINISH _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01UL)
+#define HC_SET_IOREQ_BUFFER BASE_HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00UL)
+#define HC_NOTIFY_REQUEST_FINISH BASE_HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01UL)
/* Guest memory management */
#define HC_ID_MEM_BASE 0x40UL
-#define HC_VM_SET_MEMORY_REGION _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00UL)
-#define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01UL)
-#define HC_VM_SET_MEMORY_REGIONS _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02UL)
-#define HC_VM_WRITE_PROTECT_PAGE _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x03UL)
+#define HC_VM_SET_MEMORY_REGION BASE_HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00UL)
+#define HC_VM_GPA2HPA BASE_HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01UL)
+#define HC_VM_SET_MEMORY_REGIONS BASE_HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02UL)
+#define HC_VM_WRITE_PROTECT_PAGE BASE_HC_ID(HC_ID, HC_ID_MEM_BASE + 0x03UL)
+#define HC_VM_SET_MEMORY_REGION BASE_HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00UL)
+#define HC_VM_GPA2HPA BASE_HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01UL)
+#define HC_VM_SET_MEMORY_REGIONS BASE_HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02UL)
/* PCI assignment*/
#define HC_ID_PCI_BASE 0x50UL
-#define HC_ASSIGN_PTDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x00UL)
-#define HC_DEASSIGN_PTDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x01UL)
-#define HC_VM_PCI_MSIX_REMAP _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x02UL)
-#define HC_SET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x03UL)
-#define HC_RESET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x04UL)
+#define HC_ASSIGN_PTDEV BASE_HC_ID(HC_ID, HC_ID_PCI_BASE + 0x00UL)
+#define HC_DEASSIGN_PTDEV BASE_HC_ID(HC_ID, HC_ID_PCI_BASE + 0x01UL)
+#define HC_VM_PCI_MSIX_REMAP BASE_HC_ID(HC_ID, HC_ID_PCI_BASE + 0x02UL)
+#define HC_SET_PTDEV_INTR_INFO BASE_HC_ID(HC_ID, HC_ID_PCI_BASE + 0x03UL)
+#define HC_RESET_PTDEV_INTR_INFO BASE_HC_ID(HC_ID, HC_ID_PCI_BASE + 0x04UL)
/* DEBUG */
#define HC_ID_DBG_BASE 0x60UL
-#define HC_SETUP_SBUF _HC_ID(HC_ID, HC_ID_DBG_BASE + 0x00UL)
+#define HC_SETUP_SBUF BASE_HC_ID(HC_ID, HC_ID_DBG_BASE + 0x00UL)
/* Trusty */
#define HC_ID_TRUSTY_BASE 0x70UL
-#define HC_INITIALIZE_TRUSTY _HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x00UL)
-#define HC_WORLD_SWITCH _HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x01UL)
-#define HC_GET_SEC_INFO _HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x02UL)
+#define HC_INITIALIZE_TRUSTY BASE_HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x00UL)
+#define HC_WORLD_SWITCH BASE_HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x01UL)
+#define HC_GET_SEC_INFO BASE_HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x02UL)
/* Power management */
#define HC_ID_PM_BASE 0x80UL
-#define HC_PM_GET_CPU_STATE _HC_ID(HC_ID, HC_ID_PM_BASE + 0x00UL)
+#define HC_PM_GET_CPU_STATE BASE_HC_ID(HC_ID, HC_ID_PM_BASE + 0x00UL)
#define ACRN_DOM0_VMID (0UL)
#define ACRN_INVALID_VMID (0xffffU)
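
As a worked check of the ID layout: BASE_HC_ID packs the hypercall class into bits 31:24 and the per-class index below it, so HC_CREATE_VM works out to (0x80UL << 24) | (0x10UL + 0x00UL) = 0x80000010UL. In code form (SKETCH_* names are local stand-ins for the defines above):

    #define SKETCH_BASE_HC_ID(x, y) (((x) << 24) | (y))
    #define SKETCH_HC_ID            0x80UL
    #define SKETCH_HC_ID_VM_BASE    0x10UL
    #define SKETCH_HC_CREATE_VM \
            SKETCH_BASE_HC_ID(SKETCH_HC_ID, SKETCH_HC_ID_VM_BASE + 0x00UL)

    _Static_assert(SKETCH_HC_CREATE_VM == 0x80000010UL, "hypercall id layout");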