hv: clean up spinlock

- move spinlock_init and spinlock_obtain to spinlock.h as inline APIs
- remove spinlock.c

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
Shiqing Gao 2018-09-13 10:50:46 +08:00 committed by lijinxia
parent 8858634493
commit d886375ee4
3 changed files with 28 additions and 37 deletions

View File

@ -151,7 +151,6 @@ C_SRCS += arch/x86/guest/vmsr.c
# Guest-support and hypervisor library translation units.
C_SRCS += arch/x86/guest/instr_emul.c
C_SRCS += arch/x86/guest/ucode.c
C_SRCS += arch/x86/guest/pm.c
C_SRCS += lib/spinlock.c
C_SRCS += lib/misc.c
C_SRCS += lib/string.c
C_SRCS += lib/memory.c

View File

@ -10,6 +10,7 @@
#ifndef ASSEMBLER
#include <types.h>
#include <rtl.h>
/** The architecture dependent spinlock type. */
typedef struct _spinlock {
@ -19,8 +20,33 @@ typedef struct _spinlock {
} spinlock_t;
/* Function prototypes */
void spinlock_init(spinlock_t *lock);
void spinlock_obtain(spinlock_t *lock);
/**
 * Initialize @lock to the released state.
 *
 * Zero-fills the whole spinlock structure so that the ticket counters
 * (head and tail) start equal, i.e. the lock is free.
 */
static inline void spinlock_init(spinlock_t *lock)
{
	(void)memset(lock, 0U, sizeof(*lock));
}
/**
 * Acquire @lock, busy-waiting until the caller owns it.
 *
 * Ticket-lock acquire: atomically fetch-and-increment lock->head to take
 * a ticket, then spin (with PAUSE to be polite to the sibling hyperthread)
 * until lock->tail catches up to that ticket. The ticket scheme gives
 * waiters FIFO ordering.
 *
 * EAX is loaded inside the asm and listed in the clobber set, so the
 * compiler knows it is modified (the old out-of-line version passed EAX
 * as a plain input and modified it, which GCC's extended-asm rules forbid).
 *
 * NOTE(review): no IRQ masking or deadlock detection here — presumably
 * callers pair this with spinlock_release and manage interrupts themselves;
 * confirm at call sites.
 */
static inline void spinlock_obtain(spinlock_t *lock)
{
/* The lock function atomically increments and exchanges the head
 * counter of the queue. If the old head of the queue is equal to the
 * tail, we have locked the spinlock. Otherwise we have to wait.
 */
/* "memory" clobber: acquire barrier — keeps protected accesses after the asm. */
asm volatile (" movl $0x1,%%eax\n"
" lock xaddl %%eax,%[head]\n"
" cmpl %%eax,%[tail]\n"
" jz 1f\n"
"2: pause\n"
" cmpl %%eax,%[tail]\n"
" jnz 2b\n"
"1:\n"
:
:
[head] "m"(lock->head),
[tail] "m"(lock->tail)
: "cc", "memory", "eax");
}
static inline void spinlock_release(spinlock_t *lock)
{

View File

@ -1,34 +0,0 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <hv_lib.h>
/**
 * Initialize @lock to the released state.
 *
 * Clears the entire structure so head and tail start equal (lock free).
 * This out-of-line version is superseded by the static inline in spinlock.h.
 */
void spinlock_init(spinlock_t *lock)
{
	(void)memset(lock, 0U, sizeof(*lock));
}
/**
 * Acquire @lock, busy-waiting until the caller owns it.
 *
 * Ticket-lock acquire: atomically fetch-and-increment lock->head to take
 * a ticket, then spin (PAUSE in the wait loop) until lock->tail reaches
 * that ticket, which gives waiters FIFO ordering.
 *
 * Fix: the previous code passed EAX as the plain input operand "a"(1)
 * while `lock xaddl` writes EAX — GCC's extended-asm rules forbid
 * modifying an input operand that is not also an output or clobber, so
 * the compiler was free to assume EAX still held 1 afterwards. Load the
 * ticket increment inside the asm and declare "eax" as a clobber instead.
 */
void spinlock_obtain(spinlock_t *lock)
{
	/* Atomically increment-and-exchange the head counter of the queue.
	 * If the old head equals the tail, we own the lock; otherwise wait.
	 * "memory" clobber acts as the acquire barrier.
	 */
	asm volatile (" movl $0x1,%%eax\n"
		" lock xaddl %%eax,%[head]\n"
		" cmpl %%eax,%[tail]\n"
		" jz 1f\n"
		"2: pause\n"
		" cmpl %%eax,%[tail]\n"
		" jnz 2b\n"
		"1:\n"
		:
		:
		[head] "m"(lock->head),
		[tail] "m"(lock->tail)
		: "cc", "memory", "eax");
}