From d886375ee4e457bd30759130e8af3775532ee516 Mon Sep 17 00:00:00 2001
From: Shiqing Gao <shiqing.gao@intel.com>
Date: Thu, 13 Sep 2018 10:50:46 +0800
Subject: [PATCH] hv: clean up spinlock

- move spinlock_init and spinlock_obtain to spinlock.h as inline APIs
- remove spinlock.c

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
---
 hypervisor/Makefile               |  1 -
 hypervisor/include/lib/spinlock.h | 30 +++++++++++++++++++++++++--
 hypervisor/lib/spinlock.c         | 34 ------------------------------
 3 files changed, 28 insertions(+), 37 deletions(-)
 delete mode 100644 hypervisor/lib/spinlock.c

diff --git a/hypervisor/Makefile b/hypervisor/Makefile
index f5e4713ff..cc18804a9 100644
--- a/hypervisor/Makefile
+++ b/hypervisor/Makefile
@@ -151,7 +151,6 @@ C_SRCS += arch/x86/guest/vmsr.c
 C_SRCS += arch/x86/guest/instr_emul.c
 C_SRCS += arch/x86/guest/ucode.c
 C_SRCS += arch/x86/guest/pm.c
-C_SRCS += lib/spinlock.c
 C_SRCS += lib/misc.c
 C_SRCS += lib/string.c
 C_SRCS += lib/memory.c
diff --git a/hypervisor/include/lib/spinlock.h b/hypervisor/include/lib/spinlock.h
index d6383bc8b..53190001c 100644
--- a/hypervisor/include/lib/spinlock.h
+++ b/hypervisor/include/lib/spinlock.h
@@ -10,6 +10,7 @@
 #ifndef ASSEMBLER
 
 #include <types.h>
+#include <rtl.h>
 
 /** The architecture dependent spinlock type. */
 typedef struct _spinlock {
@@ -19,8 +20,33 @@ typedef struct _spinlock {
 } spinlock_t;
 
 /* Function prototypes */
-void spinlock_init(spinlock_t *lock);
-void spinlock_obtain(spinlock_t *lock);
+static inline void spinlock_init(spinlock_t *lock)
+{
+	(void)memset(lock, 0U, sizeof(spinlock_t));
+}
+
+static inline void spinlock_obtain(spinlock_t *lock)
+{
+
+	/* The lock function atomically increments and exchanges the head
+	 * counter of the queue. If the old head of the queue is equal to the
+	 * tail, we have locked the spinlock. Otherwise we have to wait.
+	 */
+
+	asm volatile ("   movl $0x1,%%eax\n"
+		      "   lock xaddl %%eax,%[head]\n"
+		      "   cmpl %%eax,%[tail]\n"
+		      "   jz 1f\n"
+		      "2: pause\n"
+		      "   cmpl %%eax,%[tail]\n"
+		      "   jnz 2b\n"
+		      "1:\n"
+		      :
+		      :
+		      [head] "m"(lock->head),
+		      [tail] "m"(lock->tail)
+		      : "cc", "memory", "eax");
+}
 
 static inline void spinlock_release(spinlock_t *lock)
 {
diff --git a/hypervisor/lib/spinlock.c b/hypervisor/lib/spinlock.c
deleted file mode 100644
index 1611d13f4..000000000
--- a/hypervisor/lib/spinlock.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2018 Intel Corporation. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <hypervisor.h>
-
-void spinlock_init(spinlock_t *lock)
-{
-	(void)memset(lock, 0U, sizeof(spinlock_t));
-}
-
-void spinlock_obtain(spinlock_t *lock)
-{
-
-	/* The lock function atomically increments and exchanges the head
-	 * counter of the queue. If the old head of the queue is equal to the
-	 * tail, we have locked the spinlock. Otherwise we have to wait.
-	 */
-
-	asm volatile ("   lock xaddl %%eax,%[head]\n"
-		      "   cmpl %%eax,%[tail]\n"
-		      "   jz 1f\n"
-		      "2: pause\n"
-		      "   cmpl %%eax,%[tail]\n"
-		      "   jnz 2b\n"
-		      "1:\n"
-		      :
-		      : "a" (1),
-		      [head] "m"(lock->head),
-		      [tail] "m"(lock->tail)
-		      : "cc", "memory");
-}