Subject: rt: Add local irq locks
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 20 Jun 2011 09:03:47 +0200

Introduce locallock. For !RT this maps to preempt_disable()/
local_irq_disable(), so not much changes. For RT this will map to a
spinlock. This makes preemption possible, and the locked "resource"
gets the lockdep annotation it wouldn't have otherwise. The locks are
recursive for owner == current. Also, all locks use migrate_disable(),
which ensures that the task is not migrated to another CPU while the
lock is held and the owner is preempted.
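
As a usage sketch (hypothetical consumer, not part of this patch,
assuming the usual local_lock()/local_unlock() wrappers around the
__local_lock()/__local_unlock() helpers touched here): a per-CPU
resource embeds a local_lock_t next to the data it protects and wraps
the critical section. On !RT this boils down to preempt_disable()/
preempt_enable(); on RT it takes the per-CPU spinlock under
migrate_disable(), so the section stays preemptible:

	/* Hypothetical example; struct foo_stats and foo_inc() are made up. */
	struct foo_stats {
		local_lock_t	lock;
		unsigned int	count;
	};

	static DEFINE_PER_CPU(struct foo_stats, foo_stats) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void foo_inc(void)
	{
		local_lock(&foo_stats.lock);	/* on RT: also pins the task to this CPU */
		this_cpu_inc(foo_stats.count);
		local_unlock(&foo_stats.lock);
	}
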
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/local_lock_internal.h | 126 ++++++++++++++++++++++++++++++++----
 1 file changed, 113 insertions(+), 13 deletions(-)

--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -7,33 +7,90 @@
 #include <linux/lockdep.h>
 
 typedef struct {
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifdef CONFIG_PREEMPT_RT
+	spinlock_t		lock;
+	struct task_struct	*owner;
+	int			nestcnt;
+
+#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
 	struct lockdep_map	dep_map;
 	struct task_struct	*owner;
 #endif
 } local_lock_t;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LL_DEP_MAP_INIT(lockname)			\
+#ifdef CONFIG_PREEMPT_RT
+
+#define INIT_LOCAL_LOCK(lockname)	{		\
+	__SPIN_LOCK_UNLOCKED((lockname).lock),		\
+	.owner		= NULL,				\
+	.nestcnt	= 0,				\
+	}
+#else
+
+# ifdef CONFIG_DEBUG_LOCK_ALLOC
+#  define LL_DEP_MAP_INIT(lockname)			\
 	.dep_map = {					\
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
 	}
-#else
-# define LL_DEP_MAP_INIT(lockname)
-#endif
+# else
+#  define LL_DEP_MAP_INIT(lockname)
+# endif
 
 #define INIT_LOCAL_LOCK(lockname)	{ LL_DEP_MAP_INIT(lockname) }
 
-#define __local_lock_init(lock)					\
+#endif
+
+#ifdef CONFIG_PREEMPT_RT
+
+static inline void ___local_lock_init(local_lock_t *l)
+{
+	l->owner = NULL;
+	l->nestcnt = 0;
+}
+
+#define __local_lock_init(l)					\
+do {								\
+	spin_lock_init(&(l)->lock);				\
+	___local_lock_init(l);					\
+} while (0)
+
+#else
+
+#define __local_lock_init(l)					\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
-	lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\
+	debug_check_no_locks_freed((void *)l, sizeof(*l));	\
+	lockdep_init_map_wait(&(l)->dep_map, #l, &__key, 0, LD_WAIT_CONFIG);\
 } while (0)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT
+
+static inline void local_lock_acquire(local_lock_t *l)
+{
+	if (l->owner != current) {
+		spin_lock(&l->lock);
+		DEBUG_LOCKS_WARN_ON(l->owner);
+		DEBUG_LOCKS_WARN_ON(l->nestcnt);
+		l->owner = current;
+	}
+	l->nestcnt++;
+}
+
+static inline void local_lock_release(local_lock_t *l)
+{
+	DEBUG_LOCKS_WARN_ON(l->nestcnt == 0);
+	DEBUG_LOCKS_WARN_ON(l->owner != current);
+	if (--l->nestcnt)
+		return;
+
+	l->owner = NULL;
+	spin_unlock(&l->lock);
+}
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
 static inline void local_lock_acquire(local_lock_t *l)
 {
 	lock_map_acquire(&l->dep_map);
@@ -53,21 +110,50 @@ static inline void local_lock_acquire(lo
 static inline void local_lock_release(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
+#ifdef CONFIG_PREEMPT_RT
+
 #define __local_lock(lock)					\
 	do {							\
-		preempt_disable();				\
+		migrate_disable();				\
 		local_lock_acquire(this_cpu_ptr(lock));		\
 	} while (0)
 
+#define __local_unlock(lock)					\
+	do {							\
+		local_lock_release(this_cpu_ptr(lock));		\
+		migrate_enable();				\
+	} while (0)
+
 #define __local_lock_irq(lock)					\
 	do {							\
-		local_irq_disable();				\
+		migrate_disable();				\
 		local_lock_acquire(this_cpu_ptr(lock));		\
 	} while (0)
 
 #define __local_lock_irqsave(lock, flags)			\
 	do {							\
-		local_irq_save(flags);				\
+		migrate_disable();				\
+		flags = 0;					\
+		local_lock_acquire(this_cpu_ptr(lock));		\
+	} while (0)
+
+#define __local_unlock_irq(lock)				\
+	do {							\
+		local_lock_release(this_cpu_ptr(lock));		\
+		migrate_enable();				\
+	} while (0)
+
+#define __local_unlock_irqrestore(lock, flags)			\
+	do {							\
+		local_lock_release(this_cpu_ptr(lock));		\
+		migrate_enable();				\
+	} while (0)
+
+#else
+
+#define __local_lock(lock)					\
+	do {							\
+		preempt_disable();				\
 		local_lock_acquire(this_cpu_ptr(lock));		\
 	} while (0)
 
@@ -77,6 +163,18 @@ static inline void local_lock_release(lo
 		preempt_enable();				\
 	} while (0)
 
+#define __local_lock_irq(lock)					\
+	do {							\
+		local_irq_disable();				\
+		local_lock_acquire(this_cpu_ptr(lock));		\
+	} while (0)
+
+#define __local_lock_irqsave(lock, flags)			\
+	do {							\
+		local_irq_save(flags);				\
+		local_lock_acquire(this_cpu_ptr(lock));		\
+	} while (0)
+
 #define __local_unlock_irq(lock)				\
 	do {							\
 		local_lock_release(this_cpu_ptr(lock));		\
@@ -88,3 +186,5 @@ static inline void local_lock_release(lo
 		local_lock_release(this_cpu_ptr(lock));		\
 		local_irq_restore(flags);			\
 	} while (0)
+
+#endif
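
For illustration only (not part of the patch): the RT recursion rule in
local_lock_acquire()/local_lock_release() above can be exercised with a
small userspace mock, a pthread mutex standing in for the spinlock and
pthread_self() for current; all mock_* names are made up:

	#include <assert.h>
	#include <pthread.h>
	#include <stdio.h>

	struct mock_local_lock {
		pthread_mutex_t	lock;
		pthread_t	owner;
		int		has_owner;	/* stands in for owner != NULL */
		int		nestcnt;
	};

	/* Mirrors local_lock_acquire(): only the first acquire by a task
	 * takes the lock; nested acquires by the owner bump nestcnt. */
	static void mock_acquire(struct mock_local_lock *l)
	{
		if (!l->has_owner || !pthread_equal(l->owner, pthread_self())) {
			pthread_mutex_lock(&l->lock);
			assert(!l->has_owner && !l->nestcnt);
			l->owner = pthread_self();
			l->has_owner = 1;
		}
		l->nestcnt++;
	}

	/* Mirrors local_lock_release(): the mutex is dropped only when the
	 * outermost release brings the nesting count back to zero. */
	static void mock_release(struct mock_local_lock *l)
	{
		assert(l->has_owner && l->nestcnt > 0);
		if (--l->nestcnt)
			return;
		l->has_owner = 0;
		pthread_mutex_unlock(&l->lock);
	}

	int main(void)
	{
		struct mock_local_lock l = { .lock = PTHREAD_MUTEX_INITIALIZER };

		mock_acquire(&l);	/* takes the mutex, nestcnt = 1  */
		mock_acquire(&l);	/* owner == self, nestcnt = 2    */
		mock_release(&l);	/* nestcnt = 1, mutex still held */
		mock_release(&l);	/* nestcnt = 0, mutex released   */
		printf("nested acquire/release ok\n");
		return 0;
	}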