Mirror of https://github.com/linuxkit/linuxkit.git (synced 2025-09-12 21:29:59 +00:00)
update -rt to 4.14.53-rt34
Signed-off-by: Tiejun Chen <tiejun.china@gmail.com>
kernel/patches-4.14.x-rt/0190-radix-tree-use-local-locks.patch | 175 (normal file, added)
@@ -0,0 +1,175 @@
From fd6c6558bb3bf8254b405dc12a0692994585d9bd Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 25 Jan 2017 16:34:27 +0100
Subject: [PATCH 190/418] radix-tree: use local locks

The preload functionality uses per-CPU variables and preempt-disable to
ensure that it does not switch CPUs during its usage. This patch adds
local_locks() instead of preempt_disable() for the same purpose and to
remain preemptible on -RT.

Cc: stable-rt@vger.kernel.org
Reported-and-debugged-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/idr.h        |  5 +----
 include/linux/radix-tree.h |  7 ++-----
 lib/radix-tree.c           | 32 +++++++++++++++++++++++---------
 3 files changed, 26 insertions(+), 18 deletions(-)

diff --git a/include/linux/idr.h b/include/linux/idr.h
index 7c3a365f7e12..a922d984d9b6 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -167,10 +167,7 @@ static inline bool idr_is_empty(const struct idr *idr)
  * Each idr_preload() should be matched with an invocation of this
  * function. See idr_preload() for details.
  */
-static inline void idr_preload_end(void)
-{
-	preempt_enable();
-}
+void idr_preload_end(void);
 
 /**
  * idr_find - return pointer for given id
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 567ebb5eaab0..9da7ea957399 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -328,6 +328,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
 int radix_tree_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
+void radix_tree_preload_end(void);
+
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *,
 			unsigned long index, unsigned int tag);
@@ -347,11 +349,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
 			unsigned int max_items, unsigned int tag);
 int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
 
-static inline void radix_tree_preload_end(void)
-{
-	preempt_enable();
-}
-
 int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
 int radix_tree_split(struct radix_tree_root *, unsigned long index,
 			unsigned new_order);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index d172f0341b80..c1da1109a107 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -37,7 +37,7 @@
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-
+#include <linux/locallock.h>
 
 /* Number of nodes in fully populated tree of given height */
 static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
@@ -86,6 +86,7 @@ struct radix_tree_preload {
 	struct radix_tree_node *nodes;
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
 
 static inline struct radix_tree_node *entry_to_node(void *ptr)
 {
@@ -404,12 +405,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
 		 */
-		rtp = this_cpu_ptr(&radix_tree_preloads);
+		rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes;
 			rtp->nodes = ret->parent;
 			rtp->nr--;
 		}
+		put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
 		/*
 		 * Update the allocation stack trace as this is more useful
 		 * for debugging.
@@ -475,14 +477,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 	 */
 	gfp_mask &= ~__GFP_ACCOUNT;
 
-	preempt_disable();
+	local_lock(radix_tree_preloads_lock);
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
-		preempt_enable();
+		local_unlock(radix_tree_preloads_lock);
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
-		preempt_disable();
+		local_lock(radix_tree_preloads_lock);
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < nr) {
 			node->parent = rtp->nodes;
@@ -524,7 +526,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 	if (gfpflags_allow_blocking(gfp_mask))
 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
-	preempt_disable();
+	local_lock(radix_tree_preloads_lock);
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -562,7 +564,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
 
 	/* Preloading doesn't help anything with this gfp mask, skip it */
 	if (!gfpflags_allow_blocking(gfp_mask)) {
-		preempt_disable();
+		local_lock(radix_tree_preloads_lock);
 		return 0;
 	}
 
@@ -596,6 +598,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
 	return __radix_tree_preload(gfp_mask, nr_nodes);
 }
 
+void radix_tree_preload_end(void)
+{
+	local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(radix_tree_preload_end);
+
 static unsigned radix_tree_load_root(const struct radix_tree_root *root,
 		struct radix_tree_node **nodep, unsigned long *maxindex)
 {
@@ -2105,10 +2113,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
 void idr_preload(gfp_t gfp_mask)
 {
 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
-		preempt_disable();
+		local_lock(radix_tree_preloads_lock);
 }
 EXPORT_SYMBOL(idr_preload);
 
+void idr_preload_end(void)
+{
+	local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(idr_preload_end);
+
 /**
  * ida_pre_get - reserve resources for ida allocation
  * @ida: ida handle
@@ -2125,7 +2139,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
 	 * to return to the ida_pre_get() step.
 	 */
 	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
-		preempt_enable();
+		local_unlock(radix_tree_preloads_lock);
 
 	if (!this_cpu_read(ida_bitmap)) {
 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
-- 
2.17.1
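
Note on the locking pattern: the hunks above replace bare preempt_disable()/preempt_enable() around the per-CPU preload caches with a local lock, which on PREEMPT_RT serializes access to the per-CPU data while leaving the section preemptible (on non-RT builds it still reduces to disabling preemption). The sketch below is only an illustration of that pattern under the assumption of an -rt tree that provides <linux/locallock.h>; it reuses the primitives the patch itself introduces (DEFINE_LOCAL_IRQ_LOCK, local_lock/local_unlock, get_locked_var/put_locked_var), but the struct and function names are hypothetical and it is not meant as a buildable module.

/*
 * Illustrative sketch only (hypothetical names: demo_preload, demo_take,
 * demo_preload_section); assumes an -rt kernel tree providing
 * <linux/locallock.h>, as used by the patch above.
 */
#include <linux/percpu.h>
#include <linux/locallock.h>

struct demo_preload {
	int nr;			/* objects currently cached on this CPU */
	void *objects[16];
};

static DEFINE_PER_CPU(struct demo_preload, demo_preloads);
/* Per-CPU lock standing in for the old preempt_disable() section. */
static DEFINE_LOCAL_IRQ_LOCK(demo_preloads_lock);

/* Take one cached object from this CPU's pool, like radix_tree_node_alloc(). */
static void *demo_take(void)
{
	struct demo_preload *dp;
	void *obj = NULL;

	/* Lock and fetch this CPU's variable in one step, as the patch does. */
	dp = &get_locked_var(demo_preloads_lock, demo_preloads);
	if (dp->nr)
		obj = dp->objects[--dp->nr];
	put_locked_var(demo_preloads_lock, demo_preloads);
	return obj;
}

/* Caller-side pairing, mirroring idr_preload()/idr_preload_end(). */
static void demo_preload_section(void)
{
	local_lock(demo_preloads_lock);		/* was: preempt_disable() */
	/* this_cpu_ptr(&demo_preloads) stays valid inside the locked section */
	local_unlock(demo_preloads_lock);	/* was: preempt_enable() */
}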