Merge pull request #3040 from TiejunChina/master-dev

update -rt to 4.14.39-rt29
This commit is contained in:
Rolf Neugebauer 2018-05-12 21:00:17 +01:00 committed by GitHub
commit ce9c5eb6fb
150 changed files with 387 additions and 137 deletions


@@ -1,5 +1,5 @@
kernel:
-  image: linuxkit/kernel:4.14.34-rt
+  image: linuxkit/kernel:4.14.39-rt
  cmdline: "console=tty0"
init:
  - linuxkit/init:v0.4


@@ -221,14 +221,14 @@ ifeq ($(ARCH),x86_64)
$(eval $(call kernel,4.16.8,4.16.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.14.40,4.14.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.14.40,4.14.x,,-dbg))
-$(eval $(call kernel,4.14.34,4.14.x,-rt,))
+$(eval $(call kernel,4.14.39,4.14.x,-rt,))
$(eval $(call kernel,4.9.99,4.9.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.4.131,4.4.x,$(EXTRA),$(DEBUG)))
else ifeq ($(ARCH),aarch64)
$(eval $(call kernel,4.16.8,4.16.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.14.40,4.14.x,$(EXTRA),$(DEBUG)))
-$(eval $(call kernel,4.14.34,4.14.x,-rt,))
+$(eval $(call kernel,4.14.39,4.14.x,-rt,))
else ifeq ($(ARCH),s390x)
$(eval $(call kernel,4.16.8,4.16.x,$(EXTRA),$(DEBUG)))


@@ -0,0 +1,62 @@
From: Tejun Heo <tj@kernel.org>
Date: Tue, 9 Jan 2018 07:21:15 -0800
Subject: [PATCH] string: drop __must_check from strscpy() and restore
strscpy() usages in cgroup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Upstream commit 08a77676f9c5fc69a681ccd2cd8140e65dcb26c7
e7fd37ba1217 ("cgroup: avoid copying strings longer than the buffers")
converted possibly unsafe strncpy() usages in cgroup to strscpy().
However, although the callsites are completely fine with truncated
copies, because strscpy() is marked __must_check, it led to the
following warnings.
kernel/cgroup/cgroup.c: In function 'cgroup_file_name':
kernel/cgroup/cgroup.c:1400:10: warning: ignoring return value of 'strscpy', declared with attribute warn_unused_result [-Wunused-result]
strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
^
To avoid the warnings, 50034ed49645 ("cgroup: use strlcpy() instead of
strscpy() to avoid spurious warning") switched them to strlcpy().
strlcpy() is worse than strscpy() because it unconditionally runs
strlen() on the source string, and the only reason we switched to
strlcpy() here was because it was lacking __must_check, which doesn't
reflect any material difference between the two functions. It's just
that someone added __must_check to strscpy() and not to strlcpy().
These basic string copy operations are used in a variety of ways, and
one not-so-uncommon use case is safely handling truncated copies,
where the caller naturally doesn't care about the return value. The
__must_check doesn't match the actual use cases and forces users to
opt for inferior variants which lack __must_check by happenstance or
to spread ugly (void) casts.
Remove __must_check from strscpy() and restore strscpy() usages in
cgroup.
Signed-off-by: Tejun Heo <tj@kernel.org>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
[bigeasy: drop the cgroup.c hunk]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/string.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -28,7 +28,7 @@ extern char * strncpy(char *,const char
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
-ssize_t __must_check strscpy(char *, const char *, size_t);
+ssize_t strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
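
For context: strscpy() copies at most count-1 bytes, always NUL-terminates the destination, and returns -E2BIG when the source had to be truncated. A caller that is fine with truncation simply ignores the return value, which is exactly the pattern __must_check turned into a warning. The following is a minimal userspace sketch of those semantics; my_strscpy() is a hypothetical stand-in for illustration, not the kernel implementation from lib/string.c.

/* Minimal userspace sketch of strscpy() semantics. my_strscpy() is a
 * hypothetical stand-in; the real function lives in lib/string.c. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t my_strscpy(char *dest, const char *src, size_t count)
{
	size_t len;

	if (count == 0)
		return -E2BIG;

	len = strnlen(src, count);
	if (len == count) {
		/* Source does not fit: copy what fits, NUL-terminate,
		 * and report the truncation. */
		len = count - 1;
		memcpy(dest, src, len);
		dest[len] = '\0';
		return -E2BIG;
	}
	memcpy(dest, src, len + 1); /* fits, including the NUL */
	return (ssize_t)len;
}

int main(void)
{
	char buf[8];

	/* A caller that tolerates truncation deliberately ignores the
	 * return value; this is the pattern that __must_check flagged. */
	my_strscpy(buf, "cgroup.controllers", sizeof(buf));
	printf("%s\n", buf); /* prints "cgroup." */
	return 0;
}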


@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -1131,7 +1131,7 @@ static inline void tick_nohz_activate(st
+@@ -1132,7 +1132,7 @@ static inline void tick_nohz_activate(st
ts->nohz_mode = mode;
/* One update is enough */
if (!test_and_set_bit(0, &tick_nohz_active))


@@ -1,55 +0,0 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
Date: Wed, 20 Dec 2017 17:12:57 +0100
Subject: [PATCH 08/29] tracing/hrtimer: Take all clock bases and modes into
account
So far only CLOCK_MONOTONIC and CLOCK_REALTIME were taken into account,
as well as HRTIMER_MODE_ABS/REL, in the hrtimer_init tracepoint. The
query for detecting timer mode ABS or REL has not been valid since the
introduction of HRTIMER_MODE_PINNED.
HRTIMER_MODE_PINNED is not evaluated in the hrtimer_init() call, but for
the sake of completeness print all given modes.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/trace/events/timer.h | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -136,6 +136,20 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+#define decode_clockid(type) \
+ __print_symbolic(type, \
+ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
+ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
+ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
+ { CLOCK_TAI, "CLOCK_TAI" })
+
+#define decode_hrtimer_mode(mode) \
+ __print_symbolic(mode, \
+ { HRTIMER_MODE_ABS, "ABS" }, \
+ { HRTIMER_MODE_REL, "REL" }, \
+ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
+ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" })
+
/**
* hrtimer_init - called when the hrtimer is initialized
* @hrtimer: pointer to struct hrtimer
@@ -162,10 +176,8 @@ TRACE_EVENT(hrtimer_init,
),
TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
- __entry->clockid == CLOCK_REALTIME ?
- "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
- __entry->mode == HRTIMER_MODE_ABS ?
- "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+ decode_clockid(__entry->clockid),
+ decode_hrtimer_mode(__entry->mode))
);
/**
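
The decode_clockid() and decode_hrtimer_mode() helpers above expand to __print_symbolic(), which is essentially a value-to-name table lookup performed when the trace record is formatted. A rough userspace analogue of that lookup, with hypothetical names, might look like this:

/* Rough userspace analogue of __print_symbolic(): map a value to a name
 * through a small table. All names here are hypothetical stand-ins. */
#include <stdio.h>

struct sym { int value; const char *name; };

static const char *decode(int value, const struct sym *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].value == value)
			return tbl[i].name;
	return "UNKNOWN";
}

int main(void)
{
	enum { ABS, REL, ABS_PINNED, REL_PINNED }; /* stand-ins for HRTIMER_MODE_* */
	static const struct sym modes[] = {
		{ ABS,        "ABS" },
		{ REL,        "REL" },
		{ ABS_PINNED, "ABS|PINNED" },
		{ REL_PINNED, "REL|PINNED" },
	};

	printf("mode=%s\n",
	       decode(REL_PINNED, modes, sizeof(modes) / sizeof(modes[0])));
	return 0;
}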


@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2638,7 +2638,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2642,7 +2642,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = _nfs4_proc_open(opendata);
if (ret != 0)
-@@ -2676,7 +2676,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2680,7 +2680,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
-@@ -1516,8 +1516,12 @@ static int nfs4_reclaim_open_state(struc
+@@ -1519,8 +1519,12 @@ static int nfs4_reclaim_open_state(struc
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1586,14 +1590,20 @@ static int nfs4_reclaim_open_state(struc
+@@ -1589,14 +1593,20 @@ static int nfs4_reclaim_open_state(struc
spin_lock(&sp->so_lock);
goto restart;
}


@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
-@@ -2540,61 +2540,29 @@ rb_wakeups(struct ring_buffer *buffer, s
+@@ -2545,61 +2545,29 @@ rb_wakeups(struct ring_buffer *buffer, s
* The lock and unlock are done within a preempt disable section.
* The current_context per_cpu variable can only be modified
* by the current task between lock and unlock. But it can
@@ -105,7 +105,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -2602,7 +2570,9 @@ trace_recursive_lock(struct ring_buffer_
+@@ -2607,7 +2575,9 @@ trace_recursive_lock(struct ring_buffer_
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{


@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
struct ring_buffer_iter {
-@@ -1382,6 +1383,16 @@ void ring_buffer_set_clock(struct ring_b
+@@ -1387,6 +1388,16 @@ void ring_buffer_set_clock(struct ring_b
buffer->clock = clock;
}


@@ -147,7 +147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS (1 << 31)
/* Missed count stored at end */
-@@ -2223,12 +2249,15 @@ rb_move_tail(struct ring_buffer_per_cpu
+@@ -2228,12 +2254,15 @@ rb_move_tail(struct ring_buffer_per_cpu
/* Slow path, do not inline */
static noinline struct ring_buffer_event *
@@ -167,7 +167,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
event->time_delta = delta & TS_MASK;
event->array[0] = delta >> TS_SHIFT;
} else {
-@@ -2271,7 +2300,9 @@ rb_update_event(struct ring_buffer_per_c
+@@ -2276,7 +2305,9 @@ rb_update_event(struct ring_buffer_per_c
* add it to the start of the resevered space.
*/
if (unlikely(info->add_timestamp)) {
@@ -178,7 +178,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
length -= RB_LEN_TIME_EXTEND;
delta = 0;
}
-@@ -2459,7 +2490,7 @@ static __always_inline void rb_end_commi
+@@ -2464,7 +2495,7 @@ static __always_inline void rb_end_commi
static inline void rb_event_discard(struct ring_buffer_event *event)
{
@@ -187,7 +187,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
event = skip_time_extend(event);
/* array[0] holds the actual length for the discarded event */
-@@ -2503,10 +2534,11 @@ rb_update_write_stamp(struct ring_buffer
+@@ -2508,10 +2539,11 @@ rb_update_write_stamp(struct ring_buffer
cpu_buffer->write_stamp =
cpu_buffer->commit_page->page->time_stamp;
else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
@@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else
cpu_buffer->write_stamp += event->time_delta;
}
-@@ -2659,7 +2691,7 @@ static struct ring_buffer_event *
+@@ -2664,7 +2696,7 @@ static struct ring_buffer_event *
* If this is the first commit on the page, then it has the same
* timestamp as the page itself.
*/
@@ -211,7 +211,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
info->delta = 0;
/* See if we shot pass the end of this buffer page */
-@@ -2736,8 +2768,11 @@ rb_reserve_next_event(struct ring_buffer
+@@ -2741,8 +2773,11 @@ rb_reserve_next_event(struct ring_buffer
/* make sure this diff is calculated here */
barrier();
@@ -225,7 +225,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
info.delta = diff;
if (unlikely(test_time_stamp(info.delta)))
rb_handle_timestamp(cpu_buffer, &info);
-@@ -3419,14 +3454,13 @@ rb_update_read_stamp(struct ring_buffer_
+@@ -3424,14 +3459,13 @@ rb_update_read_stamp(struct ring_buffer_
return;
case RINGBUF_TYPE_TIME_EXTEND:
@@ -243,7 +243,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
case RINGBUF_TYPE_DATA:
-@@ -3450,14 +3484,13 @@ rb_update_iter_read_stamp(struct ring_bu
+@@ -3455,14 +3489,13 @@ rb_update_iter_read_stamp(struct ring_bu
return;
case RINGBUF_TYPE_TIME_EXTEND:
@@ -261,7 +261,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
case RINGBUF_TYPE_DATA:
-@@ -3681,6 +3714,8 @@ rb_buffer_peek(struct ring_buffer_per_cp
+@@ -3686,6 +3719,8 @@ rb_buffer_peek(struct ring_buffer_per_cp
struct buffer_page *reader;
int nr_loops = 0;
@@ -270,7 +270,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
again:
/*
* We repeat when a time extend is encountered.
-@@ -3717,12 +3752,17 @@ rb_buffer_peek(struct ring_buffer_per_cp
+@@ -3722,12 +3757,17 @@ rb_buffer_peek(struct ring_buffer_per_cp
goto again;
case RINGBUF_TYPE_TIME_STAMP:
@@ -290,7 +290,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*ts = cpu_buffer->read_stamp + event->time_delta;
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
-@@ -3747,6 +3787,9 @@ rb_iter_peek(struct ring_buffer_iter *it
+@@ -3752,6 +3792,9 @@ rb_iter_peek(struct ring_buffer_iter *it
struct ring_buffer_event *event;
int nr_loops = 0;
@@ -300,7 +300,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpu_buffer = iter->cpu_buffer;
buffer = cpu_buffer->buffer;
-@@ -3799,12 +3842,17 @@ rb_iter_peek(struct ring_buffer_iter *it
+@@ -3804,12 +3847,17 @@ rb_iter_peek(struct ring_buffer_iter *it
goto again;
case RINGBUF_TYPE_TIME_STAMP:


@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
-@@ -2583,29 +2583,59 @@ rb_wakeups(struct ring_buffer *buffer, s
+@@ -2588,29 +2588,59 @@ rb_wakeups(struct ring_buffer *buffer, s
* The lock and unlock are done within a preempt disable section.
* The current_context per_cpu variable can only be modified
* by the current task between lock and unlock. But it can
@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -2613,9 +2643,7 @@ trace_recursive_lock(struct ring_buffer_
+@@ -2618,9 +2648,7 @@ trace_recursive_lock(struct ring_buffer_
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{


@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
-@@ -2628,8 +2628,7 @@ trace_recursive_lock(struct ring_buffer_
+@@ -2633,8 +2633,7 @@ trace_recursive_lock(struct ring_buffer_
bit = RB_CTX_NORMAL;
else
bit = pc & NMI_MASK ? RB_CTX_NMI :


@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_t entries_bytes;
local_t entries;
local_t overrun;
-@@ -2630,10 +2631,10 @@ trace_recursive_lock(struct ring_buffer_
+@@ -2635,10 +2636,10 @@ trace_recursive_lock(struct ring_buffer_
bit = pc & NMI_MASK ? RB_CTX_NMI :
pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpu_buffer->current_context = val;
return 0;
-@@ -2642,7 +2643,57 @@ trace_recursive_lock(struct ring_buffer_
+@@ -2647,7 +2648,57 @@ trace_recursive_lock(struct ring_buffer_
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{


@@ -47,7 +47,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -3305,7 +3305,7 @@ static void queue_unplugged(struct reque
+@@ -3307,7 +3307,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3353,7 +3353,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3355,7 +3355,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -64,7 +64,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3373,11 +3372,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3375,11 +3374,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@@ -76,7 +76,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3390,7 +3384,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3392,7 +3386,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -85,7 +85,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
/*
-@@ -3417,8 +3411,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3419,8 +3413,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);


@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -1016,8 +1016,6 @@ static void add_timer_randomness(struct
+@@ -1122,8 +1122,6 @@ static void add_timer_randomness(struct
} sample;
long delta, delta2, delta3;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
-@@ -1058,7 +1056,6 @@ static void add_timer_randomness(struct
+@@ -1164,7 +1162,6 @@ static void add_timer_randomness(struct
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}


@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6148,6 +6148,13 @@ int kvm_arch_init(void *opaque)
+@@ -6150,6 +6150,13 @@ int kvm_arch_init(void *opaque)
goto out;
}


@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -2827,10 +2827,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+@@ -2845,10 +2845,9 @@ static bool blk_mq_poll_hybrid_sleep(str
kt = nsecs;
mode = HRTIMER_MODE_REL;


@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
timer->function = perf_mux_hrtimer_handler;
}
-@@ -8711,7 +8711,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8714,7 +8714,7 @@ static void perf_swevent_init_hrtimer(st
if (!is_sampling_event(event))
return;
@@ -190,7 +190,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -1254,7 +1254,7 @@ void tick_setup_sched_timer(void)
+@@ -1255,7 +1255,7 @@ void tick_setup_sched_timer(void)
/*
* Emulate tick processing via per-CPU hrtimers:
*/


@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
-@@ -429,7 +429,7 @@ int alarm_cancel(struct alarm *alarm)
+@@ -436,7 +436,7 @@ int alarm_cancel(struct alarm *alarm)
int ret = alarm_try_to_cancel(alarm);
if (ret >= 0)
return ret;


@@ -798,7 +798,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.thread_comm = "ksoftirqd/%u",
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -910,14 +910,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -911,14 +911,7 @@ static bool can_stop_idle_tick(int cpu,
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {


@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3250,10 +3250,8 @@ void serial8250_console_write(struct uar
+@@ -3224,10 +3224,8 @@ void serial8250_console_write(struct uar
serial8250_rpm_get(up);


@@ -34,7 +34,7 @@ fold in:
#include <linux/jump_label.h>
#include <asm/intel-family.h>
-@@ -1355,7 +1356,7 @@ int memory_failure(unsigned long pfn, in
+@@ -1353,7 +1354,7 @@ int memory_failure(unsigned long pfn, in
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -43,7 +43,7 @@ fold in:
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1364,27 +1365,19 @@ static unsigned long mce_adjust_timer_de
+@@ -1362,27 +1363,19 @@ static unsigned long mce_adjust_timer_de
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -77,7 +77,7 @@ fold in:
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1407,7 +1400,11 @@ static void mce_timer_fn(unsigned long d
+@@ -1405,7 +1398,11 @@ static void mce_timer_fn(unsigned long d
done:
__this_cpu_write(mce_next_interval, iv);
@@ -90,7 +90,7 @@ fold in:
}
/*
-@@ -1415,7 +1412,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1413,7 +1410,7 @@ static void mce_timer_fn(unsigned long d
*/
void mce_timer_kick(unsigned long interval)
{
@@ -99,7 +99,7 @@ fold in:
unsigned long iv = __this_cpu_read(mce_next_interval);
__start_timer(t, interval);
-@@ -1430,7 +1427,7 @@ static void mce_timer_delete_all(void)
+@@ -1428,7 +1425,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -108,7 +108,7 @@ fold in:
}
/*
-@@ -1759,7 +1756,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1757,7 +1754,7 @@ static void __mcheck_cpu_clear_vendor(st
}
}
@@ -117,7 +117,7 @@ fold in:
{
unsigned long iv = check_interval * HZ;
-@@ -1772,18 +1769,19 @@ static void mce_start_timer(struct timer
+@@ -1770,18 +1767,19 @@ static void mce_start_timer(struct timer
static void __mcheck_cpu_setup_timer(void)
{
@@ -143,7 +143,7 @@ fold in:
mce_start_timer(t);
}
-@@ -2302,7 +2300,7 @@ static int mce_cpu_dead(unsigned int cpu
+@@ -2300,7 +2298,7 @@ static int mce_cpu_dead(unsigned int cpu
static int mce_cpu_online(unsigned int cpu)
{
@@ -152,7 +152,7 @@ fold in:
int ret;
mce_device_create(cpu);
-@@ -2319,10 +2317,10 @@ static int mce_cpu_online(unsigned int c
+@@ -2317,10 +2315,10 @@ static int mce_cpu_online(unsigned int c
static int mce_cpu_pre_down(unsigned int cpu)
{


@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1201,14 +1201,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -1219,14 +1219,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {


@@ -0,0 +1,229 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 3 May 2018 17:16:26 +0200
Subject: [PATCH] rtmutex: annotate sleeping lock context
The RCU code complains about schedule() within an rcu_read_lock() section.
The valid scenario on -RT is if a sleeping lock is held. In order to
suppress the warning, the migrate_disable counter was used to identify an
invocation of schedule() due to lock contention.
Grygorii Strashko reported that during CPU hotplug we might see the
warning via
rt_spin_lock() -> migrate_disable() -> pin_current_cpu() -> __read_rt_lock()
because the counter is not yet set.
It is also possible to trigger the warning from cpu_chill()
(seen on a kblockd_mod_delayed_work_on() caller).
To address this RCU warning I annotate the sleeping lock context. The
counter is incremented before migrate_disable() so the warning Grygorii
reported should not trigger anymore. Additionally I use that counter in
cpu_chill() to avoid the RCU warning from there.
Reported-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/sched.h | 20 ++++++++++++++++++++
kernel/locking/rtmutex.c | 12 ++++++++++--
kernel/locking/rwlock-rt.c | 18 ++++++++++++++----
kernel/rcu/tree_plugin.h | 8 ++++----
kernel/time/hrtimer.c | 2 ++
5 files changed, 50 insertions(+), 10 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -598,6 +598,9 @@ struct task_struct {
int migrate_disable_atomic;
# endif
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int sleeping_lock;
+#endif
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -1708,6 +1711,23 @@ static __always_inline bool need_resched
return unlikely(tif_need_resched());
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void sleeping_lock_inc(void)
+{
+ current->sleeping_lock++;
+}
+
+static inline void sleeping_lock_dec(void)
+{
+ current->sleeping_lock--;
+}
+
+#else
+
+static inline void sleeping_lock_inc(void) { }
+static inline void sleeping_lock_dec(void) { }
+#endif
+
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
*/
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1141,6 +1141,7 @@ void __sched rt_spin_lock_slowunlock(str
void __lockfunc rt_spin_lock(spinlock_t *lock)
{
+ sleeping_lock_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1155,6 +1156,7 @@ void __lockfunc __rt_spin_lock(struct rt
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
+ sleeping_lock_inc();
migrate_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1168,6 +1170,7 @@ void __lockfunc rt_spin_unlock(spinlock_
spin_release(&lock->dep_map, 1, _RET_IP_);
rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
migrate_enable();
+ sleeping_lock_dec();
}
EXPORT_SYMBOL(rt_spin_unlock);
@@ -1193,12 +1196,15 @@ int __lockfunc rt_spin_trylock(spinlock_
{
int ret;
+ sleeping_lock_inc();
migrate_disable();
ret = __rt_mutex_trylock(&lock->lock);
- if (ret)
+ if (ret) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- else
+ } else {
migrate_enable();
+ sleeping_lock_dec();
+ }
return ret;
}
EXPORT_SYMBOL(rt_spin_trylock);
@@ -1210,6 +1216,7 @@ int __lockfunc rt_spin_trylock_bh(spinlo
local_bh_disable();
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
+ sleeping_lock_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
} else
@@ -1225,6 +1232,7 @@ int __lockfunc rt_spin_trylock_irqsave(s
*flags = 0;
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
+ sleeping_lock_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
}
--- a/kernel/locking/rwlock-rt.c
+++ b/kernel/locking/rwlock-rt.c
@@ -305,12 +305,15 @@ int __lockfunc rt_read_trylock(rwlock_t
{
int ret;
+ sleeping_lock_inc();
migrate_disable();
ret = do_read_rt_trylock(rwlock);
- if (ret)
+ if (ret) {
rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
- else
+ } else {
migrate_enable();
+ sleeping_lock_dec();
+ }
return ret;
}
EXPORT_SYMBOL(rt_read_trylock);
@@ -319,18 +322,22 @@ int __lockfunc rt_write_trylock(rwlock_t
{
int ret;
+ sleeping_lock_inc();
migrate_disable();
ret = do_write_rt_trylock(rwlock);
- if (ret)
+ if (ret) {
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- else
+ } else {
migrate_enable();
+ sleeping_lock_dec();
+ }
return ret;
}
EXPORT_SYMBOL(rt_write_trylock);
void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
+ sleeping_lock_inc();
migrate_disable();
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
do_read_rt_lock(rwlock);
@@ -339,6 +346,7 @@ EXPORT_SYMBOL(rt_read_lock);
void __lockfunc rt_write_lock(rwlock_t *rwlock)
{
+ sleeping_lock_inc();
migrate_disable();
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
do_write_rt_lock(rwlock);
@@ -350,6 +358,7 @@ void __lockfunc rt_read_unlock(rwlock_t
rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
do_read_rt_unlock(rwlock);
migrate_enable();
+ sleeping_lock_dec();
}
EXPORT_SYMBOL(rt_read_unlock);
@@ -358,6 +367,7 @@ void __lockfunc rt_write_unlock(rwlock_t
rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
do_write_rt_unlock(rwlock);
migrate_enable();
+ sleeping_lock_dec();
}
EXPORT_SYMBOL(rt_write_unlock);
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -324,13 +324,13 @@ static void rcu_preempt_note_context_swi
struct task_struct *t = current;
struct rcu_data *rdp;
struct rcu_node *rnp;
- int mg_counter = 0;
+ int sleeping_l = 0;
RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_preempt_note_context_switch() invoked with interrupts enabled!!!\n");
-#if defined(CONFIG_PREEMPT_RT_BASE)
- mg_counter = t->migrate_disable;
+#if defined(CONFIG_PREEMPT_RT_FULL)
+ sleeping_l = t->sleeping_lock;
#endif
- WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !mg_counter);
+ WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l);
if (t->rcu_read_lock_nesting > 0 &&
!t->rcu_read_unlock_special.b.blocked) {
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1870,7 +1870,9 @@ void cpu_chill(void)
chill_time = ktime_set(0, NSEC_PER_MSEC);
set_current_state(TASK_UNINTERRUPTIBLE);
current->flags |= PF_NOFREEZE;
+ sleeping_lock_inc();
schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD);
+ sleeping_lock_dec();
if (!freeze_flag)
current->flags &= ~PF_NOFREEZE;
}
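
Taken together, the annotation amounts to a simple pattern: increment a per-task counter before a sleeping lock may block, decrement it after the unlock, and let the debug check treat a non-zero counter as "this sleep is lock contention, not a bug". A minimal userspace sketch of that pattern, with hypothetical names standing in for task_struct::sleeping_lock and the RCU check, looks like this:

/* Minimal sketch of the sleeping-lock annotation pattern. All names are
 * hypothetical stand-ins for the kernel code touched above. */
#include <assert.h>
#include <pthread.h>

static __thread int sleeping_lock; /* stands in for current->sleeping_lock */

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_spin_lock(pthread_mutex_t *l)
{
	sleeping_lock++;       /* annotate before we may block, ... */
	pthread_mutex_lock(l); /* ... so the debug check sees it set */
}

static void demo_spin_unlock(pthread_mutex_t *l)
{
	pthread_mutex_unlock(l);
	sleeping_lock--;
}

/* Stand-in for the debug check: sleeping is only considered legitimate
 * here while a sleeping lock is held. */
static void check_sleep_allowed(void)
{
	assert(sleeping_lock > 0);
}

int main(void)
{
	demo_spin_lock(&demo_lock);
	check_sleep_allowed(); /* would run from the scheduler path */
	demo_spin_unlock(&demo_lock);
	return 0;
}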


@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
-@@ -3623,6 +3633,8 @@ int __init blk_dev_init(void)
+@@ -3625,6 +3635,8 @@ int __init blk_dev_init(void)
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");


@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -700,7 +701,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -707,7 +708,7 @@ static void prb_retire_rx_blk_timer_expi
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -962,7 +963,7 @@ static void prb_retire_current_block(str
+@@ -969,7 +970,7 @@ static void prb_retire_current_block(str
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */


@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const
+@@ -1627,7 +1627,7 @@ static struct dentry *lookup_slow(const
{
struct dentry *dentry = ERR_PTR(-ENOENT), *old;
struct inode *inode = dir->d_inode;
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
inode_lock_shared(inode);
/* Don't go there if it's already dead */
-@@ -3099,7 +3099,7 @@ static int lookup_open(struct nameidata
+@@ -3100,7 +3100,7 @@ static int lookup_open(struct nameidata
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(&dentry->d_lock);
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
-@@ -1878,7 +1878,7 @@ bool proc_fill_cache(struct file *file,
+@@ -1880,7 +1880,7 @@ bool proc_fill_cache(struct file *file,
child = d_hash_and_lookup(dir, &qname);
if (!child) {


@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1113,6 +1113,9 @@ struct task_struct {
+@@ -1116,6 +1116,9 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif


@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
-@@ -646,7 +646,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -651,7 +651,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -683,7 +683,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -688,7 +688,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
@@ -738,7 +738,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
@@ -743,7 +743,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_vgic_sync_hwstate(vcpu);


@@ -33,7 +33,7 @@ Jason.
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/ktime.h>
-@@ -3252,6 +3253,8 @@ void serial8250_console_write(struct uar
+@@ -3226,6 +3227,8 @@ void serial8250_console_write(struct uar
if (port->sysrq || oops_in_progress)
locked = 0;


@@ -229,7 +229,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-@@ -1110,6 +1111,12 @@ struct task_struct {
+@@ -1113,6 +1114,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif

Some files were not shown because too many files have changed in this diff.