Merge pull request #3487 from TiejunChina/master-dev

update -rt to 4.19.106-rt46
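
This bumps the preempt-rt kernel from 4.19.59-rt to 4.19.106 (patch set -rt46): the RT example config and the kernel Makefile entries for x86_64 and aarch64 move to 4.19.106, and the RT patch series is regenerated against 4.19.106, growing from 283 to 328 patches and re-exported with git 2.25.1 (previously 2.20.1). For reference, a config that wants the new kernel pins the updated tag; the sketch below mirrors the example change in this diff (indentation added, file path not shown in this view):

```yaml
kernel:
  image: linuxkit/kernel:4.19.106-rt   # was linuxkit/kernel:4.19.59-rt
  cmdline: "console=tty0"
```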
commit 447e5fa27c
Rolf Neugebauer, 2020-04-09 09:09:35 +01:00 (committed by GitHub)
330 changed files with 7986 additions and 1712 deletions

View File

@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:4.19.59-rt
image: linuxkit/kernel:4.19.106-rt
cmdline: "console=tty0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

View File

@ -255,12 +255,12 @@ ifeq ($(ARCH),x86_64)
$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.19.113,4.19.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.19.113,4.19.x,,-dbg))
$(eval $(call kernel,4.19.59,4.19.x,-rt,))
$(eval $(call kernel,4.19.106,4.19.x,-rt,))
$(eval $(call kernel,4.14.174,4.14.x,$(EXTRA),$(DEBUG)))
else ifeq ($(ARCH),aarch64)
$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.19.59,4.19.x,-rt,))
$(eval $(call kernel,4.19.106,4.19.x,-rt,))
else ifeq ($(ARCH),s390x)
$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG)))

View File

@ -1,7 +1,7 @@
From 200fca9a9f123bcca859fadc996b1e40c0384269 Mon Sep 17 00:00:00 2001
From d831f2ac120e802a4ff642f48f6b88e543665514 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:18 +0200
Subject: [PATCH 001/283] ARM: at91: add TCB registers definitions
Subject: [PATCH 001/328] ARM: at91: add TCB registers definitions
Add registers and bits definitions for the timer counter blocks found on
Atmel ARM SoCs.
@ -205,5 +205,5 @@ index 000000000000..657e234b1483
+
+#endif /* __SOC_ATMEL_TCB_H */
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 13e8b97c81d4029a2c41ce13ffc84507af252845 Mon Sep 17 00:00:00 2001
From a8f6e3cf352d669d8b870469ab3bff8fc64c3367 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:19 +0200
Subject: [PATCH 002/283] clocksource/drivers: Add a new driver for the Atmel
Subject: [PATCH 002/328] clocksource/drivers: Add a new driver for the Atmel
ARM TC blocks
Add a driver for the Atmel Timer Counter Blocks. This driver provides a
@ -480,5 +480,5 @@ index 000000000000..21fbe430f91b
+}
+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 108301f18eaae6fde1bf8b864d52052bdc2a7043 Mon Sep 17 00:00:00 2001
From f2e0ea85054574af7f632ca36991c5c1a25a7bfd Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:20 +0200
Subject: [PATCH 003/283] clocksource/drivers: timer-atmel-tcb: add clockevent
Subject: [PATCH 003/328] clocksource/drivers: timer-atmel-tcb: add clockevent
device on separate channel
Add an other clockevent device that uses a separate TCB channel when
@ -266,5 +266,5 @@ index 21fbe430f91b..63ce3b69338a 100644
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From e60c9d976e3462237d2f3644c18091ac1e7746c6 Mon Sep 17 00:00:00 2001
From 23ef2fe8b6933933fb81af9decf35cfae8c14571 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:21 +0200
Subject: [PATCH 004/283] clocksource/drivers: atmel-pit: make option silent
Subject: [PATCH 004/328] clocksource/drivers: atmel-pit: make option silent
To conform with the other option, make the ATMEL_PIT option silent so it
can be selected from the platform
@ -31,5 +31,5 @@ index 0ab22e7037f4..34b07047b91f 100644
config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 8cd066d01a3bc84384ba64a7521fdc80598a3418 Mon Sep 17 00:00:00 2001
From 56d1624c2b43a84717f237d3c2d58ac52cb37b33 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:22 +0200
Subject: [PATCH 005/283] ARM: at91: Implement clocksource selection
Subject: [PATCH 005/328] ARM: at91: Implement clocksource selection
Allow selecting and unselecting the PIT clocksource driver so it doesn't
have to be compile when unused.
@ -50,5 +50,5 @@ index 903f23c309df..fa493a86e2bb 100644
bool
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From db6f702c9d0558505d757c28c61f4f6a567a898a Mon Sep 17 00:00:00 2001
From 9591e618026011c31f7275edd0643d390e185e38 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:23 +0200
Subject: [PATCH 006/283] ARM: configs: at91: use new TCB timer driver
Subject: [PATCH 006/328] ARM: configs: at91: use new TCB timer driver
Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to
timer-atmel-tcb.
@ -38,5 +38,5 @@ index 2080025556b5..f2bbc6339ca6 100644
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 95cda24e3882fa19a569c029275d14089e8418e9 Mon Sep 17 00:00:00 2001
From f58179ebd23db67a287e5267a5cbc2c1ae5d75d9 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:24 +0200
Subject: [PATCH 007/283] ARM: configs: at91: unselect PIT
Subject: [PATCH 007/328] ARM: configs: at91: unselect PIT
The PIT is not required anymore to successfully boot and may actually harm
in case preempt-rt is used because the PIT interrupt is shared.
@ -39,5 +39,5 @@ index f2bbc6339ca6..be92871ab155 100644
CONFIG_UACCESS_WITH_MEMCPY=y
CONFIG_ZBOOT_ROM_TEXT=0x0
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 44f074c1b1621cbfa2d9f8f44aa69231154399d9 Mon Sep 17 00:00:00 2001
From f5fc79f507ee8c22a6f18709552cecbada48d328 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Fri, 27 Jul 2018 13:38:54 +0100
Subject: [PATCH 008/283] irqchip/gic-v3-its: Move pending table allocation to
Subject: [PATCH 008/328] irqchip/gic-v3-its: Move pending table allocation to
init time
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
2 files changed, 53 insertions(+), 28 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 65ab2c80529c..21681f0f85f4 100644
index bf7b69449b43..f93b8cd5eea2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -179,6 +179,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
@ -23,7 +23,7 @@ index 65ab2c80529c..21681f0f85f4 100644
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
@@ -1631,7 +1632,7 @@ static void its_free_prop_table(struct page *prop_page)
@@ -1659,7 +1660,7 @@ static void its_free_prop_table(struct page *prop_page)
get_order(LPI_PROPBASE_SZ));
}
@ -32,7 +32,7 @@ index 65ab2c80529c..21681f0f85f4 100644
{
phys_addr_t paddr;
@@ -1979,30 +1980,47 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
@@ -2007,30 +2008,47 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
return val;
}
@ -95,7 +95,7 @@ index 65ab2c80529c..21681f0f85f4 100644
/* set PROPBASE */
val = (page_to_phys(gic_rdists->prop_page) |
GICR_PROPBASER_InnerShareable |
@@ -2078,6 +2096,10 @@ static void its_cpu_init_lpis(void)
@@ -2106,6 +2124,10 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
@ -106,7 +106,7 @@ index 65ab2c80529c..21681f0f85f4 100644
}
static void its_cpu_init_collection(struct its_node *its)
@@ -3558,16 +3580,6 @@ static int redist_disable_lpis(void)
@@ -3585,16 +3607,6 @@ static int redist_disable_lpis(void)
u64 timeout = USEC_PER_SEC;
u64 val;
@ -123,7 +123,7 @@ index 65ab2c80529c..21681f0f85f4 100644
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
@@ -3577,7 +3589,18 @@ static int redist_disable_lpis(void)
@@ -3604,7 +3616,18 @@ static int redist_disable_lpis(void)
if (!(val & GICR_CTLR_ENABLE_LPIS))
return 0;
@ -143,7 +143,7 @@ index 65ab2c80529c..21681f0f85f4 100644
smp_processor_id());
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -3833,7 +3856,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
@@ -3860,7 +3883,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
gic_rdists = rdists;
@ -154,7 +154,7 @@ index 65ab2c80529c..21681f0f85f4 100644
return err;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 3188c0bef3e7..5b57501fd2e7 100644
index 1d21e98d6854..fdddead7e307 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -585,6 +585,7 @@ struct rdists {
@ -166,5 +166,5 @@ index 3188c0bef3e7..5b57501fd2e7 100644
struct page *prop_page;
u64 flags;
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From cd9320a1954642117f572891a8b45b177e6b0ebf Mon Sep 17 00:00:00 2001
From 89b3ba99261e5321ba2276305f817b5c0c9817ad Mon Sep 17 00:00:00 2001
From: Julia Cartwright <julia@ni.com>
Date: Fri, 28 Sep 2018 21:03:51 +0000
Subject: [PATCH 009/283] kthread: convert worker lock to raw spinlock
Subject: [PATCH 009/328] kthread: convert worker lock to raw spinlock
In order to enable the queuing of kthread work items from hardirq
context even when PREEMPT_RT_FULL is enabled, convert the worker
@ -198,5 +198,5 @@ index 087d18d771b5..5641b55783a6 100644
return ret;
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From d4cc8969937e548b95b4d6f40804a4b706c9b441 Mon Sep 17 00:00:00 2001
From 1e7f9f15b5cb5088ac28a0919a2fcc74bfc5f5c7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
Date: Mon, 8 Oct 2018 14:09:37 +0300
Subject: [PATCH 010/283] crypto: caam/qi - simplify CGR allocation, freeing
Subject: [PATCH 010/328] crypto: caam/qi - simplify CGR allocation, freeing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@ -135,5 +135,5 @@ index 357b69f57072..b6c8acc30853 100644
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 256f2e459fd2eea3e04b6b9934f06c46e19185bb Mon Sep 17 00:00:00 2001
From 2a9fed89a7bea6fbe31e717ab5f277405e20826e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 7 Jan 2019 13:52:31 +0100
Subject: [PATCH 011/283] sched/fair: Robustify CFS-bandwidth timer locking
Subject: [PATCH 011/328] sched/fair: Robustify CFS-bandwidth timer locking
Traditionally hrtimer callbacks were run with IRQs disabled, but with
the introduction of HRTIMER_MODE_SOFT it is possible they run from
@ -29,10 +29,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4a433608ba74..289c966f907a 100644
index 7f4f4ab5bfef..0f1ba3d72336 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4557,7 +4557,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -4576,7 +4576,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
struct rq *rq = rq_of(cfs_rq);
struct rq_flags rf;
@ -41,7 +41,7 @@ index 4a433608ba74..289c966f907a 100644
if (!cfs_rq_throttled(cfs_rq))
goto next;
@@ -4574,7 +4574,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -4595,7 +4595,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
unthrottle_cfs_rq(cfs_rq);
next:
@ -50,35 +50,33 @@ index 4a433608ba74..289c966f907a 100644
if (!remaining)
break;
@@ -4590,7 +4590,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -4611,7 +4611,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
* period the timer is deactivated until scheduling resumes; cfs_b->idle is
* used to track this state.
*/
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
{
u64 runtime, runtime_expires;
u64 runtime;
int throttled;
@@ -4632,11 +4632,11 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
@@ -4651,10 +4651,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
runtime = cfs_b->runtime;
cfs_b->distribute_running = 1;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
/* we can't nest cfs_b->lock while distributing bandwidth */
runtime = distribute_cfs_runtime(cfs_b, runtime,
runtime_expires);
runtime = distribute_cfs_runtime(cfs_b, runtime);
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
@@ -4745,17 +4745,18 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
@@ -4762,16 +4762,17 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+ unsigned long flags;
u64 expires;
/* confirm we're still not at a refresh boundary */
- raw_spin_lock(&cfs_b->lock);
@ -95,7 +93,7 @@ index 4a433608ba74..289c966f907a 100644
return;
}
@@ -4766,18 +4767,18 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
@@ -4781,17 +4782,17 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
if (runtime)
cfs_b->distribute_running = 1;
@ -105,19 +103,18 @@ index 4a433608ba74..289c966f907a 100644
if (!runtime)
return;
runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
runtime = distribute_cfs_runtime(cfs_b, runtime);
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
if (expires == cfs_b->runtime_expires)
cfs_b->runtime -= min(runtime, cfs_b->runtime);
cfs_b->runtime -= min(runtime, cfs_b->runtime);
cfs_b->distribute_running = 0;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
}
/*
@@ -4857,11 +4858,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
@@ -4871,11 +4872,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, period_timer);
@ -131,7 +128,7 @@ index 4a433608ba74..289c966f907a 100644
for (;;) {
overrun = hrtimer_forward_now(timer, cfs_b->period);
if (!overrun)
@@ -4889,11 +4891,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
@@ -4911,11 +4913,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
count = 0;
}
@ -146,5 +143,5 @@ index 4a433608ba74..289c966f907a 100644
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 3c866fa6b40a3acfe50a091680cd9f51a54cd45b Mon Sep 17 00:00:00 2001
From 7c89d978bdfea369853567288ced4880deddd0b1 Mon Sep 17 00:00:00 2001
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
Subject: [PATCH 012/283] arm: Convert arm boot_lock to raw
Subject: [PATCH 012/328] arm: Convert arm boot_lock to raw
The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.
@ -427,5 +427,5 @@ index c2366510187a..6b60f582b738 100644
return pen_release != -1 ? -ENOSYS : 0;
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From abbec8803a7e474a1e1a1b1ee105de8ffd0c8cbc Mon Sep 17 00:00:00 2001
From 9ecaf2a8f433399cc3fabcfb9fbce9a88fe6f200 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 17 Jul 2018 18:25:31 +0200
Subject: [PATCH 013/283] x86/ioapic: Don't let setaffinity unmask threaded EOI
Subject: [PATCH 013/328] x86/ioapic: Don't let setaffinity unmask threaded EOI
interrupt too early
There is an issue with threaded interrupts which are marked ONESHOT
@ -30,27 +30,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ifdef paths (spotted by Andy Shevchenko)]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/kernel/apic/io_apic.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
arch/x86/kernel/apic/io_apic.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ff0d14cd9e82..c2bd6e0433f8 100644
index fa3b85b222e3..1bdad61a3ef7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1722,19 +1722,20 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
@@ -1722,7 +1722,7 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
return false;
}
-static inline bool ioapic_irqd_mask(struct irq_data *data)
+static inline bool ioapic_prepare_move(struct irq_data *data)
{
/* If we are moving the irq we need to mask it */
/* If we are moving the IRQ we need to mask it */
if (unlikely(irqd_is_setaffinity_pending(data))) {
- mask_ioapic_irq(data);
+ if (!irqd_irq_masked(data))
+ mask_ioapic_irq(data);
return true;
}
@@ -1733,9 +1733,9 @@ static inline bool ioapic_irqd_mask(struct irq_data *data)
return false;
}
@ -62,14 +58,7 @@ index ff0d14cd9e82..c2bd6e0433f8 100644
/* Only migrate the irq if the ack has been received.
*
* On rare occasions the broadcast level triggered ack gets
@@ -1763,15 +1764,17 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
*/
if (!io_apic_level_ack_pending(data->chip_data))
irq_move_masked_irq(data);
- unmask_ioapic_irq(data);
+ /* If the irq is masked in the core, leave it */
+ if (!irqd_irq_masked(data))
+ unmask_ioapic_irq(data);
@@ -1770,11 +1770,11 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
}
}
#else
@ -83,7 +72,7 @@ index ff0d14cd9e82..c2bd6e0433f8 100644
{
}
#endif
@@ -1780,11 +1783,11 @@ static void ioapic_ack_level(struct irq_data *irq_data)
@@ -1783,11 +1783,11 @@ static void ioapic_ack_level(struct irq_data *irq_data)
{
struct irq_cfg *cfg = irqd_cfg(irq_data);
unsigned long v;
@ -97,7 +86,7 @@ index ff0d14cd9e82..c2bd6e0433f8 100644
/*
* It appears there is an erratum which affects at least version 0x11
@@ -1839,7 +1842,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
@@ -1842,7 +1842,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
}
@ -107,5 +96,5 @@ index ff0d14cd9e82..c2bd6e0433f8 100644
static void ioapic_ir_ack_level(struct irq_data *irq_data)
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 39150ca165ea6d7d6b5ffe76efb6170893ffdb06 Mon Sep 17 00:00:00 2001
From 759e6d7c318bbcff7507641d5a9fb6b5074b2a87 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 18:19:48 +0200
Subject: [PATCH 014/283] cgroup: use irqsave in cgroup_rstat_flush_locked()
Subject: [PATCH 014/328] cgroup: use irqsave in cgroup_rstat_flush_locked()
All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
either with spin_lock_irq() or spin_lock_irqsave().
@ -45,5 +45,5 @@ index bb95a35e8c2d..3266a9781b4e 100644
/* if @may_sleep, play nice and yield if necessary */
if (may_sleep && (need_resched() ||
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 6c83cc3183d8efc6378788160d78a3a917a5ae96 Mon Sep 17 00:00:00 2001
From 934128f28dd37073d6513a37f0433df6399c7953 Mon Sep 17 00:00:00 2001
From: Clark Williams <williams@redhat.com>
Date: Tue, 3 Jul 2018 13:34:30 -0500
Subject: [PATCH 015/283] fscache: initialize cookie hash table raw spinlocks
Subject: [PATCH 015/328] fscache: initialize cookie hash table raw spinlocks
The fscache cookie mechanism uses a hash table of hlist_bl_head structures. The
PREEMPT_RT patcheset adds a raw spinlock to this structure and so on PREEMPT_RT
@ -59,5 +59,5 @@ index 84b90a79d75a..87a9330eafa2 100644
/**
* fscache_register_netfs - Register a filesystem as desiring caching services
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From fdfc7c94f7e160bd80c27ac31c6823fbb20330f7 Mon Sep 17 00:00:00 2001
From 2a2f1a8c287a6b6fb14a4a1b5583e043d5897df4 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 29 Aug 2018 21:59:04 +0200
Subject: [PATCH 016/283] Drivers: hv: vmbus: include header for get_irq_regs()
Subject: [PATCH 016/328] Drivers: hv: vmbus: include header for get_irq_regs()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@ -35,5 +35,5 @@ index 87d3d7da78f8..1d2d8a4b837d 100644
#include "hv_trace.h"
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 31f7158d8389cec550de5964422b1123fc94079b Mon Sep 17 00:00:00 2001
From d487edd01d698abf2b4f3ea4e3f27897b227250c Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 11 Oct 2018 16:39:59 +0200
Subject: [PATCH 017/283] percpu: include irqflags.h for raw_local_irq_save()
Subject: [PATCH 017/328] percpu: include irqflags.h for raw_local_irq_save()
The header percpu.h header file is using raw_local_irq_save() but does
not include irqflags.h for its definition. It compiles because the
@ -28,5 +28,5 @@ index 1817a8415a5e..942d64c0476e 100644
#ifdef CONFIG_SMP
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From fcb3ebea1da6aede14a10c28a06902043072f250 Mon Sep 17 00:00:00 2001
From 5c77a75aaa23c5fc32b5485897d0d14e66fafd37 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:06:10 +0200
Subject: [PATCH 018/283] efi: Allow efi=runtime
Subject: [PATCH 018/328] efi: Allow efi=runtime
In case the option "efi=noruntime" is default at built-time, the user
could overwrite its sate by `efi=runtime' and allow it again.
@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 3 insertions(+)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2a29dd9c986d..ab668e17fd05 100644
index d54fca902e64..5db20908aa9c 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -113,6 +113,9 @@ static int __init parse_efi_cmdline(char *str)
@ -27,5 +27,5 @@ index 2a29dd9c986d..ab668e17fd05 100644
}
early_param("efi", parse_efi_cmdline);
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 9dda6e746277e68f244d5660b5a3f3f85b0d9be0 Mon Sep 17 00:00:00 2001
From af50891c552632469b09b7b97abd197545aec804 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 24 Jul 2018 14:48:55 +0200
Subject: [PATCH 019/283] x86/efi: drop task_lock() from efi_switch_mm()
Subject: [PATCH 019/328] x86/efi: drop task_lock() from efi_switch_mm()
efi_switch_mm() is a wrapper around switch_mm() which saves current's
->active_mm, sets the requests mm as ->active_mm and invokes
@ -23,10 +23,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ee5d08f25ce4..e8da7f492970 100644
index 6db8f3598c80..c9ccaef8df57 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -619,18 +619,16 @@ void __init efi_dump_pagetable(void)
@@ -620,18 +620,16 @@ void __init efi_dump_pagetable(void)
/*
* Makes the calling thread switch to/from efi_mm context. Can be used
@ -50,5 +50,5 @@ index ee5d08f25ce4..e8da7f492970 100644
#ifdef CONFIG_EFI_MIXED
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From b66a9f85a9e8ee817d0e2de1637bf95b7710127f Mon Sep 17 00:00:00 2001
From c96c598b9bc12e2909dcec0a1bf8f4a1b846107e Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 09:13:42 +0200
Subject: [PATCH 020/283] arm64: KVM: compute_layout before altenates are
Subject: [PATCH 020/328] arm64: KVM: compute_layout before altenates are
applied
compute_layout() is invoked as part of an alternative fixup under
@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
3 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 4b650ec1d7dd..f561ea0ac645 100644
index 887a8512bf10..376561351bae 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -35,6 +35,12 @@ void apply_alternatives_module(void *start, size_t length);
@ -30,9 +30,9 @@ index 4b650ec1d7dd..f561ea0ac645 100644
+static inline void kvm_compute_layout(void) { }
+#endif
+
#define ALTINSTR_ENTRY(feature,cb) \
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
" .if " __stringify(cb) " == 0\n" \
" .word 663f - .\n" /* new instruction */ \
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index b5d603992d40..f92815d56d17 100644
--- a/arch/arm64/kernel/alternative.c
@ -78,5 +78,5 @@ index c712a7376bc1..792da0e125de 100644
* Compute HYP VA by using the same computation as kern_hyp_va()
*/
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 6d0f5b28de481062ee69b0d62ae5ef2fc5101f9c Mon Sep 17 00:00:00 2001
From 8779fdd5686d1f9be670c7ee5ea6dfaece9e37d8 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 31 Aug 2018 14:16:30 +0200
Subject: [PATCH 021/283] of: allocate / free phandle cache outside of the
Subject: [PATCH 021/328] of: allocate / free phandle cache outside of the
devtree_lock
The phandle cache code allocates memory while holding devtree_lock which
@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 3f21ea6a90dc..2c7cf83b200c 100644
index f0dbb7ad88cf..c59b30bab0e0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -130,31 +130,34 @@ static u32 phandle_cache_mask;
@ -98,5 +98,5 @@ index 3f21ea6a90dc..2c7cf83b200c 100644
void __init of_core_init(void)
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 3ec52d2ee13b6e83429a4f7a048a0005305b8033 Mon Sep 17 00:00:00 2001
From 7841950d4460ea93ee4ddd6a400ad67cfacee592 Mon Sep 17 00:00:00 2001
From: Clark Williams <williams@redhat.com>
Date: Tue, 18 Sep 2018 10:29:31 -0500
Subject: [PATCH 022/283] mm/kasan: make quarantine_lock a raw_spinlock_t
Subject: [PATCH 022/328] mm/kasan: make quarantine_lock a raw_spinlock_t
The static lock quarantine_lock is used in quarantine.c to protect the
quarantine queue datastructures. It is taken inside quarantine queue
@ -93,5 +93,5 @@ index 3a8ddf8baf7d..b209dbaefde8 100644
qlist_free_all(&to_free, cache);
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 22ddccee8b48a817b261c98dda99967345475755 Mon Sep 17 00:00:00 2001
From c7753a6fd996fcaa0285c1c8285fde721d519a0a Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney" <paulmck@linux.ibm.com>
Date: Mon, 29 Oct 2018 11:53:01 +0100
Subject: [PATCH 023/283] EXP rcu: Revert expedited GP parallelization
Subject: [PATCH 023/328] EXP rcu: Revert expedited GP parallelization
cleverness
(Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu)
@ -46,5 +46,5 @@ index 0b2c2ad69629..a0486414edb4 100644
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From ced9290a5d8460c8a46615a475cd094bc3b0c344 Mon Sep 17 00:00:00 2001
From 4b0c7eda4403c5a7146714857bd1abffd2b080f8 Mon Sep 17 00:00:00 2001
From: He Zhe <zhe.he@windriver.com>
Date: Wed, 19 Dec 2018 16:30:57 +0100
Subject: [PATCH 024/283] kmemleak: Turn kmemleak_lock to raw spinlock on RT
Subject: [PATCH 024/328] kmemleak: Turn kmemleak_lock to raw spinlock on RT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 72e3fb3bb037..0ed549045074 100644
index 5eeabece0c17..92ce99b15f2b 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -26,7 +26,7 @@
@ -164,5 +164,5 @@ index 72e3fb3bb037..0ed549045074 100644
/*
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From e1b321401ca437984b8973749826aea3a245e15b Mon Sep 17 00:00:00 2001
From 7cb617c6dac1356dfe57b1c4a976ec78ead046a0 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 28 Oct 2016 23:05:11 +0200
Subject: [PATCH 025/283] NFSv4: replace seqcount_t with a seqlock_t
Subject: [PATCH 025/328] NFSv4: replace seqcount_t with a seqlock_t
The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me
because it maps to preempt_disable() in -RT which I can't have at this
@ -26,25 +26,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
4 files changed, 21 insertions(+), 11 deletions(-)
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 75fe92eaa681..e8d05393443f 100644
index b0c0c2fc2fba..26565ba05dc1 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -152,11 +152,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
@@ -162,11 +162,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
sp = state->owner;
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+ seq = read_seqbegin(&sp->so_reclaim_seqlock);
err = nfs4_open_delegation_recall(ctx, state, stateid, type);
err = nfs4_open_delegation_recall(ctx, state, stateid);
if (!err)
err = nfs_delegation_claim_locks(ctx, state, stateid);
err = nfs_delegation_claim_locks(state, stateid);
- if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+ if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq))
err = -EAGAIN;
mutex_unlock(&sp->so_delegreturn_mutex);
put_nfs_open_context(ctx);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 63287d911c08..2ae55eaa4a1e 100644
index 5b61520dce88..2771aafaca19 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -114,7 +114,7 @@ struct nfs4_state_owner {
@ -57,10 +57,10 @@ index 63287d911c08..2ae55eaa4a1e 100644
};
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1de855e0ae61..78c3f4359e76 100644
index 668b648064b7..187d411668ed 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2865,7 +2865,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
@@ -2870,7 +2870,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
unsigned int seq;
int ret;
@ -69,7 +69,7 @@ index 1de855e0ae61..78c3f4359e76 100644
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
@@ -2906,7 +2906,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
@@ -2911,7 +2911,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
@ -79,7 +79,7 @@ index 1de855e0ae61..78c3f4359e76 100644
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3ba2087469ac..f10952680bd9 100644
index b3086e99420c..c9bf1eb7e1b2 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -515,7 +515,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
@ -91,7 +91,7 @@ index 3ba2087469ac..f10952680bd9 100644
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
@@ -1568,8 +1568,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
@@ -1583,8 +1583,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
@ -105,7 +105,7 @@ index 3ba2087469ac..f10952680bd9 100644
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
@@ -1656,14 +1660,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
@@ -1671,14 +1675,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
spin_lock(&sp->so_lock);
goto restart;
}
@ -131,5 +131,5 @@ index 3ba2087469ac..f10952680bd9 100644
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 02954bb06eedf19db3637fea6699d0dc1761b270 Mon Sep 17 00:00:00 2001
From 4906d6c574d916416e92a9de0b959c4d0ed0bc17 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 4 Apr 2017 12:50:16 +0200
Subject: [PATCH 026/283] kernel: sched: Provide a pointer to the valid CPU
Subject: [PATCH 026/328] kernel: sched: Provide a pointer to the valid CPU
mask
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
@ -165,10 +165,10 @@ index c9ef3c532169..cb10249b1125 100644
/* Save the current cpu id for spu interrupt routing. */
ctx->last_ran = raw_smp_processor_id();
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index 912d53939f4f..6b8dc68b5ccc 100644
index a999a58ca331..d6410d0740ea 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -1435,7 +1435,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1445,7 +1445,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
* may be scheduled elsewhere and invalidate entries in the
* pseudo-locked region.
*/
@ -178,10 +178,10 @@ index 912d53939f4f..6b8dc68b5ccc 100644
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index bedd5fba33b0..3f4259f11a35 100644
index 01ed0a667928..2c62de6b5bf1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1037,7 +1037,7 @@ int hfi1_get_proc_affinity(int node)
@@ -1039,7 +1039,7 @@ int hfi1_get_proc_affinity(int node)
struct hfi1_affinity_node *entry;
cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
@ -190,7 +190,7 @@ index bedd5fba33b0..3f4259f11a35 100644
struct hfi1_affinity_node_list *affinity = &node_affinity;
struct cpu_mask_set *set = &affinity->proc;
@@ -1045,7 +1045,7 @@ int hfi1_get_proc_affinity(int node)
@@ -1047,7 +1047,7 @@ int hfi1_get_proc_affinity(int node)
* check whether process/context affinity has already
* been set
*/
@ -199,7 +199,7 @@ index bedd5fba33b0..3f4259f11a35 100644
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
@@ -1056,7 +1056,7 @@ int hfi1_get_proc_affinity(int node)
@@ -1058,7 +1058,7 @@ int hfi1_get_proc_affinity(int node)
cpu = cpumask_first(proc_mask);
cpumask_set_cpu(cpu, &set->used);
goto done;
@ -209,10 +209,10 @@ index bedd5fba33b0..3f4259f11a35 100644
current->pid, current->comm,
cpumask_pr_args(proc_mask));
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 88e326d6cc49..b0d01ace6611 100644
index 291c12f588b5..05e7b28a03c1 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -855,14 +855,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
@@ -853,14 +853,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
{
struct sdma_rht_node *rht_node;
struct sdma_engine *sde = NULL;
@ -270,7 +270,7 @@ index 9eb99a43f849..e4d0cfebaac5 100644
static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5dc024e28397..fdb8ba398ea8 100644
index 0530de9a4efc..4298a87b9de6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -660,7 +660,8 @@ struct task_struct {
@ -283,7 +283,7 @@ index 5dc024e28397..fdb8ba398ea8 100644
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -1390,7 +1391,7 @@ extern struct pid *cad_pid;
@@ -1398,7 +1399,7 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
@ -320,10 +320,10 @@ index ff956ccbb6df..7bb129c5b412 100644
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 69874db3fba8..98c971cb1d36 100644
index 1a2d18e98bf9..bc182d6fa2a9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -845,6 +845,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
@@ -850,6 +850,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#ifdef CONFIG_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
#endif
@ -333,7 +333,7 @@ index 69874db3fba8..98c971cb1d36 100644
/*
* One for us, one for whoever does the "release_task()" (usually
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6859ea1d5c04..d6f690064cce 100644
index 2befd2c4ce9e..07dc66137a26 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -878,7 +878,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
@ -371,7 +371,7 @@ index 6859ea1d5c04..d6f690064cce 100644
+ if (cpumask_equal(p->cpus_ptr, new_mask))
goto out;
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
@@ -1237,10 +1237,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
@ -456,7 +456,7 @@ index 6859ea1d5c04..d6f690064cce 100644
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
@@ -4275,7 +4275,7 @@ static int __sched_setscheduler(struct task_struct *p,
@@ -4316,7 +4316,7 @@ static int __sched_setscheduler(struct task_struct *p,
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
@ -465,7 +465,7 @@ index 6859ea1d5c04..d6f690064cce 100644
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
@@ -4874,7 +4874,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
@@ -4915,7 +4915,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
@ -474,7 +474,7 @@ index 6859ea1d5c04..d6f690064cce 100644
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
@@ -5454,7 +5454,7 @@ int task_can_attach(struct task_struct *p,
@@ -5496,7 +5496,7 @@ int task_can_attach(struct task_struct *p,
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
@ -483,7 +483,7 @@ index 6859ea1d5c04..d6f690064cce 100644
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
@@ -5481,7 +5481,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
@@ -5523,7 +5523,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
if (curr_cpu == target_cpu)
return 0;
@ -492,7 +492,7 @@ index 6859ea1d5c04..d6f690064cce 100644
return -EINVAL;
/* TODO: This is not properly updating schedstats */
@@ -5619,7 +5619,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
@@ -5661,7 +5661,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
put_prev_task(rq, next);
/*
@ -541,10 +541,10 @@ index daaadf939ccb..f7d2c10b4c92 100644
/*
* We have to ensure that we have at least one bit
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 72c07059ef37..fb6e64417470 100644
index ebec37cb3be9..4b13df38c069 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
* If we cannot preempt any rq, fall back to pick any
* online CPU:
*/
@ -553,7 +553,7 @@ index 72c07059ef37..fb6e64417470 100644
if (cpu >= nr_cpu_ids) {
/*
* Failed to find any suitable CPU.
@@ -1823,7 +1823,7 @@ static void set_curr_task_dl(struct rq *rq)
@@ -1856,7 +1856,7 @@ static void set_curr_task_dl(struct rq *rq)
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@ -562,7 +562,7 @@ index 72c07059ef37..fb6e64417470 100644
return 1;
return 0;
}
@@ -1973,7 +1973,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
@@ -2006,7 +2006,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
@ -572,10 +572,10 @@ index 72c07059ef37..fb6e64417470 100644
!dl_task(task) ||
!task_on_rq_queued(task))) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 289c966f907a..0048a32a3b4d 100644
index 0f1ba3d72336..27f9f9a785c1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1630,7 +1630,7 @@ static void task_numa_compare(struct task_numa_env *env,
@@ -1678,7 +1678,7 @@ static void task_numa_compare(struct task_numa_env *env,
* be incurred if the tasks were swapped.
*/
/* Skip this swap candidate if cannot move to the source cpu */
@ -584,7 +584,7 @@ index 289c966f907a..0048a32a3b4d 100644
goto unlock;
/*
@@ -1727,7 +1727,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
@@ -1776,7 +1776,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
@ -593,7 +593,7 @@ index 289c966f907a..0048a32a3b4d 100644
continue;
env->dst_cpu = cpu;
@@ -5741,7 +5741,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
@@ -5782,7 +5782,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
@ -602,7 +602,7 @@ index 289c966f907a..0048a32a3b4d 100644
continue;
local_group = cpumask_test_cpu(this_cpu,
@@ -5873,7 +5873,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
@@ -5914,7 +5914,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
@ -611,7 +611,7 @@ index 289c966f907a..0048a32a3b4d 100644
if (available_idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
@@ -5913,7 +5913,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
@@ -5954,7 +5954,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
{
int new_cpu = cpu;
@ -620,7 +620,7 @@ index 289c966f907a..0048a32a3b4d 100644
return prev_cpu;
/*
@@ -6030,7 +6030,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
@@ -6071,7 +6071,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
if (!test_idle_cores(target, false))
return -1;
@ -629,7 +629,7 @@ index 289c966f907a..0048a32a3b4d 100644
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
@@ -6064,7 +6064,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
@@ -6105,7 +6105,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
@ -638,7 +638,7 @@ index 289c966f907a..0048a32a3b4d 100644
continue;
if (available_idle_cpu(cpu))
return cpu;
@@ -6127,7 +6127,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
@@ -6168,7 +6168,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
@ -647,7 +647,7 @@ index 289c966f907a..0048a32a3b4d 100644
continue;
if (available_idle_cpu(cpu))
break;
@@ -6164,7 +6164,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
@@ -6205,7 +6205,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
@ -656,7 +656,7 @@ index 289c966f907a..0048a32a3b4d 100644
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
@@ -6382,7 +6382,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
@@ -6423,7 +6423,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@ -665,7 +665,7 @@ index 289c966f907a..0048a32a3b4d 100644
}
rcu_read_lock();
@@ -7121,14 +7121,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
@@ -7162,14 +7162,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@ -682,7 +682,7 @@ index 289c966f907a..0048a32a3b4d 100644
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -7148,7 +7148,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
@@ -7189,7 +7189,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/* Prevent to re-select dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@ -691,7 +691,7 @@ index 289c966f907a..0048a32a3b4d 100644
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
@@ -7745,7 +7745,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
@@ -7786,7 +7786,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@ -700,7 +700,7 @@ index 289c966f907a..0048a32a3b4d 100644
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
@@ -8360,7 +8360,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
@@ -8401,7 +8401,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@ -709,7 +709,7 @@ index 289c966f907a..0048a32a3b4d 100644
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
@@ -8756,7 +8756,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
@@ -8797,7 +8797,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
@ -741,10 +741,10 @@ index b980cc96604f..b6ca4a630050 100644
!rt_task(task) ||
!task_on_rq_queued(task))) {
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 1e6db9cbe4dc..fa95139445b2 100644
index 8030e24dbf14..862f4b0139fc 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -277,7 +277,7 @@ static void move_to_next_cpu(void)
@@ -279,7 +279,7 @@ static void move_to_next_cpu(void)
* of this thread, than stop migrating for the duration
* of the current test.
*/
@ -780,5 +780,5 @@ index 5522692100ba..8b4be8e1802a 100644
trace_foo_with_template_simple("HELLO", cnt);
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From bda814671dc9f9f74cabd99a65cad3101b68ee83 Mon Sep 17 00:00:00 2001
From 9f9cd889fa22fc1e25802f565f7210b271d136a2 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: [PATCH 027/283] kernel/sched/core: add migrate_disable()
Subject: [PATCH 027/328] kernel/sched/core: add migrate_disable()
---
include/linux/preempt.h | 23 +++++++
@ -53,7 +53,7 @@ index c01813c3fbe9..3196d0e76719 100644
#ifdef MODULE
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fdb8ba398ea8..df39ad5916e7 100644
index 4298a87b9de6..0489d3e0e78c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -662,6 +662,13 @@ struct task_struct {
@ -85,7 +85,7 @@ index 9fb239e12b82..5801e516ba63 100644
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d6f690064cce..b658f0147c3b 100644
index 07dc66137a26..d0450f06612c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1008,7 +1008,15 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
@ -126,7 +126,7 @@ index d6f690064cce..b658f0147c3b 100644
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
@@ -1095,9 +1117,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
@@ -1096,9 +1118,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
}
/* Can the task run on the task's current CPU? If so, we're done */
@ -141,10 +141,10 @@ index d6f690064cce..b658f0147c3b 100644
+ }
+#endif
+
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
@@ -7067,3 +7096,100 @@ const u32 sched_prio_to_wmult[40] = {
/* Need help from migration thread: drop lock and wait. */
@@ -7105,3 +7134,100 @@ const u32 sched_prio_to_wmult[40] = {
};
#undef CREATE_TRACE_POINTS
@ -261,5 +261,5 @@ index 78fadf0438ea..5027158d3908 100644
#undef PN
#undef __PN
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 6fb56185df42e49e0e2d8fe12d315356a57f4bce Mon Sep 17 00:00:00 2001
From db2220843fd1c19c7b89db5f6e20382b5622fa05 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 9 Oct 2018 17:34:50 +0200
Subject: [PATCH 028/283] sched/migrate_disable: Add export_symbol_gpl for
Subject: [PATCH 028/328] sched/migrate_disable: Add export_symbol_gpl for
__migrate_disabled
Jonathan reported that lttng/modules can't use __migrate_disabled().
@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 1 insertion(+)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b658f0147c3b..7a39d56f6a6b 100644
index d0450f06612c..e6022cc2605b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1013,6 +1013,7 @@ int __migrate_disabled(struct task_struct *p)
@ -33,5 +33,5 @@ index b658f0147c3b..7a39d56f6a6b 100644
static void __do_set_cpus_allowed_tail(struct task_struct *p,
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 35e0294d43f7c53bdb7ecba19df0710037d888ec Mon Sep 17 00:00:00 2001
From b978b0a313d26ed5e51a9120c8744385a99e541a Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 9 Mar 2016 10:51:06 +0100
Subject: [PATCH 029/283] arm: at91: do not disable/enable clocks in a row
Subject: [PATCH 029/328] arm: at91: do not disable/enable clocks in a row
Currently the driver will disable the clock and enable it one line later
if it is switching from periodic mode into one shot.
@ -93,5 +93,5 @@ index 43f4d5c4d6fa..de6baf564dfe 100644
.set_state_oneshot = tc_set_oneshot,
},
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 4d4322de76f7b52df1529acfc6e5fc46e25761f1 Mon Sep 17 00:00:00 2001
From 8549d4577f0573dddbc34e310c4310920a6bb714 Mon Sep 17 00:00:00 2001
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: [PATCH 030/283] clocksource: TCLIB: Allow higher clock rates for
Subject: [PATCH 030/328] clocksource: TCLIB: Allow higher clock rates for
clock events
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
@ -165,5 +165,5 @@ index 3726eacdf65d..0900dec7ec04 100644
tristate "Dummy IRQ handler"
default n
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 64f770d93319861d308ce265a4389e4ca0a6ed5f Mon Sep 17 00:00:00 2001
From 4948d026b217faebc43bfe4a21cb3c5e781508ad Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 14 Feb 2013 22:36:59 +0100
Subject: [PATCH 031/283] timekeeping: Split jiffies seqlock
Subject: [PATCH 031/328] timekeeping: Split jiffies seqlock
Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so
it can be taken in atomic context on RT.
@ -76,10 +76,10 @@ index a02e0f6b287c..32f5101f07ce 100644
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5b33e2f5c0ed..54fd344ef973 100644
index 48403fb653c2..e774a49176cc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -67,7 +67,8 @@ static void tick_do_update_jiffies64(ktime_t now)
@@ -68,7 +68,8 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevaluate with jiffies_lock held */
@ -89,7 +89,7 @@ index 5b33e2f5c0ed..54fd344ef973 100644
delta = ktime_sub(now, last_jiffies_update);
if (delta >= tick_period) {
@@ -90,10 +91,12 @@ static void tick_do_update_jiffies64(ktime_t now)
@@ -94,10 +95,12 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
} else {
@ -104,7 +104,7 @@ index 5b33e2f5c0ed..54fd344ef973 100644
update_wall_time();
}
@@ -104,12 +107,14 @@ static ktime_t tick_init_jiffy_update(void)
@@ -108,12 +111,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
@ -121,7 +121,7 @@ index 5b33e2f5c0ed..54fd344ef973 100644
return period;
}
@@ -652,10 +657,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
@@ -656,10 +661,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
/* Read jiffies and the time when jiffies were updated last */
do {
@ -135,10 +135,10 @@ index 5b33e2f5c0ed..54fd344ef973 100644
ts->timer_expires_base = basemono;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 443edcddac8a..0517bc42c6b6 100644
index 81ee5b83c920..512db778f442 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2418,8 +2418,10 @@ EXPORT_SYMBOL(hardpps);
@@ -2394,8 +2394,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
@ -166,5 +166,5 @@ index 141ab3ab0354..099737f6f10c 100644
#define CS_NAME_LEN 32
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 8cf90f7e58f51438a7ec0e4e704918afaa450ff1 Mon Sep 17 00:00:00 2001
From dad624b7531ae0a0275cab3c82ea0d7c6a29cc7c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 21 Sep 2011 19:57:12 +0200
Subject: [PATCH 032/283] signal: Revert ptrace preempt magic
Subject: [PATCH 032/328] signal: Revert ptrace preempt magic
Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
than a bandaid around the ptrace design trainwreck. It's not a
@ -13,10 +13,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 8 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 0e6bc3049427..d5a9646b3538 100644
index 08911bb6fe9a..5e278f1540ad 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2094,15 +2094,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
@@ -2103,15 +2103,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
@ -33,5 +33,5 @@ index 0e6bc3049427..d5a9646b3538 100644
} else {
/*
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 882c4f88db8d6179773dc733e794fa504aef75e3 Mon Sep 17 00:00:00 2001
From 5b974aebb7a0797ecc4c47dda6158e8c6788d50b Mon Sep 17 00:00:00 2001
From: Marc Kleine-Budde <mkl@pengutronix.de>
Date: Wed, 5 Mar 2014 00:49:47 +0100
Subject: [PATCH 033/283] net: sched: Use msleep() instead of yield()
Subject: [PATCH 033/328] net: sched: Use msleep() instead of yield()
On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50
(by default). If a high priority userspace process tries to shut down a busy
@ -46,10 +46,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 77b289da7763..31b9c2b415b4 100644
index 8a4d01e427a2..4ab20f1138fd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1183,7 +1183,7 @@ void dev_deactivate_many(struct list_head *head)
@@ -1204,7 +1204,7 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev))
@ -59,5 +59,5 @@ index 77b289da7763..31b9c2b415b4 100644
* unwind stale skb lists and qdisc statistics
*/
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From ea6d238547b58b5fe9ce953cd818ef8bf6cb8915 Mon Sep 17 00:00:00 2001
From 38dbd44808bcdd34f0b973698b0f9bd65d2f2db5 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 27 Mar 2018 16:24:15 +0200
Subject: [PATCH 034/283] dm rq: remove BUG_ON(!irqs_disabled) check
Subject: [PATCH 034/328] dm rq: remove BUG_ON(!irqs_disabled) check
In commit 052189a2ec95 ("dm: remove superfluous irq disablement in
dm_request_fn") the spin_lock_irq() was replaced with spin_lock() + a
@ -20,10 +20,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 1 deletion(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8dd298..29736c7e5f1f 100644
index 4d36373e1c0f..12ed08245130 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -688,7 +688,6 @@ static void dm_old_request_fn(struct request_queue *q)
@@ -692,7 +692,6 @@ static void dm_old_request_fn(struct request_queue *q)
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work);
@ -32,5 +32,5 @@ index 6e547b8dd298..29736c7e5f1f 100644
}
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From b91ae27f3efa2e15087397591db35dd1d11f5120 Mon Sep 17 00:00:00 2001
From f31d5f36bfd80c261ba37fe3b8849f2be819c088 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 8 Nov 2013 17:34:54 +0100
Subject: [PATCH 035/283] usb: do no disable interrupts in giveback
Subject: [PATCH 035/328] usb: do no disable interrupts in giveback
Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet
context") the USB code disables interrupts before invoking the complete
@ -41,5 +41,5 @@ index b82a7d787add..2f3015356124 100644
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 79f8ad95ffc5f1cd2ac721ab3d01291d4ad055df Mon Sep 17 00:00:00 2001
From f93f63735dec865d4013677969324e66da7f02c4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Jun 2011 12:39:57 +0200
Subject: [PATCH 036/283] rt: Provide PREEMPT_RT_BASE config switch
Subject: [PATCH 036/328] rt: Provide PREEMPT_RT_BASE config switch
Introduce PREEMPT_RT_BASE which enables parts of
PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT
@ -59,5 +59,5 @@ index cd1655122ec0..027db5976c2f 100644
\ No newline at end of file
+ bool
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 75df679c581581978487f6f6de91bf86a9f72e7c Mon Sep 17 00:00:00 2001
From 824fc9b2ae92b317da3e2a42406a49f330e20a6d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Dec 2011 01:03:49 +0100
Subject: [PATCH 037/283] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
Subject: [PATCH 037/328] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
There are "valid" GFP_ATOMIC allocations such as
@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e76d16ac2776..04a45d6d0167 100644
index af35f5caadbe..e40ba59efe7f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -934,7 +934,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
@ -71,5 +71,5 @@ index a3928d4438b5..a50b2158f7cd 100644
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From b5d77d6b4b4bcead77cd720e8a93f4ae78420034 Mon Sep 17 00:00:00 2001
From feb8e4e9bfee1c054ec0c83ae2a12897d85da9b3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 8 Jul 2015 17:14:48 +0200
Subject: [PATCH 038/283] jump-label: disable if stop_machine() is used
Subject: [PATCH 038/328] jump-label: disable if stop_machine() is used
Some architectures are using stop_machine() while switching the opcode which
leads to latency spikes.
@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 51794c7fa6d5..7d11242a37d2 100644
index e2f7c50dbace..91ba9fe945ff 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -51,7 +51,7 @@ config ARM
@ -37,5 +37,5 @@ index 51794c7fa6d5..7d11242a37d2 100644
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 94fd428643474b867a8cac432d7d911a5250c367 Mon Sep 17 00:00:00 2001
From 521872fd8b19b111a7001f6388912ab9d506f741 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 24 Jul 2011 12:11:43 +0200
Subject: [PATCH 039/283] kconfig: Disable config options which are not RT
Subject: [PATCH 039/328] kconfig: Disable config options which are not RT
compatible
Disable stuff which is known to have issues on RT
@ -38,5 +38,5 @@ index b457e94ae618..0dddbb2a3282 100644
select RADIX_TREE_MULTIORDER
help
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 1a9e9b418236c18717a91955eeafe5bd72a00598 Mon Sep 17 00:00:00 2001
From 464fd34dd6dc53c357221cff371377ca19601e14 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 17 Oct 2017 16:36:18 +0200
Subject: [PATCH 040/283] lockdep: disable self-test
Subject: [PATCH 040/328] lockdep: disable self-test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3dea52f7be9c..1504e6aa8418 100644
index 46a910acce3f..38cf7f81daa7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1207,7 +1207,7 @@ config DEBUG_ATOMIC_SLEEP
@ -30,5 +30,5 @@ index 3dea52f7be9c..1504e6aa8418 100644
Say Y here if you want the kernel to run a short self-test during
bootup. The self-test checks whether common types of locking bugs
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 75102ff5e253e5ababc30c7512e0c07f2b7dc297 Mon Sep 17 00:00:00 2001
From bb8c948b260e99e7c7ad2dc38ea03a958be18769 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:44:03 -0500
Subject: [PATCH 041/283] mm: Allow only slub on RT
Subject: [PATCH 041/328] mm: Allow only slub on RT
Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs.
@ -32,5 +32,5 @@ index 47035b5a46f6..ae9a0113a699 100644
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 32697a0be9afdc5c631cc3d232a298b5880ed65c Mon Sep 17 00:00:00 2001
From 6bbedb933d43f1bc2283d96523412298d765b8a2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:51:45 +0200
Subject: [PATCH 042/283] locking: Disable spin on owner for RT
Subject: [PATCH 042/328] locking: Disable spin on owner for RT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@ -33,5 +33,5 @@ index 84d882f3e299..af27c4000812 100644
config LOCK_SPIN_ON_OWNER
def_bool y
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 708879e986c1b552ee69d6444b808a196bba0f5f Mon Sep 17 00:00:00 2001
From 0942d8d1880802a3a19df4dfdff1ec5769d92fe3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 28 Oct 2012 13:26:09 +0000
Subject: [PATCH 043/283] rcu: Disable RCU_FAST_NO_HZ on RT
Subject: [PATCH 043/328] rcu: Disable RCU_FAST_NO_HZ on RT
This uses a timer_list timer from the irq disabled guts of the idle
code. Disable it for now to prevent wreckage.
@ -25,5 +25,5 @@ index 9210379c0353..644264be90f0 100644
help
This option permits CPUs to enter dynticks-idle state even if
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From a14822b6d5fcc441064faf3edc2f91b5d461e703 Mon Sep 17 00:00:00 2001
From b784c987142020d5cc32de03823004d362b390ec Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 21 Mar 2014 20:19:05 +0100
Subject: [PATCH 044/283] rcu: make RCU_BOOST default on RT
Subject: [PATCH 044/328] rcu: make RCU_BOOST default on RT
Since it is no longer invoked from the softirq people run into OOM more
often if the priority of the RCU thread is too low. Making boosting
@ -29,5 +29,5 @@ index 644264be90f0..a243a78ff38c 100644
This option boosts the priority of preempted RCU readers that
block the current preemptible RCU grace period for too long.
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 36c33c65b461082612dffa7be01862b7bd55270e Mon Sep 17 00:00:00 2001
From 648e8c04474df9ed71c649af1d1e5a161cddaf41 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:03:52 +0200
Subject: [PATCH 045/283] sched: Disable CONFIG_RT_GROUP_SCHED on RT
Subject: [PATCH 045/328] sched: Disable CONFIG_RT_GROUP_SCHED on RT
Carsten reported problems when running:
@ -30,5 +30,5 @@ index ae9a0113a699..61e8b531649b 100644
help
This feature lets you explicitly allocate real CPU bandwidth
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From f2b7e396c43d3607ee0a0090c7470da50f833e93 Mon Sep 17 00:00:00 2001
From 0b90609a04c39529c4ff712a4786aecde55a0733 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: [PATCH 046/283] net/core: disable NET_RX_BUSY_POLL
Subject: [PATCH 046/328] net/core: disable NET_RX_BUSY_POLL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@ -33,5 +33,5 @@ index 228dfa382eec..bc8d01996f22 100644
config BQL
bool
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 477660c22f2036e69299438b1292307ee1dba46b Mon Sep 17 00:00:00 2001
From af731f1e8edb7e93c5977a0da70bd61c5d9fa7b1 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 1 Dec 2017 10:42:03 +0100
Subject: [PATCH 047/283] arm*: disable NEON in kernel mode
Subject: [PATCH 047/328] arm*: disable NEON in kernel mode
NEON in kernel mode is used by the crypto algorithms and raid6 code.
While the raid6 code looks okay, the crypto algorithms do not: NEON
@ -20,10 +20,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
3 files changed, 17 insertions(+), 16 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7d11242a37d2..e122dd212ab3 100644
index 91ba9fe945ff..bd9d180db5c7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2162,7 +2162,7 @@ config NEON
@@ -2163,7 +2163,7 @@ config NEON
config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"
@ -161,5 +161,5 @@ index 34b4e3d46aab..ae055cdad8cf 100644
crc32_pmull_algs[1].update = crc32c_pmull_update;
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 297ef639cbc4bc3aac2e5a8835090136753796fc Mon Sep 17 00:00:00 2001
From c90bc1f0bbce77f2baf2b4213125fb5b7870fc20 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: [PATCH 048/283] powerpc: Use generic rwsem on RT
Subject: [PATCH 048/328] powerpc: Use generic rwsem on RT
Use generic code which uses rtmutex
@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a80669209155..9952764db9c5 100644
index 6f475dc5829b..3d5c86336072 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -105,10 +105,11 @@ config LOCKDEP_SUPPORT
@ -28,5 +28,5 @@ index a80669209155..9952764db9c5 100644
config GENERIC_LOCKBREAK
bool
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 3bead4e3fc7560659c1982ace99de374aa9df79c Mon Sep 17 00:00:00 2001
From 3460880af8146f0e3e05acd590e7e52d450bbf80 Mon Sep 17 00:00:00 2001
From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
Date: Fri, 24 Apr 2015 15:53:13 +0000
Subject: [PATCH 049/283] powerpc/kvm: Disable in-kernel MPIC emulation for
Subject: [PATCH 049/328] powerpc/kvm: Disable in-kernel MPIC emulation for
PREEMPT_RT_FULL
While converting the openpic emulation code to use a raw_spinlock_t enables
@ -40,5 +40,5 @@ index 68a0e9d5b440..6f4d5d7615af 100644
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From ae9000e3c66794249fbca61b8a71bcdf690910e0 Mon Sep 17 00:00:00 2001
From 13e6a60aad3edc7b4efd2168abcca0447ff20763 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:08:34 +0200
Subject: [PATCH 050/283] powerpc: Disable highmem on RT
Subject: [PATCH 050/328] powerpc: Disable highmem on RT
The current highmem handling on -RT is not compatible and needs fixups.
@ -11,10 +11,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9952764db9c5..1563820a37e8 100644
index 3d5c86336072..1b332f69dd36 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -398,7 +398,7 @@ menu "Kernel options"
@@ -399,7 +399,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
@ -24,5 +24,5 @@ index 9952764db9c5..1563820a37e8 100644
source kernel/Kconfig.hz
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 454e636edd0bb26495afb3850a37aa5e5214a4ed Mon Sep 17 00:00:00 2001
From 55ff21a4418f35a443f2c210779a9ff4dee33e93 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:10:12 +0200
Subject: [PATCH 051/283] mips: Disable highmem on RT
Subject: [PATCH 051/328] mips: Disable highmem on RT
The current highmem handling on -RT is not compatible and needs fixups.
@ -11,10 +11,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 201caf226b47..bd268302efa4 100644
index a830a9701e50..3d5fae3891be 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2517,7 +2517,7 @@ config MIPS_CRC_SUPPORT
@@ -2518,7 +2518,7 @@ config MIPS_CRC_SUPPORT
#
config HIGHMEM
bool "High Memory Support"
@ -24,5 +24,5 @@ index 201caf226b47..bd268302efa4 100644
config CPU_SUPPORTS_HIGHMEM
bool
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 5c86aec91ae10f140d18bd33cd62783cdde0922d Mon Sep 17 00:00:00 2001
From d0b5d43931b3de89c64c8a697256eb60eb9c0ebb Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 26 Jul 2009 02:21:32 +0200
Subject: [PATCH 052/283] x86: Use generic rwsem_spinlocks on -rt
Subject: [PATCH 052/328] x86: Use generic rwsem_spinlocks on -rt
Simplifies the separation of anon_rw_semaphores and rw_semaphores for
-rt.
@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 04a45d6d0167..1b05ae86bdde 100644
index e40ba59efe7f..f22e787329cf 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -264,8 +264,11 @@ config ARCH_MAY_HAVE_PC_FDC
@ -29,5 +29,5 @@ index 04a45d6d0167..1b05ae86bdde 100644
config GENERIC_CALIBRATE_DELAY
def_bool y
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 9cd1a715d85ace3e9b1d3ae703eb16744dd3ebb6 Mon Sep 17 00:00:00 2001
From 9c164cac4dbebd9bf5376428113db97b366625a0 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 23 Jan 2014 14:45:59 +0100
Subject: [PATCH 053/283] leds: trigger: disable CPU trigger on -RT
Subject: [PATCH 053/328] leds: trigger: disable CPU trigger on -RT
as it triggers:
|CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141
@ -36,5 +36,5 @@ index 4018af769969..b4ce8c115949 100644
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From f19ffb87fe48ba1e8904df670b13d52f8b9c08f1 Mon Sep 17 00:00:00 2001
From fa67192faa15cd98f554bcf82f0ecc40a26d9165 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 9 Apr 2015 15:23:01 +0200
Subject: [PATCH 054/283] cpufreq: drop K8's driver from beeing selected
Subject: [PATCH 054/328] cpufreq: drop K8's driver from beeing selected
Ralf posted a picture of a backtrace from
@ -34,5 +34,5 @@ index 35f71825b7f3..bb4a6160d0f7 100644
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
Support for K10 and newer processors is now in acpi-cpufreq.
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 3b1c3bc41b87cd7a714ebfa5e88651d4f3326f2e Mon Sep 17 00:00:00 2001
From 274246f0cc33aabdc562929c114eae24541eb9a3 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Aug 2013 11:48:57 +0200
Subject: [PATCH 055/283] md: disable bcache
Subject: [PATCH 055/328] md: disable bcache
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@ -36,5 +36,5 @@ index f6e0a8b3a61e..18c03d79a442 100644
help
Allows a block device to be used as cache for other devices; uses
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From f0e7a6e0f76d2ab27a0c5ef0f7872d971ec1dd23 Mon Sep 17 00:00:00 2001
From b453602d0f02b94edf714d46e6293b037ea94b67 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:03:16 +0200
Subject: [PATCH 056/283] efi: Disable runtime services on RT
Subject: [PATCH 056/328] efi: Disable runtime services on RT
Based on meassurements the EFI functions get_variable /
get_next_variable take up to 2us which looks okay.
@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ab668e17fd05..f58ab9ed4ade 100644
index 5db20908aa9c..1708505fdf5d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -87,7 +87,7 @@ struct mm_struct efi_mm = {
@ -41,5 +41,5 @@ index ab668e17fd05..f58ab9ed4ade 100644
{
disable_runtime = true;
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From d1e9e20fe16f16a1665eabaa44a0f1c2a4cebfec Mon Sep 17 00:00:00 2001
From dc16c6a47edf44cb0f69ff6124d6a85ee6dd3dcc Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 22 Jul 2011 17:58:40 +0200
Subject: [PATCH 057/283] printk: Add a printk kill switch
Subject: [PATCH 057/328] printk: Add a printk kill switch
Add a prinkt-kill-switch. This is used from (NMI) watchdog to ensure that
it does not dead-lock with the early printk code.
@ -30,7 +30,7 @@ index cf3eccfe1543..30ebf5f82a7c 100644
#ifdef CONFIG_PRINTK_NMI
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 06045abd1887..413160a93814 100644
index 7a2fdc097c8c..29838e532f46 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -405,6 +405,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
@ -92,9 +92,9 @@ index 06045abd1887..413160a93814 100644
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
@@ -1897,6 +1949,13 @@ asmlinkage int vprintk_emit(int facility, int level,
bool in_sched = false;
@@ -1905,6 +1957,13 @@ asmlinkage int vprintk_emit(int facility, int level,
unsigned long flags;
u64 curr_log_seq;
+ /*
+ * Fall back to early_printk if a debugging subsystem has
@ -106,7 +106,7 @@ index 06045abd1887..413160a93814 100644
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
@@ -2037,26 +2096,6 @@ static bool suppress_message_printing(int level) { return false; }
@@ -2049,26 +2108,6 @@ static bool suppress_message_printing(int level) { return false; }
#endif /* CONFIG_PRINTK */
@ -169,5 +169,5 @@ index 71381168dede..685443375dc0 100644
nmi_panic(regs, "Hard LOCKUP");
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 3d881bc012788bea38e0bf55b03d9996eb40b1b9 Mon Sep 17 00:00:00 2001
From bfe8e0affbc2a1d3f23e9aec43198b31d1115f3d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Fri, 2 Sep 2011 14:41:29 +0200
Subject: [PATCH 058/283] printk: Add "force_early_printk" boot param to help
Subject: [PATCH 058/328] printk: Add "force_early_printk" boot param to help
with debugging
Gives me an option to screw printk and actually see what the machine
@ -16,7 +16,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
1 file changed, 7 insertions(+)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 413160a93814..6553508ff388 100644
index 29838e532f46..f934baed564d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -435,6 +435,13 @@ asmlinkage void early_printk(const char *fmt, ...)
@ -34,5 +34,5 @@ index 413160a93814..6553508ff388 100644
{
printk_killswitch = true;
--
2.20.1


@ -1,7 +1,7 @@
From 6a3ec551d9ea7e49f20d8f9d3d45fb8d9ca1b720 Mon Sep 17 00:00:00 2001
From f3b0b7d6dafcddd6bed43f78535a5494b0e09aa8 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 24 Jul 2009 12:38:56 +0200
Subject: [PATCH 059/283] preempt: Provide preempt_*_(no)rt variants
Subject: [PATCH 059/328] preempt: Provide preempt_*_(no)rt variants
RT needs a few preempt_disable/enable points which are not necessary
otherwise. Implement variants to avoid #ifdeffery.
@ -48,5 +48,5 @@ index 3196d0e76719..f7a17fcc3fec 100644
struct preempt_notifier;
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 02487d0393920e03426a2378e40bc7547193c3aa Mon Sep 17 00:00:00 2001
From a2a505f1e5d127a2c3b1ee184d27d0f402dcdf63 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 8 Mar 2017 14:23:35 +0100
Subject: [PATCH 060/283] futex: workaround migrate_disable/enable in different
Subject: [PATCH 060/328] futex: workaround migrate_disable/enable in different
context
migrate_disable()/migrate_enable() takes a different path in atomic() vs
@ -16,10 +16,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 19 insertions(+)
diff --git a/kernel/futex.c b/kernel/futex.c
index afdc5eadce6e..304f07d08c95 100644
index e75ad30aa7bc..5c8053098fc8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2876,6 +2876,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
@@ -2879,6 +2879,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
* before __rt_mutex_start_proxy_lock() is done.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
@ -34,7 +34,7 @@ index afdc5eadce6e..304f07d08c95 100644
spin_unlock(q.lock_ptr);
/*
* __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
@@ -2884,6 +2892,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
@@ -2887,6 +2895,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
*/
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
@ -42,7 +42,7 @@ index afdc5eadce6e..304f07d08c95 100644
if (ret) {
if (ret == 1)
@@ -3032,11 +3041,21 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
@@ -3035,11 +3044,21 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* rt_waiter. Also see the WARN in wake_futex_pi().
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
@ -65,5 +65,5 @@ index afdc5eadce6e..304f07d08c95 100644
/*
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 1e4195bafdb198d778c98aece678c7b16cd035c8 Mon Sep 17 00:00:00 2001
From 4db63a0605ac780bf1525c6a90667aef3f897dc1 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 20 Jun 2011 09:03:47 +0200
Subject: [PATCH 061/283] rt: Add local irq locks
Subject: [PATCH 061/328] rt: Add local irq locks
Introduce locallock. For !RT this maps to preempt_disable()/
local_irq_disable() so there is not much that changes. For RT this will
@ -336,5 +336,5 @@ index 70b7123f38c7..24421bf8c4b3 100644
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 58ee9341c0c3521cdb41239c83807a98cef97bd0 Mon Sep 17 00:00:00 2001
From 0e057064ed654b5f7fa22cc5f159ed67eeb332dc Mon Sep 17 00:00:00 2001
From: Julia Cartwright <julia@ni.com>
Date: Mon, 7 May 2018 08:58:56 -0500
Subject: [PATCH 062/283] locallock: provide {get,put}_locked_ptr() variants
Subject: [PATCH 062/328] locallock: provide {get,put}_locked_ptr() variants
Provide a set of locallocked accessors for pointers to per-CPU data;
this is useful for dynamically-allocated per-CPU regions, for example.
@ -44,5 +44,5 @@ index d658c2552601..921eab83cd34 100644
#define local_lock_cpu(lvar) get_cpu()
#define local_unlock_cpu(lvar) put_cpu()
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From a6c38f0b349a8921a1bfe4dcef5972cf1e2224a0 Mon Sep 17 00:00:00 2001
From a132d9a98679bcc505c36c80270ddaa741c15cbc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:34 -0500
Subject: [PATCH 063/283] mm/scatterlist: Do not disable irqs on RT
Subject: [PATCH 063/328] mm/scatterlist: Do not disable irqs on RT
For -RT it is enough to keep pagefault disabled (which is currently handled by
kmap_atomic()).
@ -12,10 +12,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7c6096a71704..5c2c68962709 100644
index 60e7eca2f4be..aad8b9ecd496 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -776,7 +776,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
@@ -777,7 +777,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
@ -25,5 +25,5 @@ index 7c6096a71704..5c2c68962709 100644
} else
kunmap(miter->page);
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From f994c5279fb1173131e67419c540713cd25a59e3 Mon Sep 17 00:00:00 2001
From f95acea987d23816f8094d7db13ae2afb94136ce Mon Sep 17 00:00:00 2001
From: Oleg Nesterov <oleg@redhat.com>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: [PATCH 064/283] signal/x86: Delay calling signals in atomic
Subject: [PATCH 064/328] signal/x86: Delay calling signals in atomic
On x86_64 we must disable preemption before we enable interrupts
for stack faults, int3 and debugging, because the current task is using
@ -79,7 +79,7 @@ index 33d3c88a7225..fb0438d06ca7 100644
typedef sigset_t compat_sigset_t;
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index df39ad5916e7..535e57775208 100644
index 0489d3e0e78c..e4af260f81c5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -881,6 +881,10 @@ struct task_struct {
@ -94,10 +94,10 @@ index df39ad5916e7..535e57775208 100644
size_t sas_ss_size;
unsigned int sas_ss_flags;
diff --git a/kernel/signal.c b/kernel/signal.c
index d5a9646b3538..56edb0580a3a 100644
index 5e278f1540ad..d5e764bb2444 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1268,8 +1268,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
@@ -1277,8 +1277,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
@ -108,7 +108,7 @@ index d5a9646b3538..56edb0580a3a 100644
{
unsigned long int flags;
int ret, blocked, ignored;
@@ -1298,6 +1298,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
@@ -1307,6 +1307,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return ret;
}
@ -149,5 +149,5 @@ index d5a9646b3538..56edb0580a3a 100644
* Nuke all other threads in the group.
*/
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 77f58646e4722365c6b6b91802d5feddd57dff34 Mon Sep 17 00:00:00 2001
From 44575d6c7e6fb548a6bf67f427d151301cd1dfd8 Mon Sep 17 00:00:00 2001
From: Yang Shi <yang.shi@linaro.org>
Date: Thu, 10 Dec 2015 10:58:51 -0800
Subject: [PATCH 065/283] x86/signal: delay calling signals on 32bit
Subject: [PATCH 065/328] x86/signal: delay calling signals on 32bit
When running some ptrace single step tests on x86-32 machine, the below problem
is triggered:
@ -44,5 +44,5 @@ index fb0438d06ca7..c00e27af2205 100644
#endif
--
2.20.1
2.25.1


From 86a1cbd4eddbe083ad0331f0eeec002f6fa7b322 Mon Sep 17 00:00:00 2001
From 6b9121d4d6cf25eabc1b638027345308486f88b1 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
Subject: [PATCH 066/283] buffer_head: Replace bh_uptodate_lock for -rt
Subject: [PATCH 066/328] buffer_head: Replace bh_uptodate_lock for -rt
Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.
@ -86,7 +86,7 @@ index a550e0d8e965..a5b3a456dbff 100644
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index db7590178dfc..d76364124443 100644
index 9cc79b7b0df1..3f4ba2011499 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio)
@ -192,5 +192,5 @@ index 96225a77c112..8a1bcfb145d7 100644
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From a0ac5bf9b179bff5745bd4c15d14cb2ec5c81c16 Mon Sep 17 00:00:00 2001
From 72ca6594764d9a6523352dc609644bea68a3a74b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 10:11:25 +0100
Subject: [PATCH 067/283] fs: jbd/jbd2: Make state lock and journal head lock
Subject: [PATCH 067/328] fs: jbd/jbd2: Make state lock and journal head lock
rt safe
bit_spin_locks break under RT.
@ -44,7 +44,7 @@ index 8a1bcfb145d7..5869330d1f38 100644
}
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 583b82b5a1e9..57f4ad8d45a5 100644
index 268f3000d1b3..8f5d6ecb802e 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
@ -105,5 +105,5 @@ index 583b82b5a1e9..57f4ad8d45a5 100644
#define J_ASSERT(assert) BUG_ON(!(assert))
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 575440eb3e514693de4892b3589bd02b584834ef Mon Sep 17 00:00:00 2001
From eb37c3d2df6895d5c86504fdb1a509d075414f52 Mon Sep 17 00:00:00 2001
From: Paul Gortmaker <paul.gortmaker@windriver.com>
Date: Fri, 21 Jun 2013 15:07:25 -0400
Subject: [PATCH 068/283] list_bl: Make list head locking RT safe
Subject: [PATCH 068/328] list_bl: Make list head locking RT safe
As per changes in include/linux/jbd_common.h for avoiding the
bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal
@ -116,5 +116,5 @@ index 3fc2cc57ba1b..69b659259bac 100644
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
--
2.20.1
2.25.1


From 386260fdddeed151902355b8c816f9b166c1c2b8 Mon Sep 17 00:00:00 2001
From a294373c35c31ae762358146f49c3c48f1429526 Mon Sep 17 00:00:00 2001
From: Josh Cartwright <joshc@ni.com>
Date: Thu, 31 Mar 2016 00:04:25 -0500
Subject: [PATCH 069/283] list_bl: fixup bogus lockdep warning
Subject: [PATCH 069/328] list_bl: fixup bogus lockdep warning
At first glance, the use of 'static inline' seems appropriate for
INIT_HLIST_BL_HEAD().
@ -99,5 +99,5 @@ index 69b659259bac..0b5de7d9ffcf 100644
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From d884d2bff2d643468c5e37727aa29e8f5c88b3be Mon Sep 17 00:00:00 2001
From 974bfebe6d809861b9a25af561668633ef3168a7 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
Subject: [PATCH 070/283] genirq: Disable irqpoll on -rt
Subject: [PATCH 070/328] genirq: Disable irqpoll on -rt
Creates long latencies for no value
@ -38,5 +38,5 @@ index d867d6ddafdd..cd12ee86c01e 100644
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
--
2.20.1
2.25.1
View File
@ -1,7 +1,7 @@
From 91f768aed73cc93826112811b4e622dce0c1915f Mon Sep 17 00:00:00 2001
From 056e2667682c7afa14f71cb6d97ccf2217c0c08b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 3 Apr 2011 11:57:29 +0200
Subject: [PATCH 071/283] genirq: Force interrupt thread on RT
Subject: [PATCH 071/328] genirq: Force interrupt thread on RT
Force threaded_irqs and optimize the code (force_irqthreads) in regard
to this.
@ -29,10 +29,10 @@ index eeceac3376fc..315f852b4981 100644
#define force_irqthreads (0)
#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd4f9f3e8345..c3b9f6dacd8f 100644
index 23bcfa71077f..3c26d0708709 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -23,6 +23,7 @@
@@ -24,6 +24,7 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
@ -40,14 +40,14 @@ index cd4f9f3e8345..c3b9f6dacd8f 100644
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);
@@ -32,6 +33,7 @@ static int __init setup_forced_irqthreads(char *arg)
@@ -33,6 +34,7 @@ static int __init setup_forced_irqthreads(char *arg)
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
+# endif
#endif
static void __synchronize_hardirq(struct irq_desc *desc)
static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
--
2.20.1
2.25.1


From 6ec0e8d1526370de73bd18c096f7f96827594308 Mon Sep 17 00:00:00 2001
From 4c6015fdf87fb7f0f38ce92c85d5630d79c6ae23 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 28 May 2018 15:24:20 +0200
Subject: [PATCH 072/283] Split IRQ-off and zone->lock while freeing pages from
Subject: [PATCH 072/328] Split IRQ-off and zone->lock while freeing pages from
PCP list #1
Split the IRQ-off section while accessing the PCP list from zone->lock
@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 52 insertions(+), 30 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2d04bd2e1ced..332b48f38d1e 100644
index e5c610d711f3..0cfcd42517a4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1095,7 +1095,7 @@ static inline void prefetch_buddy(struct page *page)
@ -111,7 +111,7 @@ index 2d04bd2e1ced..332b48f38d1e 100644
}
static void free_one_page(struct zone *zone,
@@ -2536,13 +2543,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
@@ -2544,13 +2551,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
int to_drain, batch;
@ -131,7 +131,7 @@ index 2d04bd2e1ced..332b48f38d1e 100644
}
#endif
@@ -2558,14 +2570,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
@@ -2566,14 +2578,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@ -155,7 +155,7 @@ index 2d04bd2e1ced..332b48f38d1e 100644
}
/*
@@ -2787,7 +2806,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
@@ -2795,7 +2814,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@ -168,5 +168,5 @@ index 2d04bd2e1ced..332b48f38d1e 100644
}
--
2.20.1
2.25.1


From f9efb76f365f15eaca8f29ee7f2648de90925a76 Mon Sep 17 00:00:00 2001
From dba8e6d7ab200ab5fe544af8c6093bcb3d215320 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 28 May 2018 15:24:21 +0200
Subject: [PATCH 073/283] Split IRQ-off and zone->lock while freeing pages from
Subject: [PATCH 073/328] Split IRQ-off and zone->lock while freeing pages from
PCP list #2
Split the IRQ-off section while accessing the PCP list from zone->lock
@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 50 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 332b48f38d1e..55cee9a17a36 100644
index 0cfcd42517a4..9a4d150ea5b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1105,8 +1105,8 @@ static inline void prefetch_buddy(struct page *page)
@ -59,7 +59,7 @@ index 332b48f38d1e..55cee9a17a36 100644
__free_one_page(page, page_to_pfn(page), zone, 0, mt);
trace_mm_page_pcpu_drain(page, 0, mt);
}
@@ -2554,7 +2569,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
@@ -2562,7 +2577,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
local_irq_restore(flags);
if (to_drain > 0)
@ -68,7 +68,7 @@ index 332b48f38d1e..55cee9a17a36 100644
}
#endif
@@ -2584,7 +2599,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
@@ -2592,7 +2607,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
local_irq_restore(flags);
if (count)
@ -77,7 +77,7 @@ index 332b48f38d1e..55cee9a17a36 100644
}
/*
@@ -2777,7 +2792,8 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
@@ -2785,7 +2800,8 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
return true;
}
@ -87,7 +87,7 @@ index 332b48f38d1e..55cee9a17a36 100644
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
@@ -2806,10 +2822,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
@@ -2814,10 +2830,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@ -99,7 +99,7 @@ index 332b48f38d1e..55cee9a17a36 100644
}
}
@@ -2820,13 +2834,17 @@ void free_unref_page(struct page *page)
@@ -2828,13 +2842,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@ -118,7 +118,7 @@ index 332b48f38d1e..55cee9a17a36 100644
}
/*
@@ -2837,6 +2855,11 @@ void free_unref_page_list(struct list_head *list)
@@ -2845,6 +2863,11 @@ void free_unref_page_list(struct list_head *list)
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@ -130,7 +130,7 @@ index 332b48f38d1e..55cee9a17a36 100644
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
@@ -2849,10 +2872,12 @@ void free_unref_page_list(struct list_head *list)
@@ -2857,10 +2880,12 @@ void free_unref_page_list(struct list_head *list)
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@ -144,7 +144,7 @@ index 332b48f38d1e..55cee9a17a36 100644
/*
* Guard against excessive IRQ disabled times when we get
@@ -2865,6 +2890,21 @@ void free_unref_page_list(struct list_head *list)
@@ -2873,6 +2898,21 @@ void free_unref_page_list(struct list_head *list)
}
}
local_irq_restore(flags);
@ -167,5 +167,5 @@ index 332b48f38d1e..55cee9a17a36 100644
/*
--
2.20.1
2.25.1


From 302fcfd8e9527e8f7b6ec9d733a5a3a760af64ef Mon Sep 17 00:00:00 2001
From abbdf6516e6ac19a92a3c08fc7a2f1ecc66c2bc6 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 28 May 2018 15:24:22 +0200
Subject: [PATCH 074/283] mm/SLxB: change list_lock to raw_spinlock_t
Subject: [PATCH 074/328] mm/SLxB: change list_lock to raw_spinlock_t
The list_lock is used with used with IRQs off on RT. Make it a raw_spinlock_t
otherwise the interrupts won't be disabled on -RT. The locking rules remain
@ -398,7 +398,7 @@ index 9632772e14be..d6b01d61f768 100644
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
diff --git a/mm/slub.c b/mm/slub.c
index 09c0e24a06d8..9450fb6da89f 100644
index 9c3937c5ce38..ba20c68a9cfd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1167,7 +1167,7 @@ static noinline int free_debug_processing(
@ -614,5 +614,5 @@ index 09c0e24a06d8..9450fb6da89f 100644
for (i = 0; i < t.count; i++) {
--
2.20.1
2.25.1


From 9da82885e5b9187857b5fdc2eaa482752e814fbc Mon Sep 17 00:00:00 2001
From b3c42996e1092269d6fac0652b22140250f11b7b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 21 Jun 2018 17:29:19 +0200
Subject: [PATCH 075/283] mm/SLUB: delay giving back empty slubs to IRQ enabled
Subject: [PATCH 075/328] mm/SLUB: delay giving back empty slubs to IRQ enabled
regions
__free_slab() is invoked with disabled interrupts which increases the
@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 69 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 9450fb6da89f..7fd47a914f61 100644
index ba20c68a9cfd..224663e20772 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1330,6 +1330,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
@ -218,5 +218,5 @@ index 9450fb6da89f..7fd47a914f61 100644
if (debug_guardpage_minorder())
slub_max_order = 0;
--
2.20.1
2.25.1


From 4cd1dede47de27525631161fdc6cdfc9d8608c31 Mon Sep 17 00:00:00 2001
From d3dec69695332f82af11a39b0fd327ad173c4715 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:37 -0500
Subject: [PATCH 076/283] mm: page_alloc: rt-friendly per-cpu pages
Subject: [PATCH 076/328] mm: page_alloc: rt-friendly per-cpu pages
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.
@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 43 insertions(+), 20 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 55cee9a17a36..99b3861b1ef6 100644
index 9a4d150ea5b7..d6f9be9c6635 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -60,6 +60,7 @@
@ -60,7 +60,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2560,13 +2573,13 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
@@ -2568,13 +2581,13 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
int to_drain, batch;
LIST_HEAD(dst);
@ -76,7 +76,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
@@ -2588,7 +2601,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
@@ -2596,7 +2609,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
LIST_HEAD(dst);
int count;
@ -85,7 +85,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
@@ -2596,7 +2609,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
@@ -2604,7 +2617,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
if (count)
isolate_pcp_pages(count, pcp, &dst);
@ -94,7 +94,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
if (count)
free_pcppages_bulk(zone, &dst, false);
@@ -2634,6 +2647,7 @@ void drain_local_pages(struct zone *zone)
@@ -2642,6 +2655,7 @@ void drain_local_pages(struct zone *zone)
drain_pages(cpu);
}
@ -102,7 +102,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
static void drain_local_pages_wq(struct work_struct *work)
{
/*
@@ -2647,6 +2661,7 @@ static void drain_local_pages_wq(struct work_struct *work)
@@ -2655,6 +2669,7 @@ static void drain_local_pages_wq(struct work_struct *work)
drain_local_pages(NULL);
preempt_enable();
}
@ -110,7 +110,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
@@ -2713,7 +2728,14 @@ void drain_all_pages(struct zone *zone)
@@ -2721,7 +2736,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@ -126,7 +126,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
@@ -2721,6 +2743,7 @@ void drain_all_pages(struct zone *zone)
@@ -2729,6 +2751,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@ -134,7 +134,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
mutex_unlock(&pcpu_drain_mutex);
}
@@ -2840,9 +2863,9 @@ void free_unref_page(struct page *page)
@@ -2848,9 +2871,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@ -146,7 +146,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
@@ -2869,7 +2892,7 @@ void free_unref_page_list(struct list_head *list)
@@ -2877,7 +2900,7 @@ void free_unref_page_list(struct list_head *list)
set_page_private(page, pfn);
}
@ -155,7 +155,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
@@ -2884,12 +2907,12 @@ void free_unref_page_list(struct list_head *list)
@@ -2892,12 +2915,12 @@ void free_unref_page_list(struct list_head *list)
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@ -171,7 +171,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
@@ -3038,7 +3061,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
@@ -3046,7 +3069,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
struct page *page;
unsigned long flags;
@ -180,7 +180,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, pcp, list);
@@ -3046,7 +3069,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
@@ -3054,7 +3077,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
}
@ -189,7 +189,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
return page;
}
@@ -3073,7 +3096,7 @@ struct page *rmqueue(struct zone *preferred_zone,
@@ -3081,7 +3104,7 @@ struct page *rmqueue(struct zone *preferred_zone,
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@ -198,7 +198,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
do {
page = NULL;
@@ -3093,14 +3116,14 @@ struct page *rmqueue(struct zone *preferred_zone,
@@ -3101,14 +3124,14 @@ struct page *rmqueue(struct zone *preferred_zone,
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@ -215,7 +215,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
return NULL;
}
@@ -8096,7 +8119,7 @@ void zone_pcp_reset(struct zone *zone)
@@ -8129,7 +8152,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@ -224,7 +224,7 @@ index 55cee9a17a36..99b3861b1ef6 100644
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
@@ -8105,7 +8128,7 @@ void zone_pcp_reset(struct zone *zone)
@@ -8138,7 +8161,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
@ -234,5 +234,5 @@ index 55cee9a17a36..99b3861b1ef6 100644
#ifdef CONFIG_MEMORY_HOTREMOVE
--
2.20.1
2.25.1


From 98c01e9756e741d807b1198eb885a26e0998fcde Mon Sep 17 00:00:00 2001
From a03a4ee82ac46307acebdfe58e602aea9835a9a1 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: [PATCH 077/283] mm/swap: Convert to percpu locked
Subject: [PATCH 077/328] mm/swap: Convert to percpu locked
Replace global locks (get_cpu + local_irq_save) with "local_locks()".
Currently there is one of for "rotate" and one for "swap".
@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
4 files changed, 30 insertions(+), 19 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7bd0a6f2ac2b..e643672fa802 100644
index ee8f9f554a9e..2ad000e362bd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -12,6 +12,7 @@
@ -36,10 +36,10 @@ index 7bd0a6f2ac2b..e643672fa802 100644
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
diff --git a/mm/compaction.c b/mm/compaction.c
index faca45ebe62d..f8ccb9d9daa3 100644
index 5079ddbec8f9..c40d3a13cbbd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1657,10 +1657,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
@@ -1668,10 +1668,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
block_start_pfn(cc->migrate_pfn, cc->order);
if (cc->last_migrated_pfn < current_block_start) {
@ -55,10 +55,10 @@ index faca45ebe62d..f8ccb9d9daa3 100644
cc->last_migrated_pfn = 0;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 99b3861b1ef6..1679f5883307 100644
index d6f9be9c6635..a1547f1be42c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7207,8 +7207,9 @@ void __init free_area_init(unsigned long *zones_size)
@@ -7242,8 +7242,9 @@ void __init free_area_init(unsigned long *zones_size)
static int page_alloc_cpu_dead(unsigned int cpu)
{
@ -70,7 +70,7 @@ index 99b3861b1ef6..1679f5883307 100644
/*
diff --git a/mm/swap.c b/mm/swap.c
index a3fc028e338e..4bac22ec1328 100644
index 45fdbfb6b2a6..92f994b962f0 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -33,6 +33,7 @@
@ -206,5 +206,5 @@ index a3fc028e338e..4bac22ec1328 100644
#ifdef CONFIG_SMP
--
2.20.1
2.25.1


From f4f53c9fdf55676d783a4fbad5049f39401a0542 Mon Sep 17 00:00:00 2001
From c48feb8fe1bad2aed0a15440a28da0bca8b5292a Mon Sep 17 00:00:00 2001
From: Luiz Capitulino <lcapitulino@redhat.com>
Date: Fri, 27 May 2016 15:03:28 +0200
Subject: [PATCH 078/283] mm: perform lru_add_drain_all() remotely
Subject: [PATCH 078/328] mm: perform lru_add_drain_all() remotely
lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
on all CPUs that have non-empty LRU pagevecs and then waiting for
@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 4bac22ec1328..0457927d3f0c 100644
index 92f994b962f0..3885645a45ce 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -585,9 +585,15 @@ void lru_add_drain_cpu(int cpu)
@ -104,5 +104,5 @@ index 4bac22ec1328..0457927d3f0c 100644
mutex_unlock(&lock);
}
--
2.20.1
2.25.1


From 3e1b4a0068b41c1782264376379985fb992bd41e Mon Sep 17 00:00:00 2001
From 4e41266214b4e88cf9fb9d2c20b5bbc83dcfbdcc Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:13 -0500
Subject: [PATCH 079/283] mm/vmstat: Protect per cpu variables with preempt
Subject: [PATCH 079/328] mm/vmstat: Protect per cpu variables with preempt
disable on RT
Disable preemption on -RT for the vmstat code. On vanila the code runs in
@ -40,7 +40,7 @@ index f25cef84b41d..febee8649220 100644
static inline void count_vm_events(enum vm_event_item item, long delta)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4a387937f9f5..0cd11c5e3999 100644
index ce81b0a7d018..cfa2a3bbdf91 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -320,6 +320,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
@ -140,5 +140,5 @@ index 4a387937f9f5..0cd11c5e3999 100644
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
--
2.20.1
2.25.1


From fb089e89b26bc5653a90d9983021813e15fa04d9 Mon Sep 17 00:00:00 2001
From 5af4ea849237914c63d3fd50079e6975aa28f9b2 Mon Sep 17 00:00:00 2001
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Sat, 1 Oct 2011 18:58:13 -0700
Subject: [PATCH 080/283] ARM: Initialize split page table locks for vector
Subject: [PATCH 080/328] ARM: Initialize split page table locks for vector
page
Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if
@ -71,5 +71,5 @@ index 82ab015bf42b..8d3c7ce34c24 100644
/*
* The vectors page is always readable from user space for the
--
2.20.1
2.25.1


From b01d03c695bcba2149713f4425c806b5b5e3410d Mon Sep 17 00:00:00 2001
From 5bc9982e129b20ecb8f6c32d3d342af5087ffdae Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Oct 2012 10:32:35 +0100
Subject: [PATCH 081/283] mm: Enable SLUB for RT
Subject: [PATCH 081/328] mm: Enable SLUB for RT
Avoid the memory allocation in IRQ section
@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 6 insertions(+)
diff --git a/mm/slub.c b/mm/slub.c
index 7fd47a914f61..efd441e79e6f 100644
index 224663e20772..cbe47408c6eb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3680,6 +3680,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
@ -37,5 +37,5 @@ index 7fd47a914f61..efd441e79e6f 100644
/*
--
2.20.1
2.25.1


From 14471a3281f661b8b8bccdb64820879a699fb2ad Mon Sep 17 00:00:00 2001
From c908b13d5d4d0e1154b41e93d9fb8349b7b7197a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 9 Jan 2013 12:08:15 +0100
Subject: [PATCH 082/283] slub: Enable irqs for __GFP_WAIT
Subject: [PATCH 082/328] slub: Enable irqs for __GFP_WAIT
SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
with GFP_WAIT can happen before that. So use this as an indicator.
@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index efd441e79e6f..2240b51a0549 100644
index cbe47408c6eb..81c32ceab228 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1570,10 +1570,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
@ -43,5 +43,5 @@ index efd441e79e6f..2240b51a0549 100644
if (!page)
return NULL;
--
2.20.1
2.25.1


From fb6bfe69057a4177f5f5b273cace7ea5cbb5f649 Mon Sep 17 00:00:00 2001
From 41e0143022ef4180dafc14f033e72efa7ac652de Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 15 Apr 2015 19:00:47 +0200
Subject: [PATCH 083/283] slub: Disable SLUB_CPU_PARTIAL
Subject: [PATCH 083/328] slub: Disable SLUB_CPU_PARTIAL
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
|in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7
@ -49,5 +49,5 @@ index 61e8b531649b..b4e88fb19c26 100644
help
Per cpu partial caches accellerate objects allocation and freeing
--
2.20.1
2.25.1


From b64de8d2bb376abf6af01c84a94e1a201aecc6ec Mon Sep 17 00:00:00 2001
From 5d6ef143b9e65be0cda54dcea9150f3cfa951ffd Mon Sep 17 00:00:00 2001
From: Yang Shi <yang.shi@windriver.com>
Date: Wed, 30 Oct 2013 11:48:33 -0700
Subject: [PATCH 084/283] mm/memcontrol: Don't call schedule_work_on in
Subject: [PATCH 084/328] mm/memcontrol: Don't call schedule_work_on in
preemption disabled context
The following trace is triggered when running ltp oom test cases:
@ -48,10 +48,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7e7cc0cd89fe..174329de4779 100644
index 3a3d109dce21..cf9e81fb342d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2063,7 +2063,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
@@ -2082,7 +2082,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
@ -60,7 +60,7 @@ index 7e7cc0cd89fe..174329de4779 100644
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
@@ -2083,7 +2083,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
@@ -2102,7 +2102,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
}
css_put(&memcg->css);
}
@ -70,5 +70,5 @@ index 7e7cc0cd89fe..174329de4779 100644
}
--
2.20.1
2.25.1


From 3cb7dde3b41a847eefeac79763e46ce167c8521f Mon Sep 17 00:00:00 2001
From ab73b56574e07b881a37aa1a4b0040a331352d7c Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 28 Jan 2015 17:14:16 +0100
Subject: [PATCH 085/283] mm/memcontrol: Replace local_irq_disable with local
Subject: [PATCH 085/328] mm/memcontrol: Replace local_irq_disable with local
locks
There are a few local_irq_disable() which then take sleeping locks. This
@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 174329de4779..d0f245d80f93 100644
index cf9e81fb342d..421ac74450f6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -69,6 +69,7 @@
@ -33,7 +33,7 @@ index 174329de4779..d0f245d80f93 100644
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
@@ -4884,12 +4887,12 @@ static int mem_cgroup_move_account(struct page *page,
@@ -4922,12 +4925,12 @@ static int mem_cgroup_move_account(struct page *page,
ret = 0;
@ -48,7 +48,7 @@ index 174329de4779..d0f245d80f93 100644
out_unlock:
unlock_page(page);
out:
@@ -6008,10 +6011,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
@@ -6046,10 +6049,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
commit_charge(page, memcg, lrucare);
@ -61,7 +61,7 @@ index 174329de4779..d0f245d80f93 100644
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
@@ -6080,7 +6083,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
@@ -6118,7 +6121,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
memcg_oom_recover(ug->memcg);
}
@ -70,7 +70,7 @@ index 174329de4779..d0f245d80f93 100644
__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
@@ -6088,7 +6091,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
@@ -6126,7 +6129,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
@ -79,7 +79,7 @@ index 174329de4779..d0f245d80f93 100644
if (!mem_cgroup_is_root(ug->memcg))
css_put_many(&ug->memcg->css, nr_pages);
@@ -6251,10 +6254,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
@@ -6289,10 +6292,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
commit_charge(newpage, memcg, false);
@ -92,7 +92,7 @@ index 174329de4779..d0f245d80f93 100644
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -6446,6 +6449,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
@@ -6484,6 +6487,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@ -100,7 +100,7 @@ index 174329de4779..d0f245d80f93 100644
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
@@ -6491,13 +6495,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
@@ -6529,13 +6533,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
@ -119,5 +119,5 @@ index 174329de4779..d0f245d80f93 100644
/**
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 50eae40f0475c039a273e2f5441f4ecda84d104e Mon Sep 17 00:00:00 2001
From c8eeca08279f6363742db822856cc18ae1b7bdbd Mon Sep 17 00:00:00 2001
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Tue, 22 Mar 2016 11:16:09 +0100
Subject: [PATCH 086/283] mm/zsmalloc: copy with get_cpu_var() and locking
Subject: [PATCH 086/328] mm/zsmalloc: copy with get_cpu_var() and locking
get_cpu_var() disables preemption and triggers a might_sleep() splat later.
This is replaced with get_locked_var().
@ -17,18 +17,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 74 insertions(+), 6 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 9da65552e7ca..63c193c1ff96 100644
index 85cc29c93d93..63e83b47fa99 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -55,6 +55,7 @@
#include <linux/migrate.h>
@@ -56,6 +56,7 @@
#include <linux/wait.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
+#include <linux/locallock.h>
#define ZSPAGE_MAGIC 0x58
@@ -72,9 +73,22 @@
@@ -73,9 +74,22 @@
*/
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
@ -52,7 +52,7 @@ index 9da65552e7ca..63c193c1ff96 100644
/*
* Object location (<PFN>, <obj_idx>) is encoded as
* as single (unsigned long) handle value.
@@ -320,7 +334,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
@@ -325,7 +339,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
static int create_cache(struct zs_pool *pool)
{
@ -61,7 +61,7 @@ index 9da65552e7ca..63c193c1ff96 100644
0, 0, NULL);
if (!pool->handle_cachep)
return 1;
@@ -344,10 +358,27 @@ static void destroy_cache(struct zs_pool *pool)
@@ -349,10 +363,27 @@ static void destroy_cache(struct zs_pool *pool)
static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
@ -91,7 +91,7 @@ index 9da65552e7ca..63c193c1ff96 100644
static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
kmem_cache_free(pool->handle_cachep, (void *)handle);
@@ -366,12 +397,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
@@ -371,12 +402,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
static void record_obj(unsigned long handle, unsigned long obj)
{
@ -110,7 +110,7 @@ index 9da65552e7ca..63c193c1ff96 100644
}
/* zpool driver */
@@ -453,6 +490,7 @@ MODULE_ALIAS("zpool-zsmalloc");
@@ -458,6 +495,7 @@ MODULE_ALIAS("zpool-zsmalloc");
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
@ -118,7 +118,7 @@ index 9da65552e7ca..63c193c1ff96 100644
static bool is_zspage_isolated(struct zspage *zspage)
{
@@ -882,7 +920,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
@@ -887,7 +925,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
static unsigned long handle_to_obj(unsigned long handle)
{
@ -132,7 +132,7 @@ index 9da65552e7ca..63c193c1ff96 100644
}
static unsigned long obj_to_head(struct page *page, void *obj)
@@ -896,22 +940,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
@@ -901,22 +945,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
static inline int testpin_tag(unsigned long handle)
{
@ -179,7 +179,7 @@ index 9da65552e7ca..63c193c1ff96 100644
}
static void reset_page(struct page *page)
@@ -1337,7 +1405,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
@@ -1342,7 +1410,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
class = pool->size_class[class_idx];
off = (class->size * obj_idx) & ~PAGE_MASK;
@ -188,7 +188,7 @@ index 9da65552e7ca..63c193c1ff96 100644
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
@@ -1391,7 +1459,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
@@ -1396,7 +1464,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
__zs_unmap_object(area, pages, off, class->size);
}
@ -198,5 +198,5 @@ index 9da65552e7ca..63c193c1ff96 100644
migrate_read_unlock(zspage);
unpin_tag(handle);
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 3d625e1fb1f5adff8191330efe6d47017b0806bd Mon Sep 17 00:00:00 2001
From 58952b3995a060f4fc7fbc02552ac489639d565e Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 11 Dec 2018 21:53:43 +0100
Subject: [PATCH 087/283] x86/mm/pat: disable preemption __split_large_page()
Subject: [PATCH 087/328] x86/mm/pat: disable preemption __split_large_page()
after spin_lock()
Commit "x86/mm/pat: Disable preemption around __flush_tlb_all()" added a
@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 8 insertions(+)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e2d4b25c7aa4..9626ebb9e3c8 100644
index 101f3ad0d6ad..0b0396261ca1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -687,12 +687,18 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
@ -57,5 +57,5 @@ index e2d4b25c7aa4..9626ebb9e3c8 100644
return 0;
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 0a7a65a5055b7a5a94c57ee2dc8404116cff804b Mon Sep 17 00:00:00 2001
From 7f7e6402ea1895f3d2197122d4379c46a3a7fe14 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 25 Jan 2017 16:34:27 +0100
Subject: [PATCH 088/283] radix-tree: use local locks
Subject: [PATCH 088/328] radix-tree: use local locks
The preload functionality uses per-CPU variables and preempt-disable to
ensure that it does not switch CPUs during its usage. This patch adds
@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
3 files changed, 26 insertions(+), 18 deletions(-)
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 3ec8628ce17f..54af68158f7d 100644
index b6c6151c7446..81c9df5c04fa 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -169,10 +169,7 @@ static inline bool idr_is_empty(const struct idr *idr)
@ -59,7 +59,7 @@ index 34149e8b5f73..affb0fc4c5b6 100644
int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bc03ecc4dfd2..44257463f683 100644
index e5cab5c4e383..9309e813bc1f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -38,7 +38,7 @@
@ -171,5 +171,5 @@ index bc03ecc4dfd2..44257463f683 100644
if (!this_cpu_read(ida_bitmap)) {
struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 5bbf9de052f34cd8d685120f60da34937f2b0772 Mon Sep 17 00:00:00 2001
From bb113ab4c9dea8e53db84af84d34864c4f3e9b2d Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: [PATCH 089/283] timers: Prepare for full preemption
Subject: [PATCH 089/328] timers: Prepare for full preemption
When softirqs can be preempted we need to make sure that cancelling
the timer from the active thread can not deadlock vs. a running timer
@ -29,7 +29,7 @@ index 7b066fd38248..54627d046b3a 100644
#else
# define del_timer_sync(t) del_timer(t)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7a39d56f6a6b..5de80f29ef57 100644
index e6022cc2605b..986ed04425be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -498,11 +498,14 @@ void resched_cpu(int cpu)
@ -59,7 +59,7 @@ index 7a39d56f6a6b..5de80f29ef57 100644
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index fa49cd753dea..bbe24e241643 100644
index ae64cb819a9a..9019c9caf146 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -44,6 +44,7 @@
@ -150,7 +150,7 @@ index fa49cd753dea..bbe24e241643 100644
raw_spin_lock_irq(&base->lock);
}
}
@@ -1681,8 +1715,8 @@ static inline void __run_timers(struct timer_base *base)
@@ -1683,8 +1717,8 @@ static inline void __run_timers(struct timer_base *base)
while (levels--)
expire_timers(base, heads + levels);
}
@ -160,7 +160,7 @@ index fa49cd753dea..bbe24e241643 100644
}
/*
@@ -1927,6 +1961,9 @@ static void __init init_timer_cpu(int cpu)
@@ -1929,6 +1963,9 @@ static void __init init_timer_cpu(int cpu)
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
@ -171,5 +171,5 @@ index fa49cd753dea..bbe24e241643 100644
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 49f95baf1667e4853406b63d30062b94afff4a25 Mon Sep 17 00:00:00 2001
From c87615728aaaf5a59575f49682ed6339a9cb116f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 6 Nov 2011 12:26:18 +0100
Subject: [PATCH 090/283] x86: kvm Require const tsc for RT
Subject: [PATCH 090/328] x86: kvm Require const tsc for RT
Non constant TSC is a nightmare on bare metal already, but with
virtualization it becomes a complete disaster because the workarounds
@ -14,10 +14,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 7 insertions(+)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cea6568667c4..c90545667fd6 100644
index ade694f94a49..2dfb7c81743e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6756,6 +6756,13 @@ int kvm_arch_init(void *opaque)
@@ -6873,6 +6873,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
@ -32,5 +32,5 @@ index cea6568667c4..c90545667fd6 100644
if (r)
goto out_free_percpu;
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 99fc3867798d14c5cff8c71c3872af84605d572d Mon Sep 17 00:00:00 2001
From d46161e1a4fa5ff7b32deb64ac2e7698d0a56e49 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 4 Oct 2017 10:24:23 +0200
Subject: [PATCH 091/283] pci/switchtec: Don't use completion's wait queue
Subject: [PATCH 091/328] pci/switchtec: Don't use completion's wait queue
The poll callback is using completion's wait_queue_head_t member and
puts it in poll_wait() so the poll() caller gets a wakeup after command
@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 72db2e0ebced..77d4fb86d05b 100644
index 43431816412c..a8df847bedee 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -43,10 +43,11 @@ struct switchtec_user {
@ -48,7 +48,7 @@ index 72db2e0ebced..77d4fb86d05b 100644
stuser->event_cnt = atomic_read(&stdev->event_cnt);
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
@@ -151,7 +152,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
@@ -147,7 +148,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
kref_get(&stuser->kref);
stuser->read_len = sizeof(stuser->data);
stuser_set_state(stuser, MRPC_QUEUED);
@ -57,7 +57,7 @@ index 72db2e0ebced..77d4fb86d05b 100644
list_add_tail(&stuser->list, &stdev->mrpc_queue);
mrpc_cmd_submit(stdev);
@@ -188,7 +189,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
@@ -184,7 +185,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
stuser->read_len);
out:
@ -67,7 +67,7 @@ index 72db2e0ebced..77d4fb86d05b 100644
list_del_init(&stuser->list);
stuser_put(stuser);
stdev->mrpc_busy = 0;
@@ -458,10 +460,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
@@ -454,10 +456,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
mutex_unlock(&stdev->mrpc_mutex);
if (filp->f_flags & O_NONBLOCK) {
@ -81,7 +81,7 @@ index 72db2e0ebced..77d4fb86d05b 100644
if (rc < 0)
return rc;
}
@@ -509,7 +512,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
@@ -505,7 +508,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
struct switchtec_dev *stdev = stuser->stdev;
__poll_t ret = 0;
@ -90,7 +90,7 @@ index 72db2e0ebced..77d4fb86d05b 100644
poll_wait(filp, &stdev->event_wq, wait);
if (lock_mutex_and_test_alive(stdev))
@@ -517,7 +520,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
@@ -513,7 +516,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
mutex_unlock(&stdev->mrpc_mutex);
@ -99,7 +99,7 @@ index 72db2e0ebced..77d4fb86d05b 100644
ret |= EPOLLIN | EPOLLRDNORM;
if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
@@ -1041,7 +1044,8 @@ static void stdev_kill(struct switchtec_dev *stdev)
@@ -1037,7 +1040,8 @@ static void stdev_kill(struct switchtec_dev *stdev)
/* Wake up and kill any users waiting on an MRPC request */
list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
@ -110,5 +110,5 @@ index 72db2e0ebced..77d4fb86d05b 100644
stuser_put(stuser);
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 88037fc07062d469557427c97507d3f95d7ca3a6 Mon Sep 17 00:00:00 2001
From 5048f6148f091b822260d482639172336a66cbc3 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 28 Oct 2013 12:19:57 +0100
Subject: [PATCH 092/283] wait.h: include atomic.h
Subject: [PATCH 092/328] wait.h: include atomic.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@ -37,5 +37,5 @@ index ed7c122cb31f..2b5ef8e94d19 100644
typedef struct wait_queue_entry wait_queue_entry_t;
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 67478d9c6704de32600fd4363f3853bcdffcf391 Mon Sep 17 00:00:00 2001
From 370c2439db620266b1bb104cc624841eec515e5c Mon Sep 17 00:00:00 2001
From: Daniel Wagner <daniel.wagner@bmw-carit.de>
Date: Fri, 11 Jul 2014 15:26:11 +0200
Subject: [PATCH 093/283] work-simple: Simple work queue implemenation
Subject: [PATCH 093/328] work-simple: Simple work queue implemenation
Provides a framework for enqueuing callbacks from irq context
PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
@ -241,5 +241,5 @@ index 000000000000..a5b89fdacf19
+}
+EXPORT_SYMBOL_GPL(swork_put);
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 147a7822bfe8f027b88fa0ca82ae0d210e57bf34 Mon Sep 17 00:00:00 2001
From 8c88098a7081d7cd354fb9e2a64598e6e10ce525 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 10 Sep 2018 18:00:31 +0200
Subject: [PATCH 094/283] work-simple: drop a shit statement in
Subject: [PATCH 094/328] work-simple: drop a shit statement in
SWORK_EVENT_PENDING
Dan Carpenter reported
@ -32,5 +32,5 @@ index a5b89fdacf19..c90d14b9b126 100644
static DEFINE_MUTEX(worker_mutex);
static struct sworker *glob_worker;
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 334dc78522991ee04c4704a53d24f0009be71172 Mon Sep 17 00:00:00 2001
From ae24940034c02ed671e3a5cc9c4cf31ebfc24fed Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 11 Jan 2013 11:23:51 +0100
Subject: [PATCH 095/283] completion: Use simple wait queues
Subject: [PATCH 095/328] completion: Use simple wait queues
Completions have no long lasting callbacks and therefor do not need
the complex waitqueue variant. Use simple waitqueues which reduces the
@ -39,7 +39,7 @@ index e7075aaff1bb..1580464a9d5b 100644
res = -EINTR;
if (res) {
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 94ad6fe29e69..52a49f0bbc19 100644
index b704e4bce171..c364abaac548 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
@ -54,10 +54,10 @@ index 94ad6fe29e69..52a49f0bbc19 100644
break;
default:
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index aa15593a3ac4..5e9269cd14fa 100644
index 2050993fb58b..e2ca75a6e241 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1624,7 +1624,7 @@ static void ffs_data_put(struct ffs_data *ffs)
@@ -1626,7 +1626,7 @@ static void ffs_data_put(struct ffs_data *ffs)
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@ -319,10 +319,10 @@ index a1ad5b7d5521..755a58084978 100644
}
EXPORT_SYMBOL(completion_done);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5de80f29ef57..337cc72e6a6a 100644
index 986ed04425be..584978640512 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7116,7 +7116,10 @@ void migrate_disable(void)
@@ -7154,7 +7154,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@ -334,7 +334,7 @@ index 5de80f29ef57..337cc72e6a6a 100644
#endif
if (p->migrate_disable) {
@@ -7146,7 +7149,10 @@ void migrate_enable(void)
@@ -7184,7 +7187,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@ -386,5 +386,5 @@ index 66b59ac77c22..c7cb30cdd1b7 100644
wait->task = current;
if (list_empty(&wait->task_list))
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From b576efb7cedb58ffa58242d7b0df24d14063ba0e Mon Sep 17 00:00:00 2001
From 4ab27b1ec5f678a5dd444c6e1d3cdff6eeabfa12 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 16 Feb 2015 18:49:10 +0100
Subject: [PATCH 096/283] fs/aio: simple simple work
Subject: [PATCH 096/328] fs/aio: simple simple work
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768
|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2
@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 911e23087dfb..16dcf8521c2c 100644
index b5fbf2061868..93f8cf7fdeab 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -42,6 +42,7 @@
@ -84,5 +84,5 @@ index 911e23087dfb..16dcf8521c2c 100644
{
unsigned i, new_nr;
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From bac483c38a96edeadc43fa8dcf03c3e57c41cc62 Mon Sep 17 00:00:00 2001
From d349b691ea7fdefe94bb546a0533b63786c1857d Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 21 Aug 2013 17:48:46 +0200
Subject: [PATCH 097/283] genirq: Do not invoke the affinity callback via a
Subject: [PATCH 097/328] genirq: Do not invoke the affinity callback via a
workqueue on RT
Joe Korty reported, that __irq_set_affinity_locked() schedules a
@ -48,10 +48,10 @@ index 315f852b4981..a943c07b54ba 100644
void (*release)(struct kref *ref);
};
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c3b9f6dacd8f..af2a8757abfb 100644
index 3c26d0708709..eadcbfbd434a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -259,7 +259,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
@@ -285,7 +285,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
@ -64,7 +64,7 @@ index c3b9f6dacd8f..af2a8757abfb 100644
}
irqd_set(data, IRQD_AFFINITY_SET);
@@ -297,10 +302,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
@@ -323,10 +328,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
@ -76,7 +76,7 @@ index c3b9f6dacd8f..af2a8757abfb 100644
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
@@ -322,6 +325,35 @@ static void irq_affinity_notify(struct work_struct *work)
@@ -348,6 +351,35 @@ static void irq_affinity_notify(struct work_struct *work)
kref_put(&notify->kref, notify->release);
}
@ -112,7 +112,7 @@ index c3b9f6dacd8f..af2a8757abfb 100644
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
@@ -350,7 +382,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
@@ -376,7 +408,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
if (notify) {
notify->irq = irq;
kref_init(&notify->kref);
@ -125,7 +125,7 @@ index c3b9f6dacd8f..af2a8757abfb 100644
}
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -359,7 +396,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
@@ -385,7 +422,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (old_notify) {
@ -137,5 +137,5 @@ index c3b9f6dacd8f..af2a8757abfb 100644
}
--
2.20.1
2.25.1

View File

@ -1,7 +1,7 @@
From 7ada38687fe4d4f0ff8b7390d1588f7fed28a28d Mon Sep 17 00:00:00 2001
From 059e9b393e1838e4ad06a521a8e11c21e7ea7919 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 15 Nov 2017 17:29:51 +0100
Subject: [PATCH 098/283] time/hrtimer: avoid schedule_work() with interrupts
Subject: [PATCH 098/328] time/hrtimer: avoid schedule_work() with interrupts
disabled
The NOHZ code tries to schedule a workqueue with interrupts disabled.
@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index bbe24e241643..696e7583137c 100644
index 9019c9caf146..3fab1c50bf1b 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -217,8 +217,7 @@ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
@ -55,5 +55,5 @@ index bbe24e241643..696e7583137c 100644
void __user *buffer, size_t *lenp,
loff_t *ppos)
--
2.20.1
2.25.1

Some files were not shown because too many files have changed in this diff