From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 17 Aug 2020 12:28:10 +0200
Subject: [PATCH] u64_stats: Disable preemption on 32bit-UP/SMP with RT during
 updates

On RT the seqcount_t is required even on UP because the softirq can be
preempted. The IRQ handler is threaded, so it is also preemptible.

Disable preemption on 32bit-RT during value updates. There is no need to
disable interrupts on RT because the handler runs threaded. Therefore
disabling preemption is enough to guarantee that the update is not
interrupted.
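
For reference, a typical caller pairs the update helpers around the
64bit counter write and loops on the fetch side; a minimal sketch (the
stats struct, field and variable names are made up for illustration):

	struct pcpu_stats {
		u64			rx_bytes;
		struct u64_stats_sync	syncp;
	};

	/* writer side, e.g. in the (threaded) IRQ handler */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);

	/* reader side, retries if it raced with an update */
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->rx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));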

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/u64_stats_sync.h | 42 +++++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 14 deletions(-)

--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -66,7 +66,7 @@
 #include <linux/seqlock.h>
 
 struct u64_stats_sync {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG==32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	seqcount_t seq;
 #endif
 };
@@ -117,22 +117,26 @@ static inline void u64_stats_inc(u64_sta
 
 static inline void u64_stats_init(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	seqcount_init(&syncp->seq);
 #endif
 }
 
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_disable();
 	write_seqcount_begin(&syncp->seq);
 #endif
 }
 
 static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	write_seqcount_end(&syncp->seq);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_enable();
 #endif
 }
 
@@ -141,8 +145,11 @@ u64_stats_update_begin_irqsave(struct u6
 {
 	unsigned long flags = 0;
 
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	local_irq_save(flags);
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_disable();
+	else
+		local_irq_save(flags);
 	write_seqcount_begin(&syncp->seq);
 #endif
 	return flags;
@@ -152,15 +159,18 @@ static inline void
 u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
 				unsigned long flags)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	write_seqcount_end(&syncp->seq);
-	local_irq_restore(flags);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_enable();
+	else
+		local_irq_restore(flags);
 #endif
 }
 
 static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	return read_seqcount_begin(&syncp->seq);
 #else
 	return 0;
@@ -169,7 +179,7 @@ static inline unsigned int __u64_stats_f
 
 static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
 	preempt_disable();
 #endif
 	return __u64_stats_fetch_begin(syncp);
@@ -178,7 +188,7 @@ static inline unsigned int u64_stats_fet
 static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					   unsigned int start)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	return read_seqcount_retry(&syncp->seq, start);
 #else
 	return false;
@@ -188,7 +198,7 @@ static inline bool __u64_stats_fetch_ret
 static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
 	preempt_enable();
 #endif
 	return __u64_stats_fetch_retry(syncp, start);
@@ -202,7 +212,9 @@ static inline bool u64_stats_fetch_retry
  */
 static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
+	preempt_disable();
+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
 	local_irq_disable();
 #endif
 	return __u64_stats_fetch_begin(syncp);
@@ -211,7 +223,9 @@ static inline unsigned int u64_stats_fet
 static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					     unsigned int start)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
+	preempt_enable();
+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
 	local_irq_enable();
 #endif
 	return __u64_stats_fetch_retry(syncp, start);
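
For reference, with the preprocessor conditionals resolved, the writer
side on a 32bit PREEMPT_RT kernel after this patch reduces to the
following sketch:

	static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
	{
		/* RT: the writer must not be preempted mid-update */
		preempt_disable();
		write_seqcount_begin(&syncp->seq);
	}

	static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
	{
		write_seqcount_end(&syncp->seq);
		preempt_enable();
	}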