linuxkit/kernel/5.11.x-rt/patches/0129-mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 18 Aug 2020 10:30:00 +0200
Subject: [PATCH] mm: memcontrol: Provide a local_lock for per-CPU memcg_stock

The interrupts are disabled to ensure CPU-local access to the per-CPU
variable `memcg_stock'.

As the code inside the interrupt-disabled section acquires regular
spinlocks, which are converted to 'sleeping' spinlocks on a PREEMPT_RT
kernel, this conflicts with the RT semantics.

Convert it to a local_lock, which allows RT kernels to substitute it with
a real per-CPU lock. On non-RT kernels this maps to local_irq_save() as
before, but also provides lockdep coverage of the critical region.

No functional change.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/memcontrol.c | 31 ++++++++++++++++++-------------
1 file changed, 18 insertions(+), 13 deletions(-)
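
For reference, a minimal sketch of the local_lock pattern this patch applies
(illustrative only: demo_stock_pcp, demo_stock and demo_add are hypothetical
names, not part of mm/memcontrol.c):

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    /* Hypothetical per-CPU cache protected by a local_lock. */
    struct demo_stock_pcp {
            local_lock_t lock;
            unsigned int nr_items;
    };

    static DEFINE_PER_CPU(struct demo_stock_pcp, demo_stock) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void demo_add(unsigned int n)
    {
            unsigned long flags;

            /*
             * On !PREEMPT_RT this behaves like local_irq_save(); on
             * PREEMPT_RT it acquires a per-CPU lock instead, so the
             * critical section below may take sleeping spinlocks.
             */
            local_lock_irqsave(&demo_stock.lock, flags);
            this_cpu_ptr(&demo_stock)->nr_items += n;
            local_unlock_irqrestore(&demo_stock.lock, flags);
    }

The sketch uses a static INIT_LOCAL_LOCK() initialiser; the patch below
instead initialises each CPU's lock at runtime from mem_cgroup_init() via
local_lock_init().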
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2234,6 +2234,7 @@ void unlock_page_memcg(struct page *page
 EXPORT_SYMBOL(unlock_page_memcg);
 
 struct memcg_stock_pcp {
+	local_lock_t lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
@@ -2285,7 +2286,7 @@ static bool consume_stock(struct mem_cgr
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2293,7 +2294,7 @@ static bool consume_stock(struct mem_cgr
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 
 	return ret;
 }
@@ -2328,14 +2329,14 @@ static void drain_local_stock(struct wor
 	 * The only protection from memory hotplug vs. drain_stock races is
 	 * that we always operate on local CPU stock here with IRQ disabled
 	 */
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 /*
@@ -2347,7 +2348,7 @@ static void refill_stock(struct mem_cgro
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -2360,7 +2361,7 @@ static void refill_stock(struct mem_cgro
 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
 		drain_stock(stock);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 /*
@@ -3167,7 +3168,7 @@ static bool consume_obj_stock(struct obj
 	unsigned long flags;
 	bool ret = false;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3175,7 +3176,7 @@ static bool consume_obj_stock(struct obj
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 
 	return ret;
 }
@@ -3234,7 +3235,7 @@ static void refill_obj_stock(struct obj_
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3248,7 +3249,7 @@ static void refill_obj_stock(struct obj_
 	if (stock->nr_bytes > PAGE_SIZE)
 		drain_obj_stock(stock);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -7089,9 +7090,13 @@ static int __init mem_cgroup_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);
 
-	for_each_possible_cpu(cpu)
-		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
-			  drain_local_stock);
+	for_each_possible_cpu(cpu) {
+		struct memcg_stock_pcp *stock;
+
+		stock = per_cpu_ptr(&memcg_stock, cpu);
+		INIT_WORK(&stock->work, drain_local_stock);
+		local_lock_init(&stock->lock);
+	}
 
 	for_each_node(node) {
 		struct mem_cgroup_tree_per_node *rtpn;