From 409c9265b1416ec33879fac9864e727f03af4931 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 8 Mar 2017 14:23:35 +0100
Subject: [PATCH 149/450] futex: workaround migrate_disable/enable in different
 context

migrate_disable()/migrate_enable() takes a different path in atomic() vs
!atomic() context. These little hacks ensure that we don't underflow /
overflow the migrate code counts while we lock the hb lock with interrupts
enabled and unlock it with interrupts disabled.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/futex.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/kernel/futex.c b/kernel/futex.c
index 046cd780d057..e70b5b516371 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2816,9 +2816,18 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 	 * lock handoff sequence.
 	 */
 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
+	/*
+	 * the migrate_disable() here disables migration in the in_atomic() fast
+	 * path which is enabled again in the following spin_unlock(). We have
+	 * one migrate_disable() pending in the slow-path which is reversed
+	 * after the raw_spin_unlock_irq() where we leave the atomic context.
+	 */
+	migrate_disable();
+
 	spin_unlock(q.lock_ptr);
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
+	migrate_enable();
 
 	if (ret) {
 		if (ret == 1)
@@ -2965,11 +2974,21 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 		 * observed.
 		 */
 		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+		/*
+		 * Magic trickery for now to make the RT migrate disable
+		 * logic happy. The following spin_unlock() happens with
+		 * interrupts disabled so the internal migrate_enable()
+		 * won't undo the migrate_disable() which was issued when
+		 * locking hb->lock.
+		 */
+		migrate_disable();
 		spin_unlock(&hb->lock);
 
 		/* drops pi_state->pi_mutex.wait_lock */
 		ret = wake_futex_pi(uaddr, uval, pi_state);
 
+		migrate_enable();
+
 		put_pi_state(pi_state);
 
 		/*
-- 
2.19.2
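
The sketch below is a minimal, hypothetical userspace model of the counting
problem the commit message describes; it is not kernel code and the names
(rt_spin_lock, normal_count, atomic_count, in_atomic_ctx) are stand-ins
invented for illustration, not the RT patchset's actual internals. It assumes
only what the patch states: on PREEMPT_RT a spinlock disables migration on
lock and re-enables it on unlock, and migrate_disable()/migrate_enable() take
a different path in atomic vs non-atomic context. Locking hb->lock with
interrupts enabled but dropping it with interrupts disabled would therefore
pair a slow-path disable with a fast-path enable unless the two extra calls
from the patch are inserted.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool in_atomic_ctx;  /* models in_atomic()/irqs_disabled()      */
	static int normal_count;    /* models the non-atomic (slow-path) count */
	static int atomic_count;    /* models the in_atomic() fast-path count  */

	static void migrate_disable(void)
	{
		if (in_atomic_ctx)
			atomic_count++;    /* fast path taken in atomic context */
		else
			normal_count++;    /* slow path taken otherwise         */
	}

	static void migrate_enable(void)
	{
		if (in_atomic_ctx) {
			atomic_count--;
			assert(atomic_count >= 0);  /* the underflow being avoided */
		} else {
			normal_count--;
			assert(normal_count >= 0);
		}
	}

	/* On RT, sleeping spinlocks disable/enable migration internally. */
	static void rt_spin_lock(void)   { migrate_disable(); }
	static void rt_spin_unlock(void) { migrate_enable(); }

	/* raw_spin_lock_irq()/raw_spin_unlock_irq() enter/leave atomic context. */
	static void raw_spin_lock_irq(void)   { in_atomic_ctx = true; }
	static void raw_spin_unlock_irq(void) { in_atomic_ctx = false; }

	int main(void)
	{
		rt_spin_lock();         /* hb->lock taken with interrupts enabled */
		raw_spin_lock_irq();    /* wait_lock taken, now in atomic context */

		migrate_disable();      /* workaround: pre-balance the fast-path  */
		                        /* enable done by the next unlock         */
		rt_spin_unlock();       /* hb->lock dropped inside atomic context */

		raw_spin_unlock_irq();  /* leave atomic context                   */
		migrate_enable();       /* reverse the still-pending slow-path    */
		                        /* disable from rt_spin_lock()            */

		printf("normal=%d atomic=%d\n", normal_count, atomic_count); /* 0 0 */
		return 0;
	}

If the extra migrate_disable()/migrate_enable() pair is removed from main(),
the model trips the underflow assert on the atomic count and leaves one
slow-path disable pending, which is the imbalance the patch works around.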