From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
Subject: powerpc: Add support for lazy preemption

Implement the powerpc pieces for lazy preempt.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/powerpc/Kconfig                   |  1 +
 arch/powerpc/include/asm/thread_info.h | 16 ++++++++++++----
 arch/powerpc/kernel/asm-offsets.c      |  1 +
 arch/powerpc/kernel/entry_32.S         | 23 ++++++++++++++++-------
 arch/powerpc/kernel/entry_64.S         | 24 +++++++++++++++++-------
 5 files changed, 47 insertions(+), 18 deletions(-)
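
For readers new to the lazy-preempt series: the entry_32.S and entry_64.S
hunks below implement, in assembly, a return-path preemption decision that
corresponds roughly to the C sketch here. The sketch is illustrative only;
should_resched_on_exit() is a made-up helper, and preempt_lazy_count plus
TIF_NEED_RESCHED_LAZY come from the generic lazy preemption infrastructure
this patch builds on.

	/* Illustrative sketch only, not part of this patch. */
	static inline bool should_resched_on_exit(struct thread_info *ti)
	{
		if (ti->preempt_count)			/* preemption disabled */
			return false;
		if (ti->flags & _TIF_NEED_RESCHED)	/* hard resched request */
			return true;
		if (ti->preempt_lazy_count)		/* lazy preemption disabled */
			return false;
		return ti->flags & _TIF_NEED_RESCHED_LAZY;
	}

Note also that moving TIF_RESTOREALL and TIF_NOERROR to bits 21 and 22
pushes them into the upper halfword of TI_FLAGS, which is why the 32-bit
flag tests below switch from andi./li to andis./lis with @h.
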
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -221,6 +221,7 @@ config PPC
 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select HAVE_RCU_TABLE_FREE
 	select HAVE_MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -30,6 +30,8 @@
 struct thread_info {
 	int		preempt_count;		/* 0 => preemptable,
 						   <0 => BUG */
+	int		preempt_lazy_count;	/* 0 => preemptable,
+						   <0 => BUG */
 	unsigned long	local_flags;		/* private flags for thread */
 #ifdef CONFIG_LIVEPATCH
 	unsigned long *livepatch_sp;
@@ -80,11 +82,12 @@ void arch_setup_new_exec(void);
 #define TIF_SINGLESTEP		8	/* singlestepping active */
 #define TIF_NOHZ		9	/* in adaptive nohz mode */
 #define TIF_SECCOMP		10	/* secure computing */
-#define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
-#define TIF_NOERROR		12	/* Force successful syscall return */
+
+#define TIF_NEED_RESCHED_LAZY	11	/* lazy rescheduling necessary */
+#define TIF_SYSCALL_TRACEPOINT	12	/* syscall tracepoint instrumentation */
+
 #define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
 #define TIF_UPROBE		14	/* breakpointed or single-stepping */
-#define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
 #define TIF_EMULATE_STACK_STORE	16	/* Is an instruction emulation
 						   for stack store? */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
@@ -93,6 +96,9 @@ void arch_setup_new_exec(void);
 #endif
 #define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_32BIT		20	/* 32 bit binary */
+#define TIF_RESTOREALL		21	/* Restore all regs (implies NOERROR) */
+#define TIF_NOERROR		22	/* Force successful syscall return */
+
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -112,6 +118,7 @@ void arch_setup_new_exec(void);
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
+#define _TIF_NEED_RESCHED_LAZY	(1<<TIF_NEED_RESCHED_LAZY)
 #define _TIF_FSCHECK		(1<<TIF_FSCHECK)
 #define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_DOTRACE	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
@@ -121,8 +128,9 @@ void arch_setup_new_exec(void);
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
 				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
-				 _TIF_FSCHECK)
+				 _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
 #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
 
 /* Bits in local_flags */
 /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -167,6 +167,7 @@ int main(void)
 	OFFSET(TI_FLAGS, thread_info, flags);
 	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
 	OFFSET(TI_PREEMPT, thread_info, preempt_count);
+	OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
 
 #ifdef CONFIG_PPC64
 	OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -401,7 +401,9 @@
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r2)
 	li	r8,-MAX_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+	lis	r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
+	ori	r0,r0, (_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
+	and.	r0,r9,r0
 	bne-	syscall_exit_work
 	cmplw	0,r3,r8
 	blt+	syscall_exit_cont
@@ -516,13 +518,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
 	b	syscall_dotrace_cont
 
 syscall_exit_work:
-	andi.	r0,r9,_TIF_RESTOREALL
+	andis.	r0,r9,_TIF_RESTOREALL@h
 	beq+	0f
 	REST_NVGPRS(r1)
 	b	2f
 0:	cmplw	0,r3,r8
 	blt+	1f
-	andi.	r0,r9,_TIF_NOERROR
+	andis.	r0,r9,_TIF_NOERROR@h
 	bne-	1f
 	lwz	r11,_CCR(r1)			/* Load CR */
 	neg	r3,r3
@@ -531,12 +533,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
 
 1:	stw	r6,RESULT(r1)	/* Save result */
 	stw	r3,GPR3(r1)	/* Update return value */
-2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
+2:	andis.	r0,r9,(_TIF_PERSYSCALL_MASK)@h
 	beq	4f
 
 	/* Clear per-syscall TIF flags if any are set.  */
 
-	lis	r11,(_TIF_PERSYSCALL_MASK)@h
 	addi	r12,r2,TI_FLAGS
 3:	lwarx	r8,0,r12
 	andc	r8,r8,r11
@@ -904,7 +906,14 @@ user_exc_return:		/* r10 contains MSR_KE
 	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 	bne	restore_kuap
 	andi.	r8,r8,_TIF_NEED_RESCHED
+	bne+	1f
+	lwz	r0,TI_PREEMPT_LAZY(r2)
+	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
+	bne	restore_kuap
+	lwz	r0,TI_FLAGS(r2)
+	andi.	r0,r0,_TIF_NEED_RESCHED_LAZY
 	beq+	restore_kuap
+1:
 	lwz	r3,_MSR(r1)
 	andi.	r0,r3,MSR_EE	/* interrupts off? */
 	beq	restore_kuap	/* don't schedule if so */
@@ -1225,7 +1234,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
 
 do_work:			/* r10 contains MSR_KERNEL here */
-	andi.	r0,r9,_TIF_NEED_RESCHED
+	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
 	beq	do_user_signal
 
 do_resched:			/* r10 contains MSR_KERNEL here */
@@ -1246,7 +1255,7 @@ do_resched:			/* r10 contains MSR_KERNEL
 	SYNC
 	MTMSRD(r10)		/* disable interrupts */
 	lwz	r9,TI_FLAGS(r2)
-	andi.	r0,r9,_TIF_NEED_RESCHED
+	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
 	bne-	do_resched
 	andi.	r0,r9,_TIF_USER_WORK_MASK
 	beq	restore_user
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -240,7 +240,9 @@ system_call:			/* label this so stack tr
 
 	ld	r9,TI_FLAGS(r12)
 	li	r11,-MAX_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+	lis	r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
+	ori	r0,r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
+	and.	r0,r9,r0
 	bne-	.Lsyscall_exit_work
 
 	andi.	r0,r8,MSR_FP
@@ -363,25 +365,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
 	   If TIF_NOERROR is set, just save r3 as it is. */
 
-	andi.	r0,r9,_TIF_RESTOREALL
+	andis.	r0,r9,_TIF_RESTOREALL@h
 	beq+	0f
 	REST_NVGPRS(r1)
 	b	2f
 0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
 	blt+	1f
-	andi.	r0,r9,_TIF_NOERROR
+	andis.	r0,r9,_TIF_NOERROR@h
 	bne-	1f
 	ld	r5,_CCR(r1)
 	neg	r3,r3
 	oris	r5,r5,0x1000	/* Set SO bit in CR */
 	std	r5,_CCR(r1)
 1:	std	r3,GPR3(r1)
-2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
+2:	andis.	r0,r9,(_TIF_PERSYSCALL_MASK)@h
 	beq	4f
 
 	/* Clear per-syscall TIF flags if any are set.  */
 
-	li	r11,_TIF_PERSYSCALL_MASK
+	lis	r11,(_TIF_PERSYSCALL_MASK)@h
 	addi	r12,r12,TI_FLAGS
 3:	ldarx	r10,0,r12
 	andc	r10,r10,r11
@@ -786,7 +788,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	bl	restore_math
 	b	restore
 #endif
-1:	andi.	r0,r4,_TIF_NEED_RESCHED
+1:	andi.	r0,r4,_TIF_NEED_RESCHED_MASK
 	beq	2f
 	bl	restore_interrupts
 	SCHEDULE_USER
@@ -848,10 +850,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
 #ifdef CONFIG_PREEMPTION
 	/* Check if we need to preempt */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	0,r8,0		/* if non-zero, just restore regs and return */
+	bne	restore
 	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne+	check_count
+
+	andi.	r0,r4,_TIF_NEED_RESCHED_LAZY
 	beq+	restore
+	lwz	r8,TI_PREEMPT_LAZY(r9)
+
 	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
+check_count:
 	cmpwi	cr0,r8,0
 	bne	restore
 	ld	r0,SOFTE(r1)