Merge pull request #3492 from rn/kern-up

Update kernel to 5.4.30/4.19.114/4.14.175 and add 5.6.2
Commit b1c344828e, authored by Rolf Neugebauer on 2020-04-11 19:05:42 +01:00 and committed by GitHub.
GPG key ID 4AEE18F83AFDEB23: no known key found for this signature in the database.
469 changed files with 9563 additions and 43734 deletions
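For a downstream image, picking up this update is a one-line change to the kernel image tag in the YAML files below, followed by a rebuild. A minimal, hedged workflow sketch (minimal.yml is a hypothetical file that pins the kernel image the same way the examples in this diff do):

    # Hypothetical sketch; minimal.yml is assumed to contain
    # "image: linuxkit/kernel:5.4.30" as in the YAML diffs below.
    docker pull linuxkit/kernel:5.4.30   # fetch the updated kernel package
    linuxkit build minimal.yml           # rebuild the image set against it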

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,6 +1,6 @@
# This is an example for building the open source components of Docker for Mac
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyS0 page_poison=1"
init:
- linuxkit/vpnkit-expose-port:v0.7 # install vpnkit-expose-port and vpnkit-iptables-wrapper on host

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: console=ttyS1
ucode: intel-ucode.cpio
init:

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,6 +1,6 @@
# Simple example of using an external logging service
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -5,7 +5,7 @@
# for arm64 then the 'ucode' line in the kernel section can be left
# out.
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyAMA0"
ucode: ""
onboot:

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: console=ttyS1
ucode: intel-ucode.cpio
init:

@@ -1,7 +1,7 @@
# Minimal YAML to run a redis server (used at DockerCon'17)
# connect: nc localhost 6379
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:4.19.106-rt
image: linuxkit/kernel:5.4.28-rt
cmdline: "console=tty0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0 root=/dev/vda"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=ttyS0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -1,5 +1,5 @@
kernel:
image: linuxkit/kernel:5.4.28
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691

@@ -16,7 +16,6 @@ RUN apk add \
installkernel \
kmod \
elfutils-dev \
libunwind-dev \
linux-headers \
mpc1-dev \
mpfr-dev \
@@ -31,6 +30,9 @@ RUN apk add \
xz-dev \
zlib-dev
# libunwind-dev pkg is missing for s390x for now. Only install on other arch
RUN [ $(uname -m) != s390x ] && apk add libunwind-dev || true
ARG KERNEL_VERSION
ARG KERNEL_SERIES
ARG EXTRA
@@ -163,12 +165,14 @@ RUN case $(uname -m) in \
cp System.map /out && \
([ -n "${DEBUG}" ] && cp vmlinux /out || true)
# WireGuard
RUN curl -fsSL -o /wireguard.tar.xz "${WIREGUARD_URL}" && \
# WireGuard (skip kernels which have it in tree)
RUN if [ ! -d /linux/drivers/net/wireguard ]; then \
curl -fsSL -o /wireguard.tar.xz "${WIREGUARD_URL}" && \
echo "${WIREGUARD_SHA256} /wireguard.tar.xz" | sha256sum -c - && \
cp /wireguard.tar.xz /out/src/ && \
tar -C / --one-top-level=wireguard --strip-components=2 -xJf /wireguard.tar.xz "wireguard-linux-compat-${WIREGUARD_VERSION}/src" && \
make -j "$(getconf _NPROCESSORS_ONLN)" M="/wireguard" modules
make -j "$(getconf _NPROCESSORS_ONLN)" M="/wireguard" modules; \
fi
# Modules and Device Tree binaries
RUN make INSTALL_MOD_PATH=/tmp/kernel-modules modules_install && \
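The guard added above skips the out-of-tree wireguard-linux-compat build on kernels that already carry WireGuard in-tree (5.6 and later). A standalone sketch of the same detection, assuming /linux is the kernel source checkout used in this build container:

    #!/bin/sh
    # Sketch only: mirrors the in-tree WireGuard check from the RUN step above.
    if [ -d /linux/drivers/net/wireguard ]; then
        echo "WireGuard is in-tree; skipping wireguard-linux-compat"
    else
        echo "building wireguard-linux-compat out of tree"
        # curl, sha256sum -c, tar, and make steps as in the Dockerfile
    fi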

@@ -252,20 +252,21 @@ endef
# Debug targets only for latest stable and LTS stable
#
ifeq ($(ARCH),x86_64)
$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,5.6.2,5.6.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,5.4.28,5.4.x,-rt,))
$(eval $(call kernel,4.19.113,4.19.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.19.113,4.19.x,,-dbg))
$(eval $(call kernel,4.19.106,4.19.x,-rt,))
$(eval $(call kernel,4.14.174,4.14.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.19.114,4.19.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.19.114,4.19.x,,-dbg))
$(eval $(call kernel,4.14.175,4.14.x,$(EXTRA),$(DEBUG)))
else ifeq ($(ARCH),aarch64)
$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,5.6.2,5.6.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,5.4.28,5.4.x,-rt,))
$(eval $(call kernel,4.19.106,4.19.x,-rt,))
else ifeq ($(ARCH),s390x)
$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,5.6.2,5.6.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG)))
endif
# Target for kernel config
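Each $(eval $(call kernel,<version>,<series>,<suffix>,<debug>)) line above declares one kernel flavour to build and publish, so the matrix after this change is 5.6.2 and 5.4.30 plus the -rt and LTS lines kept at their existing versions. A hedged sketch for checking that the published packages match (the linuxkit/kernel:<version> tag scheme is taken from the YAML files earlier in this diff; suffixed variants such as 5.4.28-rt or 4.19.114-dbg are assumed to follow the same scheme):

    # Assumed verification sketch, not part of the build system.
    for tag in 5.6.2 5.4.30 4.19.114 4.14.175; do
        docker pull "linuxkit/kernel:${tag}" || echo "tag ${tag} not found"
    done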

@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/x86 4.14.174 Kernel Configuration
# Linux/x86 4.14.175 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y

@@ -1,20 +0,0 @@
CONFIG_SLUB_DEBUG=y
# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
CONFIG_SLUB=y
# CONFIG_SLAB_FREELIST_HARDENED is not set
CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
CONFIG_PREEMPT=y
CONFIG_PREEMPT_RT_BASE=y
CONFIG_HAVE_PREEMPT_LAZY=y
CONFIG_PREEMPT_LAZY=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT__LL is not set
# CONFIG_PREEMPT_RTB is not set
CONFIG_PREEMPT_RT_FULL=y
CONFIG_PREEMPT_COUNT=y
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
CONFIG_DEBUG_PREEMPT=y
# CONFIG_PREEMPT_TRACER is not set
CONFIG_HZ_1000=y
CONFIG_HZ=1000

@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/x86 4.19.113 Kernel Configuration
# Linux/x86 4.19.114 Kernel Configuration
#
#

@@ -1,22 +0,0 @@
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_PREEMPT_RCU=y
CONFIG_TASKS_RCU=y
CONFIG_SLUB_DEBUG=y
# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
CONFIG_SLUB=y
# CONFIG_SLAB_FREELIST_HARDENED is not set
CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
CONFIG_PREEMPT=y
CONFIG_PREEMPT_RT_BASE=y
CONFIG_HAVE_PREEMPT_LAZY=y
CONFIG_PREEMPT_LAZY=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT__LL is not set
# CONFIG_PREEMPT_RTB is not set
CONFIG_PREEMPT_RT_FULL=y
CONFIG_PREEMPT_COUNT=y
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
CONFIG_DEBUG_PREEMPT=y
# CONFIG_PREEMPT_TRACER is not set

@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/arm64 5.4.28 Kernel Configuration
# Linux/arm64 5.4.30 Kernel Configuration
#
#

@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/s390 5.4.28 Kernel Configuration
# Linux/s390 5.4.30 Kernel Configuration
#
#

@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/x86 5.4.28 Kernel Configuration
# Linux/x86 5.4.30 Kernel Configuration
#
#

File diff suppressed because it is too large.

kernel/config-5.6.x-s390x (new file, 3738 lines): file diff suppressed because it is too large.

kernel/config-5.6.x-x86_64 (new file, 4699 lines): file diff suppressed because it is too large.

@@ -1,4 +1,4 @@
From 56acceb9b7252dc0c9f3ee01625a9dad8461b8b7 Mon Sep 17 00:00:00 2001
From 24796cb1c1993f1d90742d39eda9cbec7ba9f93f Mon Sep 17 00:00:00 2001
From: Cheng-mean Liu <soccerl@microsoft.com>
Date: Tue, 11 Jul 2017 16:58:26 -0700
Subject: [PATCH 01/21] NVDIMM: reduced ND_MIN_NAMESPACE_SIZE from 4MB to 4KB

@@ -1,4 +1,4 @@
From 8c2062401403c8a9a203af268d0ec69ddff1103c Mon Sep 17 00:00:00 2001
From b438267e99a254b7429bbfba070ab6a10d7aa6aa Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:00 -0700
Subject: [PATCH 02/21] hyper-v: trace vmbus_on_msg_dpc()

@@ -1,4 +1,4 @@
From c2b71b1d0a9c4c7eefe8da97ffc4baa4615fa6e5 Mon Sep 17 00:00:00 2001
From 72a91023eeacb8a23aa133ec2eb990fa03b32bc5 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:01 -0700
Subject: [PATCH 03/21] hyper-v: trace vmbus_on_message()

@@ -1,4 +1,4 @@
From 595d68c0b18c7ebfaa3794fd22ef7b50d5e7bf40 Mon Sep 17 00:00:00 2001
From b4005dadd829aed2039d665cee1881f49a8b63a1 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:02 -0700
Subject: [PATCH 04/21] hyper-v: trace vmbus_onoffer()

@@ -1,4 +1,4 @@
From 2955351528e97452a9dae475eb64441f2dfefde9 Mon Sep 17 00:00:00 2001
From 80655cdbb7c8af7ac4e81fe714a58316d0c73d39 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:03 -0700
Subject: [PATCH 05/21] hyper-v: trace vmbus_onoffer_rescind()

@@ -1,4 +1,4 @@
From 7ec50c0fdbf7dd576cc7fcd3357fc804a2e4a4de Mon Sep 17 00:00:00 2001
From 0c0455f95486b47ff302b04bfcb9d218b615d3f9 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:04 -0700
Subject: [PATCH 06/21] hyper-v: trace vmbus_onopen_result()

@@ -1,4 +1,4 @@
From 588dd389b2928b24daa38e901d9145116c2db0d7 Mon Sep 17 00:00:00 2001
From 6e88aa6f249cc7f482feb19a00810a9a6022ab25 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:05 -0700
Subject: [PATCH 07/21] hyper-v: trace vmbus_ongpadl_created()

@@ -1,4 +1,4 @@
From 219a97671a3e1ae1914a62c5e7e269df5c68d82b Mon Sep 17 00:00:00 2001
From ec55daf877edc19269dd16f0ae0ae926bafeed89 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:06 -0700
Subject: [PATCH 08/21] hyper-v: trace vmbus_ongpadl_torndown()

@@ -1,4 +1,4 @@
From 18ffc33bbae039509d07f2cc2054499560532859 Mon Sep 17 00:00:00 2001
From afa9cce6947186ec0b17b0b9172c9d8a5c2d4b14 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:07 -0700
Subject: [PATCH 09/21] hyper-v: trace vmbus_onversion_response()

@@ -1,4 +1,4 @@
From cd66081931b2dc974da2c9de5ea3a84886282185 Mon Sep 17 00:00:00 2001
From ee0128fbee6aa73c5f832990c86a62f95d3fb5f9 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:08 -0700
Subject: [PATCH 10/21] hyper-v: trace vmbus_request_offers()

@@ -1,4 +1,4 @@
From f0fa37ea2a46cb24bf91ea3492623c16381b73d2 Mon Sep 17 00:00:00 2001
From 6a6c0474ea0640be0f79390c83b42ce0197e08bf Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:09 -0700
Subject: [PATCH 11/21] hyper-v: trace vmbus_open()

@@ -1,4 +1,4 @@
From 563dc5d22087ed6328c45b59d8b2e498acfd7513 Mon Sep 17 00:00:00 2001
From 5df7a692eacbe0b42f593bda2b81b1d9e9d154c8 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:10 -0700
Subject: [PATCH 12/21] hyper-v: trace vmbus_close_internal()

@@ -1,4 +1,4 @@
From 2090b4ac2819e64573787437960b92afccd4ccf3 Mon Sep 17 00:00:00 2001
From 733fc90ecf658625cf227ee85a242dc54ee165d8 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:11 -0700
Subject: [PATCH 13/21] hyper-v: trace vmbus_establish_gpadl()

@@ -1,4 +1,4 @@
From 89aeff0aa68cb9b1dc1adf28043f7f7f32386e7f Mon Sep 17 00:00:00 2001
From 514148f54891b8e9bdcf65bc06087f2992e2e1c6 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:12 -0700
Subject: [PATCH 14/21] hyper-v: trace vmbus_teardown_gpadl()

@@ -1,4 +1,4 @@
From 9671d96306b5b5b6f1b17822cd2623e2d1f596bb Mon Sep 17 00:00:00 2001
From 7aa5e1d4543decdf2d210df981b9a12d430fed60 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:13 -0700
Subject: [PATCH 15/21] hyper-v: trace vmbus_negotiate_version()

@@ -1,4 +1,4 @@
From 3d2d34fd831e942463030924f423bda8568ce912 Mon Sep 17 00:00:00 2001
From 68fe2679ce069ae3dee035f398d56a98979203c9 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:14 -0700
Subject: [PATCH 16/21] hyper-v: trace vmbus_release_relid()

@@ -1,4 +1,4 @@
From 3680bc215f62a93c46e8801ba8d988160d27db8b Mon Sep 17 00:00:00 2001
From dda4828610e0a1900af6b7c22c5fb99c73b14305 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:15 -0700
Subject: [PATCH 17/21] hyper-v: trace vmbus_send_tl_connect_request()

@@ -1,4 +1,4 @@
From 1a5e65cb9456a2d26e60b0249c470f4c90899108 Mon Sep 17 00:00:00 2001
From 5d99bb75cf678b865f7d7f4befa78a7565711830 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Sun, 29 Oct 2017 12:21:16 -0700
Subject: [PATCH 18/21] hyper-v: trace channel events

@@ -1,4 +1,4 @@
From 0da3e404cb0a4a6876a85f80260bd653dabc7826 Mon Sep 17 00:00:00 2001
From ac3a5d331cbb4ab7551b63a72797b35a5b7e6b3f Mon Sep 17 00:00:00 2001
From: Christian Borntraeger <borntraeger@de.ibm.com>
Date: Tue, 12 Dec 2017 09:08:35 +0100
Subject: [PATCH 19/21] serial: forbid 8250 on s390

@@ -1,4 +1,4 @@
From 3d5ab764aa966d80a2f8df397ade599a8980f443 Mon Sep 17 00:00:00 2001
From 8d79ab8e5806f8b7a8f2dc8fd6857780c8fb79f5 Mon Sep 17 00:00:00 2001
From: Cathy Avery <cavery@redhat.com>
Date: Tue, 31 Oct 2017 08:52:06 -0400
Subject: [PATCH 20/21] scsi: storvsc: Allow only one remove lun work item to

@@ -1,4 +1,4 @@
From 284d1e2fdfa1134ba366198e0e1bd671147399f9 Mon Sep 17 00:00:00 2001
From fe24c8d1c22e600fc8eaa1ea2ae9a8c0091fc0d5 Mon Sep 17 00:00:00 2001
From: Long Li <longli@microsoft.com>
Date: Tue, 31 Oct 2017 14:58:08 -0700
Subject: [PATCH 21/21] scsi: storvsc: Avoid excessive host scan on controller

@@ -1,209 +0,0 @@
From d831f2ac120e802a4ff642f48f6b88e543665514 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:18 +0200
Subject: [PATCH 001/328] ARM: at91: add TCB registers definitions
Add registers and bits definitions for the timer counter blocks found on
Atmel ARM SoCs.
Tested-by: Alexander Dahl <ada@thorsis.com>
Tested-by: Andras Szemzo <szemzo.andras@gmail.com>
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/soc/at91/atmel_tcb.h | 183 +++++++++++++++++++++++++++++++++++
1 file changed, 183 insertions(+)
create mode 100644 include/soc/at91/atmel_tcb.h
diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h
new file mode 100644
index 000000000000..657e234b1483
--- /dev/null
+++ b/include/soc/at91/atmel_tcb.h
@@ -0,0 +1,183 @@
+//SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Microchip */
+
+#ifndef __SOC_ATMEL_TCB_H
+#define __SOC_ATMEL_TCB_H
+
+/* Channel registers */
+#define ATMEL_TC_COFFS(c) ((c) * 0x40)
+#define ATMEL_TC_CCR(c) ATMEL_TC_COFFS(c)
+#define ATMEL_TC_CMR(c) (ATMEL_TC_COFFS(c) + 0x4)
+#define ATMEL_TC_SMMR(c) (ATMEL_TC_COFFS(c) + 0x8)
+#define ATMEL_TC_RAB(c) (ATMEL_TC_COFFS(c) + 0xc)
+#define ATMEL_TC_CV(c) (ATMEL_TC_COFFS(c) + 0x10)
+#define ATMEL_TC_RA(c) (ATMEL_TC_COFFS(c) + 0x14)
+#define ATMEL_TC_RB(c) (ATMEL_TC_COFFS(c) + 0x18)
+#define ATMEL_TC_RC(c) (ATMEL_TC_COFFS(c) + 0x1c)
+#define ATMEL_TC_SR(c) (ATMEL_TC_COFFS(c) + 0x20)
+#define ATMEL_TC_IER(c) (ATMEL_TC_COFFS(c) + 0x24)
+#define ATMEL_TC_IDR(c) (ATMEL_TC_COFFS(c) + 0x28)
+#define ATMEL_TC_IMR(c) (ATMEL_TC_COFFS(c) + 0x2c)
+#define ATMEL_TC_EMR(c) (ATMEL_TC_COFFS(c) + 0x30)
+
+/* Block registers */
+#define ATMEL_TC_BCR 0xc0
+#define ATMEL_TC_BMR 0xc4
+#define ATMEL_TC_QIER 0xc8
+#define ATMEL_TC_QIDR 0xcc
+#define ATMEL_TC_QIMR 0xd0
+#define ATMEL_TC_QISR 0xd4
+#define ATMEL_TC_FMR 0xd8
+#define ATMEL_TC_WPMR 0xe4
+
+/* CCR fields */
+#define ATMEL_TC_CCR_CLKEN BIT(0)
+#define ATMEL_TC_CCR_CLKDIS BIT(1)
+#define ATMEL_TC_CCR_SWTRG BIT(2)
+
+/* Common CMR fields */
+#define ATMEL_TC_CMR_TCLKS_MSK GENMASK(2, 0)
+#define ATMEL_TC_CMR_TCLK(x) (x)
+#define ATMEL_TC_CMR_XC(x) ((x) + 5)
+#define ATMEL_TC_CMR_CLKI BIT(3)
+#define ATMEL_TC_CMR_BURST_MSK GENMASK(5, 4)
+#define ATMEL_TC_CMR_BURST_XC(x) (((x) + 1) << 4)
+#define ATMEL_TC_CMR_WAVE BIT(15)
+
+/* Capture mode CMR fields */
+#define ATMEL_TC_CMR_LDBSTOP BIT(6)
+#define ATMEL_TC_CMR_LDBDIS BIT(7)
+#define ATMEL_TC_CMR_ETRGEDG_MSK GENMASK(9, 8)
+#define ATMEL_TC_CMR_ETRGEDG_NONE (0 << 8)
+#define ATMEL_TC_CMR_ETRGEDG_RISING (1 << 8)
+#define ATMEL_TC_CMR_ETRGEDG_FALLING (2 << 8)
+#define ATMEL_TC_CMR_ETRGEDG_BOTH (3 << 8)
+#define ATMEL_TC_CMR_ABETRG BIT(10)
+#define ATMEL_TC_CMR_CPCTRG BIT(14)
+#define ATMEL_TC_CMR_LDRA_MSK GENMASK(17, 16)
+#define ATMEL_TC_CMR_LDRA_NONE (0 << 16)
+#define ATMEL_TC_CMR_LDRA_RISING (1 << 16)
+#define ATMEL_TC_CMR_LDRA_FALLING (2 << 16)
+#define ATMEL_TC_CMR_LDRA_BOTH (3 << 16)
+#define ATMEL_TC_CMR_LDRB_MSK GENMASK(19, 18)
+#define ATMEL_TC_CMR_LDRB_NONE (0 << 18)
+#define ATMEL_TC_CMR_LDRB_RISING (1 << 18)
+#define ATMEL_TC_CMR_LDRB_FALLING (2 << 18)
+#define ATMEL_TC_CMR_LDRB_BOTH (3 << 18)
+#define ATMEL_TC_CMR_SBSMPLR_MSK GENMASK(22, 20)
+#define ATMEL_TC_CMR_SBSMPLR(x) ((x) << 20)
+
+/* Waveform mode CMR fields */
+#define ATMEL_TC_CMR_CPCSTOP BIT(6)
+#define ATMEL_TC_CMR_CPCDIS BIT(7)
+#define ATMEL_TC_CMR_EEVTEDG_MSK GENMASK(9, 8)
+#define ATMEL_TC_CMR_EEVTEDG_NONE (0 << 8)
+#define ATMEL_TC_CMR_EEVTEDG_RISING (1 << 8)
+#define ATMEL_TC_CMR_EEVTEDG_FALLING (2 << 8)
+#define ATMEL_TC_CMR_EEVTEDG_BOTH (3 << 8)
+#define ATMEL_TC_CMR_EEVT_MSK GENMASK(11, 10)
+#define ATMEL_TC_CMR_EEVT_XC(x) (((x) + 1) << 10)
+#define ATMEL_TC_CMR_ENETRG BIT(12)
+#define ATMEL_TC_CMR_WAVESEL_MSK GENMASK(14, 13)
+#define ATMEL_TC_CMR_WAVESEL_UP (0 << 13)
+#define ATMEL_TC_CMR_WAVESEL_UPDOWN (1 << 13)
+#define ATMEL_TC_CMR_WAVESEL_UPRC (2 << 13)
+#define ATMEL_TC_CMR_WAVESEL_UPDOWNRC (3 << 13)
+#define ATMEL_TC_CMR_ACPA_MSK GENMASK(17, 16)
+#define ATMEL_TC_CMR_ACPA(a) (ATMEL_TC_CMR_ACTION_##a << 16)
+#define ATMEL_TC_CMR_ACPC_MSK GENMASK(19, 18)
+#define ATMEL_TC_CMR_ACPC(a) (ATMEL_TC_CMR_ACTION_##a << 18)
+#define ATMEL_TC_CMR_AEEVT_MSK GENMASK(21, 20)
+#define ATMEL_TC_CMR_AEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 20)
+#define ATMEL_TC_CMR_ASWTRG_MSK GENMASK(23, 22)
+#define ATMEL_TC_CMR_ASWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 22)
+#define ATMEL_TC_CMR_BCPB_MSK GENMASK(25, 24)
+#define ATMEL_TC_CMR_BCPB(a) (ATMEL_TC_CMR_ACTION_##a << 24)
+#define ATMEL_TC_CMR_BCPC_MSK GENMASK(27, 26)
+#define ATMEL_TC_CMR_BCPC(a) (ATMEL_TC_CMR_ACTION_##a << 26)
+#define ATMEL_TC_CMR_BEEVT_MSK GENMASK(29, 28)
+#define ATMEL_TC_CMR_BEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 28)
+#define ATMEL_TC_CMR_BSWTRG_MSK GENMASK(31, 30)
+#define ATMEL_TC_CMR_BSWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 30)
+#define ATMEL_TC_CMR_ACTION_NONE 0
+#define ATMEL_TC_CMR_ACTION_SET 1
+#define ATMEL_TC_CMR_ACTION_CLEAR 2
+#define ATMEL_TC_CMR_ACTION_TOGGLE 3
+
+/* SMMR fields */
+#define ATMEL_TC_SMMR_GCEN BIT(0)
+#define ATMEL_TC_SMMR_DOWN BIT(1)
+
+/* SR/IER/IDR/IMR fields */
+#define ATMEL_TC_COVFS BIT(0)
+#define ATMEL_TC_LOVRS BIT(1)
+#define ATMEL_TC_CPAS BIT(2)
+#define ATMEL_TC_CPBS BIT(3)
+#define ATMEL_TC_CPCS BIT(4)
+#define ATMEL_TC_LDRAS BIT(5)
+#define ATMEL_TC_LDRBS BIT(6)
+#define ATMEL_TC_ETRGS BIT(7)
+#define ATMEL_TC_CLKSTA BIT(16)
+#define ATMEL_TC_MTIOA BIT(17)
+#define ATMEL_TC_MTIOB BIT(18)
+
+/* EMR fields */
+#define ATMEL_TC_EMR_TRIGSRCA_MSK GENMASK(1, 0)
+#define ATMEL_TC_EMR_TRIGSRCA_TIOA 0
+#define ATMEL_TC_EMR_TRIGSRCA_PWMX 1
+#define ATMEL_TC_EMR_TRIGSRCB_MSK GENMASK(5, 4)
+#define ATMEL_TC_EMR_TRIGSRCB_TIOB (0 << 4)
+#define ATMEL_TC_EMR_TRIGSRCB_PWM (1 << 4)
+#define ATMEL_TC_EMR_NOCLKDIV BIT(8)
+
+/* BCR fields */
+#define ATMEL_TC_BCR_SYNC BIT(0)
+
+/* BMR fields */
+#define ATMEL_TC_BMR_TCXC_MSK(c) GENMASK(((c) * 2) + 1, (c) * 2)
+#define ATMEL_TC_BMR_TCXC(x, c) ((x) << (2 * (c)))
+#define ATMEL_TC_BMR_QDEN BIT(8)
+#define ATMEL_TC_BMR_POSEN BIT(9)
+#define ATMEL_TC_BMR_SPEEDEN BIT(10)
+#define ATMEL_TC_BMR_QDTRANS BIT(11)
+#define ATMEL_TC_BMR_EDGPHA BIT(12)
+#define ATMEL_TC_BMR_INVA BIT(13)
+#define ATMEL_TC_BMR_INVB BIT(14)
+#define ATMEL_TC_BMR_INVIDX BIT(15)
+#define ATMEL_TC_BMR_SWAP BIT(16)
+#define ATMEL_TC_BMR_IDXPHB BIT(17)
+#define ATMEL_TC_BMR_AUTOC BIT(18)
+#define ATMEL_TC_MAXFILT_MSK GENMASK(25, 20)
+#define ATMEL_TC_MAXFILT(x) (((x) - 1) << 20)
+#define ATMEL_TC_MAXCMP_MSK GENMASK(29, 26)
+#define ATMEL_TC_MAXCMP(x) ((x) << 26)
+
+/* QEDC fields */
+#define ATMEL_TC_QEDC_IDX BIT(0)
+#define ATMEL_TC_QEDC_DIRCHG BIT(1)
+#define ATMEL_TC_QEDC_QERR BIT(2)
+#define ATMEL_TC_QEDC_MPE BIT(3)
+#define ATMEL_TC_QEDC_DIR BIT(8)
+
+/* FMR fields */
+#define ATMEL_TC_FMR_ENCF(x) BIT(x)
+
+/* WPMR fields */
+#define ATMEL_TC_WPMR_WPKEY (0x54494d << 8)
+#define ATMEL_TC_WPMR_WPEN BIT(0)
+
+static const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, };
+
+static const struct of_device_id atmel_tcb_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-tcb",
+ .data = (void *)16,
+ }, {
+ .compatible = "atmel,at91sam9x5-tcb",
+ .data = (void *)32,
+ }, {
+ /* sentinel */
+ }
+};
+
+#endif /* __SOC_ATMEL_TCB_H */
--
2.25.1

@@ -1,484 +0,0 @@
From a8f6e3cf352d669d8b870469ab3bff8fc64c3367 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:19 +0200
Subject: [PATCH 002/328] clocksource/drivers: Add a new driver for the Atmel
ARM TC blocks
Add a driver for the Atmel Timer Counter Blocks. This driver provides a
clocksource and two clockevent devices.
One of the clockevent device is linked to the clocksource counter and so it
will run at the same frequency. This will be used when there is only on TCB
channel available for timers.
The other clockevent device runs on a separate TCB channel when available.
This driver uses regmap and syscon to be able to probe early in the boot
and avoid having to switch on the TCB clocksource later. Using regmap also
means that unused TCB channels may be used by other drivers (PWM for
example). read/writel are still used to access channel specific registers
to avoid the performance impact of regmap (mainly locking).
Tested-by: Alexander Dahl <ada@thorsis.com>
Tested-by: Andras Szemzo <szemzo.andras@gmail.com>
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/clocksource/Kconfig | 8 +
drivers/clocksource/Makefile | 3 +-
drivers/clocksource/timer-atmel-tcb.c | 410 ++++++++++++++++++++++++++
3 files changed, 420 insertions(+), 1 deletion(-)
create mode 100644 drivers/clocksource/timer-atmel-tcb.c
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4d37f018d846..0ab22e7037f4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -415,6 +415,14 @@ config ATMEL_ST
help
Support for the Atmel ST timer.
+config ATMEL_ARM_TCB_CLKSRC
+ bool "Microchip ARM TC Block" if COMPILE_TEST
+ select REGMAP_MMIO
+ depends on GENERIC_CLOCKEVENTS
+ help
+ This enables build of clocksource and clockevent driver for
+ the integrated Timer Counter Blocks in Microchip ARM SoCs.
+
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index db51b2427e8a..0df9384a1230 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,7 +3,8 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o
obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
-obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
+obj-$(CONFIG_ATMEL_ARM_TCB_CLKSRC) += timer-atmel-tcb.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
new file mode 100644
index 000000000000..21fbe430f91b
--- /dev/null
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/sched_clock.h>
+#include <soc/at91/atmel_tcb.h>
+
+struct atmel_tcb_clksrc {
+ struct clocksource clksrc;
+ struct clock_event_device clkevt;
+ struct regmap *regmap;
+ void __iomem *base;
+ struct clk *clk[2];
+ char name[20];
+ int channels[2];
+ int bits;
+ int irq;
+ struct {
+ u32 cmr;
+ u32 imr;
+ u32 rc;
+ bool clken;
+ } cache[2];
+ u32 bmr_cache;
+ bool registered;
+ bool clk_enabled;
+};
+
+static struct atmel_tcb_clksrc tc;
+
+static struct clk *tcb_clk_get(struct device_node *node, int channel)
+{
+ struct clk *clk;
+ char clk_name[] = "t0_clk";
+
+ clk_name[1] += channel;
+ clk = of_clk_get_by_name(node->parent, clk_name);
+ if (!IS_ERR(clk))
+ return clk;
+
+ return of_clk_get_by_name(node->parent, "t0_clk");
+}
+
+/*
+ * Clocksource and clockevent using the same channel(s)
+ */
+static u64 tc_get_cycles(struct clocksource *cs)
+{
+ u32 lower, upper;
+
+ do {
+ upper = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1]));
+ lower = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0]));
+ } while (upper != readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1])));
+
+ return (upper << 16) | lower;
+}
+
+static u64 tc_get_cycles32(struct clocksource *cs)
+{
+ return readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0]));
+}
+
+static u64 notrace tc_sched_clock_read(void)
+{
+ return tc_get_cycles(&tc.clksrc);
+}
+
+static u64 notrace tc_sched_clock_read32(void)
+{
+ return tc_get_cycles32(&tc.clksrc);
+}
+
+static int tcb_clkevt_next_event(unsigned long delta,
+ struct clock_event_device *d)
+{
+ u32 old, next, cur;
+
+ old = readl(tc.base + ATMEL_TC_CV(tc.channels[0]));
+ next = old + delta;
+ writel(next, tc.base + ATMEL_TC_RC(tc.channels[0]));
+ cur = readl(tc.base + ATMEL_TC_CV(tc.channels[0]));
+
+ /* check whether the delta elapsed while setting the register */
+ if ((next < old && cur < old && cur > next) ||
+ (next > old && (cur < old || cur > next))) {
+ /*
+ * Clear the CPCS bit in the status register to avoid
+ * generating a spurious interrupt next time a valid
+ * timer event is configured.
+ */
+ old = readl(tc.base + ATMEL_TC_SR(tc.channels[0]));
+ return -ETIME;
+ }
+
+ writel(ATMEL_TC_CPCS, tc.base + ATMEL_TC_IER(tc.channels[0]));
+
+ return 0;
+}
+
+static irqreturn_t tc_clkevt_irq(int irq, void *handle)
+{
+ unsigned int sr;
+
+ sr = readl(tc.base + ATMEL_TC_SR(tc.channels[0]));
+ if (sr & ATMEL_TC_CPCS) {
+ tc.clkevt.event_handler(&tc.clkevt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int tcb_clkevt_oneshot(struct clock_event_device *dev)
+{
+ if (clockevent_state_oneshot(dev))
+ return 0;
+
+ /*
+ * Because both clockevent devices may share the same IRQ, we don't want
+ * the less likely one to stay requested
+ */
+ return request_irq(tc.irq, tc_clkevt_irq, IRQF_TIMER | IRQF_SHARED,
+ tc.name, &tc);
+}
+
+static int tcb_clkevt_shutdown(struct clock_event_device *dev)
+{
+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[0]));
+ if (tc.bits == 16)
+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[1]));
+
+ if (!clockevent_state_detached(dev))
+ free_irq(tc.irq, &tc);
+
+ return 0;
+}
+
+static void __init tcb_setup_dual_chan(struct atmel_tcb_clksrc *tc,
+ int mck_divisor_idx)
+{
+ /* first channel: waveform mode, input mclk/8, clock TIOA on overflow */
+ writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_CMR_WAVE
+ | ATMEL_TC_CMR_WAVESEL_UP /* free-run */
+ | ATMEL_TC_CMR_ACPA(SET) /* TIOA rises at 0 */
+ | ATMEL_TC_CMR_ACPC(CLEAR), /* (duty cycle 50%) */
+ tc->base + ATMEL_TC_CMR(tc->channels[0]));
+ writel(0x0000, tc->base + ATMEL_TC_RA(tc->channels[0]));
+ writel(0x8000, tc->base + ATMEL_TC_RC(tc->channels[0]));
+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */
+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0]));
+
+ /* second channel: waveform mode, input TIOA */
+ writel(ATMEL_TC_CMR_XC(tc->channels[1]) /* input: TIOA */
+ | ATMEL_TC_CMR_WAVE
+ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */
+ tc->base + ATMEL_TC_CMR(tc->channels[1]));
+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[1])); /* no irqs */
+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[1]));
+
+ /* chain both channel, we assume the previous channel */
+ regmap_write(tc->regmap, ATMEL_TC_BMR,
+ ATMEL_TC_BMR_TCXC(1 + tc->channels[1], tc->channels[1]));
+ /* then reset all the timers */
+ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
+}
+
+static void __init tcb_setup_single_chan(struct atmel_tcb_clksrc *tc,
+ int mck_divisor_idx)
+{
+ /* channel 0: waveform mode, input mclk/8 */
+ writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_CMR_WAVE
+ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */
+ tc->base + ATMEL_TC_CMR(tc->channels[0]));
+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */
+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0]));
+
+ /* then reset all the timers */
+ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
+}
+
+static void tc_clksrc_suspend(struct clocksource *cs)
+{
+ int i;
+
+ for (i = 0; i < 1 + (tc.bits == 16); i++) {
+ tc.cache[i].cmr = readl(tc.base + ATMEL_TC_CMR(tc.channels[i]));
+ tc.cache[i].imr = readl(tc.base + ATMEL_TC_IMR(tc.channels[i]));
+ tc.cache[i].rc = readl(tc.base + ATMEL_TC_RC(tc.channels[i]));
+ tc.cache[i].clken = !!(readl(tc.base +
+ ATMEL_TC_SR(tc.channels[i])) &
+ ATMEL_TC_CLKSTA);
+ }
+
+ if (tc.bits == 16)
+ regmap_read(tc.regmap, ATMEL_TC_BMR, &tc.bmr_cache);
+}
+
+static void tc_clksrc_resume(struct clocksource *cs)
+{
+ int i;
+
+ for (i = 0; i < 1 + (tc.bits == 16); i++) {
+ /* Restore registers for the channel, RA and RB are not used */
+ writel(tc.cache[i].cmr, tc.base + ATMEL_TC_CMR(tc.channels[i]));
+ writel(tc.cache[i].rc, tc.base + ATMEL_TC_RC(tc.channels[i]));
+ writel(0, tc.base + ATMEL_TC_RA(tc.channels[i]));
+ writel(0, tc.base + ATMEL_TC_RB(tc.channels[i]));
+ /* Disable all the interrupts */
+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[i]));
+ /* Reenable interrupts that were enabled before suspending */
+ writel(tc.cache[i].imr, tc.base + ATMEL_TC_IER(tc.channels[i]));
+
+ /* Start the clock if it was used */
+ if (tc.cache[i].clken)
+ writel(ATMEL_TC_CCR_CLKEN, tc.base +
+ ATMEL_TC_CCR(tc.channels[i]));
+ }
+
+ /* in case of dual channel, chain channels */
+ if (tc.bits == 16)
+ regmap_write(tc.regmap, ATMEL_TC_BMR, tc.bmr_cache);
+ /* Finally, trigger all the channels*/
+ regmap_write(tc.regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
+}
+
+static int __init tcb_clksrc_register(struct device_node *node,
+ struct regmap *regmap, void __iomem *base,
+ int channel, int channel1, int irq,
+ int bits)
+{
+ u32 rate, divided_rate = 0;
+ int best_divisor_idx = -1;
+ int i, err = -1;
+ u64 (*tc_sched_clock)(void);
+
+ tc.regmap = regmap;
+ tc.base = base;
+ tc.channels[0] = channel;
+ tc.channels[1] = channel1;
+ tc.irq = irq;
+ tc.bits = bits;
+
+ tc.clk[0] = tcb_clk_get(node, tc.channels[0]);
+ if (IS_ERR(tc.clk[0]))
+ return PTR_ERR(tc.clk[0]);
+ err = clk_prepare_enable(tc.clk[0]);
+ if (err) {
+ pr_debug("can't enable T0 clk\n");
+ goto err_clk;
+ }
+
+ /* How fast will we be counting? Pick something over 5 MHz. */
+ rate = (u32)clk_get_rate(tc.clk[0]);
+ for (i = 0; i < 5; i++) {
+ unsigned int divisor = atmel_tc_divisors[i];
+ unsigned int tmp;
+
+ if (!divisor)
+ continue;
+
+ tmp = rate / divisor;
+ pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
+ if (best_divisor_idx > 0) {
+ if (tmp < 5 * 1000 * 1000)
+ continue;
+ }
+ divided_rate = tmp;
+ best_divisor_idx = i;
+ }
+
+ if (tc.bits == 32) {
+ tc.clksrc.read = tc_get_cycles32;
+ tcb_setup_single_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read32;
+ snprintf(tc.name, sizeof(tc.name), "%s:%d",
+ kbasename(node->parent->full_name), tc.channels[0]);
+ } else {
+ tc.clk[1] = tcb_clk_get(node, tc.channels[1]);
+ if (IS_ERR(tc.clk[1]))
+ goto err_disable_t0;
+
+ err = clk_prepare_enable(tc.clk[1]);
+ if (err) {
+ pr_debug("can't enable T1 clk\n");
+ goto err_clk1;
+ }
+ tc.clksrc.read = tc_get_cycles,
+ tcb_setup_dual_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read;
+ snprintf(tc.name, sizeof(tc.name), "%s:%d,%d",
+ kbasename(node->parent->full_name), tc.channels[0],
+ tc.channels[1]);
+ }
+
+ pr_debug("%s at %d.%03d MHz\n", tc.name,
+ divided_rate / 1000000,
+ ((divided_rate + 500000) % 1000000) / 1000);
+
+ tc.clksrc.name = tc.name;
+ tc.clksrc.suspend = tc_clksrc_suspend;
+ tc.clksrc.resume = tc_clksrc_resume;
+ tc.clksrc.rating = 200;
+ tc.clksrc.mask = CLOCKSOURCE_MASK(32);
+ tc.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ err = clocksource_register_hz(&tc.clksrc, divided_rate);
+ if (err)
+ goto err_disable_t1;
+
+ sched_clock_register(tc_sched_clock, 32, divided_rate);
+
+ tc.registered = true;
+
+ /* Set up and register clockevents */
+ tc.clkevt.name = tc.name;
+ tc.clkevt.cpumask = cpumask_of(0);
+ tc.clkevt.set_next_event = tcb_clkevt_next_event;
+ tc.clkevt.set_state_oneshot = tcb_clkevt_oneshot;
+ tc.clkevt.set_state_shutdown = tcb_clkevt_shutdown;
+ tc.clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
+ tc.clkevt.rating = 125;
+
+ clockevents_config_and_register(&tc.clkevt, divided_rate, 1,
+ BIT(tc.bits) - 1);
+
+ return 0;
+
+err_disable_t1:
+ if (tc.bits == 16)
+ clk_disable_unprepare(tc.clk[1]);
+
+err_clk1:
+ if (tc.bits == 16)
+ clk_put(tc.clk[1]);
+
+err_disable_t0:
+ clk_disable_unprepare(tc.clk[0]);
+
+err_clk:
+ clk_put(tc.clk[0]);
+
+ pr_err("%s: unable to register clocksource/clockevent\n",
+ tc.clksrc.name);
+
+ return err;
+}
+
+static int __init tcb_clksrc_init(struct device_node *node)
+{
+ const struct of_device_id *match;
+ struct regmap *regmap;
+ void __iomem *tcb_base;
+ u32 channel;
+ int irq, err, chan1 = -1;
+ unsigned bits;
+
+ if (tc.registered)
+ return -ENODEV;
+
+ /*
+ * The regmap has to be used to access registers that are shared
+ * between channels on the same TCB but we keep direct IO access for
+ * the counters to avoid the impact on performance
+ */
+ regmap = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ tcb_base = of_iomap(node->parent, 0);
+ if (!tcb_base) {
+ pr_err("%s +%d %s\n", __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ match = of_match_node(atmel_tcb_dt_ids, node->parent);
+ bits = (uintptr_t)match->data;
+
+ err = of_property_read_u32_index(node, "reg", 0, &channel);
+ if (err)
+ return err;
+
+ irq = of_irq_get(node->parent, channel);
+ if (irq < 0) {
+ irq = of_irq_get(node->parent, 0);
+ if (irq < 0)
+ return irq;
+ }
+
+ if (bits == 16) {
+ of_property_read_u32_index(node, "reg", 1, &chan1);
+ if (chan1 == -1) {
+ pr_err("%s: clocksource needs two channels\n",
+ node->parent->full_name);
+ return -EINVAL;
+ }
+ }
+
+ return tcb_clksrc_register(node, regmap, tcb_base, channel, chan1, irq,
+ bits);
+}
+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
--
2.25.1

@@ -1,270 +0,0 @@
From f2e0ea85054574af7f632ca36991c5c1a25a7bfd Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:20 +0200
Subject: [PATCH 003/328] clocksource/drivers: timer-atmel-tcb: add clockevent
device on separate channel
Add an other clockevent device that uses a separate TCB channel when
available.
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/clocksource/timer-atmel-tcb.c | 217 +++++++++++++++++++++++++-
1 file changed, 212 insertions(+), 5 deletions(-)
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
index 21fbe430f91b..63ce3b69338a 100644
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -32,7 +32,7 @@ struct atmel_tcb_clksrc {
bool clk_enabled;
};
-static struct atmel_tcb_clksrc tc;
+static struct atmel_tcb_clksrc tc, tce;
static struct clk *tcb_clk_get(struct device_node *node, int channel)
{
@@ -47,6 +47,203 @@ static struct clk *tcb_clk_get(struct device_node *node, int channel)
return of_clk_get_by_name(node->parent, "t0_clk");
}
+/*
+ * Clockevent device using its own channel
+ */
+
+static void tc_clkevt2_clk_disable(struct clock_event_device *d)
+{
+ clk_disable(tce.clk[0]);
+ tce.clk_enabled = false;
+}
+
+static void tc_clkevt2_clk_enable(struct clock_event_device *d)
+{
+ if (tce.clk_enabled)
+ return;
+ clk_enable(tce.clk[0]);
+ tce.clk_enabled = true;
+}
+
+static int tc_clkevt2_stop(struct clock_event_device *d)
+{
+ writel(0xff, tce.base + ATMEL_TC_IDR(tce.channels[0]));
+ writel(ATMEL_TC_CCR_CLKDIS, tce.base + ATMEL_TC_CCR(tce.channels[0]));
+
+ return 0;
+}
+
+static int tc_clkevt2_shutdown(struct clock_event_device *d)
+{
+ tc_clkevt2_stop(d);
+ if (!clockevent_state_detached(d))
+ tc_clkevt2_clk_disable(d);
+
+ return 0;
+}
+
+/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+ * because using one of the divided clocks would usually mean the
+ * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+ *
+ * A divided clock could be good for high resolution timers, since
+ * 30.5 usec resolution can seem "low".
+ */
+static int tc_clkevt2_set_oneshot(struct clock_event_device *d)
+{
+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+ tc_clkevt2_stop(d);
+
+ tc_clkevt2_clk_enable(d);
+
+ /* slow clock, count up to RC, then irq and stop */
+ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_CPCSTOP |
+ ATMEL_TC_CMR_WAVE | ATMEL_TC_CMR_WAVESEL_UPRC,
+ tce.base + ATMEL_TC_CMR(tce.channels[0]));
+ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0]));
+
+ return 0;
+}
+
+static int tc_clkevt2_set_periodic(struct clock_event_device *d)
+{
+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+ tc_clkevt2_stop(d);
+
+ /* By not making the gentime core emulate periodic mode on top
+ * of oneshot, we get lower overhead and improved accuracy.
+ */
+ tc_clkevt2_clk_enable(d);
+
+ /* slow clock, count up to RC, then irq and restart */
+ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_WAVE |
+ ATMEL_TC_CMR_WAVESEL_UPRC,
+ tce.base + ATMEL_TC_CMR(tce.channels[0]));
+ writel((32768 + HZ / 2) / HZ, tce.base + ATMEL_TC_RC(tce.channels[0]));
+
+ /* Enable clock and interrupts on RC compare */
+ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0]));
+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
+ tce.base + ATMEL_TC_CCR(tce.channels[0]));
+
+ return 0;
+}
+
+static int tc_clkevt2_next_event(unsigned long delta,
+ struct clock_event_device *d)
+{
+ writel(delta, tce.base + ATMEL_TC_RC(tce.channels[0]));
+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
+ tce.base + ATMEL_TC_CCR(tce.channels[0]));
+
+ return 0;
+}
+
+static irqreturn_t tc_clkevt2_irq(int irq, void *handle)
+{
+ unsigned int sr;
+
+ sr = readl(tce.base + ATMEL_TC_SR(tce.channels[0]));
+ if (sr & ATMEL_TC_CPCS) {
+ tce.clkevt.event_handler(&tce.clkevt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void tc_clkevt2_suspend(struct clock_event_device *d)
+{
+ tce.cache[0].cmr = readl(tce.base + ATMEL_TC_CMR(tce.channels[0]));
+ tce.cache[0].imr = readl(tce.base + ATMEL_TC_IMR(tce.channels[0]));
+ tce.cache[0].rc = readl(tce.base + ATMEL_TC_RC(tce.channels[0]));
+ tce.cache[0].clken = !!(readl(tce.base + ATMEL_TC_SR(tce.channels[0])) &
+ ATMEL_TC_CLKSTA);
+}
+
+static void tc_clkevt2_resume(struct clock_event_device *d)
+{
+ /* Restore registers for the channel, RA and RB are not used */
+ writel(tce.cache[0].cmr, tc.base + ATMEL_TC_CMR(tce.channels[0]));
+ writel(tce.cache[0].rc, tc.base + ATMEL_TC_RC(tce.channels[0]));
+ writel(0, tc.base + ATMEL_TC_RA(tce.channels[0]));
+ writel(0, tc.base + ATMEL_TC_RB(tce.channels[0]));
+ /* Disable all the interrupts */
+ writel(0xff, tc.base + ATMEL_TC_IDR(tce.channels[0]));
+ /* Reenable interrupts that were enabled before suspending */
+ writel(tce.cache[0].imr, tc.base + ATMEL_TC_IER(tce.channels[0]));
+
+ /* Start the clock if it was used */
+ if (tce.cache[0].clken)
+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
+ tc.base + ATMEL_TC_CCR(tce.channels[0]));
+}
+
+static int __init tc_clkevt_register(struct device_node *node,
+ struct regmap *regmap, void __iomem *base,
+ int channel, int irq, int bits)
+{
+ int ret;
+ struct clk *slow_clk;
+
+ tce.regmap = regmap;
+ tce.base = base;
+ tce.channels[0] = channel;
+ tce.irq = irq;
+
+ slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
+ if (IS_ERR(slow_clk))
+ return PTR_ERR(slow_clk);
+
+ ret = clk_prepare_enable(slow_clk);
+ if (ret)
+ return ret;
+
+ tce.clk[0] = tcb_clk_get(node, tce.channels[0]);
+ if (IS_ERR(tce.clk[0])) {
+ ret = PTR_ERR(tce.clk[0]);
+ goto err_slow;
+ }
+
+ snprintf(tce.name, sizeof(tce.name), "%s:%d",
+ kbasename(node->parent->full_name), channel);
+ tce.clkevt.cpumask = cpumask_of(0);
+ tce.clkevt.name = tce.name;
+ tce.clkevt.set_next_event = tc_clkevt2_next_event,
+ tce.clkevt.set_state_shutdown = tc_clkevt2_shutdown,
+ tce.clkevt.set_state_periodic = tc_clkevt2_set_periodic,
+ tce.clkevt.set_state_oneshot = tc_clkevt2_set_oneshot,
+ tce.clkevt.suspend = tc_clkevt2_suspend,
+ tce.clkevt.resume = tc_clkevt2_resume,
+ tce.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ tce.clkevt.rating = 140;
+
+ /* try to enable clk to avoid future errors in mode change */
+ ret = clk_prepare_enable(tce.clk[0]);
+ if (ret)
+ goto err_slow;
+ clk_disable(tce.clk[0]);
+
+ clockevents_config_and_register(&tce.clkevt, 32768, 1,
+ CLOCKSOURCE_MASK(bits));
+
+ ret = request_irq(tce.irq, tc_clkevt2_irq, IRQF_TIMER | IRQF_SHARED,
+ tce.clkevt.name, &tce);
+ if (ret)
+ goto err_clk;
+
+ tce.registered = true;
+
+ return 0;
+
+err_clk:
+ clk_unprepare(tce.clk[0]);
+err_slow:
+ clk_disable_unprepare(slow_clk);
+
+ return ret;
+}
+
/*
* Clocksource and clockevent using the same channel(s)
*/
@@ -363,7 +560,7 @@ static int __init tcb_clksrc_init(struct device_node *node)
int irq, err, chan1 = -1;
unsigned bits;
- if (tc.registered)
+ if (tc.registered && tce.registered)
return -ENODEV;
/*
@@ -395,12 +592,22 @@ static int __init tcb_clksrc_init(struct device_node *node)
return irq;
}
+ if (tc.registered)
+ return tc_clkevt_register(node, regmap, tcb_base, channel, irq,
+ bits);
+
if (bits == 16) {
of_property_read_u32_index(node, "reg", 1, &chan1);
if (chan1 == -1) {
- pr_err("%s: clocksource needs two channels\n",
- node->parent->full_name);
- return -EINVAL;
+ if (tce.registered) {
+ pr_err("%s: clocksource needs two channels\n",
+ node->parent->full_name);
+ return -EINVAL;
+ } else {
+ return tc_clkevt_register(node, regmap,
+ tcb_base, channel,
+ irq, bits);
+ }
}
}
--
2.25.1

@@ -1,35 +0,0 @@
From 23ef2fe8b6933933fb81af9decf35cfae8c14571 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:21 +0200
Subject: [PATCH 004/328] clocksource/drivers: atmel-pit: make option silent
To conform with the other option, make the ATMEL_PIT option silent so it
can be selected from the platform
Tested-by: Alexander Dahl <ada@thorsis.com>
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/clocksource/Kconfig | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 0ab22e7037f4..34b07047b91f 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -404,8 +404,11 @@ config ARMV7M_SYSTICK
This options enables support for the ARMv7M system timer unit
config ATMEL_PIT
+ bool "Microchip ARM Periodic Interval Timer (PIT)" if COMPILE_TEST
select TIMER_OF if OF
- def_bool SOC_AT91SAM9 || SOC_SAMA5
+ help
+ This enables build of clocksource and clockevent driver for
+ the integrated PIT in Microchip ARM SoCs.
config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
--
2.25.1

@@ -1,54 +0,0 @@
From 56d1624c2b43a84717f237d3c2d58ac52cb37b33 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:22 +0200
Subject: [PATCH 005/328] ARM: at91: Implement clocksource selection
Allow selecting and unselecting the PIT clocksource driver so it doesn't
have to be compiled when unused.
Tested-by: Alexander Dahl <ada@thorsis.com>
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/mach-at91/Kconfig | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+)
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 903f23c309df..fa493a86e2bb 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -107,6 +107,31 @@ config SOC_AT91SAM9
AT91SAM9X35
AT91SAM9XE
+comment "Clocksource driver selection"
+
+config ATMEL_CLOCKSOURCE_PIT
+ bool "Periodic Interval Timer (PIT) support"
+ depends on SOC_AT91SAM9 || SOC_SAMA5
+ default SOC_AT91SAM9 || SOC_SAMA5
+ select ATMEL_PIT
+ help
+ Select this to get a clocksource based on the Atmel Periodic Interval
+ Timer. It has a relatively low resolution and the TC Block clocksource
+ should be preferred.
+
+config ATMEL_CLOCKSOURCE_TCB
+ bool "Timer Counter Blocks (TCB) support"
+ depends on SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 || COMPILE_TEST
+ default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5
+ depends on !ATMEL_TCLIB
+ select ATMEL_ARM_TCB_CLKSRC
+ help
+ Select this to get a high precision clocksource based on a
+ TC block with a 5+ MHz base clock rate.
+ On platforms with 16-bit counters, two timer channels are combined
+ to make a single 32-bit timer.
+ It can also be used as a clock event device supporting oneshot mode.
+
config HAVE_AT91_UTMI
bool
--
2.25.1

@@ -1,42 +0,0 @@
From 9591e618026011c31f7275edd0643d390e185e38 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:23 +0200
Subject: [PATCH 006/328] ARM: configs: at91: use new TCB timer driver
Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to
timer-atmel-tcb.
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/configs/at91_dt_defconfig | 1 -
arch/arm/configs/sama5_defconfig | 1 -
2 files changed, 2 deletions(-)
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index e4b1be66b3f5..09f262e59fef 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -64,7 +64,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 2080025556b5..f2bbc6339ca6 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -75,7 +75,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
--
2.25.1

@@ -1,43 +0,0 @@
From f58179ebd23db67a287e5267a5cbc2c1ae5d75d9 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
Date: Thu, 13 Sep 2018 13:30:24 +0200
Subject: [PATCH 007/328] ARM: configs: at91: unselect PIT
The PIT is not required anymore to successfully boot and may actually harm
in case preempt-rt is used because the PIT interrupt is shared.
Disable it so the TCB clocksource is used.
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/configs/at91_dt_defconfig | 1 +
arch/arm/configs/sama5_defconfig | 1 +
2 files changed, 2 insertions(+)
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 09f262e59fef..f4b253bd05ed 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -19,6 +19,7 @@ CONFIG_ARCH_MULTI_V5=y
CONFIG_ARCH_AT91=y
CONFIG_SOC_AT91RM9200=y
CONFIG_SOC_AT91SAM9=y
+# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set
CONFIG_AEABI=y
CONFIG_UACCESS_WITH_MEMCPY=y
CONFIG_ZBOOT_ROM_TEXT=0x0
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index f2bbc6339ca6..be92871ab155 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -20,6 +20,7 @@ CONFIG_ARCH_AT91=y
CONFIG_SOC_SAMA5D2=y
CONFIG_SOC_SAMA5D3=y
CONFIG_SOC_SAMA5D4=y
+# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set
CONFIG_AEABI=y
CONFIG_UACCESS_WITH_MEMCPY=y
CONFIG_ZBOOT_ROM_TEXT=0x0
--
2.25.1

@@ -1,170 +0,0 @@
From f5fc79f507ee8c22a6f18709552cecbada48d328 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Fri, 27 Jul 2018 13:38:54 +0100
Subject: [PATCH 008/328] irqchip/gic-v3-its: Move pending table allocation to
init time
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/irqchip/irq-gic-v3-its.c | 80 +++++++++++++++++++-----------
include/linux/irqchip/arm-gic-v3.h | 1 +
2 files changed, 53 insertions(+), 28 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index bf7b69449b43..f93b8cd5eea2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -179,6 +179,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
@@ -1659,7 +1660,7 @@ static void its_free_prop_table(struct page *prop_page)
get_order(LPI_PROPBASE_SZ));
}
-static int __init its_alloc_lpi_tables(void)
+static int __init its_alloc_lpi_prop_table(void)
{
phys_addr_t paddr;
@@ -2007,30 +2008,47 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
return val;
}
-static void its_cpu_init_lpis(void)
+static int __init allocate_lpi_tables(void)
{
- void __iomem *rbase = gic_data_rdist_rd_base();
- struct page *pend_page;
- u64 val, tmp;
+ int err, cpu;
- /* If we didn't allocate the pending table yet, do it now */
- pend_page = gic_data_rdist()->pend_page;
- if (!pend_page) {
- phys_addr_t paddr;
+ err = its_alloc_lpi_prop_table();
+ if (err)
+ return err;
+
+ /*
+ * We allocate all the pending tables anyway, as we may have a
+ * mix of RDs that have had LPIs enabled, and some that
+ * don't. We'll free the unused ones as each CPU comes online.
+ */
+ for_each_possible_cpu(cpu) {
+ struct page *pend_page;
pend_page = its_allocate_pending_table(GFP_NOWAIT);
if (!pend_page) {
- pr_err("Failed to allocate PENDBASE for CPU%d\n",
- smp_processor_id());
- return;
+ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
+ return -ENOMEM;
}
- paddr = page_to_phys(pend_page);
- pr_info("CPU%d: using LPI pending table @%pa\n",
- smp_processor_id(), &paddr);
- gic_data_rdist()->pend_page = pend_page;
+ gic_data_rdist_cpu(cpu)->pend_page = pend_page;
}
+ return 0;
+}
+
+static void its_cpu_init_lpis(void)
+{
+ void __iomem *rbase = gic_data_rdist_rd_base();
+ struct page *pend_page;
+ phys_addr_t paddr;
+ u64 val, tmp;
+
+ if (gic_data_rdist()->lpi_enabled)
+ return;
+
+ pend_page = gic_data_rdist()->pend_page;
+ paddr = page_to_phys(pend_page);
+
/* set PROPBASE */
val = (page_to_phys(gic_rdists->prop_page) |
GICR_PROPBASER_InnerShareable |
@@ -2106,6 +2124,10 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
+ gic_data_rdist()->lpi_enabled = true;
+ pr_info("GICv3: CPU%d: using LPI pending table @%pa\n",
+ smp_processor_id(),
+ &paddr);
}
static void its_cpu_init_collection(struct its_node *its)
@@ -3585,16 +3607,6 @@ static int redist_disable_lpis(void)
u64 timeout = USEC_PER_SEC;
u64 val;
- /*
- * If coming via a CPU hotplug event, we don't need to disable
- * LPIs before trying to re-enable them. They are already
- * configured and all is well in the world. Detect this case
- * by checking the allocation of the pending table for the
- * current CPU.
- */
- if (gic_data_rdist()->pend_page)
- return 0;
-
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
@@ -3604,7 +3616,18 @@ static int redist_disable_lpis(void)
if (!(val & GICR_CTLR_ENABLE_LPIS))
return 0;
- pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
+ /*
+ * If coming via a CPU hotplug event, we don't need to disable
+ * LPIs before trying to re-enable them. They are already
+ * configured and all is well in the world.
+ */
+ if (gic_data_rdist()->lpi_enabled)
+ return 0;
+
+ /*
+ * From that point on, we only try to do some damage control.
+ */
+ pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
smp_processor_id());
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -3860,7 +3883,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
gic_rdists = rdists;
- err = its_alloc_lpi_tables();
+
+ err = allocate_lpi_tables();
if (err)
return err;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1d21e98d6854..fdddead7e307 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -585,6 +585,7 @@ struct rdists {
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
+ bool lpi_enabled;
} __percpu *rdist;
struct page *prop_page;
u64 flags;
--
2.25.1
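For illustration, the shape of the change above is a common RT-friendly pattern: move allocations out of the CPU-bringup path and perform them for every possible CPU once at init time. A minimal sketch of that pattern follows; demo_state, demo_states and demo_init() are invented names and not part of the patch.

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/percpu.h>

    struct demo_state {                     /* hypothetical per-CPU state */
            struct page *pend_page;
    };
    static struct demo_state __percpu *demo_states;

    static int __init demo_init(void)
    {
            int cpu;

            demo_states = alloc_percpu(struct demo_state);
            if (!demo_states)
                    return -ENOMEM;

            /* Allocate for every possible CPU up front; the hotplug
             * path later only consumes what is already there. */
            for_each_possible_cpu(cpu) {
                    struct page *p = alloc_pages(GFP_KERNEL, 0);

                    if (!p)
                            return -ENOMEM;
                    per_cpu_ptr(demo_states, cpu)->pend_page = p;
            }
            return 0;
    }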

View File

@@ -1,202 +0,0 @@
From 89b3ba99261e5321ba2276305f817b5c0c9817ad Mon Sep 17 00:00:00 2001
From: Julia Cartwright <julia@ni.com>
Date: Fri, 28 Sep 2018 21:03:51 +0000
Subject: [PATCH 009/328] kthread: convert worker lock to raw spinlock
In order to enable the queuing of kthread work items from hardirq
context even when PREEMPT_RT_FULL is enabled, convert the worker
spin_lock to a raw_spin_lock.
This is only acceptable to do because the work performed under the lock
is well-bounded and minimal.
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Guenter Roeck <linux@roeck-us.net>
Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/kthread.h | 2 +-
kernel/kthread.c | 42 ++++++++++++++++++++---------------------
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..ad292898f7f2 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,7 +85,7 @@ enum {
struct kthread_worker {
unsigned int flags;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct list_head work_list;
struct list_head delayed_work_list;
struct task_struct *task;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b5..5641b55783a6 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
struct lock_class_key *key)
{
memset(worker, 0, sizeof(struct kthread_worker));
- spin_lock_init(&worker->lock);
+ raw_spin_lock_init(&worker->lock);
lockdep_set_class_and_name(&worker->lock, key, name);
INIT_LIST_HEAD(&worker->work_list);
INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr)
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
- spin_lock_irq(&worker->lock);
+ raw_spin_lock_irq(&worker->lock);
worker->task = NULL;
- spin_unlock_irq(&worker->lock);
+ raw_spin_unlock_irq(&worker->lock);
return 0;
}
work = NULL;
- spin_lock_irq(&worker->lock);
+ raw_spin_lock_irq(&worker->lock);
if (!list_empty(&worker->work_list)) {
work = list_first_entry(&worker->work_list,
struct kthread_work, node);
list_del_init(&work->node);
}
worker->current_work = work;
- spin_unlock_irq(&worker->lock);
+ raw_spin_unlock_irq(&worker->lock);
if (work) {
__set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
bool ret = false;
unsigned long flags;
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
if (!queuing_blocked(worker, work)) {
kthread_insert_work(worker, work, &worker->work_list);
ret = true;
}
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
if (WARN_ON_ONCE(!worker))
return;
- spin_lock(&worker->lock);
+ raw_spin_lock(&worker->lock);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
list_del_init(&work->node);
kthread_insert_work(worker, work, &worker->work_list);
- spin_unlock(&worker->lock);
+ raw_spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
unsigned long flags;
bool ret = false;
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
if (!queuing_blocked(worker, work)) {
__kthread_queue_delayed_work(worker, dwork, delay);
ret = true;
}
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
if (!worker)
return;
- spin_lock_irq(&worker->lock);
+ raw_spin_lock_irq(&worker->lock);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
else
noop = true;
- spin_unlock_irq(&worker->lock);
+ raw_spin_unlock_irq(&worker->lock);
if (!noop)
wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
* any queuing is blocked by setting the canceling counter.
*/
work->canceling++;
- spin_unlock_irqrestore(&worker->lock, *flags);
+ raw_spin_unlock_irqrestore(&worker->lock, *flags);
del_timer_sync(&dwork->timer);
- spin_lock_irqsave(&worker->lock, *flags);
+ raw_spin_lock_irqsave(&worker->lock, *flags);
work->canceling--;
}
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
unsigned long flags;
int ret = false;
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
/* Do not bother with canceling when never queued. */
if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
fast_queue:
__kthread_queue_delayed_work(worker, dwork, delay);
out:
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
if (!worker)
goto out;
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
* In the meantime, block any queuing by setting the canceling counter.
*/
work->canceling++;
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
kthread_flush_work(work);
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
work->canceling--;
out_fast:
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
return ret;
}
--
2.25.1
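What the conversion buys is the ability to queue kthread work from hardirq context on RT. A hedged sketch follows; demo_worker, demo_work and demo_hardirq() are invented names, and the worker is assumed to have been created elsewhere with kthread_create_worker() and the work initialised with kthread_init_work().

    #include <linux/interrupt.h>
    #include <linux/kthread.h>

    static struct kthread_worker *demo_worker;  /* assumed created elsewhere */
    static struct kthread_work demo_work;       /* assumed initialised elsewhere */

    static irqreturn_t demo_hardirq(int irq, void *dev_id)
    {
            /*
             * kthread_queue_work() takes worker->lock. With that lock as
             * a raw_spin_lock, this call is legal from hardirq context
             * even on PREEMPT_RT_FULL.
             */
            kthread_queue_work(demo_worker, &demo_work);
            return IRQ_HANDLED;
    }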

View File

@@ -1,139 +0,0 @@
From 1e7f9f15b5cb5088ac28a0919a2fcc74bfc5f5c7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
Date: Mon, 8 Oct 2018 14:09:37 +0300
Subject: [PATCH 010/328] crypto: caam/qi - simplify CGR allocation, freeing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[Upstream commit 29e83c757006fd751966bdc53392bb22d74179c6]
CGRs (Congestion Groups) have to be freed by the same CPU that
initialized them.
This is why the driver currently takes special measures; however, using
set_cpus_allowed_ptr() is incorrect - as reported by Sebastian.
Instead of the generic solution of replacing set_cpus_allowed_ptr() with
work_on_cpu_safe(), use the qman_delete_cgr_safe() QBMan API rather than
qman_delete_cgr(); it internally takes care of proper CGR
deletion.
Link: https://lkml.kernel.org/r/20181005125443.dfhd2asqktm22ney@linutronix.de
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/caam/qi.c | 43 ++++------------------------------------
drivers/crypto/caam/qi.h | 2 +-
2 files changed, 5 insertions(+), 40 deletions(-)
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 67f7f8c42c93..b84e6c8b1e13 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -83,13 +83,6 @@ EXPORT_SYMBOL(caam_congested);
static u64 times_congested;
#endif
-/*
- * CPU from where the module initialised. This is required because QMan driver
- * requires CGRs to be removed from same CPU from where they were originally
- * allocated.
- */
-static int mod_init_cpu;
-
/*
* This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-int caam_qi_shutdown(struct device *qidev)
+void caam_qi_shutdown(struct device *qidev)
{
- int i, ret;
+ int i;
struct caam_qi_priv *priv = dev_get_drvdata(qidev);
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
for_each_cpu(i, cpus) {
struct napi_struct *irqtask;
@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev)
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
}
- /*
- * QMan driver requires CGRs to be deleted from same CPU from where they
- * were instantiated. Hence we get the module removal execute from the
- * same CPU from where it was originally inserted.
- */
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
- ret = qman_delete_cgr(&priv->cgr);
- if (ret)
- dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
- else
- qman_release_cgrid(priv->cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr);
+ qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
- /* Now that we're done with the CGRs, restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
-
platform_device_unregister(priv->qi_pdev);
- return ret;
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
static struct platform_device_info qi_pdev_info = {
.name = "caam_qi",
.id = PLATFORM_DEVID_NONE
};
- /*
- * QMAN requires CGRs to be removed from same CPU+portal from where it
- * was originally allocated. Hence we need to note down the
- * initialisation CPU and use the same CPU for module exit.
- * We select the first CPU to from the list of portal owning CPUs.
- * Then we pin module init to this CPU.
- */
- mod_init_cpu = cpumask_first(cpus);
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
qi_pdev_info.parent = ctrldev;
qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
qi_pdev = platform_device_register_full(&qi_pdev_info);
@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
return -ENOMEM;
}
- /* Done with the CGRs; restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
&times_congested, &caam_fops_u64_ro);
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 357b69f57072..b6c8acc30853 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -174,7 +174,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-int caam_qi_shutdown(struct device *dev);
+void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
--
2.25.1
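With the change, the teardown reduces to two calls. A minimal sketch, assuming the qman_delete_cgr_safe()/qman_release_cgrid() API from soc/fsl/qman.h that the diff uses; demo_cgr and demo_teardown() are invented names:

    #include <soc/fsl/qman.h>

    static struct qman_cgr demo_cgr;    /* stand-in for the driver's priv->cgr */

    static void demo_teardown(void)
    {
            /*
             * qman_delete_cgr_safe() internally arranges for the deletion
             * to run on the CPU that initialised the CGR, so the caller
             * no longer needs to pin itself with set_cpus_allowed_ptr().
             */
            qman_delete_cgr_safe(&demo_cgr);
            qman_release_cgrid(demo_cgr.cgrid);
    }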

View File

@@ -1,147 +0,0 @@
From 2a9fed89a7bea6fbe31e717ab5f277405e20826e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 7 Jan 2019 13:52:31 +0100
Subject: [PATCH 011/328] sched/fair: Robustify CFS-bandwidth timer locking
Traditionally hrtimer callbacks were run with IRQs disabled, but with
the introduction of HRTIMER_MODE_SOFT it is possible they run from
SoftIRQ context, which does _NOT_ have IRQs disabled.
Allow the CFS bandwidth timers (period_timer and slack_timer) to
be run from SoftIRQ context; this entails removing the assumption that
IRQs are already disabled from the locking.
While mainline doesn't strictly need this, -RT forces all timers not
explicitly marked with MODE_HARD into MODE_SOFT and trips over this.
And marking these timers as MODE_HARD doesn't make sense as they're
not required for RT operation and can potentially be quite expensive.
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reported-by: Tom Putzeys <tom.putzeys@be.atlascopco.com>
Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190107125231.GE14122@hirez.programming.kicks-ass.net
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/sched/fair.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7f4f4ab5bfef..0f1ba3d72336 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4576,7 +4576,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
struct rq *rq = rq_of(cfs_rq);
struct rq_flags rf;
- rq_lock(rq, &rf);
+ rq_lock_irqsave(rq, &rf);
if (!cfs_rq_throttled(cfs_rq))
goto next;
@@ -4595,7 +4595,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
unthrottle_cfs_rq(cfs_rq);
next:
- rq_unlock(rq, &rf);
+ rq_unlock_irqrestore(rq, &rf);
if (!remaining)
break;
@@ -4611,7 +4611,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
* period the timer is deactivated until scheduling resumes; cfs_b->idle is
* used to track this state.
*/
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
{
u64 runtime;
int throttled;
@@ -4651,10 +4651,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
runtime = cfs_b->runtime;
cfs_b->distribute_running = 1;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
/* we can't nest cfs_b->lock while distributing bandwidth */
runtime = distribute_cfs_runtime(cfs_b, runtime);
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
@@ -4762,16 +4762,17 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+ unsigned long flags;
/* confirm we're still not at a refresh boundary */
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
if (cfs_b->distribute_running) {
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return;
}
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return;
}
@@ -4781,17 +4782,17 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
if (runtime)
cfs_b->distribute_running = 1;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
if (!runtime)
return;
runtime = distribute_cfs_runtime(cfs_b, runtime);
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b->runtime -= min(runtime, cfs_b->runtime);
cfs_b->distribute_running = 0;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
}
/*
@@ -4871,11 +4872,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, period_timer);
+ unsigned long flags;
int overrun;
int idle = 0;
int count = 0;
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
for (;;) {
overrun = hrtimer_forward_now(timer, cfs_b->period);
if (!overrun)
@@ -4911,11 +4913,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
count = 0;
}
- idle = do_sched_cfs_period_timer(cfs_b, overrun);
+ idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
}
if (idle)
cfs_b->period_active = 0;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
--
2.25.1
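The rule the patch enforces can be shown in isolation: a callback that may run from either hardirq or SoftIRQ context must save and restore the IRQ state rather than assume IRQs are already off. A sketch with invented names (demo_lock, demo_timer_fn()):

    #include <linux/hrtimer.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);  /* illustrative lock */

    static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
    {
            unsigned long flags;

            /*
             * The callback may run in hardirq (MODE_HARD) or SoftIRQ
             * (MODE_SOFT) context, so it cannot assume IRQs are off:
             * save/restore instead of a plain raw_spin_lock().
             */
            raw_spin_lock_irqsave(&demo_lock, flags);
            /* ... bounded bookkeeping ... */
            raw_spin_unlock_irqrestore(&demo_lock, flags);
            return HRTIMER_NORESTART;
    }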

View File

@@ -1,431 +0,0 @@
From 7c89d978bdfea369853567288ced4880deddd0b1 Mon Sep 17 00:00:00 2001
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
Subject: [PATCH 012/328] arm: Convert arm boot_lock to raw
The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.
idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the
lock, the attempt to wake it when the lock becomes available will fail:
try_to_wake_up()
...
activate_task()
enqueue_task()
p->sched_class->enqueue_task(rq, p, flags)
Fix by converting boot_lock to a raw spin lock.
Signed-off-by: Frank Rowand <frank.rowand@am.sony.com>
Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Tony Lindgren <tony@atomide.com>
Acked-by: Krzysztof Kozlowski <krzk@kernel.org>
Tested-by: Krzysztof Kozlowski <krzk@kernel.org> [Exynos5422 Linaro PM-QA]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/mach-exynos/platsmp.c | 12 ++++++------
arch/arm/mach-hisi/platmcpm.c | 22 +++++++++++-----------
arch/arm/mach-omap2/omap-smp.c | 10 +++++-----
arch/arm/mach-prima2/platsmp.c | 10 +++++-----
arch/arm/mach-qcom/platsmp.c | 10 +++++-----
arch/arm/mach-spear/platsmp.c | 10 +++++-----
arch/arm/mach-sti/platsmp.c | 10 +++++-----
arch/arm/plat-versatile/platsmp.c | 10 +++++-----
8 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 6a1e682371b3..17dca0ff336e 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -239,7 +239,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void exynos_secondary_init(unsigned int cpu)
{
@@ -252,8 +252,8 @@ static void exynos_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
@@ -317,7 +317,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -344,7 +344,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
if (timeout == 0) {
printk(KERN_ERR "cpu1 power enable failed");
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return -ETIMEDOUT;
}
}
@@ -390,7 +390,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* calibrations, then wait for it to finish
*/
fail:
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? ret : 0;
}
diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
index f66815c3dd07..00524abd963f 100644
--- a/arch/arm/mach-hisi/platmcpm.c
+++ b/arch/arm/mach-hisi/platmcpm.c
@@ -61,7 +61,7 @@
static void __iomem *sysctrl, *fabric;
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static u32 fabric_phys_addr;
/*
* [0]: bootwrapper physical address
@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
return -EINVAL;
- spin_lock_irq(&boot_lock);
+ raw_spin_lock_irq(&boot_lock);
if (hip04_cpu_table[cluster][cpu])
goto out;
@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
out:
hip04_cpu_table[cluster][cpu]++;
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
return 0;
}
@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
hip04_cpu_table[cluster][cpu]--;
if (hip04_cpu_table[cluster][cpu] == 1) {
/* A power_up request went ahead of us. */
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return;
} else if (hip04_cpu_table[cluster][cpu] > 1) {
pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
}
last_man = hip04_cluster_is_down(cluster);
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
if (last_man) {
/* Since it's Cortex A15, disable L2 prefetching. */
asm volatile(
@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
count = TIMEOUT_MSEC / POLL_MSEC;
- spin_lock_irq(&boot_lock);
+ raw_spin_lock_irq(&boot_lock);
for (tries = 0; tries < count; tries++) {
if (hip04_cpu_table[cluster][cpu])
goto err;
@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
if (data & CORE_WFI_STATUS(cpu))
break;
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
/* Wait for clean L2 when the whole cluster is down. */
msleep(POLL_MSEC);
- spin_lock_irq(&boot_lock);
+ raw_spin_lock_irq(&boot_lock);
}
if (tries >= count)
goto err;
@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
goto err;
if (hip04_cluster_is_down(cluster))
hip04_set_snoop_filter(cluster, 0);
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
return 1;
err:
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
return 0;
}
#endif
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 1c73694c871a..ac4d2f030b87 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -69,7 +69,7 @@ static const struct omap_smp_config omap5_cfg __initconst = {
.startup_addr = omap5_secondary_startup,
};
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
void __iomem *omap4_get_scu_base(void)
{
@@ -177,8 +177,8 @@ static void omap4_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -191,7 +191,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
@@ -270,7 +270,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return 0;
}
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index 75ef5d4be554..c17c86e5d860 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -22,7 +22,7 @@
static void __iomem *clk_base;
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void sirfsoc_secondary_init(unsigned int cpu)
{
@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static const struct of_device_id clk_ids[] = {
@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
/* make sure write buffer is drained */
mb();
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index 5494c9e0c909..e8ce157d3548 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -46,7 +46,7 @@
extern void secondary_startup_arm(void);
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
#ifdef CONFIG_HOTPLUG_CPU
static void qcom_cpu_die(unsigned int cpu)
@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int scss_release_secondary(unsigned int cpu)
@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* Send the secondary CPU a soft interrupt, thereby causing
@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return ret;
}
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index 39038a03836a..6da5c93872bf 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -32,7 +32,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
index 231f19e17436..a3419b7003e6 100644
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -35,7 +35,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void sti_secondary_init(unsigned int cpu)
{
@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index c2366510187a..6b60f582b738 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -32,7 +32,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
void versatile_secondary_init(unsigned int cpu)
{
@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* This is really belt and braces; we hold unintended secondary
@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--
2.25.1
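Distilled, the boot_lock handshake repeated across all these platforms looks like the sketch below. The lock must be a raw spinlock because the waiter on the secondary side is the idle task, which cannot be woken if it blocks on a sleeping lock. All names here are invented for illustration:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_boot_lock); /* raw: the waiter is the idle task */

    /* Boot CPU side: hold the lock across the release of the secondary. */
    static int demo_boot_secondary(unsigned int cpu)
    {
            raw_spin_lock(&demo_boot_lock);
            /* ... write pen_release, send the wake-up IPI, poll ... */
            raw_spin_unlock(&demo_boot_lock);
            return 0;
    }

    /* Secondary side: an empty lock/unlock pair as a synchronisation point. */
    static void demo_secondary_init(unsigned int cpu)
    {
            raw_spin_lock(&demo_boot_lock);
            raw_spin_unlock(&demo_boot_lock);
    }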

View File

@@ -1,100 +0,0 @@
From 9ecaf2a8f433399cc3fabcfb9fbce9a88fe6f200 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 17 Jul 2018 18:25:31 +0200
Subject: [PATCH 013/328] x86/ioapic: Don't let setaffinity unmask threaded EOI
interrupt too early
There is an issue with threaded interrupts which are marked ONESHOT
and use the fasteoi handler.
if (IS_ONESHOT())
        mask_irq();
....
....
cond_unmask_eoi_irq()
        chip->irq_eoi();
So if setaffinity is pending then the interrupt will be moved and then
unmasked, which is wrong as it should be kept masked up to the point where
the threaded handler has finished. It's not a real problem; the interrupt will
just be able to fire before the threaded handler has finished, though the irq
masked state will be wrong for a bit.
The patch below should cure the issue. It also renames the horribly
misnamed functions so it becomes clear what they are supposed to do.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[bigeasy: add the body of the patch, use the same functions in both
ifdef paths (spotted by Andy Shevchenko)]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/kernel/apic/io_apic.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index fa3b85b222e3..1bdad61a3ef7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1722,7 +1722,7 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
return false;
}
-static inline bool ioapic_irqd_mask(struct irq_data *data)
+static inline bool ioapic_prepare_move(struct irq_data *data)
{
/* If we are moving the IRQ we need to mask it */
if (unlikely(irqd_is_setaffinity_pending(data))) {
@@ -1733,9 +1733,9 @@ static inline bool ioapic_irqd_mask(struct irq_data *data)
return false;
}
-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
+static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
- if (unlikely(masked)) {
+ if (unlikely(moveit)) {
/* Only migrate the irq if the ack has been received.
*
* On rare occasions the broadcast level triggered ack gets
@@ -1770,11 +1770,11 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
}
}
#else
-static inline bool ioapic_irqd_mask(struct irq_data *data)
+static inline bool ioapic_prepare_move(struct irq_data *data)
{
return false;
}
-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
+static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
}
#endif
@@ -1783,11 +1783,11 @@ static void ioapic_ack_level(struct irq_data *irq_data)
{
struct irq_cfg *cfg = irqd_cfg(irq_data);
unsigned long v;
- bool masked;
+ bool moveit;
int i;
irq_complete_move(cfg);
- masked = ioapic_irqd_mask(irq_data);
+ moveit = ioapic_prepare_move(irq_data);
/*
* It appears there is an erratum which affects at least version 0x11
@@ -1842,7 +1842,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
}
- ioapic_irqd_unmask(irq_data, masked);
+ ioapic_finish_move(irq_data, moveit);
}
static void ioapic_ir_ack_level(struct irq_data *irq_data)
--
2.25.1
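For context, a threaded ONESHOT interrupt of the kind the fix is about is requested as in the sketch below; demo_quick(), demo_thread() and demo_request() are invented names:

    #include <linux/interrupt.h>

    static irqreturn_t demo_quick(int irq, void *dev)
    {
            return IRQ_WAKE_THREAD;     /* defer the real work to the thread */
    }

    static irqreturn_t demo_thread(int irq, void *dev)
    {
            /* Runs in a kernel thread; the line stays masked until we return. */
            return IRQ_HANDLED;
    }

    static int demo_request(int irq, void *dev)
    {
            /*
             * IRQF_ONESHOT keeps the interrupt masked until demo_thread()
             * finishes - the invariant the ioapic fix above preserves
             * across a pending setaffinity.
             */
            return request_threaded_irq(irq, demo_quick, demo_thread,
                                        IRQF_ONESHOT, "demo", dev);
    }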

View File

@@ -1,49 +0,0 @@
From 759e6d7c318bbcff7507641d5a9fb6b5074b2a87 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 18:19:48 +0200
Subject: [PATCH 014/328] cgroup: use irqsave in cgroup_rstat_flush_locked()
All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
either with spin_lock_irq() or spin_lock_irqsave().
cgroup_rstat_flush_locked() itself acquires cgroup_rstat_cpu_lock which
is a raw_spin_lock. This lock is also acquired in cgroup_rstat_updated()
in IRQ context and therefore requires _irqsave() locking suffix in
cgroup_rstat_flush_locked().
Since there is no difference between spinlock_t and raw_spinlock_t
on !RT, lockdep does not complain here. On RT lockdep complains because
the interrupts were not disabled here and a deadlock is possible.
Acquire the raw_spinlock_t with disabled interrupts.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/cgroup/rstat.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index bb95a35e8c2d..3266a9781b4e 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -159,8 +159,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
cpu);
struct cgroup *pos = NULL;
+ unsigned long flags;
- raw_spin_lock(cpu_lock);
+ raw_spin_lock_irqsave(cpu_lock, flags);
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
struct cgroup_subsys_state *css;
@@ -172,7 +173,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
}
- raw_spin_unlock(cpu_lock);
+ raw_spin_unlock_irqrestore(cpu_lock, flags);
/* if @may_sleep, play nice and yield if necessary */
if (may_sleep && (need_resched() ||
--
2.25.1

View File

@@ -1,63 +0,0 @@
From 934128f28dd37073d6513a37f0433df6399c7953 Mon Sep 17 00:00:00 2001
From: Clark Williams <williams@redhat.com>
Date: Tue, 3 Jul 2018 13:34:30 -0500
Subject: [PATCH 015/328] fscache: initialize cookie hash table raw spinlocks
The fscache cookie mechanism uses a hash table of hlist_bl_head structures. The
PREEMPT_RT patchset adds a raw spinlock to this structure, and so on PREEMPT_RT
the structures get used uninitialized, causing warnings about bad magic numbers
when spinlock debugging is turned on.
Use the init function for fscache cookies.
Signed-off-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
fs/fscache/cookie.c | 8 ++++++++
fs/fscache/main.c | 1 +
include/linux/fscache.h | 1 +
3 files changed, 10 insertions(+)
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index c550512ce335..d5d57da32ffa 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -962,3 +962,11 @@ int __fscache_check_consistency(struct fscache_cookie *cookie,
return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
+
+void __init fscache_cookie_init(void)
+{
+ int i;
+
+ for (i = 0; i < (1 << fscache_cookie_hash_shift) - 1; i++)
+ INIT_HLIST_BL_HEAD(&fscache_cookie_hash[i]);
+}
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 30ad89db1efc..1d5f1d679ffa 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -149,6 +149,7 @@ static int __init fscache_init(void)
ret = -ENOMEM;
goto error_cookie_jar;
}
+ fscache_cookie_init();
fscache_root = kobject_create_and_add("fscache", kernel_kobj);
if (!fscache_root)
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 84b90a79d75a..87a9330eafa2 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -230,6 +230,7 @@ extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
bool (*)(void *), void *);
+extern void fscache_cookie_init(void);
/**
* fscache_register_netfs - Register a filesystem as desiring caching services
--
2.25.1
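The same initialisation pattern, reduced to a sketch: a static hlist_bl hash table whose heads are initialised once at boot, before first use. DEMO_HASH_SHIFT, demo_hash and demo_hash_init() are invented names:

    #include <linux/list_bl.h>

    #define DEMO_HASH_SHIFT 10          /* illustrative table size */
    static struct hlist_bl_head demo_hash[1 << DEMO_HASH_SHIFT];

    static void __init demo_hash_init(void)
    {
            int i;

            /*
             * On PREEMPT_RT each hlist_bl_head carries a real raw
             * spinlock, so every head must be initialised before use.
             */
            for (i = 0; i < (1 << DEMO_HASH_SHIFT); i++)
                    INIT_HLIST_BL_HEAD(&demo_hash[i]);
    }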

View File

@@ -1,39 +0,0 @@
From 2a2f1a8c287a6b6fb14a4a1b5583e043d5897df4 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 29 Aug 2018 21:59:04 +0200
Subject: [PATCH 016/328] Drivers: hv: vmbus: include header for get_irq_regs()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
On !RT the header file providing get_irq_regs() gets pulled in via other header
files. On RT it does not, and the build fails:
drivers/hv/vmbus_drv.c:975 implicit declaration of function get_irq_regs [-Werror=implicit-function-declaration]
drivers/hv/hv.c:115 implicit declaration of function get_irq_regs [-Werror=implicit-function-declaration]
Add the header file for get_irq_regs() to a common header so it is used by
both vmbus_drv.c and hv.c for their get_irq_regs() usage.
Reported-by: Bernhard Landauer <oberon@manjaro.org>
Reported-by: Ralf Ramsauer <ralf.ramsauer@oth-regensburg.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/hv/hyperv_vmbus.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 87d3d7da78f8..1d2d8a4b837d 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,7 @@
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include "hv_trace.h"
--
2.25.1

View File

@@ -1,32 +0,0 @@
From d487edd01d698abf2b4f3ea4e3f27897b227250c Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 11 Oct 2018 16:39:59 +0200
Subject: [PATCH 017/328] percpu: include irqflags.h for raw_local_irq_save()
The percpu.h header file uses raw_local_irq_save() but does
not include irqflags.h for its definition. It compiles because irqflags.h
is pulled in via another header file.
On -RT the build fails because raw_local_irq_save() is not defined.
Include irqflags.h in percpu.h.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/asm-generic/percpu.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 1817a8415a5e..942d64c0476e 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -5,6 +5,7 @@
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/percpu-defs.h>
+#include <linux/irqflags.h>
#ifdef CONFIG_SMP
--
2.25.1

View File

@@ -1,31 +0,0 @@
From 5c77a75aaa23c5fc32b5485897d0d14e66fafd37 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:06:10 +0200
Subject: [PATCH 018/328] efi: Allow efi=runtime
In case the option "efi=noruntime" is the default at build time, the user
can override that state with `efi=runtime' and allow it again.
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/firmware/efi/efi.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index d54fca902e64..5db20908aa9c 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -113,6 +113,9 @@ static int __init parse_efi_cmdline(char *str)
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
+ if (parse_option_str(str, "runtime"))
+ disable_runtime = false;
+
return 0;
}
early_param("efi", parse_efi_cmdline);
--
2.25.1
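The last-one-wins parsing this enables can be sketched generically. Only parse_option_str() and early_param() are the real interfaces used by the patch; the "demo" parameter, its handler and its default are invented:

    #include <linux/init.h>
    #include <linux/kernel.h>

    static bool demo_disabled = true;   /* assumed built-in default: off */

    static int __init parse_demo_cmdline(char *str)
    {
            if (parse_option_str(str, "off"))
                    demo_disabled = true;
            /* A later "demo=on" overrides a built-in or earlier "off". */
            if (parse_option_str(str, "on"))
                    demo_disabled = false;
            return 0;
    }
    early_param("demo", parse_demo_cmdline);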

View File

@@ -1,54 +0,0 @@
From af50891c552632469b09b7b97abd197545aec804 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 24 Jul 2018 14:48:55 +0200
Subject: [PATCH 019/328] x86/efi: drop task_lock() from efi_switch_mm()
efi_switch_mm() is a wrapper around switch_mm() which saves current's
->active_mm, sets the requested mm as ->active_mm and invokes
switch_mm().
I don't think that task_lock() is required during that procedure. It
protects ->mm which isn't changed here.
It needs to be mentioned that during the whole procedure (switch to
EFI's mm and back) preemption needs to be disabled. A context switch
at this point would reset the cr3 value based on current->mm. Also, this
function may not be invoked at the same time on a different CPU because
it would overwrite the efi_scratch.prev_mm information.
Remove task_lock() and also update the comment to reflect it.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/platform/efi/efi_64.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6db8f3598c80..c9ccaef8df57 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -620,18 +620,16 @@ void __init efi_dump_pagetable(void)
/*
* Makes the calling thread switch to/from efi_mm context. Can be used
- * for SetVirtualAddressMap() i.e. current->active_mm == init_mm as well
- * as during efi runtime calls i.e current->active_mm == current_mm.
- * We are not mm_dropping()/mm_grabbing() any mm, because we are not
- * losing/creating any references.
+ * in a kernel thread and user context. Preemption needs to remain disabled
+ * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
+ * cannot change under us.
+ * It should be ensured that there are no concurrent calls to this function.
*/
void efi_switch_mm(struct mm_struct *mm)
{
- task_lock(current);
efi_scratch.prev_mm = current->active_mm;
current->active_mm = mm;
switch_mm(efi_scratch.prev_mm, mm, NULL);
- task_unlock(current);
}
#ifdef CONFIG_EFI_MIXED
--
2.25.1
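A caller-side sketch of the constraint the new comment states: preemption stays disabled for the whole borrow of the EFI mm. It assumes the x86-64 efi_switch_mm()/efi_scratch interfaces shown in the diff above; demo_efi_call() and the call placeholder are invented:

    #include <linux/efi.h>
    #include <linux/preempt.h>
    #include <asm/efi.h>

    static void demo_efi_call(void)
    {
            /*
             * A context switch here would reload cr3 from current->mm,
             * so the whole borrow runs with preemption disabled.
             */
            preempt_disable();
            efi_switch_mm(&efi_mm);
            /* ... perform the EFI runtime call ... */
            efi_switch_mm(efi_scratch.prev_mm);
            preempt_enable();
    }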

View File

@@ -1,82 +0,0 @@
From c96c598b9bc12e2909dcec0a1bf8f4a1b846107e Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 09:13:42 +0200
Subject: [PATCH 020/328] arm64: KVM: compute_layout before alternatives are
applied
compute_layout() is invoked as part of an alternative fixup under
stop_machine() and needs a sleeping lock as part of get_random_long().
Invoke compute_layout() before the alternatives are applied.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm64/include/asm/alternative.h | 6 ++++++
arch/arm64/kernel/alternative.c | 1 +
arch/arm64/kvm/va_layout.c | 7 +------
3 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 887a8512bf10..376561351bae 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -35,6 +35,12 @@ void apply_alternatives_module(void *start, size_t length);
static inline void apply_alternatives_module(void *start, size_t length) { }
#endif
+#ifdef CONFIG_KVM_ARM_HOST
+void kvm_compute_layout(void);
+#else
+static inline void kvm_compute_layout(void) { }
+#endif
+
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
" .word 663f - .\n" /* new instruction */ \
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index b5d603992d40..f92815d56d17 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -224,6 +224,7 @@ static int __apply_alternatives_multi_stop(void *unused)
void __init apply_alternatives_all(void)
{
/* better not try code patching on a live SMP system */
+ kvm_compute_layout();
stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index c712a7376bc1..792da0e125de 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -33,7 +33,7 @@ static u8 tag_lsb;
static u64 tag_val;
static u64 va_mask;
-static void compute_layout(void)
+__init void kvm_compute_layout(void)
{
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
u64 hyp_va_msb;
@@ -121,8 +121,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
BUG_ON(nr_inst != 5);
- if (!has_vhe() && !va_mask)
- compute_layout();
for (i = 0; i < nr_inst; i++) {
u32 rd, rn, insn, oinsn;
@@ -167,9 +165,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
return;
}
- if (!va_mask)
- compute_layout();
-
/*
* Compute HYP VA by using the same computation as kern_hyp_va()
*/
--
2.25.1

View File

@@ -1,102 +0,0 @@
From 8779fdd5686d1f9be670c7ee5ea6dfaece9e37d8 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 31 Aug 2018 14:16:30 +0200
Subject: [PATCH 021/328] of: allocate / free phandle cache outside of the
devtree_lock
The phandle cache code allocates memory while holding devtree_lock which
is a raw_spinlock_t. Memory allocation (and free()) is not possible on
RT while a raw_spinlock_t is held.
Invoke the kfree() and kcalloc() while the lock is dropped.
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Frank Rowand <frowand.list@gmail.com>
Cc: devicetree@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/of/base.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f0dbb7ad88cf..c59b30bab0e0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -130,31 +130,34 @@ static u32 phandle_cache_mask;
/*
* Caller must hold devtree_lock.
*/
-static void __of_free_phandle_cache(void)
+static struct device_node** __of_free_phandle_cache(void)
{
u32 cache_entries = phandle_cache_mask + 1;
u32 k;
+ struct device_node **shadow;
if (!phandle_cache)
- return;
+ return NULL;
for (k = 0; k < cache_entries; k++)
of_node_put(phandle_cache[k]);
- kfree(phandle_cache);
+ shadow = phandle_cache;
phandle_cache = NULL;
+ return shadow;
}
int of_free_phandle_cache(void)
{
unsigned long flags;
+ struct device_node **shadow;
raw_spin_lock_irqsave(&devtree_lock, flags);
- __of_free_phandle_cache();
+ shadow = __of_free_phandle_cache();
raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
+ kfree(shadow);
return 0;
}
#if !defined(CONFIG_MODULES)
@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void)
u32 cache_entries;
struct device_node *np;
u32 phandles = 0;
+ struct device_node **shadow;
raw_spin_lock_irqsave(&devtree_lock, flags);
- __of_free_phandle_cache();
+ shadow = __of_free_phandle_cache();
for_each_of_allnodes(np)
if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void)
if (!phandles)
goto out;
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
cache_entries = roundup_pow_of_two(phandles);
phandle_cache_mask = cache_entries - 1;
phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
GFP_ATOMIC);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
if (!phandle_cache)
goto out;
@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void)
out:
raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ kfree(shadow);
}
void __init of_core_init(void)
--
2.25.1
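The underlying rule: on RT, kfree()/kcalloc() must not run under a raw_spinlock_t, so detach the object under the lock and free it outside. A generic sketch with invented names (demo_lock, demo_cache, demo_free_cache()):

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);
    static int *demo_cache;             /* illustrative protected pointer */

    static void demo_free_cache(void)
    {
            unsigned long flags;
            int *shadow;

            raw_spin_lock_irqsave(&demo_lock, flags);
            shadow = demo_cache;        /* detach under the lock */
            demo_cache = NULL;
            raw_spin_unlock_irqrestore(&demo_lock, flags);

            kfree(shadow);              /* free only after dropping the lock */
    }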

View File

@@ -1,97 +0,0 @@
From 7841950d4460ea93ee4ddd6a400ad67cfacee592 Mon Sep 17 00:00:00 2001
From: Clark Williams <williams@redhat.com>
Date: Tue, 18 Sep 2018 10:29:31 -0500
Subject: [PATCH 022/328] mm/kasan: make quarantine_lock a raw_spinlock_t
The static lock quarantine_lock is used in quarantine.c to protect the
quarantine queue data structures. It is taken inside quarantine queue
manipulation routines (quarantine_put(), quarantine_reduce() and
quarantine_remove_cache()), with IRQs disabled.
This is not a problem on a stock kernel but is problematic on an RT
kernel, where spinlocks are sleeping locks that cannot be acquired
with interrupts disabled.
Convert the quarantine_lock to a raw_spinlock_t. The usage of
quarantine_lock is confined to quarantine.c and the work performed while
the lock is held is limited.
Signed-off-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/kasan/quarantine.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 3a8ddf8baf7d..b209dbaefde8 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -103,7 +103,7 @@ static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
-static DEFINE_SPINLOCK(quarantine_lock);
+static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);
/* Maximum size of the global queue. */
@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp);
- spin_lock(&quarantine_lock);
+ raw_spin_lock(&quarantine_lock);
WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
if (global_quarantine[quarantine_tail].bytes >=
@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (new_tail != quarantine_head)
quarantine_tail = new_tail;
}
- spin_unlock(&quarantine_lock);
+ raw_spin_unlock(&quarantine_lock);
}
local_irq_restore(flags);
@@ -230,7 +230,7 @@ void quarantine_reduce(void)
* expected case).
*/
srcu_idx = srcu_read_lock(&remove_cache_srcu);
- spin_lock_irqsave(&quarantine_lock, flags);
+ raw_spin_lock_irqsave(&quarantine_lock, flags);
/*
* Update quarantine size in case of hotplug. Allocate a fraction of
@@ -254,7 +254,7 @@ void quarantine_reduce(void)
quarantine_head = 0;
}
- spin_unlock_irqrestore(&quarantine_lock, flags);
+ raw_spin_unlock_irqrestore(&quarantine_lock, flags);
qlist_free_all(&to_free, NULL);
srcu_read_unlock(&remove_cache_srcu, srcu_idx);
@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache)
*/
on_each_cpu(per_cpu_remove_cache, cache, 1);
- spin_lock_irqsave(&quarantine_lock, flags);
+ raw_spin_lock_irqsave(&quarantine_lock, flags);
for (i = 0; i < QUARANTINE_BATCHES; i++) {
if (qlist_empty(&global_quarantine[i]))
continue;
qlist_move_cache(&global_quarantine[i], &to_free, cache);
/* Scanning whole quarantine can take a while. */
- spin_unlock_irqrestore(&quarantine_lock, flags);
+ raw_spin_unlock_irqrestore(&quarantine_lock, flags);
cond_resched();
- spin_lock_irqsave(&quarantine_lock, flags);
+ raw_spin_lock_irqsave(&quarantine_lock, flags);
}
- spin_unlock_irqrestore(&quarantine_lock, flags);
+ raw_spin_unlock_irqrestore(&quarantine_lock, flags);
qlist_free_all(&to_free, cache);
--
2.25.1
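The batching in quarantine_remove_cache() follows a standard shape for keeping raw-lock hold times bounded: drop the lock between batches and offer to reschedule. A sketch with invented names (demo_lock, DEMO_BATCHES, demo_scan_all()):

    #include <linux/sched.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);
    #define DEMO_BATCHES 1024           /* illustrative batch count */

    static void demo_scan_all(void)
    {
            unsigned long flags;
            int i;

            raw_spin_lock_irqsave(&demo_lock, flags);
            for (i = 0; i < DEMO_BATCHES; i++) {
                    /* ... move one batch onto a private list ... */

                    /* Bound the hold time: breathe between batches. */
                    raw_spin_unlock_irqrestore(&demo_lock, flags);
                    cond_resched();
                    raw_spin_lock_irqsave(&demo_lock, flags);
            }
            raw_spin_unlock_irqrestore(&demo_lock, flags);
    }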

View File

@@ -1,50 +0,0 @@
From c7753a6fd996fcaa0285c1c8285fde721d519a0a Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney" <paulmck@linux.ibm.com>
Date: Mon, 29 Oct 2018 11:53:01 +0100
Subject: [PATCH 023/328] EXP rcu: Revert expedited GP parallelization
cleverness
(Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu)
This commit reverts a series of commits starting with fcc635436501 ("rcu:
Make expedited GPs handle CPU 0 being offline") and its successors, thus
queueing each rcu_node structure's expedited grace-period initialization
work on the first CPU of that rcu_node structure.
Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/rcu/tree_exp.h | 9 +--------
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 0b2c2ad69629..a0486414edb4 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
smp_call_func_t func)
{
- int cpu;
struct rcu_node *rnp;
trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
continue;
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
- preempt_disable();
- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
- /* If all offline, queue the work on an unbound CPU. */
- if (unlikely(cpu > rnp->grphi))
- cpu = WORK_CPU_UNBOUND;
- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
- preempt_enable();
+ queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
rnp->exp_need_flush = true;
}
--
2.25.1
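After the revert, the work placement reduces to a single queue_work_on() call. A sketch of that primitive with invented names (demo_wq, demo_work, demo_queue_on_first_cpu()); queue_work_on() pins the work item to the given CPU's worker pool:

    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;    /* assumed created elsewhere */
    static struct work_struct demo_work;        /* assumed initialised elsewhere */

    static void demo_queue_on_first_cpu(int first_cpu)
    {
            /*
             * Queue on a fixed, known CPU instead of computing a target
             * with preemption disabled, mirroring the revert above.
             */
            queue_work_on(first_cpu, demo_wq, &demo_work);
    }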

View File

@@ -1,168 +0,0 @@
From 4b0c7eda4403c5a7146714857bd1abffd2b080f8 Mon Sep 17 00:00:00 2001
From: He Zhe <zhe.he@windriver.com>
Date: Wed, 19 Dec 2018 16:30:57 +0100
Subject: [PATCH 024/328] kmemleak: Turn kmemleak_lock to raw spinlock on RT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
kmemleak_lock, as an rwlock on RT, can possibly be held in atomic context and
causes the following BUG.
BUG: scheduling while atomic: migration/15/132/0x00000002
Preemption disabled at:
[<ffffffff8c927c11>] cpu_stopper_thread+0x71/0x100
CPU: 15 PID: 132 Comm: migration/15 Not tainted 4.19.0-rt1-preempt-rt #1
Call Trace:
schedule+0x3d/0xe0
__rt_spin_lock+0x26/0x30
__write_rt_lock+0x23/0x1a0
rt_write_lock+0x2a/0x30
find_and_remove_object+0x1e/0x80
delete_object_full+0x10/0x20
kmemleak_free+0x32/0x50
kfree+0x104/0x1f0
intel_pmu_cpu_dying+0x67/0x70
x86_pmu_dying_cpu+0x1a/0x30
cpuhp_invoke_callback+0x92/0x700
take_cpu_down+0x70/0xa0
multi_cpu_stop+0x62/0xc0
cpu_stopper_thread+0x79/0x100
smpboot_thread_fn+0x20f/0x2d0
kthread+0x121/0x140
And on v4.18 stable tree the following call trace, caused by grabbing
kmemleak_lock again, is also observed.
kernel BUG at kernel/locking/rtmutex.c:1048!
CPU: 5 PID: 689 Comm: mkfs.ext4 Not tainted 4.18.16-rt9-preempt-rt #1
Call Trace:
rt_write_lock+0x2a/0x30
create_object+0x17d/0x2b0
kmemleak_alloc+0x34/0x50
kmem_cache_alloc+0x146/0x220
mempool_alloc_slab+0x15/0x20
mempool_alloc+0x65/0x170
sg_pool_alloc+0x21/0x60
sg_alloc_table_chained+0x8b/0xb0
blk_flush_plug_list+0x204/0x230
schedule+0x87/0xe0
rt_write_lock+0x2a/0x30
create_object+0x17d/0x2b0
kmemleak_alloc+0x34/0x50
__kmalloc_node+0x1cd/0x340
alloc_request_size+0x30/0x70
mempool_alloc+0x65/0x170
get_request+0x4e3/0x8d0
blk_queue_bio+0x153/0x470
generic_make_request+0x1dc/0x3f0
submit_bio+0x49/0x140
kmemleak is an error-detecting feature, so we would not expect performance as
good as without it. As there are no helpers for defining a raw rwlock, we turn
kmemleak_lock into a raw spinlock.
Signed-off-by: He Zhe <zhe.he@windriver.com>
Cc: catalin.marinas@arm.com
Cc: bigeasy@linutronix.de
Cc: tglx@linutronix.de
Cc: rostedt@goodmis.org
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lkml.kernel.org/r/1542877459-144382-1-git-send-email-zhe.he@windriver.com
Link: https://lkml.kernel.org/r/20181218150744.GB20197@arrakis.emea.arm.com
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/kmemleak.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5eeabece0c17..92ce99b15f2b 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -26,7 +26,7 @@
*
* The following locks and mutexes are used by kmemleak:
*
- * - kmemleak_lock (rwlock): protects the object_list modifications and
+ * - kmemleak_lock (raw spinlock): protects the object_list modifications and
* accesses to the object_tree_root. The object_list is the main list
* holding the metadata (struct kmemleak_object) for the allocated memory
* blocks. The object_tree_root is a red black tree used to look-up
@@ -197,7 +197,7 @@ static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
-static DEFINE_RWLOCK(kmemleak_lock);
+static DEFINE_RAW_SPINLOCK(kmemleak_lock);
/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
@@ -491,9 +491,9 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
struct kmemleak_object *object;
rcu_read_lock();
- read_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, alias);
- read_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
/* check whether the object is still available */
if (object && !get_object(object))
@@ -513,13 +513,13 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
unsigned long flags;
struct kmemleak_object *object;
- write_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, alias);
if (object) {
rb_erase(&object->rb_node, &object_tree_root);
list_del_rcu(&object->object_list);
}
- write_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
return object;
}
@@ -593,7 +593,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
/* kernel backtrace */
object->trace_len = __save_stack_trace(object->trace);
- write_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
min_addr = min(min_addr, ptr);
max_addr = max(max_addr, ptr + size);
@@ -624,7 +624,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
list_add_tail_rcu(&object->object_list, &object_list);
out:
- write_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
return object;
}
@@ -1310,7 +1310,7 @@ static void scan_block(void *_start, void *_end,
unsigned long *end = _end - (BYTES_PER_POINTER - 1);
unsigned long flags;
- read_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
for (ptr = start; ptr < end; ptr++) {
struct kmemleak_object *object;
unsigned long pointer;
@@ -1367,7 +1367,7 @@ static void scan_block(void *_start, void *_end,
spin_unlock(&object->lock);
}
}
- read_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}
/*
--
2.25.1
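The pattern behind the conversion, as a hedged stand-alone sketch with invented names: on PREEMPT_RT ordinary spinlocks and rwlocks become sleeping locks, so a lock that may be taken from truly atomic context has to be a raw_spinlock_t, which keeps spinning on RT. The trade-off, accepted above for a debugging feature, is that readers now serialize instead of running concurrently.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);	/* stays a spinning lock even on RT */
static int demo_counter;

static void demo_update(void)
{
	unsigned long flags;

	/* Safe from contexts that must not sleep on RT. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}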

View File

@ -1,135 +0,0 @@
From 7cb617c6dac1356dfe57b1c4a976ec78ead046a0 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 28 Oct 2016 23:05:11 +0200
Subject: [PATCH 025/328] NFSv4: replace seqcount_t with a seqlock_t
The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me
because it maps to preempt_disable() in -RT which I can't have at this
point. So I took a look at the code.
The lockdep part was removed in commit abbec2da13f0 ("NFS: Use
raw_write_seqcount_begin/end int nfs4_reclaim_open_state") because
lockdep complained. The whole seqcount thing was introduced in commit
c137afabe330 ("NFSv4: Allow the state manager to mark an open_owner as
being recovered").
The recovery thread runs only once.
A plain write_seqlock() does not work on !RT because it disables preemption,
and the writer side is preemptible (it has to remain so despite the fact
that it will block readers).
Reported-by: kernel test robot <xiaolong.ye@intel.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
fs/nfs/delegation.c | 4 ++--
fs/nfs/nfs4_fs.h | 2 +-
fs/nfs/nfs4proc.c | 4 ++--
fs/nfs/nfs4state.c | 22 ++++++++++++++++------
4 files changed, 21 insertions(+), 11 deletions(-)
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index b0c0c2fc2fba..26565ba05dc1 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -162,11 +162,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
sp = state->owner;
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+ seq = read_seqbegin(&sp->so_reclaim_seqlock);
err = nfs4_open_delegation_recall(ctx, state, stateid);
if (!err)
err = nfs_delegation_claim_locks(state, stateid);
- if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+ if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq))
err = -EAGAIN;
mutex_unlock(&sp->so_delegreturn_mutex);
put_nfs_open_context(ctx);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 5b61520dce88..2771aafaca19 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -114,7 +114,7 @@ struct nfs4_state_owner {
unsigned long so_flags;
struct list_head so_states;
struct nfs_seqid_counter so_seqid;
- seqcount_t so_reclaim_seqcount;
+ seqlock_t so_reclaim_seqlock;
struct mutex so_delegreturn_mutex;
};
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 668b648064b7..187d411668ed 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2870,7 +2870,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
unsigned int seq;
int ret;
- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+ seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
@@ -2911,7 +2911,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+ if (read_seqretry(&sp->so_reclaim_seqlock, seq))
nfs4_schedule_stateid_recovery(server, state);
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b3086e99420c..c9bf1eb7e1b2 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -515,7 +515,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
nfs4_init_seqid_counter(&sp->so_seqid);
atomic_set(&sp->so_count, 1);
INIT_LIST_HEAD(&sp->so_lru);
- seqcount_init(&sp->so_reclaim_seqcount);
+ seqlock_init(&sp->so_reclaim_seqlock);
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
@@ -1583,8 +1583,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+ write_seqlock(&sp->so_reclaim_seqlock);
+#else
+ write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
+#endif
spin_lock(&sp->so_lock);
- raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
@@ -1671,14 +1675,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
spin_lock(&sp->so_lock);
goto restart;
}
- raw_write_seqcount_end(&sp->so_reclaim_seqcount);
spin_unlock(&sp->so_lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ write_sequnlock(&sp->so_reclaim_seqlock);
+#else
+ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
+#endif
return 0;
out_err:
nfs4_put_open_state(state);
- spin_lock(&sp->so_lock);
- raw_write_seqcount_end(&sp->so_reclaim_seqcount);
- spin_unlock(&sp->so_lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ write_sequnlock(&sp->so_reclaim_seqlock);
+#else
+ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
+#endif
return status;
}
--
2.25.1
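For reference, the canonical seqlock_t usage the patch converts to, as an illustrative sketch (not the NFS code itself): the writer serializes and bumps the sequence, readers loop until they observe a stable value.

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_seqlock);
static u64 demo_value;

static void demo_write(u64 v)
{
	write_seqlock(&demo_seqlock);	/* takes the spinlock, bumps the count */
	demo_value = v;
	write_sequnlock(&demo_seqlock);
}

static u64 demo_read(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqbegin(&demo_seqlock);
		v = demo_value;
	} while (read_seqretry(&demo_seqlock, seq));
	return v;
}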

View File

@ -1,784 +0,0 @@
From 4906d6c574d916416e92a9de0b959c4d0ed0bc17 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 4 Apr 2017 12:50:16 +0200
Subject: [PATCH 026/328] kernel: sched: Provide a pointer to the valid CPU
mask
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed()
wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not
much difference in !RT but in RT we used this to implement
migrate_disable(). Within a migrate_disable() section the CPU mask is
restricted to single CPU while the "normal" CPU mask remains untouched.
As an alternative implementation Ingo suggested to use
struct task_struct {
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
};
with
t->cpus_allowed_ptr = &t->cpus_allowed;
In -RT we then can switch the cpus_ptr to
t->cpus_allowed_ptr = &cpumask_of(task_cpu(p));
in a migration disabled region. The rules are simple:
- Code that 'uses' ->cpus_allowed would use the pointer.
- Code that 'modifies' ->cpus_allowed would use the direct mask.
While converting the existing users I tried to stick with the rules
above; however… well, mostly CPUFREQ tries to temporarily switch the CPU
mask to do something on a certain CPU and then switches the mask back to
its original value. So in theory `cpus_ptr' could or should be used.
However if this is invoked in a migration disabled region (which is not
the case because it would require something like preempt_disable() and
set_cpus_allowed_ptr() might sleep so it can't be) then the "restore"
part would restore the wrong mask. So it only looks strange and I go for
the pointer…
Some drivers copy the cpumask without cpumask_copy() and others use
cpumask_copy() but without alloc_cpumask_var(). I did not fix those as
part of this; that could be done as a follow-up…
So is this the way we want it?
Is the usage of `cpus_ptr' vs `cpus_mask' for the set + restore part
(see the cpufreq users) what we want? At some point it looks like they
should use a different interface for their doing. I am not sure why
switching to a certain CPU is important, but maybe it could be done via a
workqueue from the CPUFREQ core (so we have a comment describing why we
are doing this and a get_online_cpus() to ensure that the CPU does not go
offline too early).
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/ia64/kernel/mca.c | 2 +-
arch/mips/include/asm/switch_to.h | 4 +--
arch/mips/kernel/mips-mt-fpaff.c | 2 +-
arch/mips/kernel/traps.c | 6 ++--
arch/powerpc/platforms/cell/spufs/sched.c | 2 +-
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 2 +-
drivers/infiniband/hw/hfi1/affinity.c | 6 ++--
drivers/infiniband/hw/hfi1/sdma.c | 3 +-
drivers/infiniband/hw/qib/qib_file_ops.c | 7 ++--
fs/proc/array.c | 4 +--
include/linux/sched.h | 5 +--
init/init_task.c | 3 +-
kernel/cgroup/cpuset.c | 2 +-
kernel/fork.c | 2 ++
kernel/sched/core.c | 40 ++++++++++-----------
kernel/sched/cpudeadline.c | 4 +--
kernel/sched/cpupri.c | 4 +--
kernel/sched/deadline.c | 6 ++--
kernel/sched/fair.c | 32 ++++++++---------
kernel/sched/rt.c | 4 +--
kernel/trace/trace_hwlat.c | 2 +-
lib/smp_processor_id.c | 2 +-
samples/trace_events/trace-events-sample.c | 2 +-
23 files changed, 74 insertions(+), 72 deletions(-)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6115464d5f03..f09e34c8409c 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
ti->cpu = cpu;
p->stack = ti;
p->state = TASK_UNINTERRUPTIBLE;
- cpumask_set_cpu(cpu, &p->cpus_allowed);
+ cpumask_set_cpu(cpu, &p->cpus_mask);
INIT_LIST_HEAD(&p->tasks);
p->parent = p->real_parent = p->group_leader = p;
INIT_LIST_HEAD(&p->children);
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index e610473d61b8..1428b4febbc9 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -42,7 +42,7 @@ extern struct task_struct *ll_task;
* inline to try to keep the overhead down. If we have been forced to run on
* a "CPU" with an FPU because of a previous high level of FP computation,
* but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
+ * isn't set), we undo the restriction on cpus_mask.
*
* We're not calling set_cpus_allowed() here, because we have no need to
* force prompt migration - we're already switching the current CPU to a
@@ -57,7 +57,7 @@ do { \
test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
(!(KSTK_STATUS(prev) & ST0_CU1))) { \
clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
- prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+ prev->cpus_mask = prev->thread.user_cpus_allowed; \
} \
next->thread.emulated_fp = 0; \
} while(0)
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index a7c0f97e4b0d..1a08428eedcf 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
- cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
cpumask_and(&mask, &allowed, cpu_active_mask);
out_unlock:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9dab0ed1b227..3623cf32f5f4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1174,12 +1174,12 @@ static void mt_ase_fp_affinity(void)
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
- if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
+ if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
cpumask_t tmask;
current->thread.user_cpus_allowed
- = current->cpus_allowed;
- cpumask_and(&tmask, &current->cpus_allowed,
+ = current->cpus_mask;
+ cpumask_and(&tmask, &current->cpus_mask,
&mt_fpu_cpumask);
set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index c9ef3c532169..cb10249b1125 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
* runqueue. The context will be rescheduled on the proper node
* if it is timesliced or preempted.
*/
- cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
+ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
/* Save the current cpu id for spu interrupt routing. */
ctx->last_ran = raw_smp_processor_id();
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index a999a58ca331..d6410d0740ea 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -1445,7 +1445,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
* may be scheduled elsewhere and invalidate entries in the
* pseudo-locked region.
*/
- if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) {
+ if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
mutex_unlock(&rdtgroup_mutex);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 01ed0a667928..2c62de6b5bf1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1039,7 +1039,7 @@ int hfi1_get_proc_affinity(int node)
struct hfi1_affinity_node *entry;
cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
- *proc_mask = &current->cpus_allowed;
+ *proc_mask = current->cpus_ptr;
struct hfi1_affinity_node_list *affinity = &node_affinity;
struct cpu_mask_set *set = &affinity->proc;
@@ -1047,7 +1047,7 @@ int hfi1_get_proc_affinity(int node)
* check whether process/context affinity has already
* been set
*/
- if (cpumask_weight(proc_mask) == 1) {
+ if (current->nr_cpus_allowed == 1) {
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
@@ -1058,7 +1058,7 @@ int hfi1_get_proc_affinity(int node)
cpu = cpumask_first(proc_mask);
cpumask_set_cpu(cpu, &set->used);
goto done;
- } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+ } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 291c12f588b5..05e7b28a03c1 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -853,14 +853,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
{
struct sdma_rht_node *rht_node;
struct sdma_engine *sde = NULL;
- const struct cpumask *current_mask = &current->cpus_allowed;
unsigned long cpu_id;
/*
* To ensure that always the same sdma engine(s) will be
* selected make sure the process is pinned to this CPU only.
*/
- if (cpumask_weight(current_mask) != 1)
+ if (current->nr_cpus_allowed != 1)
goto out;
cpu_id = smp_processor_id();
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 98e1ce14fa2a..5d3828625017 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1142,7 +1142,7 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{
struct qib_filedata *fd = fp->private_data;
- const unsigned int weight = cpumask_weight(&current->cpus_allowed);
+ const unsigned int weight = current->nr_cpus_allowed;
const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
int local_cpu;
@@ -1623,9 +1623,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else {
int unit;
- const unsigned int cpu = cpumask_first(&current->cpus_allowed);
- const unsigned int weight =
- cpumask_weight(&current->cpus_allowed);
+ const unsigned int cpu = cpumask_first(current->cpus_ptr);
+ const unsigned int weight = current->nr_cpus_allowed;
if (weight == 1 && !test_bit(cpu, qib_cpulist))
if (!find_hca(cpu, &unit) && unit >= 0)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9eb99a43f849..e4d0cfebaac5 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -381,9 +381,9 @@ static inline void task_context_switch_counts(struct seq_file *m,
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Cpus_allowed:\t%*pb\n",
- cpumask_pr_args(&task->cpus_allowed));
+ cpumask_pr_args(task->cpus_ptr));
seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
- cpumask_pr_args(&task->cpus_allowed));
+ cpumask_pr_args(task->cpus_ptr));
}
static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0530de9a4efc..4298a87b9de6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -660,7 +660,8 @@ struct task_struct {
unsigned int policy;
int nr_cpus_allowed;
- cpumask_t cpus_allowed;
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -1398,7 +1399,7 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
+#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
diff --git a/init/init_task.c b/init/init_task.c
index 5aebe3be4d7c..0b49b9cf5571 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -71,7 +71,8 @@ struct task_struct init_task
.static_prio = MAX_PRIO - 20,
.normal_prio = MAX_PRIO - 20,
.policy = SCHED_NORMAL,
- .cpus_allowed = CPU_MASK_ALL,
+ .cpus_ptr = &init_task.cpus_mask,
+ .cpus_mask = CPU_MASK_ALL,
.nr_cpus_allowed= NR_CPUS,
.mm = NULL,
.active_mm = &init_mm,
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index ff956ccbb6df..7bb129c5b412 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2090,7 +2090,7 @@ static void cpuset_fork(struct task_struct *task)
if (task_css_is_root(task, cpuset_cgrp_id))
return;
- set_cpus_allowed_ptr(task, &current->cpus_allowed);
+ set_cpus_allowed_ptr(task, current->cpus_ptr);
task->mems_allowed = current->mems_allowed;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 1a2d18e98bf9..bc182d6fa2a9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -850,6 +850,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#ifdef CONFIG_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
#endif
+ if (orig->cpus_ptr == &orig->cpus_mask)
+ tsk->cpus_ptr = &tsk->cpus_mask;
/*
* One for us, one for whoever does the "release_task()" (usually
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2befd2c4ce9e..07dc66137a26 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -878,7 +878,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
- if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
if (is_per_cpu_kthread(p))
@@ -973,7 +973,7 @@ static int migration_cpu_stop(void *data)
local_irq_disable();
/*
* We need to explicitly wake pending tasks before running
- * __migrate_task() such that we will not miss enforcing cpus_allowed
+ * __migrate_task() such that we will not miss enforcing cpus_ptr
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
sched_ttwu_pending();
@@ -1004,7 +1004,7 @@ static int migration_cpu_stop(void *data)
*/
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
- cpumask_copy(&p->cpus_allowed, new_mask);
+ cpumask_copy(&p->cpus_mask, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -1074,7 +1074,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
goto out;
}
- if (cpumask_equal(&p->cpus_allowed, new_mask))
+ if (cpumask_equal(p->cpus_ptr, new_mask))
goto out;
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
@@ -1237,10 +1237,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
- if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
+ if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
goto unlock;
- if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
+ if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
goto unlock;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
@@ -1282,10 +1282,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
- if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
+ if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
goto out;
- if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
+ if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
@@ -1430,7 +1430,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
/*
- * ->cpus_allowed is protected by both rq->lock and p->pi_lock
+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
*
* A few notes on cpu_active vs cpu_online:
*
@@ -1470,14 +1470,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
return dest_cpu;
}
}
for (;;) {
/* Any allowed, online CPU? */
- for_each_cpu(dest_cpu, &p->cpus_allowed) {
+ for_each_cpu(dest_cpu, p->cpus_ptr) {
if (!is_cpu_allowed(p, dest_cpu))
continue;
@@ -1521,7 +1521,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
}
/*
- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
+ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
@@ -1531,11 +1531,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
- cpu = cpumask_any(&p->cpus_allowed);
+ cpu = cpumask_any(p->cpus_ptr);
/*
* In order not to call set_task_cpu() on a blocking task we need
- * to rely on ttwu() to place the task on a valid ->cpus_allowed
+ * to rely on ttwu() to place the task on a valid ->cpus_ptr
* CPU.
*
* Since this is common to all placement strategies, this lives here.
@@ -2402,7 +2402,7 @@ void wake_up_new_task(struct task_struct *p)
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
- * - cpus_allowed can change in the fork path
+ * - cpus_ptr can change in the fork path
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
@@ -4316,7 +4316,7 @@ static int __sched_setscheduler(struct task_struct *p,
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
- if (!cpumask_subset(span, &p->cpus_allowed) ||
+ if (!cpumask_subset(span, p->cpus_ptr) ||
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
@@ -4915,7 +4915,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
- cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+ cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
@@ -5496,7 +5496,7 @@ int task_can_attach(struct task_struct *p,
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
- * before cpus_allowed may be changed.
+ * before cpus_mask may be changed.
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
@@ -5523,7 +5523,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
if (curr_cpu == target_cpu)
return 0;
- if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
+ if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
return -EINVAL;
/* TODO: This is not properly updating schedstats */
@@ -5661,7 +5661,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
put_prev_task(rq, next);
/*
- * Rules for changing task_struct::cpus_allowed are holding
+ * Rules for changing task_struct::cpus_mask are holding
* both pi_lock and rq->lock, such that holding either
* stabilizes the mask.
*
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 50316455ea66..d57fb2f8ae67 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -124,14 +124,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
const struct sched_dl_entity *dl_se = &p->dl;
if (later_mask &&
- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+ cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
return 1;
} else {
int best_cpu = cpudl_maximum(cp);
WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
- if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
+ if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
if (later_mask)
cpumask_set_cpu(best_cpu, later_mask);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939ccb..f7d2c10b4c92 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -98,11 +98,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
if (skip)
continue;
- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+ if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
continue;
if (lowest_mask) {
- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+ cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
/*
* We have to ensure that we have at least one bit
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ebec37cb3be9..4b13df38c069 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
* If we cannot preempt any rq, fall back to pick any
* online CPU:
*/
- cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+ cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
if (cpu >= nr_cpu_ids) {
/*
* Failed to find any suitable CPU.
@@ -1856,7 +1856,7 @@ static void set_curr_task_dl(struct rq *rq)
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
- cpumask_test_cpu(cpu, &p->cpus_allowed))
+ cpumask_test_cpu(cpu, p->cpus_ptr))
return 1;
return 0;
}
@@ -2006,7 +2006,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
+ !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
task_running(rq, task) ||
!dl_task(task) ||
!task_on_rq_queued(task))) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0f1ba3d72336..27f9f9a785c1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1678,7 +1678,7 @@ static void task_numa_compare(struct task_numa_env *env,
* be incurred if the tasks were swapped.
*/
/* Skip this swap candidate if cannot move to the source cpu */
- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
goto unlock;
/*
@@ -1776,7 +1776,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
- if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+ if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
continue;
env->dst_cpu = cpu;
@@ -5782,7 +5782,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
- &p->cpus_allowed))
+ p->cpus_ptr))
continue;
local_group = cpumask_test_cpu(this_cpu,
@@ -5914,7 +5914,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
- for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
+ for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
if (available_idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
@@ -5954,7 +5954,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
{
int new_cpu = cpu;
- if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+ if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
return prev_cpu;
/*
@@ -6071,7 +6071,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
if (!test_idle_cores(target, false))
return -1;
- cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
@@ -6105,7 +6105,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
- if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
continue;
if (available_idle_cpu(cpu))
return cpu;
@@ -6168,7 +6168,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
- if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
continue;
if (available_idle_cpu(cpu))
break;
@@ -6205,7 +6205,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
- cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
+ cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
@@ -6423,7 +6423,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
- && cpumask_test_cpu(cpu, &p->cpus_allowed);
+ && cpumask_test_cpu(cpu, p->cpus_ptr);
}
rcu_read_lock();
@@ -7162,14 +7162,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
- * 2) cannot be migrated to this CPU due to cpus_allowed, or
+ * 2) cannot be migrated to this CPU due to cpus_ptr, or
* 3) running (obviously), or
* 4) are cache-hot on their current CPU.
*/
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
return 0;
- if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
+ if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -7189,7 +7189,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/* Prevent to re-select dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
- if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
+ if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
@@ -7786,7 +7786,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to ->cpus_allowed constraints.
+ * groups is inadequate due to ->cpus_ptr constraints.
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
@@ -8401,7 +8401,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
- * isn't true due to cpus_allowed constraints and the like.
+ * isn't true due to cpus_ptr constraints and the like.
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
@@ -8797,7 +8797,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
- if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+ if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
env.flags |= LBF_ALL_PINNED;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b980cc96604f..b6ca4a630050 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1611,7 +1611,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
- cpumask_test_cpu(cpu, &p->cpus_allowed))
+ cpumask_test_cpu(cpu, p->cpus_ptr))
return 1;
return 0;
@@ -1748,7 +1748,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
* Also make sure that it wasn't scheduled on its rq.
*/
if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
+ !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
task_running(rq, task) ||
!rt_task(task) ||
!task_on_rq_queued(task))) {
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 8030e24dbf14..862f4b0139fc 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -279,7 +279,7 @@ static void move_to_next_cpu(void)
* of this thread, than stop migrating for the duration
* of the current test.
*/
- if (!cpumask_equal(current_mask, &current->cpus_allowed))
+ if (!cpumask_equal(current_mask, current->cpus_ptr))
goto disable;
get_online_cpus();
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 85925aaa4fff..fb35c45b9421 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+ if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
goto out;
/*
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 5522692100ba..8b4be8e1802a 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)
/* Silly tracepoints */
trace_foo_bar("hello", cnt, array, random_strings[len],
- &current->cpus_allowed);
+ current->cpus_ptr);
trace_foo_with_template_simple("HELLO", cnt);
--
2.25.1
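The core idea above, read the affinity through a pointer but write the real mask, can be shown with a toy user-space C model (purely illustrative; a one-word mask stands in for cpumask_t and all names are invented):

#include <stdio.h>

typedef unsigned long cpumask_t;	/* toy: one bit per CPU */

struct task {
	const cpumask_t *cpus_ptr;	/* code that 'uses' the affinity reads this */
	cpumask_t cpus_mask;		/* code that 'modifies' the affinity writes this */
};

static const cpumask_t only_cpu2 = 1UL << 2;

int main(void)
{
	struct task t = { .cpus_mask = 0xf };	/* CPUs 0-3 allowed */

	t.cpus_ptr = &t.cpus_mask;		/* normal case */
	printf("allowed: %#lx\n", *t.cpus_ptr);	/* 0xf */

	t.cpus_ptr = &only_cpu2;		/* migrate-disabled region on RT */
	printf("allowed: %#lx\n", *t.cpus_ptr);	/* 0x4; cpus_mask untouched */

	t.cpus_ptr = &t.cpus_mask;		/* restored on migrate_enable() */
	return 0;
}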

View File

@ -1,265 +0,0 @@
From 9f9cd889fa22fc1e25802f565f7210b271d136a2 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: [PATCH 027/328] kernel/sched/core: add migrate_disable()
---
include/linux/preempt.h | 23 +++++++
include/linux/sched.h | 7 +++
include/linux/smp.h | 3 +
kernel/sched/core.c | 130 +++++++++++++++++++++++++++++++++++++++-
kernel/sched/debug.c | 4 ++
5 files changed, 165 insertions(+), 2 deletions(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index c01813c3fbe9..3196d0e76719 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -185,6 +185,22 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
+#ifdef CONFIG_SMP
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+int __migrate_disabled(struct task_struct *p);
+
+#else
+#define migrate_disable() barrier()
+#define migrate_enable() barrier()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
@@ -253,6 +269,13 @@ do { \
#define preempt_enable_notrace() barrier()
#define preemptible() 0
+#define migrate_disable() barrier()
+#define migrate_enable() barrier()
+
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
+}
#endif /* CONFIG_PREEMPT_COUNT */
#ifdef MODULE
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4298a87b9de6..0489d3e0e78c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -662,6 +662,13 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+ int migrate_disable;
+ int migrate_disable_update;
+# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable_atomic;
+# endif
+#endif
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..5801e516ba63 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void)
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light() migrate_enable()
+
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 07dc66137a26..d0450f06612c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1008,7 +1008,15 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+int __migrate_disabled(struct task_struct *p)
+{
+ return p->migrate_disable;
+}
+#endif
+
+static void __do_set_cpus_allowed_tail(struct task_struct *p,
+ const struct cpumask *new_mask)
{
struct rq *rq = task_rq(p);
bool queued, running;
@@ -1037,6 +1045,20 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
set_curr_task(rq, p);
}
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+ if (__migrate_disabled(p)) {
+ lockdep_assert_held(&p->pi_lock);
+
+ cpumask_copy(&p->cpus_mask, new_mask);
+ p->migrate_disable_update = 1;
+ return;
+ }
+#endif
+ __do_set_cpus_allowed_tail(p, new_mask);
+}
+
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
@@ -1096,9 +1118,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
}
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+ if (__migrate_disabled(p)) {
+ p->migrate_disable_update = 1;
+ goto out;
+ }
+#endif
+
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
@@ -7105,3 +7134,100 @@ const u32 sched_prio_to_wmult[40] = {
};
#undef CREATE_TRACE_POINTS
+
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+
+void migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+#endif
+ return;
+ }
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+ if (p->migrate_disable) {
+ p->migrate_disable++;
+ return;
+ }
+
+ preempt_disable();
+ p->migrate_disable = 1;
+
+ p->cpus_ptr = cpumask_of(smp_processor_id());
+ p->nr_cpus_allowed = 1;
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+
+ if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+ if (p->migrate_disable > 1) {
+ p->migrate_disable--;
+ return;
+ }
+
+ preempt_disable();
+
+ p->cpus_ptr = &p->cpus_mask;
+ p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
+ p->migrate_disable = 0;
+
+ if (p->migrate_disable_update) {
+ struct rq *rq;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+
+ __do_set_cpus_allowed_tail(p, &p->cpus_mask);
+ task_rq_unlock(rq, p, &rf);
+
+ p->migrate_disable_update = 0;
+
+ WARN_ON(smp_processor_id() != task_cpu(p));
+ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+ const struct cpumask *cpu_valid_mask = cpu_active_mask;
+ struct migration_arg arg;
+ unsigned int dest_cpu;
+
+ if (p->flags & PF_KTHREAD) {
+ /*
+ * Kernel threads are allowed on online && !active CPUs
+ */
+ cpu_valid_mask = cpu_online_mask;
+ }
+ dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
+ arg.task = p;
+ arg.dest_cpu = dest_cpu;
+
+ preempt_enable();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
+ return;
+ }
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+#endif
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 78fadf0438ea..5027158d3908 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -982,6 +982,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(dl.runtime);
P(dl.deadline);
}
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+ P(migrate_disable);
+#endif
+ P(nr_cpus_allowed);
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
--
2.25.1
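Because migrate_disable()/migrate_enable() nest, only the outermost pair does real work. A toy user-space model of the counting logic (illustrative only; the real code above also handles atomic context and deferred affinity updates):

#include <assert.h>
#include <stdio.h>

static int migrate_disable_count;	/* models p->migrate_disable */

static void toy_migrate_disable(void)
{
	if (migrate_disable_count++ == 0)
		printf("pin task to current CPU\n");	/* outermost call pins */
}

static void toy_migrate_enable(void)
{
	assert(migrate_disable_count > 0);
	if (--migrate_disable_count == 0)
		printf("restore cpus_mask, migrate if needed\n");
}

int main(void)
{
	toy_migrate_disable();
	toy_migrate_disable();	/* nested: counter only */
	toy_migrate_enable();	/* still pinned */
	toy_migrate_enable();	/* now unpinned */
	return 0;
}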

View File

@ -1,37 +0,0 @@
From db2220843fd1c19c7b89db5f6e20382b5622fa05 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 9 Oct 2018 17:34:50 +0200
Subject: [PATCH 028/328] sched/migrate_disable: Add export_symbol_gpl for
__migrate_disabled
Jonathan reported that lttng/modules can't use __migrate_disabled().
This function is only used by sched/core itself and the tracing
infrastructure to report the migrate counter (lttng probably does the
same). Since the migrate_disable() rework it moved from sched.h to
preempt.h and became an exported function instead of a "static inline",
due to the header recursion of preempt vs sched.
Since the compiler inlines the function for sched/core usage, add an
EXPORT_SYMBOL_GPL to allow module/LTTNG usage.
Reported-by: Jonathan Rajott <jonathan.rajotte-julien@efficios.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/sched/core.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d0450f06612c..e6022cc2605b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1013,6 +1013,7 @@ int __migrate_disabled(struct task_struct *p)
{
return p->migrate_disable;
}
+EXPORT_SYMBOL_GPL(__migrate_disabled);
#endif
static void __do_set_cpus_allowed_tail(struct task_struct *p,
--
2.25.1
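For readers unfamiliar with the mechanism: a kernel function without an EXPORT_SYMBOL()/EXPORT_SYMBOL_GPL() entry cannot be resolved by loadable modules at all, regardless of its header declaration. A hedged two-sided sketch with invented names:

/* Core kernel side: */
#include <linux/export.h>

int demo_core_helper(void)
{
	return 42;
}
EXPORT_SYMBOL_GPL(demo_core_helper);	/* resolvable by GPL modules only */

/* Module side (e.g. an lttng-style tracer): */
#include <linux/module.h>

MODULE_LICENSE("GPL");			/* required to link against _GPL exports */

extern int demo_core_helper(void);

static int __init demo_init(void)
{
	pr_info("helper: %d\n", demo_core_helper());
	return 0;
}
module_init(demo_init);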

View File

@ -1,97 +0,0 @@
From b978b0a313d26ed5e51a9120c8744385a99e541a Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 9 Mar 2016 10:51:06 +0100
Subject: [PATCH 029/328] arm: at91: do not disable/enable clocks in a row
Currently the driver disables the clock and enables it again one line later
when switching from periodic mode into one-shot.
This bounce can be avoided, and it causes a needless warning on -RT.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/clocksource/tcb_clksrc.c | 33 ++++++++++++++++++++++++++++----
1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 43f4d5c4d6fa..de6baf564dfe 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -126,6 +126,7 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
+ bool clk_enabled;
void __iomem *regs;
};
@@ -143,6 +144,24 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
*/
static u32 timer_clock;
+static void tc_clk_disable(struct clock_event_device *d)
+{
+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+
+ clk_disable(tcd->clk);
+ tcd->clk_enabled = false;
+}
+
+static void tc_clk_enable(struct clock_event_device *d)
+{
+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+
+ if (tcd->clk_enabled)
+ return;
+ clk_enable(tcd->clk);
+ tcd->clk_enabled = true;
+}
+
static int tc_shutdown(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
@@ -150,8 +169,14 @@ static int tc_shutdown(struct clock_event_device *d)
writel(0xff, regs + ATMEL_TC_REG(2, IDR));
writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+ return 0;
+}
+
+static int tc_shutdown_clk_off(struct clock_event_device *d)
+{
+ tc_shutdown(d);
if (!clockevent_state_detached(d))
- clk_disable(tcd->clk);
+ tc_clk_disable(d);
return 0;
}
@@ -164,7 +189,7 @@ static int tc_set_oneshot(struct clock_event_device *d)
if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
tc_shutdown(d);
- clk_enable(tcd->clk);
+ tc_clk_enable(d);
/* slow clock, count up to RC, then irq and stop */
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
@@ -186,7 +211,7 @@ static int tc_set_periodic(struct clock_event_device *d)
/* By not making the gentime core emulate periodic mode on top
* of oneshot, we get lower overhead and improved accuracy.
*/
- clk_enable(tcd->clk);
+ tc_clk_enable(d);
/* slow clock, count up to RC, then irq and restart */
writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
@@ -220,7 +245,7 @@ static struct tc_clkevt_device clkevt = {
/* Should be lower than at91rm9200's system timer */
.rating = 125,
.set_next_event = tc_next_event,
- .set_state_shutdown = tc_shutdown,
+ .set_state_shutdown = tc_shutdown_clk_off,
.set_state_periodic = tc_set_periodic,
.set_state_oneshot = tc_set_oneshot,
},
--
2.25.1
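The fix is an instance of a generic idempotent-enable guard; a brief sketch of the pattern outside the kernel (names invented):

#include <stdbool.h>

struct gated_clk {
	bool enabled;
};

static void clk_on(struct gated_clk *c)
{
	if (c->enabled)
		return;		/* already on: skip the disable/enable bounce */
	/* hardware enable would happen here */
	c->enabled = true;
}

static void clk_off(struct gated_clk *c)
{
	/* hardware disable would happen here */
	c->enabled = false;
}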

View File

@ -1,169 +0,0 @@
From 8549d4577f0573dddbc34e310c4310920a6bb714 Mon Sep 17 00:00:00 2001
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: [PATCH 030/328] clocksource: TCLIB: Allow higher clock rates for
clock events
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
By default the TCLIB uses the 32 KiHz base clock rate for clock events.
Add a compile-time selection to allow a higher clock resolution.
(fixed up by Sami Pietikäinen <Sami.Pietikainen@wapice.com>)
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/clocksource/tcb_clksrc.c | 36 +++++++++++++++++++-------------
drivers/misc/Kconfig | 12 +++++++++--
2 files changed, 31 insertions(+), 17 deletions(-)
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index de6baf564dfe..ba15242a6066 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -25,8 +25,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
- * source, used in either periodic or oneshot mode. This runs
- * at 32 KiHZ, and can handle delays of up to two seconds.
+ * source, used in either periodic or oneshot mode.
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
@@ -127,6 +126,7 @@ struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
bool clk_enabled;
+ u32 freq;
void __iomem *regs;
};
@@ -135,13 +135,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
static u32 timer_clock;
static void tc_clk_disable(struct clock_event_device *d)
@@ -191,7 +184,7 @@ static int tc_set_oneshot(struct clock_event_device *d)
tc_clk_enable(d);
- /* slow clock, count up to RC, then irq and stop */
+ /* count up to RC, then irq and stop */
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -213,10 +206,10 @@ static int tc_set_periodic(struct clock_event_device *d)
*/
tc_clk_enable(d);
- /* slow clock, count up to RC, then irq and restart */
+ /* count up to RC, then irq and restart */
writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
- writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+ writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
/* Enable clock and interrupts on RC compare */
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -243,7 +236,11 @@ static struct tc_clkevt_device clkevt = {
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
.rating = 125,
+#else
+ .rating = 200,
+#endif
.set_next_event = tc_next_event,
.set_state_shutdown = tc_shutdown_clk_off,
.set_state_periodic = tc_set_periodic,
@@ -265,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
return IRQ_NONE;
}
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
+ unsigned divisor = atmel_tc_divisors[divisor_idx];
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
@@ -287,7 +285,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
- timer_clock = clk32k_divisor_idx;
+ timer_clock = divisor_idx;
+ if (!divisor)
+ clkevt.freq = 32768;
+ else
+ clkevt.freq = clk_get_rate(t2_clk) / divisor;
clkevt.clkevt.cpumask = cpumask_of(0);
@@ -298,7 +300,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
return ret;
}
- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
return ret;
}
@@ -435,7 +437,11 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
ret = setup_clkevents(tc, clk32k_divisor_idx);
+#else
+ ret = setup_clkevents(tc, best_divisor_idx);
+#endif
if (ret)
goto err_unregister_clksrc;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3726eacdf65d..0900dec7ec04 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -69,8 +69,7 @@ config ATMEL_TCB_CLKSRC
are combined to make a single 32-bit timer.
When GENERIC_CLOCKEVENTS is defined, the third timer channel
- may be used as a clock event device supporting oneshot mode
- (delays of up to two seconds) based on the 32 KiHz clock.
+ may be used as a clock event device supporting oneshot mode.
config ATMEL_TCB_CLKSRC_BLOCK
int
@@ -83,6 +82,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
TC can be used for other purposes, such as PWM generation and
interval timing.
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ bool "TC Block use 32 KiHz clock"
+ depends on ATMEL_TCB_CLKSRC
+ default y
+ help
+ Select this to use 32 KiHz base clock rate as TC block clock
+ source for clock events.
+
+
config DUMMY_IRQ
tristate "Dummy IRQ handler"
default n
--
2.25.1
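A worked example of the reload-value formula in tc_set_periodic() above, with assumed numbers:

/* writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
 *
 * freq = 32768 Hz (slow clock), HZ = 100:
 *   (32768 + 50) / 100 = 328 counts per tick, ~30.5 us per count.
 * freq = 5 MHz (a divided master clock), HZ = 100:
 *   (5000000 + 50) / 100 = 50000 counts per tick, ~0.2 us per count,
 *   which still fits the 16-bit counter (max 0xffff).
 */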

View File

@ -1,170 +0,0 @@
From 4948d026b217faebc43bfe4a21cb3c5e781508ad Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 14 Feb 2013 22:36:59 +0100
Subject: [PATCH 031/328] timekeeping: Split jiffies seqlock
Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so
it can be taken in atomic context on RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/time/jiffies.c | 7 ++++---
kernel/time/tick-common.c | 10 ++++++----
kernel/time/tick-sched.c | 19 ++++++++++++-------
kernel/time/timekeeping.c | 6 ++++--
kernel/time/timekeeping.h | 3 ++-
5 files changed, 28 insertions(+), 17 deletions(-)
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 497719127bf9..62acb8914c9e 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
.max_cycles = 10,
};
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
+__cacheline_aligned_in_smp seqcount_t jiffies_seq;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
@@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
ret = jiffies_64;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index a02e0f6b287c..32f5101f07ce 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -79,13 +79,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
@@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
next = tick_next_period;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 48403fb653c2..e774a49176cc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -68,7 +68,8 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevaluate with jiffies_lock held */
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
delta = ktime_sub(now, last_jiffies_update);
if (delta >= tick_period) {
@@ -94,10 +95,12 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
} else {
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
return;
}
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
@@ -108,12 +111,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
return period;
}
@@ -656,10 +661,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
basemono = last_jiffies_update;
basejiff = jiffies;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
ts->last_jiffies = basejiff;
ts->timer_expires_base = basemono;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 81ee5b83c920..512db778f442 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2394,8 +2394,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
do_timer(ticks);
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 141ab3ab0354..099737f6f10c 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -25,7 +25,8 @@ static inline void sched_clock_resume(void) { }
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
-extern seqlock_t jiffies_lock;
+extern raw_spinlock_t jiffies_lock;
+extern seqcount_t jiffies_seq;
#define CS_NAME_LEN 32
--
2.25.1
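The core of the change is the seqcount read/retry idiom: readers snapshot the counter, copy the data, and retry if a writer was active or the counter moved. A minimal userspace sketch of that idiom, assuming a single writer (in the kernel the paired raw spinlock provides that serialization) and using sequentially consistent atomics in place of the kernel's finer-grained barriers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;          /* even: stable, odd: writer active */
static unsigned long long data;  /* the value the seqcount protects  */

static void writer_update(unsigned long long v)
{
    atomic_fetch_add(&seq, 1);   /* begin: count becomes odd  */
    data = v;
    atomic_fetch_add(&seq, 1);   /* end: count becomes even   */
}

static unsigned long long reader_get(void)
{
    unsigned int s;
    unsigned long long v;

    do {
        s = atomic_load(&seq);   /* snapshot the sequence count */
        v = data;                /* speculatively copy the data */
    } while ((s & 1u) || atomic_load(&seq) != s);   /* torn? retry */
    return v;
}

int main(void)
{
    writer_update(42);
    printf("read %llu\n", reader_get());
    return 0;
}

Readers never block, which is why get_jiffies_64() keeps the same loop shape; only the writer side needs the raw spinlock, so it stays valid in atomic context on RT.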
View File
@@ -1,37 +0,0 @@
From dad624b7531ae0a0275cab3c82ea0d7c6a29cc7c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 21 Sep 2011 19:57:12 +0200
Subject: [PATCH 032/328] signal: Revert ptrace preempt magic
Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
than a bandaid around the ptrace design trainwreck. It's not a
correctness issue, it's merely a cosmetic bandaid.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/signal.c | 8 --------
1 file changed, 8 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 08911bb6fe9a..5e278f1540ad 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2103,15 +2103,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
- /*
- * Don't want to allow preemption here, because
- * sys_ptrace() needs this task to be inactive.
- *
- * XXX: implement read_unlock_no_resched().
- */
- preempt_disable();
read_unlock(&tasklist_lock);
- preempt_enable_no_resched();
freezable_schedule();
} else {
/*
--
2.25.1
View File
@@ -1,63 +0,0 @@
From 5b974aebb7a0797ecc4c47dda6158e8c6788d50b Mon Sep 17 00:00:00 2001
From: Marc Kleine-Budde <mkl@pengutronix.de>
Date: Wed, 5 Mar 2014 00:49:47 +0100
Subject: [PATCH 033/328] net: sched: Use msleep() instead of yield()
On PREEMPT_RT enabled systems the interrupt handlers run as threads at prio 50
(by default). If a high-priority userspace process tries to shut down a busy
network interface it might spin in a yield loop waiting for the device to
become idle. With the interrupt thread having a lower priority than the
looping process it might never be scheduled, resulting in a deadlock on UP
systems.
With Magic SysRq the following backtrace can be produced:
> test_app R running 0 174 168 0x00000000
> [<c02c7070>] (__schedule+0x220/0x3fc) from [<c02c7870>] (preempt_schedule_irq+0x48/0x80)
> [<c02c7870>] (preempt_schedule_irq+0x48/0x80) from [<c0008fa8>] (svc_preempt+0x8/0x20)
> [<c0008fa8>] (svc_preempt+0x8/0x20) from [<c001a984>] (local_bh_enable+0x18/0x88)
> [<c001a984>] (local_bh_enable+0x18/0x88) from [<c025316c>] (dev_deactivate_many+0x220/0x264)
> [<c025316c>] (dev_deactivate_many+0x220/0x264) from [<c023be04>] (__dev_close_many+0x64/0xd4)
> [<c023be04>] (__dev_close_many+0x64/0xd4) from [<c023be9c>] (__dev_close+0x28/0x3c)
> [<c023be9c>] (__dev_close+0x28/0x3c) from [<c023f7f0>] (__dev_change_flags+0x88/0x130)
> [<c023f7f0>] (__dev_change_flags+0x88/0x130) from [<c023f904>] (dev_change_flags+0x10/0x48)
> [<c023f904>] (dev_change_flags+0x10/0x48) from [<c024c140>] (do_setlink+0x370/0x7ec)
> [<c024c140>] (do_setlink+0x370/0x7ec) from [<c024d2f0>] (rtnl_newlink+0x2b4/0x450)
> [<c024d2f0>] (rtnl_newlink+0x2b4/0x450) from [<c024cfa0>] (rtnetlink_rcv_msg+0x158/0x1f4)
> [<c024cfa0>] (rtnetlink_rcv_msg+0x158/0x1f4) from [<c0256740>] (netlink_rcv_skb+0xac/0xc0)
> [<c0256740>] (netlink_rcv_skb+0xac/0xc0) from [<c024bbd8>] (rtnetlink_rcv+0x18/0x24)
> [<c024bbd8>] (rtnetlink_rcv+0x18/0x24) from [<c02561b8>] (netlink_unicast+0x13c/0x198)
> [<c02561b8>] (netlink_unicast+0x13c/0x198) from [<c025651c>] (netlink_sendmsg+0x264/0x2e0)
> [<c025651c>] (netlink_sendmsg+0x264/0x2e0) from [<c022af98>] (sock_sendmsg+0x78/0x98)
> [<c022af98>] (sock_sendmsg+0x78/0x98) from [<c022bb50>] (___sys_sendmsg.part.25+0x268/0x278)
> [<c022bb50>] (___sys_sendmsg.part.25+0x268/0x278) from [<c022cf08>] (__sys_sendmsg+0x48/0x78)
> [<c022cf08>] (__sys_sendmsg+0x48/0x78) from [<c0009320>] (ret_fast_syscall+0x0/0x2c)
This patch works around the problem by replacing yield() by msleep(1), giving
the interrupt thread time to finish, similar to other changes contained in the
rt patch set. Using wait_for_completion() instead would probably be a better
solution.
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
net/sched/sch_generic.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8a4d01e427a2..4ab20f1138fd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1204,7 +1204,7 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev))
- yield();
+ msleep(1);
/* The new qdisc is assigned at this point so we can safely
* unwind stale skb lists and qdisc statistics
*/
--
2.25.1
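The failure mode is easy to reproduce in miniature: a poller that only yields can starve the thread it is waiting for under strict priority scheduling, while sleeping always cedes the CPU. A userspace analogy with plain threads (hypothetical names; this is not the kernel code itself):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int device_busy = 1;

/* Stands in for the threaded interrupt handler that marks the
 * device idle once outstanding work is done. */
static void *irq_thread(void *arg)
{
    (void)arg;
    atomic_store(&device_busy, 0);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, irq_thread, NULL);

    while (atomic_load(&device_busy)) {
        /* sched_yield() here can spin forever if this thread outranks
         * the irq thread under SCHED_FIFO; a 1 ms sleep (the msleep(1)
         * analogue) lets the lower-priority thread make progress. */
        struct timespec ts = { 0, 1000000 };
        nanosleep(&ts, NULL);
    }

    pthread_join(t, NULL);
    puts("device idle, deactivate can proceed");
    return 0;
}

Compile with -pthread; under default scheduling both variants terminate, but only the sleeping loop is safe under FIFO priorities.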
View File
@@ -1,36 +0,0 @@
From 38dbd44808bcdd34f0b973698b0f9bd65d2f2db5 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 27 Mar 2018 16:24:15 +0200
Subject: [PATCH 034/328] dm rq: remove BUG_ON(!irqs_disabled) check
In commit 052189a2ec95 ("dm: remove superfluous irq disablement in
dm_request_fn") the spin_lock_irq() was replaced with spin_lock() + a
check for disabled interrupts. Later the locking part was removed in
commit 2eb6e1e3aa87 ("dm: submit stacked requests in irq enabled
context") but the BUG_ON() check remained.
Since the original purpose for the "are-irqs-off" check is gone (the
->queue_lock has been removed), remove it.
Cc: Keith Busch <keith.busch@intel.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/md/dm-rq.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4d36373e1c0f..12ed08245130 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -692,7 +692,6 @@ static void dm_old_request_fn(struct request_queue *q)
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work);
- BUG_ON(!irqs_disabled());
}
}
--
2.25.1
View File
@@ -1,45 +0,0 @@
From f31d5f36bfd80c261ba37fe3b8849f2be819c088 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 8 Nov 2013 17:34:54 +0100
Subject: [PATCH 035/328] usb: do not disable interrupts in giveback
Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet
context") the USB code disables interrupts before invoking the complete
callback.
This should not be required: the HCD completes the URBs either in hard-irq
context or in BH context. Lockdep may report false positives if one has two
HCDs (one completes in IRQ and the other in BH context) and is using the same
USB driver (device) with both HCDs. This is safe since the same URBs are never
mixed between those two HCDs.
Long term we should force all HCDs to complete in the same context.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/usb/core/hcd.c | 3 ---
1 file changed, 3 deletions(-)
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index b82a7d787add..2f3015356124 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1738,7 +1738,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
- unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1766,9 +1765,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
*/
- local_irq_save(flags);
urb->complete(urb);
- local_irq_restore(flags);
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
--
2.25.1
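The removed lines wrapped the completion callback in local_irq_save()/local_irq_restore(), masking interrupts for the whole callback even though neither completion context requires it. A userspace analogy using signal masking in place of interrupt disabling (illustrative only; complete_cb stands in for urb->complete()):

#include <signal.h>
#include <stdio.h>

/* Stands in for urb->complete(). */
static void complete_cb(int status)
{
    printf("complete: status=%d\n", status);
}

int main(void)
{
    sigset_t all, old;
    sigfillset(&all);

    /* Old pattern: block all signal delivery around the callback,
     * adding latency to every completion. */
    sigprocmask(SIG_BLOCK, &all, &old);
    complete_cb(0);
    sigprocmask(SIG_SETMASK, &old, NULL);

    /* New pattern: run the callback with delivery enabled; it is
     * already safe in its own execution context. */
    complete_cb(0);
    return 0;
}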
View File
@@ -1,63 +0,0 @@
From f93f63735dec865d4013677969324e66da7f02c4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Jun 2011 12:39:57 +0200
Subject: [PATCH 036/328] rt: Provide PREEMPT_RT_BASE config switch
Introduce PREEMPT_RT_BASE, which enables parts of
PREEMPT_RT_FULL. It forces interrupt threading and enables some of the RT
substitutions for testing.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/Kconfig.preempt | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index cd1655122ec0..027db5976c2f 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,3 +1,10 @@
+config PREEMPT
+ bool
+ select PREEMPT_COUNT
+
+config PREEMPT_RT_BASE
+ bool
+ select PREEMPT
choice
prompt "Preemption Model"
@@ -34,10 +41,10 @@ config PREEMPT_VOLUNTARY
Select this if you are building a kernel for a desktop system.
-config PREEMPT
+config PREEMPT__LL
bool "Preemptible Kernel (Low-Latency Desktop)"
depends on !ARCH_NO_PREEMPT
- select PREEMPT_COUNT
+ select PREEMPT
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
help
This option reduces the latency of the kernel by making
@@ -54,7 +61,15 @@ config PREEMPT
embedded system with latency requirements in the milliseconds
range.
+config PREEMPT_RTB
+ bool "Preemptible Kernel (Basic RT)"
+ select PREEMPT_RT_BASE
+ help
+ This option is basically the same as (Low-Latency Desktop) but
+ enables changes which are preliminary for the full preemptible
+ RT kernel.
+
endchoice
config PREEMPT_COUNT
- bool
\ No newline at end of file
+ bool
--
2.25.1
View File
@@ -1,75 +0,0 @@
From 824fc9b2ae92b317da3e2a42406a49f330e20a6d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Dec 2011 01:03:49 +0100
Subject: [PATCH 037/328] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
There are "valid" GFP_ATOMIC allocations such as
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931
|in_atomic(): 1, irqs_disabled(): 0, pid: 2130, name: tar
|1 lock held by tar/2130:
| #0: (&mm->mmap_sem){++++++}, at: [<ffffffff811d4e89>] SyS_brk+0x39/0x190
|Preemption disabled at:[<ffffffff81063048>] flush_tlb_mm_range+0x28/0x350
|
|CPU: 1 PID: 2130 Comm: tar Tainted: G W 4.8.2-rt2+ #747
|Call Trace:
| [<ffffffff814d52dc>] dump_stack+0x86/0xca
| [<ffffffff810a26fb>] ___might_sleep+0x14b/0x240
| [<ffffffff819bc1d4>] rt_spin_lock+0x24/0x60
| [<ffffffff81194fba>] get_page_from_freelist+0x83a/0x11b0
| [<ffffffff81195e8b>] __alloc_pages_nodemask+0x15b/0x1190
| [<ffffffff811f0b81>] alloc_pages_current+0xa1/0x1f0
| [<ffffffff811f7df5>] new_slab+0x3e5/0x690
| [<ffffffff811fb0d5>] ___slab_alloc+0x495/0x660
| [<ffffffff811fb311>] __slab_alloc.isra.79+0x71/0xc0
| [<ffffffff811fb447>] __kmalloc_node+0xe7/0x240
| [<ffffffff814d4ee0>] alloc_cpumask_var_node+0x20/0x50
| [<ffffffff814d4f3e>] alloc_cpumask_var+0xe/0x10
| [<ffffffff810430c1>] native_send_call_func_ipi+0x21/0x130
| [<ffffffff8111c13f>] smp_call_function_many+0x22f/0x370
| [<ffffffff81062b64>] native_flush_tlb_others+0x1a4/0x3a0
| [<ffffffff8106309b>] flush_tlb_mm_range+0x7b/0x350
| [<ffffffff811c88e2>] tlb_flush_mmu_tlbonly+0x62/0xd0
| [<ffffffff811c9af4>] tlb_finish_mmu+0x14/0x50
| [<ffffffff811d1c84>] unmap_region+0xe4/0x110
| [<ffffffff811d3db3>] do_munmap+0x293/0x470
| [<ffffffff811d4f8c>] SyS_brk+0x13c/0x190
| [<ffffffff810032e2>] do_fast_syscall_32+0xb2/0x2f0
| [<ffffffff819be181>] entry_SYSENTER_compat+0x51/0x60
that is, allocations from atomic contexts which on RT cannot allocate at run-time.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/Kconfig | 2 +-
lib/Kconfig | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index af35f5caadbe..e40ba59efe7f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -934,7 +934,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
- select CPUMASK_OFFSTACK
+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
diff --git a/lib/Kconfig b/lib/Kconfig
index a3928d4438b5..a50b2158f7cd 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -441,6 +441,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+ depends on !PREEMPT_RT_FULL
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
--
2.25.1
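The backtrace above boils down to a storage decision: with CPUMASK_OFFSTACK a cpumask_var_t must be heap-allocated at run time, and on RT the allocator takes sleeping locks, which is fatal in atomic paths like the IPI send. A userspace sketch of the two layouts (hypothetical mask_t, not the kernel's cpumask API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MASK_WORDS (4096 / (8 * sizeof(unsigned long)))

#ifdef MASK_OFFSTACK
typedef unsigned long *mask_t;

/* Off-stack: needs a run-time allocation, which can fail and,
 * on RT, cannot happen from atomic context. */
static int mask_init(mask_t *m)
{
    return (*m = calloc(MASK_WORDS, sizeof(unsigned long))) != NULL;
}
#else
typedef unsigned long mask_t[MASK_WORDS];

/* On-stack: the storage is inline, so "init" is just a memset
 * and never allocates. */
static int mask_init(mask_t *m)
{
    memset(*m, 0, sizeof(*m));
    return 1;
}
#endif

int main(void)
{
    mask_t m;

    if (!mask_init(&m))
        return 1;
    printf("mask ready: %zu words\n", (size_t)MASK_WORDS);
    return 0;
}

Build with -DMASK_OFFSTACK to get the heap variant; the patch simply forbids that variant when PREEMPT_RT_FULL is set.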
Some files were not shown because too many files have changed in this diff.