From ee4ebc7f2a1d5cda4f40715d4e088e3d638b83a4 Mon Sep 17 00:00:00 2001
From: Tiejun Chen
Date: Tue, 18 Jun 2019 16:23:49 -0700
Subject: [PATCH] update -rt to 4.19.50-rt22

Signed-off-by: Tiejun Chen
---
 examples/rt-for-vmware.yml | 2 +-
 kernel/Makefile | 4 +-
 ...M-at91-add-TCB-registers-definitions.patch | 4 +-
 ...ers-Add-a-new-driver-for-the-Atmel-A.patch | 8 +-
 ...ers-timer-atmel-tcb-add-clockevent-d.patch | 4 +-
 ...drivers-atmel-pit-make-option-silent.patch | 8 +-
 ...at91-Implement-clocksource-selection.patch | 4 +-
 ...onfigs-at91-use-new-TCB-timer-driver.patch | 4 +-
 .../0007-ARM-configs-at91-unselect-PIT.patch | 4 +-
 ...ts-Move-pending-table-allocation-to-.patch | 4 +-
 ...-convert-worker-lock-to-raw-spinlock.patch | 4 +-
 ...m-qi-simplify-CGR-allocation-freeing.patch | 4 +-
 ...obustify-CFS-bandwidth-timer-locking.patch | 22 +--
 ...012-arm-Convert-arm-boot_lock-to-raw.patch | 4 +-
 ...-let-setaffinity-unmask-threaded-EOI.patch | 4 +-
 ...irqsave-in-cgroup_rstat_flush_locked.patch | 4 +-
 ...lize-cookie-hash-table-raw-spinlocks.patch | 4 +-
 ...mbus-include-header-for-get_irq_regs.patch | 4 +-
 ...de-irqflags.h-for-raw_local_irq_save.patch | 4 +-
 .../0018-efi-Allow-efi-runtime.patch | 4 +-
 ...fi-drop-task_lock-from-efi_switch_mm.patch | 4 +-
 ...e_layout-before-altenates-are-applie.patch | 4 +-
 ...-phandle-cache-outside-of-the-devtre.patch | 4 +-
 ...ake-quarantine_lock-a-raw_spinlock_t.patch | 4 +-
 ...xpedited-GP-parallelization-cleverne.patch | 4 +-
 ...-kmemleak_lock-to-raw-spinlock-on-RT.patch | 6 +-
 ...-replace-seqcount_t-with-a-seqlock_t.patch | 14 +-
 ...vide-a-pointer-to-the-valid-CPU-mask.patch | 48 +++----
 ...ernel-sched-core-add-migrate_disable.patch | 10 +-
 ...sable-Add-export_symbol_gpl-for-__mi.patch | 6 +-
 ...o-not-disable-enable-clocks-in-a-row.patch | 4 +-
 ...B-Allow-higher-clock-rates-for-clock.patch | 4 +-
 ...31-timekeeping-Split-jiffies-seqlock.patch | 6 +-
 ...2-signal-Revert-ptrace-preempt-magic.patch | 6 +-
 ...et-sched-Use-msleep-instead-of-yield.patch | 4 +-
 ...rq-remove-BUG_ON-irqs_disabled-check.patch | 4 +-
 ...do-no-disable-interrupts-in-giveback.patch | 6 +-
 ...rovide-PREEMPT_RT_BASE-config-switch.patch | 4 +-
 ...sable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch | 4 +-
 ...abel-disable-if-stop_machine-is-used.patch | 6 +-
 ...config-options-which-are-not-RT-comp.patch | 6 +-
 .../0040-lockdep-disable-self-test.patch | 6 +-
 .../0041-mm-Allow-only-slub-on-RT.patch | 10 +-
 ...locking-Disable-spin-on-owner-for-RT.patch | 4 +-
 ...043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch | 4 +-
 ...044-rcu-make-RCU_BOOST-default-on-RT.patch | 4 +-
 ...-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch | 8 +-
 ...46-net-core-disable-NET_RX_BUSY_POLL.patch | 4 +-
 ...0047-arm-disable-NEON-in-kernel-mode.patch | 8 +-
 ...0048-powerpc-Use-generic-rwsem-on-RT.patch | 4 +-
 ...ble-in-kernel-MPIC-emulation-for-PRE.patch | 4 +-
 .../0050-powerpc-Disable-highmem-on-RT.patch | 4 +-
 .../0051-mips-Disable-highmem-on-RT.patch | 4 +-
 ...86-Use-generic-rwsem_spinlocks-on-rt.patch | 4 +-
 ...ds-trigger-disable-CPU-trigger-on-RT.patch | 4 +-
 ...rop-K8-s-driver-from-beeing-selected.patch | 4 +-
 .../0055-md-disable-bcache.patch | 4 +-
 ...6-efi-Disable-runtime-services-on-RT.patch | 4 +-
 ...0057-printk-Add-a-printk-kill-switch.patch | 4 +-
 ..._early_printk-boot-param-to-help-wit.patch | 4 +-
 ...pt-Provide-preempt_-_-no-rt-variants.patch | 4 +-
 ...-migrate_disable-enable-in-different.patch | 12 +-
 .../0061-rt-Add-local-irq-locks.patch | 4 +-
 ...provide-get-put-_locked_ptr-variants.patch | 4 +-
 ...catterlist-Do-not-disable-irqs-on-RT.patch | 4 +-
 ...-x86-Delay-calling-signals-in-atomic.patch | 10 +-
 ...ignal-delay-calling-signals-on-32bit.patch | 4 +-
 ...head-Replace-bh_uptodate_lock-for-rt.patch | 4 +-
 ...-state-lock-and-journal-head-lock-rt.patch | 6 +-
 ...st_bl-Make-list-head-locking-RT-safe.patch | 4 +-
 ...-list_bl-fixup-bogus-lockdep-warning.patch | 4 +-
 .../0070-genirq-Disable-irqpoll-on-rt.patch | 4 +-
 ...-genirq-Force-interrupt-thread-on-RT.patch | 6 +-
 ...d-zone-lock-while-freeing-pages-from.patch | 4 +-
 ...d-zone-lock-while-freeing-pages-from.patch | 4 +-
 ...B-change-list_lock-to-raw_spinlock_t.patch | 8 +-
 ...ving-back-empty-slubs-to-IRQ-enabled.patch | 4 +-
 ...page_alloc-rt-friendly-per-cpu-pages.patch | 4 +-
 ...077-mm-swap-Convert-to-percpu-locked.patch | 4 +-
 ...m-perform-lru_add_drain_all-remotely.patch | 4 +-
 ...t-per-cpu-variables-with-preempt-dis.patch | 4 +-
 ...plit-page-table-locks-for-vector-pag.patch | 4 +-
 .../0081-mm-Enable-SLUB-for-RT.patch | 4 +-
 ...0082-slub-Enable-irqs-for-__GFP_WAIT.patch | 4 +-
 .../0083-slub-Disable-SLUB_CPU_PARTIAL.patch | 8 +-
 ...n-t-call-schedule_work_on-in-preempt.patch | 4 +-
 ...place-local_irq_disable-with-local-l.patch | 4 +-
 ...oc-copy-with-get_cpu_var-and-locking.patch | 4 +-
 ...le-preemption-__split_large_page-aft.patch | 4 +-
 .../0088-radix-tree-use-local-locks.patch | 4 +-
 ...9-timers-Prepare-for-full-preemption.patch | 6 +-
 ...090-x86-kvm-Require-const-tsc-for-RT.patch | 8 +-
 ...ec-Don-t-use-completion-s-wait-queue.patch | 4 +-
 .../0092-wait.h-include-atomic.h.patch | 4 +-
 ...mple-Simple-work-queue-implemenation.patch | 4 +-
 ...-a-shit-statement-in-SWORK_EVENT_PEN.patch | 4 +-
 ...95-completion-Use-simple-wait-queues.patch | 18 +--
 .../0096-fs-aio-simple-simple-work.patch | 12 +-
 ...voke-the-affinity-callback-via-a-wor.patch | 23 +++-
 ...id-schedule_work-with-interrupts-dis.patch | 4 +-
 ...ate-hrtimer_init-hrtimer_init_sleepe.patch | 18 +--
 ...100-hrtimers-Prepare-full-preemption.patch | 4 +-
 ...s-by-default-into-the-softirq-contex.patch | 24 ++--
 ...air-Make-the-hrtimers-non-hard-again.patch | 8 +-
 ...-schedule_work-call-to-helper-thread.patch | 4 +-
 ...te-change-before-hrtimer_cancel-in-d.patch | 4 +-
 ...timers-Thread-posix-cpu-timers-on-rt.patch | 8 +-
 ...ched-Move-task_struct-cleanup-to-RCU.patch | 6 +-
 ...-number-of-task-migrations-per-batch.patch | 6 +-
 .../0108-sched-Move-mmdrop-to-RCU-on-RT.patch | 8 +-
 ...e-stack-kprobe-clean-up-to-__put_tas.patch | 8 +-
 ...state-for-tasks-blocked-on-sleeping-.patch | 8 +-
 ...ount-rcu_preempt_depth-on-RT-in-migh.patch | 10 +-
 ...-proper-LOCK_OFFSET-for-cond_resched.patch | 4 +-
 .../0113-sched-Disable-TTWU_QUEUE-on-RT.patch | 4 +-
 ...Only-wake-up-idle-workers-if-not-blo.patch | 6 +-
 ...ease-the-nr-of-migratory-tasks-when-.patch | 12 +-
 ...-hotplug-Lightweight-get-online-cpus.patch | 18 +--
 ...e-disabled-counter-to-tracing-output.patch | 12 +-
 .../0118-lockdep-Make-it-RT-aware.patch | 4 +-
 ...tasklets-from-going-into-infinite-sp.patch | 4 +-
 ...eemption-after-reenabling-interrupts.patch | 6 +-
 ...oftirq-Disable-softirq-stacks-for-RT.patch | 10 +-
 .../0122-softirq-Split-softirq-locks.patch | 6 +-
 ...-use-local_bh_disable-in-netif_rx_ni.patch | 6 +-
 ...abling-of-softirq-processing-in-irq-.patch | 10 +-
 ...plit-timer-softirqs-out-of-ksoftirqd.patch | 4 +-
 ...cal_softirq_pending-messages-if-ksof.patch | 4 +-
 ...cal_softirq_pending-messages-if-task.patch | 4 +-
 .../0128-rtmutex-trylock-is-okay-on-RT.patch | 4 +-
 ...-nfs-turn-rmdir_sem-into-a-semaphore.patch | 4 +-
 ...he-various-new-futex-race-conditions.patch | 16 +--
 ...on-when-a-requeued-RT-task-times-out.patch | 4 +-
 ...k-unlock-symetry-versus-pi_lock-and-.patch | 6 +-
 .../0133-pid.h-include-atomic.h.patch | 4 +-
 ...arm-include-definition-for-cpumask_t.patch | 4 +-
 ...ure-Do-NOT-include-rwlock.h-directly.patch | 4 +-
 ...36-rtmutex-Add-rtmutex_lock_killable.patch | 4 +-
 ...0137-rtmutex-Make-lock_killable-work.patch | 4 +-
 ...spinlock-Split-the-lock-types-header.patch | 4 +-
 .../0139-rtmutex-Avoid-include-hell.patch | 4 +-
 ...-rbtree-don-t-include-the-rcu-header.patch | 8 +-
 ...tex-Provide-rt_mutex_slowlock_locked.patch | 4 +-
 ...ockdep-less-version-of-rt_mutex-s-lo.patch | 4 +-
 ...tex-add-sleeping-lock-implementation.patch | 18 +--
 ...utex-implementation-based-on-rtmutex.patch | 4 +-
 ...wsem-implementation-based-on-rtmutex.patch | 4 +-
 ...lock-implementation-based-on-rtmutex.patch | 4 +-
 ...-preserve-state-like-a-sleeping-lock.patch | 4 +-
 .../0148-rtmutex-wire-up-RT-s-locking.patch | 4 +-
 ...utex-add-ww_mutex-addon-for-mutex-rt.patch | 4 +-
 .../0150-kconfig-Add-PREEMPT_RT_FULL.patch | 4 +-
 ...-fix-deadlock-in-device-mapper-block.patch | 4 +-
 ...utex-Flush-block-plug-on-__down_read.patch | 4 +-
 ...re-init-the-wait_lock-in-rt_mutex_in.patch | 4 +-
 ...ace-fix-ptrace-vs-tasklist_lock-race.patch | 10 +-
 ...mutex-annotate-sleeping-lock-context.patch | 8 +-
 ...sable-fallback-to-preempt_disable-in.patch | 12 +-
 ...eck-for-__LINUX_SPINLOCK_TYPES_H-on-.patch | 4 +-
 .../0158-rcu-Frob-softirq-test.patch | 4 +-
 ...59-rcu-Merge-RCU-bh-into-RCU-preempt.patch | 14 +-
 ...ke-ksoftirqd-do-RCU-quiescent-states.patch | 4 +-
 ...nate-softirq-processing-from-rcutree.patch | 4 +-
 ...-use-cpu_online-instead-custom-check.patch | 4 +-
 ...place-local_irqsave-with-a-locallock.patch | 4 +-
 ..._normal_after_boot-by-default-for-RT.patch | 4 +-
 ...erial-omap-Make-the-locking-RT-aware.patch | 4 +-
 ...al-pl011-Make-the-locking-work-on-RT.patch | 4 +-
 ...-explicitly-initialize-the-flags-var.patch | 4 +-
 ...mprove-the-serial-console-PASS_LIMIT.patch | 4 +-
 ...0-don-t-take-the-trylock-during-oops.patch | 4 +-
 ...wsem-Remove-preempt_disable-variants.patch | 4 +-
 ...ate_mm-by-preempt_-disable-enable-_r.patch | 4 +-
 ...back-explicit-INIT_HLIST_BL_HEAD-ini.patch | 12 +-
 ...e-preemption-on-i_dir_seq-s-write-si.patch | 18 +--
 ...e-of-local-lock-in-multi_cpu-decompr.patch | 4 +-
 ...rmal-Defer-thermal-wakups-to-threads.patch | 4 +-
 ...e-preemption-around-local_bh_disable.patch | 4 +-
 ...poll-Do-not-disable-preemption-on-RT.patch | 4 +-
 ...er-preempt-disable-region-which-suck.patch | 4 +-
 .../0179-block-mq-use-cpu_light.patch | 4 +-
 ...ock-mq-do-not-invoke-preempt_disable.patch | 6 +-
 ...k-mq-don-t-complete-requests-via-IPI.patch | 8 +-
 ...-Make-raid5_percpu-handling-RT-aware.patch | 10 +-
 .../0183-rt-Introduce-cpu_chill.patch | 4 +-
 ...rtimer-Don-t-lose-state-in-cpu_chill.patch | 4 +-
 ...chill-save-task-state-in-saved_state.patch | 4 +-
 ...e-blk_queue_usage_counter_release-in.patch | 10 +-
 ...-block-Use-cpu_chill-for-retry-loops.patch | 4 +-
 ...cache-Use-cpu_chill-in-trylock-loops.patch | 4 +-
 ...t-Use-cpu_chill-instead-of-cpu_relax.patch | 8 +-
 ...use-swait_queue-instead-of-waitqueue.patch | 16 +--
 .../0191-workqueue-Use-normal-rcu.patch | 30 ++--
 ...cal-irq-lock-instead-of-irq-disable-.patch | 12 +-
 ...t-workqueue-versus-ata-piix-livelock.patch | 6 +-
 ...tangle-worker-accounting-from-rqlock.patch | 8 +-
 .../0195-debugobjects-Make-RT-aware.patch | 4 +-
 .../0196-seqlock-Prevent-rt-starvation.patch | 4 +-
 ...vc_xprt_do_enqueue-use-get_cpu_light.patch | 4 +-
 ...0198-net-Use-skbufhead-with-raw-lock.patch | 10 +-
 ...recursion-to-per-task-variable-on-RT.patch | 6 +-
 ...y-to-delegate-processing-a-softirq-t.patch | 6 +-
 ...ake-qdisc-s-busylock-in-__dev_xmit_s.patch | 6 +-
 ...Qdisc-use-a-seqlock-instead-seqcount.patch | 4 +-
 ...-missing-serialization-in-ip_send_un.patch | 4 +-
 .../0204-net-add-a-lock-around-icmp_sk.patch | 4 +-
 ...schedule_irqoff-disable-interrupts-o.patch | 6 +-
 ...-push-most-work-into-softirq-context.patch | 129 +++++++++++-------
 .../0207-printk-Make-rt-aware.patch | 4 +-
 ...n-t-try-to-print-from-IRQ-NMI-region.patch | 4 +-
 ...intk-Drop-the-logbuf_lock-more-often.patch | 4 +-
 ...n-translation-section-permission-fau.patch | 4 +-
 ...-irq_set_irqchip_state-documentation.patch | 8 +-
 ...wngrade-preempt_disable-d-region-to-.patch | 12 +-
 ...-preemp_disable-in-addition-to-local.patch | 4 +-
 ...14-kgdb-serial-Short-term-workaround.patch | 4 +-
 ...-sysfs-Add-sys-kernel-realtime-entry.patch | 4 +-
 .../0216-mm-rt-kmap_atomic-scheduling.patch | 10 +-
 ...highmem-Add-a-already-used-pte-check.patch | 4 +-
 .../0218-arm-highmem-Flush-tlb-on-unmap.patch | 4 +-
 .../0219-arm-Enable-highmem-for-rt.patch | 4 +-
 .../0220-scsi-fcoe-Make-RT-aware.patch | 4 +-
 ...ypto-Reduce-preempt-disabled-regions.patch | 4 +-
 ...-preempt-disabled-regions-more-algos.patch | 4 +-
 ...ypto-limit-more-FPU-enabled-sections.patch | 14 +-
 ...-serialize-RT-percpu-scratch-buffer-.patch | 4 +-
 ...d-a-lock-instead-preempt_disable-loc.patch | 4 +-
 ...andom_bytes-for-RT_FULL-in-init_oops.patch | 4 +-
 ...ackprotector-Avoid-random-pool-on-rt.patch | 4 +-
 .../0228-random-Make-it-work-on-rt.patch | 14 +-
 ...9-cpu-hotplug-Implement-CPU-pinning.patch} | 6 +-
 ...dom-avoid-preempt_disable-ed-section.patch | 80 -----------
 ...d-user-tasks-to-be-awakened-to-the-.patch} | 6 +-
 ...uct-tape-RT-rwlock-usage-for-non-RT.patch} | 6 +-
 ...ve-preemption-disabling-in-netif_rx.patch} | 6 +-
 ...-local_irq_disable-kmalloc-headache.patch} | 4 +-
 ...users-of-napi_alloc_cache-against-r.patch} | 4 +-
 ...ialize-xt_write_recseq-sections-on-.patch} | 6 +-
 ...dd-a-mutex-around-devnet_rename_seq.patch} | 6 +-
 ...Only-do-hardirq-context-test-for-ra.patch} | 4 +-
 ...fix-warnings-due-to-missing-PREEMPT.patch} | 4 +-
 ...hed-Add-support-for-lazy-preemption.patch} | 44 +++---
 ...0-ftrace-Fix-trace-header-alignment.patch} | 8 +-
 ...241-x86-Support-for-lazy-preemption.patch} | 18 +--
 ...properly-check-against-preempt-mask.patch} | 4 +-
 ...use-proper-return-label-on-32bit-x8.patch} | 8 +-
 ...arm-Add-support-for-lazy-preemption.patch} | 6 +-
 ...rpc-Add-support-for-lazy-preemption.patch} | 4 +-
 ...arch-arm64-Add-lazy-preempt-support.patch} | 8 +-
 ...-Protect-send_msg-with-a-local-lock.patch} | 4 +-
 ...m-Replace-bit-spinlocks-with-rtmute.patch} | 10 +-
 ...t-disable-preemption-in-zcomp_strea.patch} | 12 +-
 ...zcomp_stream_get-smp_processor_id-u.patch} | 4 +-
 ...1-tpm_tis-fix-stall-after-iowrite-s.patch} | 4 +-
 ...-deferral-of-watchdogd-wakeup-on-RT.patch} | 4 +-
 ...se-preempt_disable-enable_rt-where-.patch} | 6 +-
 ...l_lock-unlock_irq-in-intel_pipe_upd.patch} | 4 +-
 ...0255-drm-i915-disable-tracing-on-RT.patch} | 4 +-
 ..._I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch} | 4 +-
 ...oups-use-simple-wait-in-css_release.patch} | 12 +-
 ...ert-callback_lock-to-raw_spinlock_t.patch} | 4 +-
 ...a-locallock-instead-preempt_disable.patch} | 4 +-
 ...kqueue-Prevent-deadlock-stall-on-RT.patch} | 8 +-
 ...-tasks-to-cache-one-sigqueue-struct.patch} | 10 +-
 ...262-Add-localversion-for-RT-release.patch} | 4 +-
 ...ommu-Use-a-locallock-instead-local_.patch} | 4 +-
 ... => 0264-powerpc-reshuffle-TIF-bits.patch} | 4 +-
 ...Convert-show_lock-to-raw_spinlock_t.patch} | 4 +-
 ...sable-interrupts-independently-of-t.patch} | 4 +-
 ...Fix-a-lockup-in-wait_for_completion.patch} | 4 +-
 ...h => 0268-Linux-4.19.50-rt22-REBASE.patch} | 8 +-
 271 files changed, 934 insertions(+), 978 deletions(-)
 rename kernel/patches-4.19.x-rt/{0230-cpu-hotplug-Implement-CPU-pinning.patch => 0229-cpu-hotplug-Implement-CPU-pinning.patch} (94%)
 delete mode 100644 kernel/patches-4.19.x-rt/0229-random-avoid-preempt_disable-ed-section.patch
 rename kernel/patches-4.19.x-rt/{0231-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch => 0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch} (87%)
 rename kernel/patches-4.19.x-rt/{0232-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch => 0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch} (94%)
 rename kernel/patches-4.19.x-rt/{0233-net-Remove-preemption-disabling-in-netif_rx.patch => 0232-net-Remove-preemption-disabling-in-netif_rx.patch} (92%)
 rename kernel/patches-4.19.x-rt/{0234-net-Another-local_irq_disable-kmalloc-headache.patch => 0233-net-Another-local_irq_disable-kmalloc-headache.patch} (93%)
 rename kernel/patches-4.19.x-rt/{0235-net-core-protect-users-of-napi_alloc_cache-against-r.patch => 0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch} (96%)
 rename kernel/patches-4.19.x-rt/{0236-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch => 0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch} (93%)
 rename kernel/patches-4.19.x-rt/{0237-net-Add-a-mutex-around-devnet_rename_seq.patch => 0236-net-Add-a-mutex-around-devnet_rename_seq.patch} (95%)
 rename kernel/patches-4.19.x-rt/{0238-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch => 0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch} (94%)
 rename kernel/patches-4.19.x-rt/{0239-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch => 0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch} (97%)
 rename kernel/patches-4.19.x-rt/{0240-sched-Add-support-for-lazy-preemption.patch => 0239-sched-Add-support-for-lazy-preemption.patch} (94%)
 rename kernel/patches-4.19.x-rt/{0241-ftrace-Fix-trace-header-alignment.patch => 0240-ftrace-Fix-trace-header-alignment.patch} (90%)
 rename kernel/patches-4.19.x-rt/{0242-x86-Support-for-lazy-preemption.patch => 0241-x86-Support-for-lazy-preemption.patch} (94%)
 rename kernel/patches-4.19.x-rt/{0243-x86-lazy-preempt-properly-check-against-preempt-mask.patch => 0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch} (88%)
 rename kernel/patches-4.19.x-rt/{0244-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch => 0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch} (84%)
 rename kernel/patches-4.19.x-rt/{0245-arm-Add-support-for-lazy-preemption.patch => 0244-arm-Add-support-for-lazy-preemption.patch} (97%)
 rename kernel/patches-4.19.x-rt/{0246-powerpc-Add-support-for-lazy-preemption.patch => 0245-powerpc-Add-support-for-lazy-preemption.patch} (98%)
 rename kernel/patches-4.19.x-rt/{0247-arch-arm64-Add-lazy-preempt-support.patch => 0246-arch-arm64-Add-lazy-preempt-support.patch} (96%)
 rename kernel/patches-4.19.x-rt/{0248-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch => 0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch} (95%)
 rename kernel/patches-4.19.x-rt/{0249-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
=> 0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch} (90%) rename kernel/patches-4.19.x-rt/{0250-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch => 0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch} (89%) rename kernel/patches-4.19.x-rt/{0251-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch => 0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch} (90%) rename kernel/patches-4.19.x-rt/{0252-tpm_tis-fix-stall-after-iowrite-s.patch => 0251-tpm_tis-fix-stall-after-iowrite-s.patch} (95%) rename kernel/patches-4.19.x-rt/{0253-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch => 0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch} (96%) rename kernel/patches-4.19.x-rt/{0254-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch => 0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch} (92%) rename kernel/patches-4.19.x-rt/{0255-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch => 0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch} (97%) rename kernel/patches-4.19.x-rt/{0256-drm-i915-disable-tracing-on-RT.patch => 0255-drm-i915-disable-tracing-on-RT.patch} (92%) rename kernel/patches-4.19.x-rt/{0257-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch => 0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch} (89%) rename kernel/patches-4.19.x-rt/{0258-cgroups-use-simple-wait-in-css_release.patch => 0257-cgroups-use-simple-wait-in-css_release.patch} (90%) rename kernel/patches-4.19.x-rt/{0259-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch => 0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch} (98%) rename kernel/patches-4.19.x-rt/{0260-apparmor-use-a-locallock-instead-preempt_disable.patch => 0259-apparmor-use-a-locallock-instead-preempt_disable.patch} (95%) rename kernel/patches-4.19.x-rt/{0261-workqueue-Prevent-deadlock-stall-on-RT.patch => 0260-workqueue-Prevent-deadlock-stall-on-RT.patch} (97%) rename kernel/patches-4.19.x-rt/{0262-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch => 0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch} (95%) rename kernel/patches-4.19.x-rt/{0263-Add-localversion-for-RT-release.patch => 0262-Add-localversion-for-RT-release.patch} (76%) rename kernel/patches-4.19.x-rt/{0264-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch => 0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch} (96%) rename kernel/patches-4.19.x-rt/{0265-powerpc-reshuffle-TIF-bits.patch => 0264-powerpc-reshuffle-TIF-bits.patch} (97%) rename kernel/patches-4.19.x-rt/{0266-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch => 0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch} (94%) rename kernel/patches-4.19.x-rt/{0267-drm-i915-Don-t-disable-interrupts-independently-of-t.patch => 0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch} (93%) rename kernel/patches-4.19.x-rt/{0268-sched-completion-Fix-a-lockup-in-wait_for_completion.patch => 0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch} (95%) rename kernel/patches-4.19.x-rt/{0269-Linux-4.19.37-rt20-REBASE.patch => 0268-Linux-4.19.50-rt22-REBASE.patch} (64%) diff --git a/examples/rt-for-vmware.yml b/examples/rt-for-vmware.yml index 6eeef6cab..0aead70b4 100644 --- a/examples/rt-for-vmware.yml +++ b/examples/rt-for-vmware.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:4.19.37-rt + image: linuxkit/kernel:4.19.50-rt cmdline: "console=tty0" init: - linuxkit/init:v0.7 diff --git a/kernel/Makefile b/kernel/Makefile index 
1a673c539..3912d23c1 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -254,14 +254,14 @@ ifeq ($(ARCH),x86_64) $(eval $(call kernel,5.1.11,5.1.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,4.19.52,4.19.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,4.19.52,4.19.x,,-dbg)) -$(eval $(call kernel,4.19.37,4.19.x,-rt,)) +$(eval $(call kernel,4.19.50,4.19.x,-rt,)) $(eval $(call kernel,4.14.127,4.14.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,4.9.182,4.9.x,$(EXTRA),$(DEBUG))) else ifeq ($(ARCH),aarch64) $(eval $(call kernel,5.1.11,5.1.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,4.19.52,4.19.x,$(EXTRA),$(DEBUG))) -$(eval $(call kernel,4.19.37,4.19.x,-rt,)) +$(eval $(call kernel,4.19.50,4.19.x,-rt,)) else ifeq ($(ARCH),s390x) $(eval $(call kernel,5.1.11,5.1.x,$(EXTRA),$(DEBUG))) diff --git a/kernel/patches-4.19.x-rt/0001-ARM-at91-add-TCB-registers-definitions.patch b/kernel/patches-4.19.x-rt/0001-ARM-at91-add-TCB-registers-definitions.patch index 1a3f50e2c..75b83bfd1 100644 --- a/kernel/patches-4.19.x-rt/0001-ARM-at91-add-TCB-registers-definitions.patch +++ b/kernel/patches-4.19.x-rt/0001-ARM-at91-add-TCB-registers-definitions.patch @@ -1,7 +1,7 @@ -From bc4d8f04b5bd123853531af90f1ec548d8ab61e4 Mon Sep 17 00:00:00 2001 +From 51502f785ba92dab73427034abe6ebc29b827637 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:18 +0200 -Subject: [PATCH 001/269] ARM: at91: add TCB registers definitions +Subject: [PATCH 001/268] ARM: at91: add TCB registers definitions Add registers and bits definitions for the timer counter blocks found on Atmel ARM SoCs. diff --git a/kernel/patches-4.19.x-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch b/kernel/patches-4.19.x-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch index 980ed8571..42bce94d4 100644 --- a/kernel/patches-4.19.x-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch +++ b/kernel/patches-4.19.x-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch @@ -1,7 +1,7 @@ -From 1eef86c9b8aa09d8e57f4ee5684c7bfd28f6900f Mon Sep 17 00:00:00 2001 +From 0d17e392d25ce41fbd73368c207e9e3c8a509200 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:19 +0200 -Subject: [PATCH 002/269] clocksource/drivers: Add a new driver for the Atmel +Subject: [PATCH 002/268] clocksource/drivers: Add a new driver for the Atmel ARM TC blocks Add a driver for the Atmel Timer Counter Blocks. This driver provides a @@ -31,10 +31,10 @@ Signed-off-by: Sebastian Andrzej Siewior create mode 100644 drivers/clocksource/timer-atmel-tcb.c diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig -index c1ddafa4c299..c5a5ad4e22e7 100644 +index 4d37f018d846..0ab22e7037f4 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig -@@ -414,6 +414,14 @@ config ATMEL_ST +@@ -415,6 +415,14 @@ config ATMEL_ST help Support for the Atmel ST timer. 
diff --git a/kernel/patches-4.19.x-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch b/kernel/patches-4.19.x-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch index 963935f05..51686422f 100644 --- a/kernel/patches-4.19.x-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch +++ b/kernel/patches-4.19.x-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch @@ -1,7 +1,7 @@ -From f6803050ab0965a1255a3b407ca429a04c5cb230 Mon Sep 17 00:00:00 2001 +From a4ddee4eca713013e407261d0e2b49bec1d1cadc Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:20 +0200 -Subject: [PATCH 003/269] clocksource/drivers: timer-atmel-tcb: add clockevent +Subject: [PATCH 003/268] clocksource/drivers: timer-atmel-tcb: add clockevent device on separate channel Add an other clockevent device that uses a separate TCB channel when diff --git a/kernel/patches-4.19.x-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch b/kernel/patches-4.19.x-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch index 676593c6a..39657d4e0 100644 --- a/kernel/patches-4.19.x-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch +++ b/kernel/patches-4.19.x-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch @@ -1,7 +1,7 @@ -From 873075a203c574d322429e4a8cd0686541293903 Mon Sep 17 00:00:00 2001 +From c1264e05e78e9ef1643471e975f411ec4b1e9015 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:21 +0200 -Subject: [PATCH 004/269] clocksource/drivers: atmel-pit: make option silent +Subject: [PATCH 004/268] clocksource/drivers: atmel-pit: make option silent To conform with the other option, make the ATMEL_PIT option silent so it can be selected from the platform @@ -14,10 +14,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig -index c5a5ad4e22e7..076aa8184961 100644 +index 0ab22e7037f4..34b07047b91f 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig -@@ -403,8 +403,11 @@ config ARMV7M_SYSTICK +@@ -404,8 +404,11 @@ config ARMV7M_SYSTICK This options enables support for the ARMv7M system timer unit config ATMEL_PIT diff --git a/kernel/patches-4.19.x-rt/0005-ARM-at91-Implement-clocksource-selection.patch b/kernel/patches-4.19.x-rt/0005-ARM-at91-Implement-clocksource-selection.patch index b9a8c0ba7..e7eb87a1f 100644 --- a/kernel/patches-4.19.x-rt/0005-ARM-at91-Implement-clocksource-selection.patch +++ b/kernel/patches-4.19.x-rt/0005-ARM-at91-Implement-clocksource-selection.patch @@ -1,7 +1,7 @@ -From e0dc436f11c998b38ee3dc4cd269d5075ea12b7e Mon Sep 17 00:00:00 2001 +From e228d79e34fd799304ff0c6696e52c00e3caa066 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:22 +0200 -Subject: [PATCH 005/269] ARM: at91: Implement clocksource selection +Subject: [PATCH 005/268] ARM: at91: Implement clocksource selection Allow selecting and unselecting the PIT clocksource driver so it doesn't have to be compile when unused. 
diff --git a/kernel/patches-4.19.x-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch b/kernel/patches-4.19.x-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch index 3aca5ad36..bf756235a 100644 --- a/kernel/patches-4.19.x-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch +++ b/kernel/patches-4.19.x-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch @@ -1,7 +1,7 @@ -From ca4a1c8ce5f7224d99ef6c2a6754468cb72ea4c3 Mon Sep 17 00:00:00 2001 +From 719de0a8c6c4489eae0ea592aaa78898611c5f7f Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:23 +0200 -Subject: [PATCH 006/269] ARM: configs: at91: use new TCB timer driver +Subject: [PATCH 006/268] ARM: configs: at91: use new TCB timer driver Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to timer-atmel-tcb. diff --git a/kernel/patches-4.19.x-rt/0007-ARM-configs-at91-unselect-PIT.patch b/kernel/patches-4.19.x-rt/0007-ARM-configs-at91-unselect-PIT.patch index cfbd75bba..6cffbb3fb 100644 --- a/kernel/patches-4.19.x-rt/0007-ARM-configs-at91-unselect-PIT.patch +++ b/kernel/patches-4.19.x-rt/0007-ARM-configs-at91-unselect-PIT.patch @@ -1,7 +1,7 @@ -From 2c83222f4057f755febccd002f3720bbf73a6473 Mon Sep 17 00:00:00 2001 +From 4175bca0551889fda7dfdf2391a242b0c80bb59a Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:24 +0200 -Subject: [PATCH 007/269] ARM: configs: at91: unselect PIT +Subject: [PATCH 007/268] ARM: configs: at91: unselect PIT The PIT is not required anymore to successfully boot and may actually harm in case preempt-rt is used because the PIT interrupt is shared. diff --git a/kernel/patches-4.19.x-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch b/kernel/patches-4.19.x-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch index 75038df7f..62323b845 100644 --- a/kernel/patches-4.19.x-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch +++ b/kernel/patches-4.19.x-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch @@ -1,7 +1,7 @@ -From bb357496d72d05e2841899655c8e709d7c369ab0 Mon Sep 17 00:00:00 2001 +From 860cae9aedf5937c7f9e6d7c228e02ece79b0e0a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 27 Jul 2018 13:38:54 +0100 -Subject: [PATCH 008/269] irqchip/gic-v3-its: Move pending table allocation to +Subject: [PATCH 008/268] irqchip/gic-v3-its: Move pending table allocation to init time Signed-off-by: Marc Zyngier diff --git a/kernel/patches-4.19.x-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch b/kernel/patches-4.19.x-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch index f06be8897..073c532df 100644 --- a/kernel/patches-4.19.x-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch +++ b/kernel/patches-4.19.x-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch @@ -1,7 +1,7 @@ -From 9d8b1db47a7e355eb0c34a8af57f3613db6cb18c Mon Sep 17 00:00:00 2001 +From 9e8f94470c29280436a04d2d6d1eeb61667b0af8 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Fri, 28 Sep 2018 21:03:51 +0000 -Subject: [PATCH 009/269] kthread: convert worker lock to raw spinlock +Subject: [PATCH 009/268] kthread: convert worker lock to raw spinlock In order to enable the queuing of kthread work items from hardirq context even when PREEMPT_RT_FULL is enabled, convert the worker diff --git a/kernel/patches-4.19.x-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch b/kernel/patches-4.19.x-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch index 1e31a80bc..f30a31f14 100644 --- 
a/kernel/patches-4.19.x-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch +++ b/kernel/patches-4.19.x-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch @@ -1,7 +1,7 @@ -From b37ee7bd4ac42c97c3fce905634cf808345a25ac Mon Sep 17 00:00:00 2001 +From ab7516f814d1df7736f158b4217b78c99e40168e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Mon, 8 Oct 2018 14:09:37 +0300 -Subject: [PATCH 010/269] crypto: caam/qi - simplify CGR allocation, freeing +Subject: [PATCH 010/268] crypto: caam/qi - simplify CGR allocation, freeing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit diff --git a/kernel/patches-4.19.x-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/kernel/patches-4.19.x-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch index 9574293e2..d9bd1c468 100644 --- a/kernel/patches-4.19.x-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch +++ b/kernel/patches-4.19.x-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch @@ -1,7 +1,7 @@ -From 78f68e44994c830d70aa92bb86a47b204ff605c6 Mon Sep 17 00:00:00 2001 +From cfe30af225801582b76bde294f920c8018ccb75a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 7 Jan 2019 13:52:31 +0100 -Subject: [PATCH 011/269] sched/fair: Robustify CFS-bandwidth timer locking +Subject: [PATCH 011/268] sched/fair: Robustify CFS-bandwidth timer locking Traditionally hrtimer callbacks were run with IRQs disabled, but with the introduction of HRTIMER_MODE_SOFT it is possible they run from @@ -29,10 +29,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 4aa8e7d90c25..53acadf72cd9 100644 +index 4a433608ba74..289c966f907a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4553,7 +4553,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, +@@ -4557,7 +4557,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, struct rq *rq = rq_of(cfs_rq); struct rq_flags rf; @@ -41,7 +41,7 @@ index 4aa8e7d90c25..53acadf72cd9 100644 if (!cfs_rq_throttled(cfs_rq)) goto next; -@@ -4570,7 +4570,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, +@@ -4574,7 +4574,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, unthrottle_cfs_rq(cfs_rq); next: @@ -50,7 +50,7 @@ index 4aa8e7d90c25..53acadf72cd9 100644 if (!remaining) break; -@@ -4586,7 +4586,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, +@@ -4590,7 +4590,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, * period the timer is deactivated until scheduling resumes; cfs_b->idle is * used to track this state. 
*/ @@ -59,7 +59,7 @@ index 4aa8e7d90c25..53acadf72cd9 100644 { u64 runtime, runtime_expires; int throttled; -@@ -4628,11 +4628,11 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) +@@ -4632,11 +4632,11 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; cfs_b->distribute_running = 1; @@ -73,7 +73,7 @@ index 4aa8e7d90c25..53acadf72cd9 100644 cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); -@@ -4741,17 +4741,18 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) +@@ -4745,17 +4745,18 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) { u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); @@ -95,7 +95,7 @@ index 4aa8e7d90c25..53acadf72cd9 100644 return; } -@@ -4762,18 +4763,18 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) +@@ -4766,18 +4767,18 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) if (runtime) cfs_b->distribute_running = 1; @@ -117,7 +117,7 @@ index 4aa8e7d90c25..53acadf72cd9 100644 } /* -@@ -4853,11 +4854,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +@@ -4857,11 +4858,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) { struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, period_timer); @@ -131,7 +131,7 @@ index 4aa8e7d90c25..53acadf72cd9 100644 for (;;) { overrun = hrtimer_forward_now(timer, cfs_b->period); if (!overrun) -@@ -4885,11 +4887,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +@@ -4889,11 +4891,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) count = 0; } diff --git a/kernel/patches-4.19.x-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch b/kernel/patches-4.19.x-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch index 23d2519f0..ad2b7bb81 100644 --- a/kernel/patches-4.19.x-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch +++ b/kernel/patches-4.19.x-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch @@ -1,7 +1,7 @@ -From fa6e4c3d085352808073b23fdff79729db01930a Mon Sep 17 00:00:00 2001 +From cd3c3d6a852d1ad0077e1fbbd667c7a715b7daad Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Mon, 19 Sep 2011 14:51:14 -0700 -Subject: [PATCH 012/269] arm: Convert arm boot_lock to raw +Subject: [PATCH 012/268] arm: Convert arm boot_lock to raw The arm boot_lock is used by the secondary processor startup code. The locking task is the idle thread, which has idle->sched_class == &idle_sched_class. 
diff --git a/kernel/patches-4.19.x-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch b/kernel/patches-4.19.x-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch index 8c5e4d265..31aae50c2 100644 --- a/kernel/patches-4.19.x-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch +++ b/kernel/patches-4.19.x-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch @@ -1,7 +1,7 @@ -From 4debab2aa3d29fcdb5b9cd132416094c54e9361b Mon Sep 17 00:00:00 2001 +From d3b004343e524bde70e0ff21eb2ef4d4989a21d5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Jul 2018 18:25:31 +0200 -Subject: [PATCH 013/269] x86/ioapic: Don't let setaffinity unmask threaded EOI +Subject: [PATCH 013/268] x86/ioapic: Don't let setaffinity unmask threaded EOI interrupt too early There is an issue with threaded interrupts which are marked ONESHOT diff --git a/kernel/patches-4.19.x-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/kernel/patches-4.19.x-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch index 999b5618f..66171763d 100644 --- a/kernel/patches-4.19.x-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch +++ b/kernel/patches-4.19.x-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch @@ -1,7 +1,7 @@ -From 1117688ac7606703683b1ac8cacdbf02d47b4adb Mon Sep 17 00:00:00 2001 +From bce3ba7d9bf5c645f5425e98fc15f9c14c4d1bd5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 3 Jul 2018 18:19:48 +0200 -Subject: [PATCH 014/269] cgroup: use irqsave in cgroup_rstat_flush_locked() +Subject: [PATCH 014/268] cgroup: use irqsave in cgroup_rstat_flush_locked() All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock either with spin_lock_irq() or spin_lock_irqsave(). diff --git a/kernel/patches-4.19.x-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch b/kernel/patches-4.19.x-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch index 20f180196..24954daa2 100644 --- a/kernel/patches-4.19.x-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch +++ b/kernel/patches-4.19.x-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch @@ -1,7 +1,7 @@ -From 8cf7a5b4f03a2829c823971a12c1a206bcba069d Mon Sep 17 00:00:00 2001 +From bdc5bf179f2fd29ef29ea44993ad316ffaae41b3 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Tue, 3 Jul 2018 13:34:30 -0500 -Subject: [PATCH 015/269] fscache: initialize cookie hash table raw spinlocks +Subject: [PATCH 015/268] fscache: initialize cookie hash table raw spinlocks The fscache cookie mechanism uses a hash table of hlist_bl_head structures. 
The PREEMPT_RT patcheset adds a raw spinlock to this structure and so on PREEMPT_RT diff --git a/kernel/patches-4.19.x-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch b/kernel/patches-4.19.x-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch index f48fb4e6f..39edef896 100644 --- a/kernel/patches-4.19.x-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch +++ b/kernel/patches-4.19.x-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch @@ -1,7 +1,7 @@ -From 841d8b9e20d17d7907421dc223346198287e81a1 Mon Sep 17 00:00:00 2001 +From d748238ff973cd42f7e7d6d2eb0476960d2c5a77 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 29 Aug 2018 21:59:04 +0200 -Subject: [PATCH 016/269] Drivers: hv: vmbus: include header for get_irq_regs() +Subject: [PATCH 016/268] Drivers: hv: vmbus: include header for get_irq_regs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit diff --git a/kernel/patches-4.19.x-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch b/kernel/patches-4.19.x-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch index 319a92f3a..da13b6d69 100644 --- a/kernel/patches-4.19.x-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch +++ b/kernel/patches-4.19.x-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch @@ -1,7 +1,7 @@ -From d77a9b0754acbc89c7884b3505afdbb49677b36a Mon Sep 17 00:00:00 2001 +From b9113e5cdf8382192972523dbb746daea3aa90b1 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 11 Oct 2018 16:39:59 +0200 -Subject: [PATCH 017/269] percpu: include irqflags.h for raw_local_irq_save() +Subject: [PATCH 017/268] percpu: include irqflags.h for raw_local_irq_save() The header percpu.h header file is using raw_local_irq_save() but does not include irqflags.h for its definition. It compiles because the diff --git a/kernel/patches-4.19.x-rt/0018-efi-Allow-efi-runtime.patch b/kernel/patches-4.19.x-rt/0018-efi-Allow-efi-runtime.patch index c43135163..5bbf828a6 100644 --- a/kernel/patches-4.19.x-rt/0018-efi-Allow-efi-runtime.patch +++ b/kernel/patches-4.19.x-rt/0018-efi-Allow-efi-runtime.patch @@ -1,7 +1,7 @@ -From 10c47a6dadf91edee1d414002f91cc73bbe59c90 Mon Sep 17 00:00:00 2001 +From 58823352a672562d5c098affcb951fbe43b6d721 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 15:06:10 +0200 -Subject: [PATCH 018/269] efi: Allow efi=runtime +Subject: [PATCH 018/268] efi: Allow efi=runtime In case the option "efi=noruntime" is default at built-time, the user could overwrite its sate by `efi=runtime' and allow it again. 
diff --git a/kernel/patches-4.19.x-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch b/kernel/patches-4.19.x-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch index b91f1fa0b..fef58eae8 100644 --- a/kernel/patches-4.19.x-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch +++ b/kernel/patches-4.19.x-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch @@ -1,7 +1,7 @@ -From d1af306cedb5a02314565763b49992b10ce5d802 Mon Sep 17 00:00:00 2001 +From eb484bfff976730c7eaac1843e193e1fb6c949c4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 24 Jul 2018 14:48:55 +0200 -Subject: [PATCH 019/269] x86/efi: drop task_lock() from efi_switch_mm() +Subject: [PATCH 019/268] x86/efi: drop task_lock() from efi_switch_mm() efi_switch_mm() is a wrapper around switch_mm() which saves current's ->active_mm, sets the requests mm as ->active_mm and invokes diff --git a/kernel/patches-4.19.x-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch b/kernel/patches-4.19.x-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch index b28093325..3f65a5014 100644 --- a/kernel/patches-4.19.x-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch +++ b/kernel/patches-4.19.x-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch @@ -1,7 +1,7 @@ -From 6d4ae829b2e8c46b1d730790bf2644e5a053cf14 Mon Sep 17 00:00:00 2001 +From 0d6bc3016c56136ba59d6a3519ba23bcaecf4e37 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 09:13:42 +0200 -Subject: [PATCH 020/269] arm64: KVM: compute_layout before altenates are +Subject: [PATCH 020/268] arm64: KVM: compute_layout before altenates are applied compute_layout() is invoked as part of an alternative fixup under diff --git a/kernel/patches-4.19.x-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/kernel/patches-4.19.x-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch index 25c69bdc1..efb6397a1 100644 --- a/kernel/patches-4.19.x-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch +++ b/kernel/patches-4.19.x-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch @@ -1,7 +1,7 @@ -From 1ab1616de2aaaa7392ebb706a457af2fdcd2b82a Mon Sep 17 00:00:00 2001 +From 7912b78543a4d0312c0b7c625bb7ab26360c477a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 31 Aug 2018 14:16:30 +0200 -Subject: [PATCH 021/269] of: allocate / free phandle cache outside of the +Subject: [PATCH 021/268] of: allocate / free phandle cache outside of the devtree_lock The phandle cache code allocates memory while holding devtree_lock which diff --git a/kernel/patches-4.19.x-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch b/kernel/patches-4.19.x-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch index aa4be48c2..6e5a96243 100644 --- a/kernel/patches-4.19.x-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch +++ b/kernel/patches-4.19.x-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch @@ -1,7 +1,7 @@ -From a61c877f81f1f0b850090df19e08d51cf9465955 Mon Sep 17 00:00:00 2001 +From 6e5efbd87443d2c1a3df01401ed51638d28d50c1 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Tue, 18 Sep 2018 10:29:31 -0500 -Subject: [PATCH 022/269] mm/kasan: make quarantine_lock a raw_spinlock_t +Subject: [PATCH 022/268] mm/kasan: make quarantine_lock a raw_spinlock_t The static lock quarantine_lock is used in quarantine.c to protect the quarantine queue datastructures. 
It is taken inside quarantine queue diff --git a/kernel/patches-4.19.x-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch b/kernel/patches-4.19.x-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch index 95bd9530f..0b6a71b39 100644 --- a/kernel/patches-4.19.x-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch +++ b/kernel/patches-4.19.x-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch @@ -1,7 +1,7 @@ -From b710c9561c0a7ddf1c7fef8d3bd3bc6d9e140a4e Mon Sep 17 00:00:00 2001 +From 3e3086ae2a7f6bb9834e51d8322673247a1d3e4c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Oct 2018 11:53:01 +0100 -Subject: [PATCH 023/269] EXP rcu: Revert expedited GP parallelization +Subject: [PATCH 023/268] EXP rcu: Revert expedited GP parallelization cleverness (Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu) diff --git a/kernel/patches-4.19.x-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch b/kernel/patches-4.19.x-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch index e6f6587ad..145aa8ccf 100644 --- a/kernel/patches-4.19.x-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch @@ -1,7 +1,7 @@ -From b32df881582f39cab5e57b894f554f8573170cf7 Mon Sep 17 00:00:00 2001 +From e27fcb8add849554b0073094e6685ca3248b0f01 Mon Sep 17 00:00:00 2001 From: He Zhe Date: Wed, 19 Dec 2018 16:30:57 +0100 -Subject: [PATCH 024/269] kmemleak: Turn kmemleak_lock to raw spinlock on RT +Subject: [PATCH 024/268] kmemleak: Turn kmemleak_lock to raw spinlock on RT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/kmemleak.c b/mm/kmemleak.c -index 17dd883198ae..b68a3d0d075f 100644 +index 72e3fb3bb037..0ed549045074 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -26,7 +26,7 @@ diff --git a/kernel/patches-4.19.x-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/kernel/patches-4.19.x-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch index e47fb7ae2..302a7e774 100644 --- a/kernel/patches-4.19.x-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch +++ b/kernel/patches-4.19.x-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch @@ -1,7 +1,7 @@ -From 82889085f9639d9aed51313cf8fd8e8ca32b8e8b Mon Sep 17 00:00:00 2001 +From 8f433a327631f56d446edaa607b815681c84073d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 28 Oct 2016 23:05:11 +0200 -Subject: [PATCH 025/269] NFSv4: replace seqcount_t with a seqlock_t +Subject: [PATCH 025/268] NFSv4: replace seqcount_t with a seqlock_t The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me because it maps to preempt_disable() in -RT which I can't have at this @@ -57,7 +57,7 @@ index 63287d911c08..2ae55eaa4a1e 100644 }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c -index 580e37bc3fe2..9d010731f901 100644 +index 53cf8599a46e..42850fb5944b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2863,7 +2863,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, @@ -79,10 +79,10 @@ index 580e37bc3fe2..9d010731f901 100644 } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c -index d2f645d34eb1..1698dd2ca20b 100644 +index 3ba2087469ac..f10952680bd9 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c -@@ -511,7 +511,7 @@ 
nfs4_alloc_state_owner(struct nfs_server *server, +@@ -515,7 +515,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, nfs4_init_seqid_counter(&sp->so_seqid); atomic_set(&sp->so_count, 1); INIT_LIST_HEAD(&sp->so_lru); @@ -91,7 +91,7 @@ index d2f645d34eb1..1698dd2ca20b 100644 mutex_init(&sp->so_delegreturn_mutex); return sp; } -@@ -1564,8 +1564,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs +@@ -1568,8 +1568,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs * recovering after a network partition or a reboot from a * server that doesn't support a grace period. */ @@ -105,7 +105,7 @@ index d2f645d34eb1..1698dd2ca20b 100644 restart: list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) -@@ -1652,14 +1656,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs +@@ -1656,14 +1660,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs spin_lock(&sp->so_lock); goto restart; } diff --git a/kernel/patches-4.19.x-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/kernel/patches-4.19.x-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch index 6ccf0b411..3d0182692 100644 --- a/kernel/patches-4.19.x-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch +++ b/kernel/patches-4.19.x-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch @@ -1,7 +1,7 @@ -From 3ace22e122817ae9b6da2d0c49209a834f96375c Mon Sep 17 00:00:00 2001 +From e22533415f2d2e4fbc723286a1946a5ac1c3d2ed Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 4 Apr 2017 12:50:16 +0200 -Subject: [PATCH 026/269] kernel: sched: Provide a pointer to the valid CPU +Subject: [PATCH 026/268] kernel: sched: Provide a pointer to the valid CPU mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 @@ -165,7 +165,7 @@ index c9ef3c532169..cb10249b1125 100644 /* Save the current cpu id for spu interrupt routing. 
*/ ctx->last_ran = raw_smp_processor_id(); diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c -index f8c260d522ca..befeec6414b0 100644 +index 912d53939f4f..6b8dc68b5ccc 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c @@ -1435,7 +1435,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) @@ -320,7 +320,7 @@ index 266f10cb7222..ef085d84a940 100644 } diff --git a/kernel/fork.c b/kernel/fork.c -index 64ef113e387e..bfe9c5c3eb88 100644 +index 69874db3fba8..98c971cb1d36 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -845,6 +845,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) @@ -333,7 +333,7 @@ index 64ef113e387e..bfe9c5c3eb88 100644 /* * One for us, one for whoever does the "release_task()" (usually diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index d7f409866cdf..80badc70c258 100644 +index 6859ea1d5c04..d6f690064cce 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -878,7 +878,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) @@ -541,10 +541,10 @@ index daaadf939ccb..f7d2c10b4c92 100644 /* * We have to ensure that we have at least one bit diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index 91e4202b0634..f927b1f45474 100644 +index 72c07059ef37..fb6e64417470 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p +@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p * If we cannot preempt any rq, fall back to pick any * online CPU: */ @@ -553,7 +553,7 @@ index 91e4202b0634..f927b1f45474 100644 if (cpu >= nr_cpu_ids) { /* * Failed to find any suitable CPU. -@@ -1824,7 +1824,7 @@ static void set_curr_task_dl(struct rq *rq) +@@ -1823,7 +1823,7 @@ static void set_curr_task_dl(struct rq *rq) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -562,7 +562,7 @@ index 91e4202b0634..f927b1f45474 100644 return 1; return 0; } -@@ -1974,7 +1974,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) +@@ -1973,7 +1973,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) /* Retry if something changed. 
*/ if (double_lock_balance(rq, later_rq)) { if (unlikely(task_rq(task) != rq || @@ -572,7 +572,7 @@ index 91e4202b0634..f927b1f45474 100644 !dl_task(task) || !task_on_rq_queued(task))) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 53acadf72cd9..c17d63b06026 100644 +index 289c966f907a..0048a32a3b4d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1630,7 +1630,7 @@ static void task_numa_compare(struct task_numa_env *env, @@ -593,7 +593,7 @@ index 53acadf72cd9..c17d63b06026 100644 continue; env->dst_cpu = cpu; -@@ -5737,7 +5737,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, +@@ -5741,7 +5741,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), @@ -602,7 +602,7 @@ index 53acadf72cd9..c17d63b06026 100644 continue; local_group = cpumask_test_cpu(this_cpu, -@@ -5869,7 +5869,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this +@@ -5873,7 +5873,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ @@ -611,7 +611,7 @@ index 53acadf72cd9..c17d63b06026 100644 if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); -@@ -5909,7 +5909,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p +@@ -5913,7 +5913,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p { int new_cpu = cpu; @@ -620,7 +620,7 @@ index 53acadf72cd9..c17d63b06026 100644 return prev_cpu; /* -@@ -6026,7 +6026,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int +@@ -6030,7 +6030,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int if (!test_idle_cores(target, false)) return -1; @@ -629,7 +629,7 @@ index 53acadf72cd9..c17d63b06026 100644 for_each_cpu_wrap(core, cpus, target) { bool idle = true; -@@ -6060,7 +6060,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t +@@ -6064,7 +6064,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { @@ -638,7 +638,7 @@ index 53acadf72cd9..c17d63b06026 100644 continue; if (available_idle_cpu(cpu)) return cpu; -@@ -6123,7 +6123,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t +@@ -6127,7 +6127,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { if (!--nr) return -1; @@ -647,7 +647,7 @@ index 53acadf72cd9..c17d63b06026 100644 continue; if (available_idle_cpu(cpu)) break; -@@ -6160,7 +6160,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) +@@ -6164,7 +6164,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && available_idle_cpu(recent_used_cpu) && @@ -656,7 +656,7 @@ index 53acadf72cd9..c17d63b06026 100644 /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: -@@ -6378,7 +6378,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f +@@ -6382,7 +6382,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (sd_flag & SD_BALANCE_WAKE) { 
record_wakee(p); want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) @@ -665,7 +665,7 @@ index 53acadf72cd9..c17d63b06026 100644 } rcu_read_lock(); -@@ -7117,14 +7117,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -7121,14 +7121,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or @@ -682,7 +682,7 @@ index 53acadf72cd9..c17d63b06026 100644 int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); -@@ -7144,7 +7144,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -7148,7 +7148,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { @@ -691,7 +691,7 @@ index 53acadf72cd9..c17d63b06026 100644 env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; -@@ -7741,7 +7741,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) +@@ -7745,7 +7745,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) /* * Group imbalance indicates (and tries to solve) the problem where balancing @@ -700,7 +700,7 @@ index 53acadf72cd9..c17d63b06026 100644 * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. -@@ -8356,7 +8356,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) +@@ -8360,7 +8360,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically @@ -709,7 +709,7 @@ index 53acadf72cd9..c17d63b06026 100644 */ if (busiest->group_type == group_imbalanced) goto force_balance; -@@ -8752,7 +8752,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, +@@ -8756,7 +8756,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * if the curr task on busiest CPU can't be * moved to this_cpu: */ @@ -719,7 +719,7 @@ index 53acadf72cd9..c17d63b06026 100644 flags); env.flags |= LBF_ALL_PINNED; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c -index 2e2955a8cf8f..4857ca145119 100644 +index b980cc96604f..b6ca4a630050 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1611,7 +1611,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) diff --git a/kernel/patches-4.19.x-rt/0027-kernel-sched-core-add-migrate_disable.patch b/kernel/patches-4.19.x-rt/0027-kernel-sched-core-add-migrate_disable.patch index 229f43d13..cd15a0a85 100644 --- a/kernel/patches-4.19.x-rt/0027-kernel-sched-core-add-migrate_disable.patch +++ b/kernel/patches-4.19.x-rt/0027-kernel-sched-core-add-migrate_disable.patch @@ -1,7 +1,7 @@ -From 2fc8b5c9ca4ff2df7913d6e6d75a98bdece9b264 Mon Sep 17 00:00:00 2001 +From de7795ed1a14201e737e05a2d1b240a9dc6aded6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 27 May 2017 19:02:06 +0200 -Subject: [PATCH 027/269] kernel/sched/core: add migrate_disable() +Subject: [PATCH 027/268] kernel/sched/core: add migrate_disable() --- include/linux/preempt.h | 23 +++++++ @@ -85,7 +85,7 @@ index 9fb239e12b82..5801e516ba63 100644 * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 80badc70c258..3df110e8c6f9 100644 +index d6f690064cce..b658f0147c3b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1008,7 +1008,15 @@ void 
set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma @@ -144,7 +144,7 @@ index 80badc70c258..3df110e8c6f9 100644 dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); if (task_running(rq, p) || p->state == TASK_WAKING) { struct migration_arg arg = { p, dest_cpu }; -@@ -7060,3 +7089,100 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7067,3 +7096,100 @@ const u32 sched_prio_to_wmult[40] = { }; #undef CREATE_TRACE_POINTS @@ -246,7 +246,7 @@ index 80badc70c258..3df110e8c6f9 100644 +EXPORT_SYMBOL(migrate_enable); +#endif diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c -index 141ea9ff210e..34c27afae009 100644 +index 78fadf0438ea..5027158d3908 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -982,6 +982,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, diff --git a/kernel/patches-4.19.x-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/kernel/patches-4.19.x-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch index 87f95476d..c0fc03af5 100644 --- a/kernel/patches-4.19.x-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch +++ b/kernel/patches-4.19.x-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch @@ -1,7 +1,7 @@ -From 0af010b771c642c17c33fbc991e183c04427af59 Mon Sep 17 00:00:00 2001 +From 3eb644da699b9d5916ad3b0e8465e4edc5f6a333 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 9 Oct 2018 17:34:50 +0200 -Subject: [PATCH 028/269] sched/migrate_disable: Add export_symbol_gpl for +Subject: [PATCH 028/268] sched/migrate_disable: Add export_symbol_gpl for __migrate_disabled Jonathan reported that lttng/modules can't use __migrate_disabled(). @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 3df110e8c6f9..9c4a9f0a627b 100644 +index b658f0147c3b..7a39d56f6a6b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1013,6 +1013,7 @@ int __migrate_disabled(struct task_struct *p) diff --git a/kernel/patches-4.19.x-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch b/kernel/patches-4.19.x-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch index ddbd99720..feb24b4bf 100644 --- a/kernel/patches-4.19.x-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch +++ b/kernel/patches-4.19.x-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch @@ -1,7 +1,7 @@ -From 245bd7bd92ce193e01ef35fbdaae505d5eefd28b Mon Sep 17 00:00:00 2001 +From c397de61ffe5014b500d4cb9041776ee6f1fdf96 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 9 Mar 2016 10:51:06 +0100 -Subject: [PATCH 029/269] arm: at91: do not disable/enable clocks in a row +Subject: [PATCH 029/268] arm: at91: do not disable/enable clocks in a row Currently the driver will disable the clock and enable it one line later if it is switching from periodic mode into one shot. 
diff --git a/kernel/patches-4.19.x-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch b/kernel/patches-4.19.x-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch index 94b5d25d1..b2074e730 100644 --- a/kernel/patches-4.19.x-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch +++ b/kernel/patches-4.19.x-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch @@ -1,7 +1,7 @@ -From 7b123775c97399cd5ca5394392bf72c5d73f2808 Mon Sep 17 00:00:00 2001 +From b0ab70a4f78b70454e5b35d8a21170fedc297c06 Mon Sep 17 00:00:00 2001 From: Benedikt Spranger Date: Mon, 8 Mar 2010 18:57:04 +0100 -Subject: [PATCH 030/269] clocksource: TCLIB: Allow higher clock rates for +Subject: [PATCH 030/268] clocksource: TCLIB: Allow higher clock rates for clock events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 diff --git a/kernel/patches-4.19.x-rt/0031-timekeeping-Split-jiffies-seqlock.patch b/kernel/patches-4.19.x-rt/0031-timekeeping-Split-jiffies-seqlock.patch index 13e1912aa..239564892 100644 --- a/kernel/patches-4.19.x-rt/0031-timekeeping-Split-jiffies-seqlock.patch +++ b/kernel/patches-4.19.x-rt/0031-timekeeping-Split-jiffies-seqlock.patch @@ -1,7 +1,7 @@ -From 5a0bfb35b3b826135a39a8e8744e9926b5be7607 Mon Sep 17 00:00:00 2001 +From b09d65be8e3e09795b3ce4ab655f04b5a7b42217 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 14 Feb 2013 22:36:59 +0100 -Subject: [PATCH 031/269] timekeeping: Split jiffies seqlock +Subject: [PATCH 031/268] timekeeping: Split jiffies seqlock Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so it can be taken in atomic context on RT. @@ -135,7 +135,7 @@ index 5b33e2f5c0ed..54fd344ef973 100644 ts->timer_expires_base = basemono; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c -index 7846ce24ecc0..68cf97548cba 100644 +index 9a6bfcd22dc6..5e584cdebd24 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2417,8 +2417,10 @@ EXPORT_SYMBOL(hardpps); diff --git a/kernel/patches-4.19.x-rt/0032-signal-Revert-ptrace-preempt-magic.patch b/kernel/patches-4.19.x-rt/0032-signal-Revert-ptrace-preempt-magic.patch index d8aee4bd7..5b264a2f7 100644 --- a/kernel/patches-4.19.x-rt/0032-signal-Revert-ptrace-preempt-magic.patch +++ b/kernel/patches-4.19.x-rt/0032-signal-Revert-ptrace-preempt-magic.patch @@ -1,7 +1,7 @@ -From a9a18a8c88bd90bdac5f33690be17244dc22bd22 Mon Sep 17 00:00:00 2001 +From f1184fbeee330b224af538e519e2215522bbc951 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 21 Sep 2011 19:57:12 +0200 -Subject: [PATCH 032/269] signal: Revert ptrace preempt magic +Subject: [PATCH 032/268] signal: Revert ptrace preempt magic Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more than a bandaid around the ptrace design trainwreck. 
It's not a @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 8 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c -index 9102d60fc5c6..f29def2be652 100644 +index 0e6bc3049427..d5a9646b3538 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2094,15 +2094,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) diff --git a/kernel/patches-4.19.x-rt/0033-net-sched-Use-msleep-instead-of-yield.patch b/kernel/patches-4.19.x-rt/0033-net-sched-Use-msleep-instead-of-yield.patch index f13739e2d..adb04e39a 100644 --- a/kernel/patches-4.19.x-rt/0033-net-sched-Use-msleep-instead-of-yield.patch +++ b/kernel/patches-4.19.x-rt/0033-net-sched-Use-msleep-instead-of-yield.patch @@ -1,7 +1,7 @@ -From b1e277ed2b65bf647c2a6dc2d103ffe5aa2e4fa7 Mon Sep 17 00:00:00 2001 +From 677aefdaf998f7a5c0fe715201a903187adf03e3 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 5 Mar 2014 00:49:47 +0100 -Subject: [PATCH 033/269] net: sched: Use msleep() instead of yield() +Subject: [PATCH 033/268] net: sched: Use msleep() instead of yield() On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50 (by default). If a high priority userspace process tries to shut down a busy diff --git a/kernel/patches-4.19.x-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch b/kernel/patches-4.19.x-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch index b1e260240..f65609ec1 100644 --- a/kernel/patches-4.19.x-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch +++ b/kernel/patches-4.19.x-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch @@ -1,7 +1,7 @@ -From 812137beb49a5dea2e269ea9739d0ed291e27375 Mon Sep 17 00:00:00 2001 +From 7f70887609c161140d9b37c3865fd0e01c755e11 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 27 Mar 2018 16:24:15 +0200 -Subject: [PATCH 034/269] dm rq: remove BUG_ON(!irqs_disabled) check +Subject: [PATCH 034/268] dm rq: remove BUG_ON(!irqs_disabled) check In commit 052189a2ec95 ("dm: remove superfluous irq disablement in dm_request_fn") the spin_lock_irq() was replaced with spin_lock() + a diff --git a/kernel/patches-4.19.x-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch b/kernel/patches-4.19.x-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch index 471514e68..9bbff023e 100644 --- a/kernel/patches-4.19.x-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch +++ b/kernel/patches-4.19.x-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch @@ -1,7 +1,7 @@ -From e958966734633c26363abc8920eca9c38e5cd7ce Mon Sep 17 00:00:00 2001 +From a7d0a6cef67c51b65ef7a826069f17fd3e027d7c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 8 Nov 2013 17:34:54 +0100 -Subject: [PATCH 035/269] usb: do no disable interrupts in giveback +Subject: [PATCH 035/268] usb: do no disable interrupts in giveback Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet context") the USB code disables interrupts before invoking the complete @@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 3 deletions(-) diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c -index 1c21955fe7c0..7863dec34f0b 100644 +index b82a7d787add..2f3015356124 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1738,7 +1738,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb) diff --git a/kernel/patches-4.19.x-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch b/kernel/patches-4.19.x-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch index 0ce597586..4a8626726 100644 
--- a/kernel/patches-4.19.x-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch +++ b/kernel/patches-4.19.x-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch @@ -1,7 +1,7 @@ -From 588e8fb01ec7915ef280606b80bd605f49c56915 Mon Sep 17 00:00:00 2001 +From f478188cf8735d85f62c91f1766b395f8294e72c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 17 Jun 2011 12:39:57 +0200 -Subject: [PATCH 036/269] rt: Provide PREEMPT_RT_BASE config switch +Subject: [PATCH 036/268] rt: Provide PREEMPT_RT_BASE config switch Introduce PREEMPT_RT_BASE which enables parts of PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT diff --git a/kernel/patches-4.19.x-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch b/kernel/patches-4.19.x-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch index b2f4a29f9..c319ebfb0 100644 --- a/kernel/patches-4.19.x-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch +++ b/kernel/patches-4.19.x-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch @@ -1,7 +1,7 @@ -From 9480b8b41cb649337466e43807eff3816a9530bc Mon Sep 17 00:00:00 2001 +From e32f48e4f42494ed9a8908091c76af925fca59d6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 14 Dec 2011 01:03:49 +0100 -Subject: [PATCH 037/269] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT +Subject: [PATCH 037/268] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT There are "valid" GFP_ATOMIC allocations such as diff --git a/kernel/patches-4.19.x-rt/0038-jump-label-disable-if-stop_machine-is-used.patch b/kernel/patches-4.19.x-rt/0038-jump-label-disable-if-stop_machine-is-used.patch index c1d5b0e61..da4c568d6 100644 --- a/kernel/patches-4.19.x-rt/0038-jump-label-disable-if-stop_machine-is-used.patch +++ b/kernel/patches-4.19.x-rt/0038-jump-label-disable-if-stop_machine-is-used.patch @@ -1,7 +1,7 @@ -From d23a435dc809c84e3185683681ef735f2097fe57 Mon Sep 17 00:00:00 2001 +From c4b0f04fc2d915cc272de6018b5581fce5d5dda7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 8 Jul 2015 17:14:48 +0200 -Subject: [PATCH 038/269] jump-label: disable if stop_machine() is used +Subject: [PATCH 038/268] jump-label: disable if stop_machine() is used Some architectures are using stop_machine() while switching the opcode which leads to latency spikes. 
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index cd4c74daf71e..27a5f0b9ddc7 100644 +index 51794c7fa6d5..7d11242a37d2 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -51,7 +51,7 @@ config ARM diff --git a/kernel/patches-4.19.x-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch b/kernel/patches-4.19.x-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch index 1b4dda183..e1519fd36 100644 --- a/kernel/patches-4.19.x-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch +++ b/kernel/patches-4.19.x-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch @@ -1,7 +1,7 @@ -From 6c83d4802fcd91010b16a5a69456c7370cd10f9f Mon Sep 17 00:00:00 2001 +From 29c669c7c70f5c80be40b69f20b4c4a8034ee7a1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 24 Jul 2011 12:11:43 +0200 -Subject: [PATCH 039/269] kconfig: Disable config options which are not RT +Subject: [PATCH 039/268] kconfig: Disable config options which are not RT compatible Disable stuff which is known to have issues on RT @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/Kconfig b/arch/Kconfig -index 6801123932a5..42b9062b9dbf 100644 +index a336548487e6..3f537b264852 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -28,6 +28,7 @@ config OPROFILE diff --git a/kernel/patches-4.19.x-rt/0040-lockdep-disable-self-test.patch b/kernel/patches-4.19.x-rt/0040-lockdep-disable-self-test.patch index 8866d416c..d1be9db78 100644 --- a/kernel/patches-4.19.x-rt/0040-lockdep-disable-self-test.patch +++ b/kernel/patches-4.19.x-rt/0040-lockdep-disable-self-test.patch @@ -1,7 +1,7 @@ -From 968d103b4727308889b77f3fa556e149bba6d56c Mon Sep 17 00:00:00 2001 +From 36f9204a770887f7280fb9db2450f6dd8ef11b33 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 17 Oct 2017 16:36:18 +0200 -Subject: [PATCH 040/269] lockdep: disable self-test +Subject: [PATCH 040/268] lockdep: disable self-test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index 4966c4fbe7f7..92e7d88946f7 100644 +index 3dea52f7be9c..1504e6aa8418 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1207,7 +1207,7 @@ config DEBUG_ATOMIC_SLEEP diff --git a/kernel/patches-4.19.x-rt/0041-mm-Allow-only-slub-on-RT.patch b/kernel/patches-4.19.x-rt/0041-mm-Allow-only-slub-on-RT.patch index fede8384d..e478f9f29 100644 --- a/kernel/patches-4.19.x-rt/0041-mm-Allow-only-slub-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0041-mm-Allow-only-slub-on-RT.patch @@ -1,7 +1,7 @@ -From 16680836f36c75ccaff96ab3155869144b0dd028 Mon Sep 17 00:00:00 2001 +From 13ed901518597f2db47f69a5e06cd219389bc331 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:44:03 -0500 -Subject: [PATCH 041/269] mm: Allow only slub on RT +Subject: [PATCH 041/268] mm: Allow only slub on RT Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs. 
@@ -12,10 +12,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 2 insertions(+) diff --git a/init/Kconfig b/init/Kconfig -index 864af10bb1b9..f3f073942c30 100644 +index 47035b5a46f6..ae9a0113a699 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -1634,6 +1634,7 @@ choice +@@ -1637,6 +1637,7 @@ choice config SLAB bool "SLAB" @@ -23,7 +23,7 @@ index 864af10bb1b9..f3f073942c30 100644 select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work -@@ -1654,6 +1655,7 @@ config SLUB +@@ -1657,6 +1658,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" diff --git a/kernel/patches-4.19.x-rt/0042-locking-Disable-spin-on-owner-for-RT.patch b/kernel/patches-4.19.x-rt/0042-locking-Disable-spin-on-owner-for-RT.patch index 9e1993b9f..5ea5c0f60 100644 --- a/kernel/patches-4.19.x-rt/0042-locking-Disable-spin-on-owner-for-RT.patch +++ b/kernel/patches-4.19.x-rt/0042-locking-Disable-spin-on-owner-for-RT.patch @@ -1,7 +1,7 @@ -From a506cf490ae3e346c6082877f109fcf34568f22d Mon Sep 17 00:00:00 2001 +From 3437a78b60ceab08492a829b2e8662f7ec640860 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:51:45 +0200 -Subject: [PATCH 042/269] locking: Disable spin on owner for RT +Subject: [PATCH 042/268] locking: Disable spin on owner for RT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit diff --git a/kernel/patches-4.19.x-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch b/kernel/patches-4.19.x-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch index 471ae277d..e5c68b119 100644 --- a/kernel/patches-4.19.x-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch @@ -1,7 +1,7 @@ -From 30987f403875e211eee90cac11127e04b1a27c73 Mon Sep 17 00:00:00 2001 +From 43b1a887cc73fc7f4685ceb5b0e34671e87989b7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 13:26:09 +0000 -Subject: [PATCH 043/269] rcu: Disable RCU_FAST_NO_HZ on RT +Subject: [PATCH 043/268] rcu: Disable RCU_FAST_NO_HZ on RT This uses a timer_list timer from the irq disabled guts of the idle code. Disable it for now to prevent wreckage. diff --git a/kernel/patches-4.19.x-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch b/kernel/patches-4.19.x-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch index a8d71c6b2..ac23c12b3 100644 --- a/kernel/patches-4.19.x-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch @@ -1,7 +1,7 @@ -From 709173f4678f7f2f0b834e508d8044821d1c2354 Mon Sep 17 00:00:00 2001 +From f9b4cb4ccfa7e2d695d4510b8159b5205fe80c97 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 21 Mar 2014 20:19:05 +0100 -Subject: [PATCH 044/269] rcu: make RCU_BOOST default on RT +Subject: [PATCH 044/268] rcu: make RCU_BOOST default on RT Since it is no longer invoked from the softirq people run into OOM more often if the priority of the RCU thread is too low. 
Making boosting diff --git a/kernel/patches-4.19.x-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch b/kernel/patches-4.19.x-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch index 756af2efc..fabd6db66 100644 --- a/kernel/patches-4.19.x-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch @@ -1,7 +1,7 @@ -From 56d2f884391ba7e98721f6639f87698e46429c7f Mon Sep 17 00:00:00 2001 +From ca1b9c071f5b45c0d82722d5b9f957d4c2be185e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:03:52 +0200 -Subject: [PATCH 045/269] sched: Disable CONFIG_RT_GROUP_SCHED on RT +Subject: [PATCH 045/268] sched: Disable CONFIG_RT_GROUP_SCHED on RT Carsten reported problems when running: @@ -18,10 +18,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 1 insertion(+) diff --git a/init/Kconfig b/init/Kconfig -index f3f073942c30..707ca4d49944 100644 +index ae9a0113a699..61e8b531649b 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -781,6 +781,7 @@ config CFS_BANDWIDTH +@@ -784,6 +784,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED diff --git a/kernel/patches-4.19.x-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch b/kernel/patches-4.19.x-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch index 26f9d52fd..ea54e00db 100644 --- a/kernel/patches-4.19.x-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch +++ b/kernel/patches-4.19.x-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch @@ -1,7 +1,7 @@ -From a5a9737c0c6edf17eecb16a923a936432f11019e Mon Sep 17 00:00:00 2001 +From efe57b197e5cc1e85e486bebe98e95235f5c3410 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 27 May 2017 19:02:06 +0200 -Subject: [PATCH 046/269] net/core: disable NET_RX_BUSY_POLL +Subject: [PATCH 046/268] net/core: disable NET_RX_BUSY_POLL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit diff --git a/kernel/patches-4.19.x-rt/0047-arm-disable-NEON-in-kernel-mode.patch b/kernel/patches-4.19.x-rt/0047-arm-disable-NEON-in-kernel-mode.patch index 2f78bbd73..27bb28384 100644 --- a/kernel/patches-4.19.x-rt/0047-arm-disable-NEON-in-kernel-mode.patch +++ b/kernel/patches-4.19.x-rt/0047-arm-disable-NEON-in-kernel-mode.patch @@ -1,7 +1,7 @@ -From 0db6c523b2591dbf527c759ef1b3718f96bc3c29 Mon Sep 17 00:00:00 2001 +From f9353b78df9da136edb2afc49f247e8576de5764 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 1 Dec 2017 10:42:03 +0100 -Subject: [PATCH 047/269] arm*: disable NEON in kernel mode +Subject: [PATCH 047/268] arm*: disable NEON in kernel mode NEON in kernel mode is used by the crypto algorithms and raid6 code. 
While the raid6 code looks okay, the crypto algorithms do not: NEON @@ -20,10 +20,10 @@ Signed-off-by: Sebastian Andrzej Siewior 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 27a5f0b9ddc7..91f4f80a6f24 100644 +index 7d11242a37d2..e122dd212ab3 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -2161,7 +2161,7 @@ config NEON +@@ -2162,7 +2162,7 @@ config NEON config KERNEL_MODE_NEON bool "Support for NEON in kernel mode" diff --git a/kernel/patches-4.19.x-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch b/kernel/patches-4.19.x-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch index 478b571b5..cf98d0b05 100644 --- a/kernel/patches-4.19.x-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch @@ -1,7 +1,7 @@ -From 24bc2177006a16588c79a438ba84122ec215135a Mon Sep 17 00:00:00 2001 +From 11ee0822882b1b72d163eb6fbdf161b8f9178f7c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 048/269] powerpc: Use generic rwsem on RT +Subject: [PATCH 048/268] powerpc: Use generic rwsem on RT Use generic code which uses rtmutex diff --git a/kernel/patches-4.19.x-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/kernel/patches-4.19.x-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch index 8b81330dd..ae1bf1711 100644 --- a/kernel/patches-4.19.x-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch +++ b/kernel/patches-4.19.x-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch @@ -1,7 +1,7 @@ -From 86dd7e931e1f812e0fc9b44545ed1f9ffc80dcae Mon Sep 17 00:00:00 2001 +From ee886c042d19029e21776b96c9e1f1f9d00d2da4 Mon Sep 17 00:00:00 2001 From: Bogdan Purcareata Date: Fri, 24 Apr 2015 15:53:13 +0000 -Subject: [PATCH 049/269] powerpc/kvm: Disable in-kernel MPIC emulation for +Subject: [PATCH 049/268] powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL While converting the openpic emulation code to use a raw_spinlock_t enables diff --git a/kernel/patches-4.19.x-rt/0050-powerpc-Disable-highmem-on-RT.patch b/kernel/patches-4.19.x-rt/0050-powerpc-Disable-highmem-on-RT.patch index 3947fee11..d28a6e4bf 100644 --- a/kernel/patches-4.19.x-rt/0050-powerpc-Disable-highmem-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0050-powerpc-Disable-highmem-on-RT.patch @@ -1,7 +1,7 @@ -From f5b4401c967f9ead16662b347d2082f8f2743205 Mon Sep 17 00:00:00 2001 +From 9a80112c0cde9a52ce2e158bd6391f0e5035dc04 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:08:34 +0200 -Subject: [PATCH 050/269] powerpc: Disable highmem on RT +Subject: [PATCH 050/268] powerpc: Disable highmem on RT The current highmem handling on -RT is not compatible and needs fixups. diff --git a/kernel/patches-4.19.x-rt/0051-mips-Disable-highmem-on-RT.patch b/kernel/patches-4.19.x-rt/0051-mips-Disable-highmem-on-RT.patch index 44948abc2..9bb9691dc 100644 --- a/kernel/patches-4.19.x-rt/0051-mips-Disable-highmem-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0051-mips-Disable-highmem-on-RT.patch @@ -1,7 +1,7 @@ -From 29b46bfd781d871ae857c940e6ef76454bf356c2 Mon Sep 17 00:00:00 2001 +From 593308991349a9b9d16ccb020dee918680b9f6e4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:10:12 +0200 -Subject: [PATCH 051/269] mips: Disable highmem on RT +Subject: [PATCH 051/268] mips: Disable highmem on RT The current highmem handling on -RT is not compatible and needs fixups. 
diff --git a/kernel/patches-4.19.x-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch b/kernel/patches-4.19.x-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch index 38802d8e2..ca52bdf16 100644 --- a/kernel/patches-4.19.x-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch +++ b/kernel/patches-4.19.x-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch @@ -1,7 +1,7 @@ -From 789344b11534d2799fbc807496846f21869124b5 Mon Sep 17 00:00:00 2001 +From 4609ae9016c2cac126bafd6b4efb7bc5be590bbc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 26 Jul 2009 02:21:32 +0200 -Subject: [PATCH 052/269] x86: Use generic rwsem_spinlocks on -rt +Subject: [PATCH 052/268] x86: Use generic rwsem_spinlocks on -rt Simplifies the separation of anon_rw_semaphores and rw_semaphores for -rt. diff --git a/kernel/patches-4.19.x-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch b/kernel/patches-4.19.x-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch index 070bb7bbc..a007efb87 100644 --- a/kernel/patches-4.19.x-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch @@ -1,7 +1,7 @@ -From 7554227ac04319dadc334245535dd1d21d258de0 Mon Sep 17 00:00:00 2001 +From 542738bf81c9f5b0ab0de898b1c785fb12a5caf8 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 23 Jan 2014 14:45:59 +0100 -Subject: [PATCH 053/269] leds: trigger: disable CPU trigger on -RT +Subject: [PATCH 053/268] leds: trigger: disable CPU trigger on -RT as it triggers: |CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141 diff --git a/kernel/patches-4.19.x-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/kernel/patches-4.19.x-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch index 2208897b4..265949236 100644 --- a/kernel/patches-4.19.x-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch +++ b/kernel/patches-4.19.x-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch @@ -1,7 +1,7 @@ -From 57c3607ed990ada1d1636542d00bd3ed95e243da Mon Sep 17 00:00:00 2001 +From 0dcf02e4d56dcf2255f436d792959cf28a04dc54 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 9 Apr 2015 15:23:01 +0200 -Subject: [PATCH 054/269] cpufreq: drop K8's driver from beeing selected +Subject: [PATCH 054/268] cpufreq: drop K8's driver from beeing selected Ralf posted a picture of a backtrace from diff --git a/kernel/patches-4.19.x-rt/0055-md-disable-bcache.patch b/kernel/patches-4.19.x-rt/0055-md-disable-bcache.patch index 2c4bc18ad..5a1d2f728 100644 --- a/kernel/patches-4.19.x-rt/0055-md-disable-bcache.patch +++ b/kernel/patches-4.19.x-rt/0055-md-disable-bcache.patch @@ -1,7 +1,7 @@ -From 53eb768ccfb675d61d67bd236402aa90434a6923 Mon Sep 17 00:00:00 2001 +From a01400fc47cb4ce13c00b03a908533848af6cf74 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 11:48:57 +0200 -Subject: [PATCH 055/269] md: disable bcache +Subject: [PATCH 055/268] md: disable bcache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit diff --git a/kernel/patches-4.19.x-rt/0056-efi-Disable-runtime-services-on-RT.patch b/kernel/patches-4.19.x-rt/0056-efi-Disable-runtime-services-on-RT.patch index 412995274..c15bad5cb 100644 --- a/kernel/patches-4.19.x-rt/0056-efi-Disable-runtime-services-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0056-efi-Disable-runtime-services-on-RT.patch @@ -1,7 +1,7 @@ -From 62309a1da779bde384a7645a7d3e2713520a76da Mon Sep 17 00:00:00 2001 +From 
d141803da471bfe539c38f8022cf8f6297aa27a6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 15:03:16 +0200 -Subject: [PATCH 056/269] efi: Disable runtime services on RT +Subject: [PATCH 056/268] efi: Disable runtime services on RT Based on meassurements the EFI functions get_variable / get_next_variable take up to 2us which looks okay. diff --git a/kernel/patches-4.19.x-rt/0057-printk-Add-a-printk-kill-switch.patch b/kernel/patches-4.19.x-rt/0057-printk-Add-a-printk-kill-switch.patch index bcfcfc57a..cdc19e09b 100644 --- a/kernel/patches-4.19.x-rt/0057-printk-Add-a-printk-kill-switch.patch +++ b/kernel/patches-4.19.x-rt/0057-printk-Add-a-printk-kill-switch.patch @@ -1,7 +1,7 @@ -From 09acfc4d67168f054485eb40955069fa2390a5ec Mon Sep 17 00:00:00 2001 +From 9de1c1091c3b7a7d6037733b3d3026601eb096cd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 22 Jul 2011 17:58:40 +0200 -Subject: [PATCH 057/269] printk: Add a printk kill switch +Subject: [PATCH 057/268] printk: Add a printk kill switch Add a prinkt-kill-switch. This is used from (NMI) watchdog to ensure that it does not dead-lock with the early printk code. diff --git a/kernel/patches-4.19.x-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch b/kernel/patches-4.19.x-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch index 46d56fa3c..2290009b2 100644 --- a/kernel/patches-4.19.x-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch +++ b/kernel/patches-4.19.x-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch @@ -1,7 +1,7 @@ -From 3dd75cbf0c1ddd8dc0a7c0492e86f7293a730145 Mon Sep 17 00:00:00 2001 +From 55ac9fe1a16eab7f85e646280cb842f4cf31e27d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 2 Sep 2011 14:41:29 +0200 -Subject: [PATCH 058/269] printk: Add "force_early_printk" boot param to help +Subject: [PATCH 058/268] printk: Add "force_early_printk" boot param to help with debugging Gives me an option to screw printk and actually see what the machine diff --git a/kernel/patches-4.19.x-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch b/kernel/patches-4.19.x-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch index d9eb70eac..7bbb64ef5 100644 --- a/kernel/patches-4.19.x-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch +++ b/kernel/patches-4.19.x-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch @@ -1,7 +1,7 @@ -From 31772df387205be3a95e3d0bc21b7b81a244f6df Mon Sep 17 00:00:00 2001 +From bfb747584de1b0f321d8f3b893580b4d39f93fec Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 24 Jul 2009 12:38:56 +0200 -Subject: [PATCH 059/269] preempt: Provide preempt_*_(no)rt variants +Subject: [PATCH 059/268] preempt: Provide preempt_*_(no)rt variants RT needs a few preempt_disable/enable points which are not necessary otherwise. Implement variants to avoid #ifdeffery. 
diff --git a/kernel/patches-4.19.x-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch b/kernel/patches-4.19.x-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch index b489a8cb7..422ec179b 100644 --- a/kernel/patches-4.19.x-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch +++ b/kernel/patches-4.19.x-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch @@ -1,7 +1,7 @@ -From c78bd62f56b86aa7717ac7a79e288fa8b3978573 Mon Sep 17 00:00:00 2001 +From 66ea8bcf4a5d31c51bb0b08b6fcfdb920bf7df51 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 8 Mar 2017 14:23:35 +0100 -Subject: [PATCH 060/269] futex: workaround migrate_disable/enable in different +Subject: [PATCH 060/268] futex: workaround migrate_disable/enable in different context migrate_disable()/migrate_enable() takes a different path in atomic() vs @@ -16,10 +16,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 19 insertions(+) diff --git a/kernel/futex.c b/kernel/futex.c -index 5a26d843a015..1bd0950bea4e 100644 +index afdc5eadce6e..304f07d08c95 100644 --- a/kernel/futex.c +++ b/kernel/futex.c -@@ -2859,6 +2859,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, +@@ -2876,6 +2876,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, * before __rt_mutex_start_proxy_lock() is done. */ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); @@ -34,7 +34,7 @@ index 5a26d843a015..1bd0950bea4e 100644 spin_unlock(q.lock_ptr); /* * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter -@@ -2867,6 +2875,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, +@@ -2884,6 +2892,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, */ ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); @@ -42,7 +42,7 @@ index 5a26d843a015..1bd0950bea4e 100644 if (ret) { if (ret == 1) -@@ -3015,11 +3024,21 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) +@@ -3032,11 +3041,21 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) * rt_waiter. Also see the WARN in wake_futex_pi(). */ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); diff --git a/kernel/patches-4.19.x-rt/0061-rt-Add-local-irq-locks.patch b/kernel/patches-4.19.x-rt/0061-rt-Add-local-irq-locks.patch index 2d8d9f01a..3c1255566 100644 --- a/kernel/patches-4.19.x-rt/0061-rt-Add-local-irq-locks.patch +++ b/kernel/patches-4.19.x-rt/0061-rt-Add-local-irq-locks.patch @@ -1,7 +1,7 @@ -From 5b811e266fa9c293395c73c7a21e7e5c5a51deb1 Mon Sep 17 00:00:00 2001 +From 4104d3d4d2d94ac19edef31fef10a471b4085c24 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 20 Jun 2011 09:03:47 +0200 -Subject: [PATCH 061/269] rt: Add local irq locks +Subject: [PATCH 061/268] rt: Add local irq locks Introduce locallock. For !RT this maps to preempt_disable()/ local_irq_disable() so there is not much that changes. 
For RT this will diff --git a/kernel/patches-4.19.x-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch b/kernel/patches-4.19.x-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch index 2baa52693..98068d0a9 100644 --- a/kernel/patches-4.19.x-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch +++ b/kernel/patches-4.19.x-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch @@ -1,7 +1,7 @@ -From 251ca7087d744d8b174f8488d2f7ea42cedaccf3 Mon Sep 17 00:00:00 2001 +From c16b283427931d20279125c773aca78b01235f68 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Mon, 7 May 2018 08:58:56 -0500 -Subject: [PATCH 062/269] locallock: provide {get,put}_locked_ptr() variants +Subject: [PATCH 062/268] locallock: provide {get,put}_locked_ptr() variants Provide a set of locallocked accessors for pointers to per-CPU data; this is useful for dynamically-allocated per-CPU regions, for example. diff --git a/kernel/patches-4.19.x-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch b/kernel/patches-4.19.x-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch index 7de15391b..c3f496a09 100644 --- a/kernel/patches-4.19.x-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch @@ -1,7 +1,7 @@ -From bdf1c5db6f1c5d8fe706592f9373849948d65813 Mon Sep 17 00:00:00 2001 +From c8f195054bbdf59c6e791bbd698d3ffce9776265 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:34 -0500 -Subject: [PATCH 063/269] mm/scatterlist: Do not disable irqs on RT +Subject: [PATCH 063/268] mm/scatterlist: Do not disable irqs on RT For -RT it is enough to keep pagefault disabled (which is currently handled by kmap_atomic()). diff --git a/kernel/patches-4.19.x-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch b/kernel/patches-4.19.x-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch index 586b93743..7e2560150 100644 --- a/kernel/patches-4.19.x-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch +++ b/kernel/patches-4.19.x-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch @@ -1,7 +1,7 @@ -From d892f2116baf1643d4d3c792231c687fa49b71ce Mon Sep 17 00:00:00 2001 +From 41445095315411d8baebfc36b2f6f30aefe2abc6 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 064/269] signal/x86: Delay calling signals in atomic +Subject: [PATCH 064/268] signal/x86: Delay calling signals in atomic On x86_64 we must disable preemption before we enable interrupts for stack faults, int3 and debugging, because the current task is using @@ -37,10 +37,10 @@ Signed-off-by: Thomas Gleixner 4 files changed, 59 insertions(+), 2 deletions(-) diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c -index 3b2490b81918..ec46ee700791 100644 +index 8353348ddeaf..91676b0d2d4c 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c -@@ -151,6 +151,13 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) +@@ -152,6 +152,13 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) if (cached_flags & _TIF_NEED_RESCHED) schedule(); @@ -94,7 +94,7 @@ index df39ad5916e7..535e57775208 100644 size_t sas_ss_size; unsigned int sas_ss_flags; diff --git a/kernel/signal.c b/kernel/signal.c -index f29def2be652..57c48b3d1491 100644 +index d5a9646b3538..56edb0580a3a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1268,8 +1268,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, diff --git 
a/kernel/patches-4.19.x-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch b/kernel/patches-4.19.x-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch index a6a4c561a..6c249e779 100644 --- a/kernel/patches-4.19.x-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch +++ b/kernel/patches-4.19.x-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch @@ -1,7 +1,7 @@ -From 6828880f532efdf1ded1248f5e0ea555e9520eda Mon Sep 17 00:00:00 2001 +From f9d175548eeb780a2da87b33b2055b747bdf4618 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 10 Dec 2015 10:58:51 -0800 -Subject: [PATCH 065/269] x86/signal: delay calling signals on 32bit +Subject: [PATCH 065/268] x86/signal: delay calling signals on 32bit When running some ptrace single step tests on x86-32 machine, the below problem is triggered: diff --git a/kernel/patches-4.19.x-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch b/kernel/patches-4.19.x-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch index e299bd0df..673493e88 100644 --- a/kernel/patches-4.19.x-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch +++ b/kernel/patches-4.19.x-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch @@ -1,7 +1,7 @@ -From 651a49976e8e481190cc465a5590940a6f6bbcf9 Mon Sep 17 00:00:00 2001 +From 4412fbb024c634740aa2ab4db609244bb957fb43 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 09:18:52 +0100 -Subject: [PATCH 066/269] buffer_head: Replace bh_uptodate_lock for -rt +Subject: [PATCH 066/268] buffer_head: Replace bh_uptodate_lock for -rt Wrap the bit_spin_lock calls into a separate inline and add the RT replacements with a real spinlock. diff --git a/kernel/patches-4.19.x-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch b/kernel/patches-4.19.x-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch index dea54a626..d7f0a7943 100644 --- a/kernel/patches-4.19.x-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch +++ b/kernel/patches-4.19.x-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch @@ -1,7 +1,7 @@ -From 6107effb93a85ff7db4857dca4a0acc2ec4a7d5c Mon Sep 17 00:00:00 2001 +From 35cce66d60508a5221b8ffdc4a6dabd3548ba2c9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 10:11:25 +0100 -Subject: [PATCH 067/269] fs: jbd/jbd2: Make state lock and journal head lock +Subject: [PATCH 067/268] fs: jbd/jbd2: Make state lock and journal head lock rt safe bit_spin_locks break under RT. 
@@ -44,7 +44,7 @@ index 8a1bcfb145d7..5869330d1f38 100644 } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h -index b708e5169d1d..018665350951 100644 +index 583b82b5a1e9..57f4ad8d45a5 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) diff --git a/kernel/patches-4.19.x-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch b/kernel/patches-4.19.x-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch index fe64a30c3..cf9d08222 100644 --- a/kernel/patches-4.19.x-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch +++ b/kernel/patches-4.19.x-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch @@ -1,7 +1,7 @@ -From 44a67462ebab9e354cfa669144248912fa92ca24 Mon Sep 17 00:00:00 2001 +From 39396411087b5831b20ac4fa694b3001d358989d Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Fri, 21 Jun 2013 15:07:25 -0400 -Subject: [PATCH 068/269] list_bl: Make list head locking RT safe +Subject: [PATCH 068/268] list_bl: Make list head locking RT safe As per changes in include/linux/jbd_common.h for avoiding the bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal diff --git a/kernel/patches-4.19.x-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch b/kernel/patches-4.19.x-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch index dd13f6d1c..2e4178ad6 100644 --- a/kernel/patches-4.19.x-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch +++ b/kernel/patches-4.19.x-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch @@ -1,7 +1,7 @@ -From 20f64514264a9d0ea1533f4743f542a1fb056a16 Mon Sep 17 00:00:00 2001 +From fb2fdfdc67deb3b7b5541d28f9d2cdfbebbc7a9b Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 31 Mar 2016 00:04:25 -0500 -Subject: [PATCH 069/269] list_bl: fixup bogus lockdep warning +Subject: [PATCH 069/268] list_bl: fixup bogus lockdep warning At first glance, the use of 'static inline' seems appropriate for INIT_HLIST_BL_HEAD(). diff --git a/kernel/patches-4.19.x-rt/0070-genirq-Disable-irqpoll-on-rt.patch b/kernel/patches-4.19.x-rt/0070-genirq-Disable-irqpoll-on-rt.patch index 5e2c63346..71d10b46b 100644 --- a/kernel/patches-4.19.x-rt/0070-genirq-Disable-irqpoll-on-rt.patch +++ b/kernel/patches-4.19.x-rt/0070-genirq-Disable-irqpoll-on-rt.patch @@ -1,7 +1,7 @@ -From 7520cd851f5733f5e69fe73008893f4be48506f9 Mon Sep 17 00:00:00 2001 +From 3eada8624fd7eee399757276e5f33f311f2baba3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:57 -0500 -Subject: [PATCH 070/269] genirq: Disable irqpoll on -rt +Subject: [PATCH 070/268] genirq: Disable irqpoll on -rt Creates long latencies for no value diff --git a/kernel/patches-4.19.x-rt/0071-genirq-Force-interrupt-thread-on-RT.patch b/kernel/patches-4.19.x-rt/0071-genirq-Force-interrupt-thread-on-RT.patch index cddab1f9e..caa8b2f9c 100644 --- a/kernel/patches-4.19.x-rt/0071-genirq-Force-interrupt-thread-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0071-genirq-Force-interrupt-thread-on-RT.patch @@ -1,7 +1,7 @@ -From 22860bd2c33dc3abc1b0aa695f8f455595762a93 Mon Sep 17 00:00:00 2001 +From 3681b7011d489da9a82f90fee87711d1bab983fe Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 3 Apr 2011 11:57:29 +0200 -Subject: [PATCH 071/269] genirq: Force interrupt thread on RT +Subject: [PATCH 071/268] genirq: Force interrupt thread on RT Force threaded_irqs and optimize the code (force_irqthreads) in regard to this. 
@@ -29,7 +29,7 @@ index eeceac3376fc..315f852b4981 100644 #define force_irqthreads (0) #endif diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 5c0ba5ca5930..94a18cf54293 100644 +index cd4f9f3e8345..c3b9f6dacd8f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -23,6 +23,7 @@ diff --git a/kernel/patches-4.19.x-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/kernel/patches-4.19.x-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index 614b743f0..1fee929de 100644 --- a/kernel/patches-4.19.x-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/kernel/patches-4.19.x-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -1,7 +1,7 @@ -From 3c22477fe8ef4919a3fb0314834751ad2e2134d8 Mon Sep 17 00:00:00 2001 +From 95156c42eddf17dc207a94b719ec18b031e4a713 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 28 May 2018 15:24:20 +0200 -Subject: [PATCH 072/269] Split IRQ-off and zone->lock while freeing pages from +Subject: [PATCH 072/268] Split IRQ-off and zone->lock while freeing pages from PCP list #1 Split the IRQ-off section while accessing the PCP list from zone->lock diff --git a/kernel/patches-4.19.x-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/kernel/patches-4.19.x-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index 21ec6592e..3ca194601 100644 --- a/kernel/patches-4.19.x-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/kernel/patches-4.19.x-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -1,7 +1,7 @@ -From e4639c8f6abcfb4b8b26aa296089349739103578 Mon Sep 17 00:00:00 2001 +From 85fe71fd7fa2ac8945d33c49966dde1662a13228 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 28 May 2018 15:24:21 +0200 -Subject: [PATCH 073/269] Split IRQ-off and zone->lock while freeing pages from +Subject: [PATCH 073/268] Split IRQ-off and zone->lock while freeing pages from PCP list #2 Split the IRQ-off section while accessing the PCP list from zone->lock diff --git a/kernel/patches-4.19.x-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/kernel/patches-4.19.x-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch index ff15da48a..26bbf5cf8 100644 --- a/kernel/patches-4.19.x-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch +++ b/kernel/patches-4.19.x-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch @@ -1,7 +1,7 @@ -From 21da9341b8a6c5d9308bf0c2fa3fe4647749f125 Mon Sep 17 00:00:00 2001 +From e38ec11acd94c6fe346d33ce75e30349b1734e40 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 28 May 2018 15:24:22 +0200 -Subject: [PATCH 074/269] mm/SLxB: change list_lock to raw_spinlock_t +Subject: [PATCH 074/268] mm/SLxB: change list_lock to raw_spinlock_t The list_lock is used with used with IRQs off on RT. Make it a raw_spinlock_t otherwise the interrupts won't be disabled on -RT. 
The locking rules remain @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior 3 files changed, 73 insertions(+), 73 deletions(-) diff --git a/mm/slab.c b/mm/slab.c -index b8e0ec74330f..21fe15fb9624 100644 +index 018d32496e8d..8ccc092fcd39 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) @@ -368,7 +368,7 @@ index b8e0ec74330f..21fe15fb9624 100644 } num_objs = total_slabs * cachep->num; active_slabs = total_slabs - free_slabs; -@@ -4333,13 +4333,13 @@ static int leaks_show(struct seq_file *m, void *p) +@@ -4334,13 +4334,13 @@ static int leaks_show(struct seq_file *m, void *p) for_each_kmem_cache_node(cachep, node, n) { check_irq_on(); diff --git a/kernel/patches-4.19.x-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/kernel/patches-4.19.x-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch index c2a243b5e..4e393bb8f 100644 --- a/kernel/patches-4.19.x-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch +++ b/kernel/patches-4.19.x-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch @@ -1,7 +1,7 @@ -From 7950585d96adfc3a0b99a639041dbaed50e2a496 Mon Sep 17 00:00:00 2001 +From e5dbe8c3a2188b0eed47fc087904da4df4f2711a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Jun 2018 17:29:19 +0200 -Subject: [PATCH 075/269] mm/SLUB: delay giving back empty slubs to IRQ enabled +Subject: [PATCH 075/268] mm/SLUB: delay giving back empty slubs to IRQ enabled regions __free_slab() is invoked with disabled interrupts which increases the diff --git a/kernel/patches-4.19.x-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch b/kernel/patches-4.19.x-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch index ff8294218..dcf35eea5 100644 --- a/kernel/patches-4.19.x-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ b/kernel/patches-4.19.x-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -1,7 +1,7 @@ -From 31695882006c45fad86890ceff90dd7d65ea5dd3 Mon Sep 17 00:00:00 2001 +From 1dc89f1379f4b2b2d2c1f1928faa9781232a804c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:37 -0500 -Subject: [PATCH 076/269] mm: page_alloc: rt-friendly per-cpu pages +Subject: [PATCH 076/268] mm: page_alloc: rt-friendly per-cpu pages rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method into a preemptible, explicit-per-cpu-locks method. diff --git a/kernel/patches-4.19.x-rt/0077-mm-swap-Convert-to-percpu-locked.patch b/kernel/patches-4.19.x-rt/0077-mm-swap-Convert-to-percpu-locked.patch index 654a21a01..cd3cbfa6d 100644 --- a/kernel/patches-4.19.x-rt/0077-mm-swap-Convert-to-percpu-locked.patch +++ b/kernel/patches-4.19.x-rt/0077-mm-swap-Convert-to-percpu-locked.patch @@ -1,7 +1,7 @@ -From 25ce0ae0ad1ef1ed724757c0137241db28a8208d Mon Sep 17 00:00:00 2001 +From 3f837e858bd7d4ee248e9b075cce128beaf94252 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:51 -0500 -Subject: [PATCH 077/269] mm/swap: Convert to percpu locked +Subject: [PATCH 077/268] mm/swap: Convert to percpu locked Replace global locks (get_cpu + local_irq_save) with "local_locks()". Currently there is one of for "rotate" and one for "swap". 
diff --git a/kernel/patches-4.19.x-rt/0078-mm-perform-lru_add_drain_all-remotely.patch b/kernel/patches-4.19.x-rt/0078-mm-perform-lru_add_drain_all-remotely.patch index 2f230172c..52e7f424e 100644 --- a/kernel/patches-4.19.x-rt/0078-mm-perform-lru_add_drain_all-remotely.patch +++ b/kernel/patches-4.19.x-rt/0078-mm-perform-lru_add_drain_all-remotely.patch @@ -1,7 +1,7 @@ -From c6e0c51ac7fe1d0892449e41e6792babe4d7c3fa Mon Sep 17 00:00:00 2001 +From e06defe13153f9c3195a350898b54b88fe38867b Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Fri, 27 May 2016 15:03:28 +0200 -Subject: [PATCH 078/269] mm: perform lru_add_drain_all() remotely +Subject: [PATCH 078/268] mm: perform lru_add_drain_all() remotely lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run on all CPUs that have non-empty LRU pagevecs and then waiting for diff --git a/kernel/patches-4.19.x-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch b/kernel/patches-4.19.x-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch index 7c908c68e..e46c59c97 100644 --- a/kernel/patches-4.19.x-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch +++ b/kernel/patches-4.19.x-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch @@ -1,7 +1,7 @@ -From b0971a2847fd9cd9f59eb19e6761f6800a33150d Mon Sep 17 00:00:00 2001 +From c111e38989bb30e13df9f5480f5f549b31351d28 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:13 -0500 -Subject: [PATCH 079/269] mm/vmstat: Protect per cpu variables with preempt +Subject: [PATCH 079/268] mm/vmstat: Protect per cpu variables with preempt disable on RT Disable preemption on -RT for the vmstat code. On vanila the code runs in diff --git a/kernel/patches-4.19.x-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch b/kernel/patches-4.19.x-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch index 84ef3ad84..f487b4098 100644 --- a/kernel/patches-4.19.x-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch +++ b/kernel/patches-4.19.x-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch @@ -1,7 +1,7 @@ -From 1062ea19aa6e1c3dacb44d07747c89b4f66dadc2 Mon Sep 17 00:00:00 2001 +From 6d873013d0191323247ffcc8345c08178c272988 Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Sat, 1 Oct 2011 18:58:13 -0700 -Subject: [PATCH 080/269] ARM: Initialize split page table locks for vector +Subject: [PATCH 080/268] ARM: Initialize split page table locks for vector page Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if diff --git a/kernel/patches-4.19.x-rt/0081-mm-Enable-SLUB-for-RT.patch b/kernel/patches-4.19.x-rt/0081-mm-Enable-SLUB-for-RT.patch index f5dcce567..62ec636bd 100644 --- a/kernel/patches-4.19.x-rt/0081-mm-Enable-SLUB-for-RT.patch +++ b/kernel/patches-4.19.x-rt/0081-mm-Enable-SLUB-for-RT.patch @@ -1,7 +1,7 @@ -From 7bd789a93c5b97d553b15fd8e446228d23456aff Mon Sep 17 00:00:00 2001 +From 50c1ce833125e10e59496d55aadac08ac0dfec71 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 Oct 2012 10:32:35 +0100 -Subject: [PATCH 081/269] mm: Enable SLUB for RT +Subject: [PATCH 081/268] mm: Enable SLUB for RT Avoid the memory allocation in IRQ section diff --git a/kernel/patches-4.19.x-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch b/kernel/patches-4.19.x-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch index 26bbab3cb..fcbad0114 100644 --- a/kernel/patches-4.19.x-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch +++ b/kernel/patches-4.19.x-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch @@ 
-1,7 +1,7 @@ -From 11224977de88f7f3ddc92b29390c44fdf9a85820 Mon Sep 17 00:00:00 2001 +From 64d21bbe1300711eaf9a6c7d1977308201d8220b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Jan 2013 12:08:15 +0100 -Subject: [PATCH 082/269] slub: Enable irqs for __GFP_WAIT +Subject: [PATCH 082/268] slub: Enable irqs for __GFP_WAIT SYSTEM_RUNNING might be too late for enabling interrupts. Allocations with GFP_WAIT can happen before that. So use this as an indicator. diff --git a/kernel/patches-4.19.x-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch b/kernel/patches-4.19.x-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch index 6fb7a2170..0c881953e 100644 --- a/kernel/patches-4.19.x-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch +++ b/kernel/patches-4.19.x-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch @@ -1,7 +1,7 @@ -From b8b912f1bb257eb44228b3bdb7652c4d6dcda56b Mon Sep 17 00:00:00 2001 +From b652776b565212235e147d11064da4bf8f63c5df Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 15 Apr 2015 19:00:47 +0200 -Subject: [PATCH 083/269] slub: Disable SLUB_CPU_PARTIAL +Subject: [PATCH 083/268] slub: Disable SLUB_CPU_PARTIAL |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 |in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7 @@ -36,10 +36,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/Kconfig b/init/Kconfig -index 707ca4d49944..68b4e39e421b 100644 +index 61e8b531649b..b4e88fb19c26 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -1698,7 +1698,7 @@ config SLAB_FREELIST_HARDENED +@@ -1701,7 +1701,7 @@ config SLAB_FREELIST_HARDENED config SLUB_CPU_PARTIAL default y diff --git a/kernel/patches-4.19.x-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/kernel/patches-4.19.x-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch index 104645c66..0b17fbb62 100644 --- a/kernel/patches-4.19.x-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch +++ b/kernel/patches-4.19.x-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch @@ -1,7 +1,7 @@ -From 107eee1a14857d0aecad3c1f56f8b4cabbadcf89 Mon Sep 17 00:00:00 2001 +From 74f8110e2380913103e09eb16ed6f41996d29278 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 30 Oct 2013 11:48:33 -0700 -Subject: [PATCH 084/269] mm/memcontrol: Don't call schedule_work_on in +Subject: [PATCH 084/268] mm/memcontrol: Don't call schedule_work_on in preemption disabled context The following trace is triggered when running ltp oom test cases: diff --git a/kernel/patches-4.19.x-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch b/kernel/patches-4.19.x-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch index 86a59777b..4cd086f9d 100644 --- a/kernel/patches-4.19.x-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch +++ b/kernel/patches-4.19.x-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch @@ -1,7 +1,7 @@ -From b1fa5897c72583b68655f7eeca2e598dbfa8a0b5 Mon Sep 17 00:00:00 2001 +From 9fe5cd2edadfa8760b3ebc58af32d2d0f3ee8af4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 28 Jan 2015 17:14:16 +0100 -Subject: [PATCH 085/269] mm/memcontrol: Replace local_irq_disable with local +Subject: [PATCH 085/268] mm/memcontrol: Replace local_irq_disable with local locks There are a few local_irq_disable() which then take sleeping locks. 
This diff --git a/kernel/patches-4.19.x-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch b/kernel/patches-4.19.x-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch index 67aa36302..548244c84 100644 --- a/kernel/patches-4.19.x-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch +++ b/kernel/patches-4.19.x-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch @@ -1,7 +1,7 @@ -From 83e42c20f52f70e65d03b214fd9c8579b0128f47 Mon Sep 17 00:00:00 2001 +From eafb2205942eee5257b3fd40aa6b513b07a21661 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 22 Mar 2016 11:16:09 +0100 -Subject: [PATCH 086/269] mm/zsmalloc: copy with get_cpu_var() and locking +Subject: [PATCH 086/268] mm/zsmalloc: copy with get_cpu_var() and locking get_cpu_var() disables preemption and triggers a might_sleep() splat later. This is replaced with get_locked_var(). diff --git a/kernel/patches-4.19.x-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch b/kernel/patches-4.19.x-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch index d7fefb813..fe865593f 100644 --- a/kernel/patches-4.19.x-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch +++ b/kernel/patches-4.19.x-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch @@ -1,7 +1,7 @@ -From 2543c80b6aadc59c70c6b6e912ed1e6a9965b3c0 Mon Sep 17 00:00:00 2001 +From 789271b9a5c962ee26a3ec8d65c7c58db403d9e4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 11 Dec 2018 21:53:43 +0100 -Subject: [PATCH 087/269] x86/mm/pat: disable preemption __split_large_page() +Subject: [PATCH 087/268] x86/mm/pat: disable preemption __split_large_page() after spin_lock() Commit "x86/mm/pat: Disable preemption around __flush_tlb_all()" added a diff --git a/kernel/patches-4.19.x-rt/0088-radix-tree-use-local-locks.patch b/kernel/patches-4.19.x-rt/0088-radix-tree-use-local-locks.patch index 3754f603b..46ff6803a 100644 --- a/kernel/patches-4.19.x-rt/0088-radix-tree-use-local-locks.patch +++ b/kernel/patches-4.19.x-rt/0088-radix-tree-use-local-locks.patch @@ -1,7 +1,7 @@ -From 11c1fef6d646f26007271dd7486fe14176d6e6f6 Mon Sep 17 00:00:00 2001 +From c279de8e446c1b1c790aef81b598e7b259ebd6e6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 25 Jan 2017 16:34:27 +0100 -Subject: [PATCH 088/269] radix-tree: use local locks +Subject: [PATCH 088/268] radix-tree: use local locks The preload functionality uses per-CPU variables and preempt-disable to ensure that it does not switch CPUs during its usage. This patch adds diff --git a/kernel/patches-4.19.x-rt/0089-timers-Prepare-for-full-preemption.patch b/kernel/patches-4.19.x-rt/0089-timers-Prepare-for-full-preemption.patch index 008963c58..2cd5aa039 100644 --- a/kernel/patches-4.19.x-rt/0089-timers-Prepare-for-full-preemption.patch +++ b/kernel/patches-4.19.x-rt/0089-timers-Prepare-for-full-preemption.patch @@ -1,7 +1,7 @@ -From 558451a44923dab908e500200b3f6f02fd6e4fae Mon Sep 17 00:00:00 2001 +From 76691edf9b2f9e9bce47a7fad48aed97af769508 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: [PATCH 089/269] timers: Prepare for full preemption +Subject: [PATCH 089/268] timers: Prepare for full preemption When softirqs can be preempted we need to make sure that cancelling the timer from the active thread can not deadlock vs. 
a running timer @@ -29,7 +29,7 @@ index 7b066fd38248..54627d046b3a 100644 #else # define del_timer_sync(t) del_timer(t) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 9c4a9f0a627b..ddf6282d9780 100644 +index 7a39d56f6a6b..5de80f29ef57 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -498,11 +498,14 @@ void resched_cpu(int cpu) diff --git a/kernel/patches-4.19.x-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch b/kernel/patches-4.19.x-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch index 249874077..beb8966d3 100644 --- a/kernel/patches-4.19.x-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch +++ b/kernel/patches-4.19.x-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch @@ -1,7 +1,7 @@ -From ea0ad5586875098798cbf5d53bb21f2a5b82e537 Mon Sep 17 00:00:00 2001 +From 78ee89fc14623b109a4b5383aac8ea85bb7cde47 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 6 Nov 2011 12:26:18 +0100 -Subject: [PATCH 090/269] x86: kvm Require const tsc for RT +Subject: [PATCH 090/268] x86: kvm Require const tsc for RT Non constant TSC is a nightmare on bare metal already, but with virtualization it becomes a complete disaster because the workarounds @@ -14,10 +14,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 4a61e1609c97..0b4fd313b626 100644 +index 7fed1d6dd1a1..b2b11374c663 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -6725,6 +6725,13 @@ int kvm_arch_init(void *opaque) +@@ -6756,6 +6756,13 @@ int kvm_arch_init(void *opaque) goto out; } diff --git a/kernel/patches-4.19.x-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch b/kernel/patches-4.19.x-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch index a1e0f68d6..4ac307081 100644 --- a/kernel/patches-4.19.x-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch +++ b/kernel/patches-4.19.x-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch @@ -1,7 +1,7 @@ -From 8d76a7f3ba4284defc688a9131aa96e66eb1310a Mon Sep 17 00:00:00 2001 +From 7e0d19b7cfe86f42ae378cdb488c207ad2ff152e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 4 Oct 2017 10:24:23 +0200 -Subject: [PATCH 091/269] pci/switchtec: Don't use completion's wait queue +Subject: [PATCH 091/268] pci/switchtec: Don't use completion's wait queue The poll callback is using completion's wait_queue_head_t member and puts it in poll_wait() so the poll() caller gets a wakeup after command diff --git a/kernel/patches-4.19.x-rt/0092-wait.h-include-atomic.h.patch b/kernel/patches-4.19.x-rt/0092-wait.h-include-atomic.h.patch index b12bd9efa..773e177e3 100644 --- a/kernel/patches-4.19.x-rt/0092-wait.h-include-atomic.h.patch +++ b/kernel/patches-4.19.x-rt/0092-wait.h-include-atomic.h.patch @@ -1,7 +1,7 @@ -From f8a4f74be5bbce9f9664ebf005bb35f26875858f Mon Sep 17 00:00:00 2001 +From f38b5d4083660218919ae8aaf04084ec81984b3f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 28 Oct 2013 12:19:57 +0100 -Subject: [PATCH 092/269] wait.h: include atomic.h +Subject: [PATCH 092/268] wait.h: include atomic.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit diff --git a/kernel/patches-4.19.x-rt/0093-work-simple-Simple-work-queue-implemenation.patch b/kernel/patches-4.19.x-rt/0093-work-simple-Simple-work-queue-implemenation.patch index f62083cba..e1e806353 100644 --- a/kernel/patches-4.19.x-rt/0093-work-simple-Simple-work-queue-implemenation.patch +++ 
b/kernel/patches-4.19.x-rt/0093-work-simple-Simple-work-queue-implemenation.patch @@ -1,7 +1,7 @@ -From 7cf55f71248f4f3c603383a84c73c5e44bfb9229 Mon Sep 17 00:00:00 2001 +From 1f3734247d6d14bcd0adce848f10ed802dbbcacc Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Fri, 11 Jul 2014 15:26:11 +0200 -Subject: [PATCH 093/269] work-simple: Simple work queue implemenation +Subject: [PATCH 093/268] work-simple: Simple work queue implemenation Provides a framework for enqueuing callbacks from irq context PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. diff --git a/kernel/patches-4.19.x-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch b/kernel/patches-4.19.x-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch index 1370fc7bd..e8cd9182e 100644 --- a/kernel/patches-4.19.x-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch +++ b/kernel/patches-4.19.x-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch @@ -1,7 +1,7 @@ -From ba25a567c5891e2b1acd586212b0fd92ce755e71 Mon Sep 17 00:00:00 2001 +From 6e00748d2128e6af60b7e37506da39208dc8e0ca Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 10 Sep 2018 18:00:31 +0200 -Subject: [PATCH 094/269] work-simple: drop a shit statement in +Subject: [PATCH 094/268] work-simple: drop a shit statement in SWORK_EVENT_PENDING Dan Carpenter reported diff --git a/kernel/patches-4.19.x-rt/0095-completion-Use-simple-wait-queues.patch b/kernel/patches-4.19.x-rt/0095-completion-Use-simple-wait-queues.patch index bfd797138..52fc04e1c 100644 --- a/kernel/patches-4.19.x-rt/0095-completion-Use-simple-wait-queues.patch +++ b/kernel/patches-4.19.x-rt/0095-completion-Use-simple-wait-queues.patch @@ -1,7 +1,7 @@ -From d24dfe04ec75d5329d870c0d20f56f2cba4563ec Mon Sep 17 00:00:00 2001 +From 1ae4e1e50bf83abfe832fb78fab3b4ca6797ee18 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 11 Jan 2013 11:23:51 +0100 -Subject: [PATCH 095/269] completion: Use simple wait queues +Subject: [PATCH 095/268] completion: Use simple wait queues Completions have no long lasting callbacks and therefor do not need the complex waitqueue variant. Use simple waitqueues which reduces the @@ -160,10 +160,10 @@ index 73e06e9986d4..f426a0661aa0 100644 extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c -index abef759de7c8..69e418787f21 100644 +index f5ce9f7ec132..0f00ba01376f 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c -@@ -681,6 +681,10 @@ static int load_image_and_restore(void) +@@ -690,6 +690,10 @@ static int load_image_and_restore(void) return error; } @@ -174,7 +174,7 @@ index abef759de7c8..69e418787f21 100644 /** * hibernate - Carry out system hibernation, including saving the image. 
*/ -@@ -694,6 +698,8 @@ int hibernate(void) +@@ -703,6 +707,8 @@ int hibernate(void) return -EPERM; } @@ -183,7 +183,7 @@ index abef759de7c8..69e418787f21 100644 lock_system_sleep(); /* The snapshot device should not be opened while we're running */ if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { -@@ -772,6 +778,7 @@ int hibernate(void) +@@ -781,6 +787,7 @@ int hibernate(void) atomic_inc(&snapshot_device_available); Unlock: unlock_system_sleep(); @@ -319,10 +319,10 @@ index a1ad5b7d5521..755a58084978 100644 } EXPORT_SYMBOL(completion_done); diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index ddf6282d9780..8272d920b749 100644 +index 5de80f29ef57..337cc72e6a6a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7109,7 +7109,10 @@ void migrate_disable(void) +@@ -7116,7 +7116,10 @@ void migrate_disable(void) return; } #ifdef CONFIG_SCHED_DEBUG @@ -334,7 +334,7 @@ index ddf6282d9780..8272d920b749 100644 #endif if (p->migrate_disable) { -@@ -7139,7 +7142,10 @@ void migrate_enable(void) +@@ -7146,7 +7149,10 @@ void migrate_enable(void) } #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/patches-4.19.x-rt/0096-fs-aio-simple-simple-work.patch b/kernel/patches-4.19.x-rt/0096-fs-aio-simple-simple-work.patch index 672de2fce..9ff7c18f7 100644 --- a/kernel/patches-4.19.x-rt/0096-fs-aio-simple-simple-work.patch +++ b/kernel/patches-4.19.x-rt/0096-fs-aio-simple-simple-work.patch @@ -1,7 +1,7 @@ -From 39010d30f3244de6b51646a0325b6292d8c84282 Mon Sep 17 00:00:00 2001 +From 488ed4279b12fb35f1d3e865c60e7c2841807834 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 16 Feb 2015 18:49:10 +0100 -Subject: [PATCH 096/269] fs/aio: simple simple work +Subject: [PATCH 096/268] fs/aio: simple simple work |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768 |in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2 @@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/fs/aio.c b/fs/aio.c -index 45d5ef8dd0a8..7db10b87c9bc 100644 +index 911e23087dfb..16dcf8521c2c 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -42,6 +42,7 @@ @@ -48,7 +48,7 @@ index 45d5ef8dd0a8..7db10b87c9bc 100644 /* * signals when all in-flight requests are done -@@ -255,6 +257,7 @@ static int __init aio_setup(void) +@@ -265,6 +267,7 @@ static int __init aio_setup(void) .mount = aio_mount, .kill_sb = kill_anon_super, }; @@ -56,7 +56,7 @@ index 45d5ef8dd0a8..7db10b87c9bc 100644 aio_mnt = kern_mount(&aio_fs); if (IS_ERR(aio_mnt)) panic("Failed to create aio fs mount."); -@@ -596,9 +599,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) +@@ -606,9 +609,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - * now it's safe to cancel any that need to be. 
*/ @@ -68,7 +68,7 @@ index 45d5ef8dd0a8..7db10b87c9bc 100644 struct aio_kiocb *req; spin_lock_irq(&ctx->ctx_lock); -@@ -616,6 +619,14 @@ static void free_ioctx_users(struct percpu_ref *ref) +@@ -626,6 +629,14 @@ static void free_ioctx_users(struct percpu_ref *ref) percpu_ref_put(&ctx->reqs); } diff --git a/kernel/patches-4.19.x-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch b/kernel/patches-4.19.x-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch index c2904d389..d49c074b3 100644 --- a/kernel/patches-4.19.x-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch +++ b/kernel/patches-4.19.x-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch @@ -1,7 +1,7 @@ -From 2010005b28eea662f9390937d92563ea1c466e24 Mon Sep 17 00:00:00 2001 +From 46fbd681dc222e3c76385b0421c6663371aa1738 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 21 Aug 2013 17:48:46 +0200 -Subject: [PATCH 097/269] genirq: Do not invoke the affinity callback via a +Subject: [PATCH 097/268] genirq: Do not invoke the affinity callback via a workqueue on RT Joe Korty reported, that __irq_set_affinity_locked() schedules a @@ -11,9 +11,9 @@ This patch uses swork_queue() instead. Signed-off-by: Sebastian Andrzej Siewior --- - include/linux/interrupt.h | 6 ++++++ - kernel/irq/manage.c | 43 ++++++++++++++++++++++++++++++++++++--- - 2 files changed, 46 insertions(+), 3 deletions(-) + include/linux/interrupt.h | 6 +++++ + kernel/irq/manage.c | 46 ++++++++++++++++++++++++++++++++++++--- + 2 files changed, 49 insertions(+), 3 deletions(-) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 315f852b4981..a943c07b54ba 100644 @@ -48,7 +48,7 @@ index 315f852b4981..a943c07b54ba 100644 void (*release)(struct kref *ref); }; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 94a18cf54293..d2270f61d335 100644 +index c3b9f6dacd8f..af2a8757abfb 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -259,7 +259,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, @@ -125,6 +125,17 @@ index 94a18cf54293..d2270f61d335 100644 } raw_spin_lock_irqsave(&desc->lock, flags); +@@ -359,7 +396,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) + raw_spin_unlock_irqrestore(&desc->lock, flags); + + if (old_notify) { ++#ifndef CONFIG_PREEMPT_RT_BASE ++ /* Need to address this for PREEMPT_RT */ + cancel_work_sync(&old_notify->work); ++#endif + kref_put(&old_notify->kref, old_notify->release); + } + -- 2.20.1 diff --git a/kernel/patches-4.19.x-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch b/kernel/patches-4.19.x-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch index 44692eab6..d4a26a11f 100644 --- a/kernel/patches-4.19.x-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch +++ b/kernel/patches-4.19.x-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch @@ -1,7 +1,7 @@ -From 49622b7282a6c10c5a70f3987df4ccfe3a32c92b Mon Sep 17 00:00:00 2001 +From c9994c24a20393e9dc9af05bf1859a0a81aeebba Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 15 Nov 2017 17:29:51 +0100 -Subject: [PATCH 098/269] time/hrtimer: avoid schedule_work() with interrupts +Subject: [PATCH 098/268] time/hrtimer: avoid schedule_work() with interrupts disabled The NOHZ code tries to schedule a workqueue with interrupts disabled. 
diff --git a/kernel/patches-4.19.x-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/kernel/patches-4.19.x-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch index 4eedbb27e..8ae34da50 100644 --- a/kernel/patches-4.19.x-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch +++ b/kernel/patches-4.19.x-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch @@ -1,7 +1,7 @@ -From 7223736bbeccbb731d509b603b15adcbf36bdade Mon Sep 17 00:00:00 2001 +From 7a13f46fcc126f6aa31e1b203a33cb8ba7b17c2e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 3 Jul 2018 11:25:41 +0200 -Subject: [PATCH 099/269] hrtimer: consolidate hrtimer_init() + +Subject: [PATCH 099/268] hrtimer: consolidate hrtimer_init() + hrtimer_init_sleeper() calls hrtimer_init_sleeper() calls require a prior initialisation of the @@ -27,10 +27,10 @@ Signed-off-by: Anna-Maria Gleixner 7 files changed, 67 insertions(+), 34 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c -index 7d53f2314d7c..b0d0b74cf5a6 100644 +index 4e563ee462cb..fa984527b1ae 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c -@@ -3124,10 +3124,9 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, +@@ -3126,10 +3126,9 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, kt = nsecs; mode = HRTIMER_MODE_REL; @@ -125,10 +125,10 @@ index 2b5ef8e94d19..94bd2e841de6 100644 hrtimer_start_range_ns(&__t.timer, timeout, \ current->timer_slack_ns, \ diff --git a/kernel/futex.c b/kernel/futex.c -index 1bd0950bea4e..fadd9bff6e3c 100644 +index 304f07d08c95..ccf933ac2997 100644 --- a/kernel/futex.c +++ b/kernel/futex.c -@@ -2684,10 +2684,9 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, +@@ -2701,10 +2701,9 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, if (abs_time) { to = &timeout; @@ -142,7 +142,7 @@ index 1bd0950bea4e..fadd9bff6e3c 100644 hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } -@@ -2786,9 +2785,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, +@@ -2803,9 +2802,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, if (time) { to = &timeout; @@ -154,7 +154,7 @@ index 1bd0950bea4e..fadd9bff6e3c 100644 hrtimer_set_expires(&to->timer, *time); } -@@ -3212,10 +3210,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3242,10 +3240,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (abs_time) { to = &timeout; @@ -263,7 +263,7 @@ index e1a549c9e399..4f43ece42f3b 100644 if (likely(t.task)) diff --git a/net/core/pktgen.c b/net/core/pktgen.c -index 7f6938405fa1..b71d9eef334e 100644 +index 092fa3d75b32..9d472d626aaa 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2160,7 +2160,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) diff --git a/kernel/patches-4.19.x-rt/0100-hrtimers-Prepare-full-preemption.patch b/kernel/patches-4.19.x-rt/0100-hrtimers-Prepare-full-preemption.patch index 23bc3dd41..44bf2996e 100644 --- a/kernel/patches-4.19.x-rt/0100-hrtimers-Prepare-full-preemption.patch +++ b/kernel/patches-4.19.x-rt/0100-hrtimers-Prepare-full-preemption.patch @@ -1,7 +1,7 @@ -From 87f5cf4447982ad964655f0831ea4deff2c59819 Mon Sep 17 00:00:00 2001 +From 33ca6a98e6b332e1e52099b27151e7672af6516b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: [PATCH 100/269] hrtimers: Prepare full preemption +Subject: [PATCH 100/268] hrtimers: Prepare full 
preemption Make cancellation of a running callback in softirq context safe against preemption. diff --git a/kernel/patches-4.19.x-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch b/kernel/patches-4.19.x-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch index 156a3d6f8..66837d925 100644 --- a/kernel/patches-4.19.x-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch +++ b/kernel/patches-4.19.x-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch @@ -1,7 +1,7 @@ -From 7bbc9e32ebfc904f317e3e3808164cdcba6f7f6d Mon Sep 17 00:00:00 2001 +From d064ab26bcb4da0b801921c6b1115d344abb9496 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 3 Jul 2009 08:44:31 -0500 -Subject: [PATCH 101/269] hrtimer: by timers by default into the softirq +Subject: [PATCH 101/268] hrtimer: by timers by default into the softirq context We can't have hrtimers callbacks running in hardirq context on RT. Therefore @@ -28,10 +28,10 @@ Signed-off-by: Sebastian Andrzej Siewior 11 files changed, 37 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c -index 3692de84c420..e3c95654b0d1 100644 +index cba414db14cb..e4e4147daa93 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c -@@ -2250,7 +2250,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) +@@ -2252,7 +2252,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, @@ -65,7 +65,7 @@ index 73ad7309436a..2bdb047c7656 100644 /* diff --git a/kernel/events/core.c b/kernel/events/core.c -index 87bd96399d1c..36661d7a8581 100644 +index 171b83ebed4a..a7807c609c22 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1102,7 +1102,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) @@ -77,7 +77,7 @@ index 87bd96399d1c..36661d7a8581 100644 timer->function = perf_mux_hrtimer_handler; } -@@ -9183,7 +9183,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) +@@ -9216,7 +9216,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) if (!is_sampling_event(event)) return; @@ -87,7 +87,7 @@ index 87bd96399d1c..36661d7a8581 100644 /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 8272d920b749..4ed3b29cb0c8 100644 +index 337cc72e6a6a..1f997ceec454 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq) @@ -100,10 +100,10 @@ index 8272d920b749..4ed3b29cb0c8 100644 } #else /* CONFIG_SCHED_HRTICK */ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index f927b1f45474..ad2a793a912b 100644 +index fb6e64417470..1794e152d888 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -1054,7 +1054,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) +@@ -1053,7 +1053,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) { struct hrtimer *timer = &dl_se->dl_timer; @@ -113,10 +113,10 @@ index f927b1f45474..ad2a793a912b 100644 } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index c17d63b06026..4193041b3cab 100644 +index 0048a32a3b4d..4022ad749d85 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4904,9 +4904,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +@@ -4908,9 +4908,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -129,7 +129,7 @@ index c17d63b06026..4193041b3cab 100644 cfs_b->distribute_running 
= 0; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c -index 4857ca145119..32c9a9f54495 100644 +index b6ca4a630050..aeb99395c03b 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) diff --git a/kernel/patches-4.19.x-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch b/kernel/patches-4.19.x-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch index cef095d8a..f980ebd03 100644 --- a/kernel/patches-4.19.x-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch +++ b/kernel/patches-4.19.x-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch @@ -1,7 +1,7 @@ -From f498fc065cd56d96f2583801142a348eb801e631 Mon Sep 17 00:00:00 2001 +From 2aa3a8a817b84f4145d811fa84365a6249b7472f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 8 Jan 2019 12:31:06 +0100 -Subject: [PATCH 102/269] sched/fair: Make the hrtimers non-hard again +Subject: [PATCH 102/268] sched/fair: Make the hrtimers non-hard again Since commit "sched/fair: Robustify CFS-bandwidth timer locking" both hrtimer can run in softirq context because now interrupts are disabled @@ -13,10 +13,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 4193041b3cab..c17d63b06026 100644 +index 4022ad749d85..0048a32a3b4d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4904,9 +4904,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +@@ -4908,9 +4908,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); diff --git a/kernel/patches-4.19.x-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch b/kernel/patches-4.19.x-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch index a53418657..430984cb2 100644 --- a/kernel/patches-4.19.x-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch +++ b/kernel/patches-4.19.x-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch @@ -1,7 +1,7 @@ -From ca493505f2f12750ca207582fc7b6ca69cbf504e Mon Sep 17 00:00:00 2001 +From 0f08536dde0688190121bf0856a78c26227c0aeb Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Mon, 16 Sep 2013 14:09:19 -0700 -Subject: [PATCH 103/269] hrtimer: Move schedule_work call to helper thread +Subject: [PATCH 103/268] hrtimer: Move schedule_work call to helper thread When run ltp leapsec_timer test, the following call trace is caught: diff --git a/kernel/patches-4.19.x-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch b/kernel/patches-4.19.x-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch index ecba5a6a2..c4502258e 100644 --- a/kernel/patches-4.19.x-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch +++ b/kernel/patches-4.19.x-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch @@ -1,7 +1,7 @@ -From 78fffa8243d75e61f9508289b2f68d2f66cf34f6 Mon Sep 17 00:00:00 2001 +From 9a8c2b871dbfc84555fccac1a07f73733462045b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 6 Dec 2018 10:15:13 +0100 -Subject: [PATCH 104/269] hrtimer: move state change before hrtimer_cancel in +Subject: [PATCH 104/268] hrtimer: move state change before hrtimer_cancel in do_nanosleep() There is a small window between setting t->task to NULL and waking the diff --git a/kernel/patches-4.19.x-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch 
b/kernel/patches-4.19.x-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch index dd3fb426a..68e56ed44 100644 --- a/kernel/patches-4.19.x-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch +++ b/kernel/patches-4.19.x-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch @@ -1,7 +1,7 @@ -From 34b024b3a992c144a3df653c0ad623a8a69dc735 Mon Sep 17 00:00:00 2001 +From 57f8b04f189ae0ed6d736994fb38f3ea540c2f4b Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 3 Jul 2009 08:29:58 -0500 -Subject: [PATCH 105/269] posix-timers: Thread posix-cpu-timers on -rt +Subject: [PATCH 105/268] posix-timers: Thread posix-cpu-timers on -rt posix-cpu-timer code takes non -rt safe locks in hard irq context. Move it to a thread. @@ -57,10 +57,10 @@ index 0b49b9cf5571..9e3362748214 100644 .thread_group = LIST_HEAD_INIT(init_task.thread_group), .thread_node = LIST_HEAD_INIT(init_signals.thread_head), diff --git a/kernel/fork.c b/kernel/fork.c -index bfe9c5c3eb88..1b8ac523aa99 100644 +index 98c971cb1d36..492bc898b09a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -1575,6 +1575,9 @@ static void rt_mutex_init_task(struct task_struct *p) +@@ -1585,6 +1585,9 @@ static void rt_mutex_init_task(struct task_struct *p) */ static void posix_cpu_timers_init(struct task_struct *tsk) { diff --git a/kernel/patches-4.19.x-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch b/kernel/patches-4.19.x-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch index 6962054f4..453d85ca2 100644 --- a/kernel/patches-4.19.x-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch +++ b/kernel/patches-4.19.x-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch @@ -1,7 +1,7 @@ -From 3c13de2cc91a9379fe1de22e474cad11805812f9 Mon Sep 17 00:00:00 2001 +From 581d1c64f3bb23b8270f64380ef672cd0eeed527 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 May 2011 16:59:16 +0200 -Subject: [PATCH 106/269] sched: Move task_struct cleanup to RCU +Subject: [PATCH 106/268] sched: Move task_struct cleanup to RCU __put_task_struct() does quite some expensive work. We don't want to burden random tasks with that. @@ -57,7 +57,7 @@ index 108ede99e533..bb98c5b43f81 100644 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT diff --git a/kernel/fork.c b/kernel/fork.c -index 1b8ac523aa99..b7e0aac93ee5 100644 +index 492bc898b09a..cba3cade3d5b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -671,7 +671,9 @@ static inline void put_signal_struct(struct signal_struct *sig) diff --git a/kernel/patches-4.19.x-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch b/kernel/patches-4.19.x-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch index e176d5c61..6bbf4f996 100644 --- a/kernel/patches-4.19.x-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch +++ b/kernel/patches-4.19.x-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch @@ -1,7 +1,7 @@ -From 043af6e53425a94e13a6648ac0206a006f2d7792 Mon Sep 17 00:00:00 2001 +From 673aef2016b9f06dbf3ca839d06d1ca5d38fe883 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 12:12:51 +0200 -Subject: [PATCH 107/269] sched: Limit the number of task migrations per batch +Subject: [PATCH 107/268] sched: Limit the number of task migrations per batch Put an upper limit on the number of tasks which are migrated per batch to avoid large latencies. 
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 4 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 4ed3b29cb0c8..f6504beff565 100644 +index 1f997ceec454..88a886c751ca 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -44,7 +44,11 @@ const_debug unsigned int sysctl_sched_features = diff --git a/kernel/patches-4.19.x-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch b/kernel/patches-4.19.x-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch index 8be5b64ab..679f1519c 100644 --- a/kernel/patches-4.19.x-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch @@ -1,7 +1,7 @@ -From 2870b4f8c6cadeb84fb963b2d58ffc546a4c3371 Mon Sep 17 00:00:00 2001 +From feffa6aee3aaca77452731da0452ebbd27e28d5e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 12:20:33 +0200 -Subject: [PATCH 108/269] sched: Move mmdrop to RCU on RT +Subject: [PATCH 108/268] sched: Move mmdrop to RCU on RT Takes sleeping locks and calls into the memory allocator, so nothing we want to do in task switch and oder atomic contexts. @@ -59,7 +59,7 @@ index cebb79fe2c72..6e578905e4ec 100644 * This has to be called after a get_task_mm()/mmget_not_zero() * followed by taking the mmap_sem for writing before modifying the diff --git a/kernel/fork.c b/kernel/fork.c -index b7e0aac93ee5..857ce1a7269f 100644 +index cba3cade3d5b..098130002cda 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -637,6 +637,19 @@ void __mmdrop(struct mm_struct *mm) @@ -83,7 +83,7 @@ index b7e0aac93ee5..857ce1a7269f 100644 { struct mm_struct *mm; diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index f6504beff565..551ce1adea4a 100644 +index 88a886c751ca..0c916e7010a2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2729,9 +2729,13 @@ static struct rq *finish_task_switch(struct task_struct *prev) diff --git a/kernel/patches-4.19.x-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/kernel/patches-4.19.x-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch index 27505f0eb..5b5dbda34 100644 --- a/kernel/patches-4.19.x-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch +++ b/kernel/patches-4.19.x-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch @@ -1,7 +1,7 @@ -From 5237487b97c59d69fbd880f60b8cc9ca5414a52a Mon Sep 17 00:00:00 2001 +From a113d3ac7f8347ca2f05072f50d5d98307f03833 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 21 Nov 2016 19:31:08 +0100 -Subject: [PATCH 109/269] kernel/sched: move stack + kprobe clean up to +Subject: [PATCH 109/268] kernel/sched: move stack + kprobe clean up to __put_task_struct() There is no need to free the stack before the task struct (except for reasons @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c -index 857ce1a7269f..8a9241afefb0 100644 +index 098130002cda..247b08eb66c8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -40,6 +40,7 @@ @@ -45,7 +45,7 @@ index 857ce1a7269f..8a9241afefb0 100644 task_numa_free(tsk); security_task_free(tsk); diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 551ce1adea4a..788947117ed2 100644 +index 0c916e7010a2..31d8e5828ece 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2741,15 +2741,6 @@ static struct rq *finish_task_switch(struct task_struct *prev) diff --git a/kernel/patches-4.19.x-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch 
b/kernel/patches-4.19.x-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch index 6eb751e26..30ebbff41 100644 --- a/kernel/patches-4.19.x-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch +++ b/kernel/patches-4.19.x-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch @@ -1,7 +1,7 @@ -From 63a798ec299b7daacf684067fbe7917856193133 Mon Sep 17 00:00:00 2001 +From 7f463ba82dfb810763df6caed8502b90e9c587c5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 25 Jun 2011 09:21:04 +0200 -Subject: [PATCH 110/269] sched: Add saved_state for tasks blocked on sleeping +Subject: [PATCH 110/268] sched: Add saved_state for tasks blocked on sleeping locks Spinlocks are state preserving in !RT. RT changes the state when a @@ -39,7 +39,7 @@ index a6f2f76b1162..ad44849fba2e 100644 #ifdef CONFIG_SMP diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 788947117ed2..e7dccbb9973a 100644 +index 31d8e5828ece..5734699b0812 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1999,8 +1999,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) @@ -91,7 +91,7 @@ index 788947117ed2..e7dccbb9973a 100644 { return try_to_wake_up(p, state, 0); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 4c7a837d7c14..dd6ae39957ce 100644 +index 9a7c3d08b39f..49ae30da28ee 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1443,6 +1443,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) diff --git a/kernel/patches-4.19.x-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch b/kernel/patches-4.19.x-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch index 50bc8e846..720edd546 100644 --- a/kernel/patches-4.19.x-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch +++ b/kernel/patches-4.19.x-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch @@ -1,7 +1,7 @@ -From 01cbb896854fa0cccd07b728402d50b349946011 Mon Sep 17 00:00:00 2001 +From 6ebb53ba7f0d4bdcd9b6ff936f018aa66e8963fb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jun 2011 09:19:06 +0200 -Subject: [PATCH 111/269] sched: Do not account rcu_preempt_depth on RT in +Subject: [PATCH 111/268] sched: Do not account rcu_preempt_depth on RT in might_sleep() RT changes the rcu_preempt_depth semantics, so we cannot check for it @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 75e5b393cf44..0539f55bf7b3 100644 +index e102c5bccbb9..87eafcb3312f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -73,6 +73,11 @@ void synchronize_rcu(void); @@ -29,7 +29,7 @@ index 75e5b393cf44..0539f55bf7b3 100644 #else /* #ifdef CONFIG_PREEMPT_RCU */ -@@ -98,6 +103,8 @@ static inline int rcu_preempt_depth(void) +@@ -96,6 +101,8 @@ static inline int rcu_preempt_depth(void) return 0; } @@ -39,7 +39,7 @@ index 75e5b393cf44..0539f55bf7b3 100644 /* Internal to kernel */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index e7dccbb9973a..8033a8f4efdd 100644 +index 5734699b0812..0be1cc1120db 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6156,7 +6156,7 @@ void __init sched_init(void) diff --git a/kernel/patches-4.19.x-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch b/kernel/patches-4.19.x-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch index 016894f35..40d1da920 100644 --- 
a/kernel/patches-4.19.x-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch +++ b/kernel/patches-4.19.x-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch @@ -1,7 +1,7 @@ -From 575557e0c67be96034f9528399a7b7361dae5dd2 Mon Sep 17 00:00:00 2001 +From a8e4e323368bb9fcc7b3c14664411438fb641578 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 22:51:33 +0200 -Subject: [PATCH 112/269] sched: Use the proper LOCK_OFFSET for cond_resched() +Subject: [PATCH 112/268] sched: Use the proper LOCK_OFFSET for cond_resched() RT does not increment preempt count when a 'sleeping' spinlock is locked. Update PREEMPT_LOCK_OFFSET for that case. diff --git a/kernel/patches-4.19.x-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch b/kernel/patches-4.19.x-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch index d7699738a..5d5839f69 100644 --- a/kernel/patches-4.19.x-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch @@ -1,7 +1,7 @@ -From 5e05ad5c470039b646a457459138f582bc139f3f Mon Sep 17 00:00:00 2001 +From a443b3530e4b114f3a1cfa5c3918f7157b0ad169 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 13 Sep 2011 16:42:35 +0200 -Subject: [PATCH 113/269] sched: Disable TTWU_QUEUE on RT +Subject: [PATCH 113/268] sched: Disable TTWU_QUEUE on RT The queued remote wakeup mechanism can introduce rather large latencies if the number of migrated tasks is high. Disable it for RT. diff --git a/kernel/patches-4.19.x-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/kernel/patches-4.19.x-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch index 496b5b345..a735c587a 100644 --- a/kernel/patches-4.19.x-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch +++ b/kernel/patches-4.19.x-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch @@ -1,7 +1,7 @@ -From 1241476225268360ae571ec5de750f504cac3604 Mon Sep 17 00:00:00 2001 +From dfc2626360c00270806603e89a95316ece262a01 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 18 Mar 2013 15:12:49 -0400 -Subject: [PATCH 114/269] sched/workqueue: Only wake up idle workers if not +Subject: [PATCH 114/268] sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock In -rt, most spin_locks() turn into mutexes. 
One of these spin_lock @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 8033a8f4efdd..acca3e94ee27 100644 +index 0be1cc1120db..5ca8f53ba4fd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3498,8 +3498,10 @@ static void __sched notrace __schedule(bool preempt) diff --git a/kernel/patches-4.19.x-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/kernel/patches-4.19.x-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch index d36c4347c..dc4580011 100644 --- a/kernel/patches-4.19.x-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch +++ b/kernel/patches-4.19.x-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch @@ -1,7 +1,7 @@ -From 5fe7427b8a7b38b8b395ce68c2c6cb06b2f95a58 Mon Sep 17 00:00:00 2001 +From e325e8bf70d1919c09b47af2880b2eccdda59e9d Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Mon, 26 Jun 2017 17:07:15 +0200 -Subject: [PATCH 115/269] rt: Increase/decrease the nr of migratory tasks when +Subject: [PATCH 115/268] rt: Increase/decrease the nr of migratory tasks when enabling/disabling migration There is a problem in the migrate_disable()/enable() implementation @@ -81,10 +81,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index acca3e94ee27..eb752804e8cf 100644 +index 5ca8f53ba4fd..434fd8946629 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7140,6 +7140,47 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7147,6 +7147,47 @@ const u32 sched_prio_to_wmult[40] = { #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) @@ -132,7 +132,7 @@ index acca3e94ee27..eb752804e8cf 100644 void migrate_disable(void) { struct task_struct *p = current; -@@ -7163,10 +7204,9 @@ void migrate_disable(void) +@@ -7170,10 +7211,9 @@ void migrate_disable(void) } preempt_disable(); @@ -145,7 +145,7 @@ index acca3e94ee27..eb752804e8cf 100644 preempt_enable(); } -@@ -7198,9 +7238,8 @@ void migrate_enable(void) +@@ -7205,9 +7245,8 @@ void migrate_enable(void) preempt_disable(); diff --git a/kernel/patches-4.19.x-rt/0116-hotplug-Lightweight-get-online-cpus.patch b/kernel/patches-4.19.x-rt/0116-hotplug-Lightweight-get-online-cpus.patch index bc416cb21..028bd1054 100644 --- a/kernel/patches-4.19.x-rt/0116-hotplug-Lightweight-get-online-cpus.patch +++ b/kernel/patches-4.19.x-rt/0116-hotplug-Lightweight-get-online-cpus.patch @@ -1,7 +1,7 @@ -From 1e1a0808ffc8df10c6bc1e46f40a4948395f72a6 Mon Sep 17 00:00:00 2001 +From 56c3100fe3a1b4a512d394b5c851473ac95bad3b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 15 Jun 2011 12:36:06 +0200 -Subject: [PATCH 116/269] hotplug: Lightweight get online cpus +Subject: [PATCH 116/268] hotplug: Lightweight get online cpus get_online_cpus() is a heavy weight function which involves a global mutex. 
migrate_disable() wants a simpler construct which prevents only @@ -19,10 +19,10 @@ Signed-off-by: Thomas Gleixner 3 files changed, 24 insertions(+) diff --git a/include/linux/cpu.h b/include/linux/cpu.h -index 5041357d0297..3403eab853b7 100644 +index 006f69f9277b..d45ea5c98cdd 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h -@@ -111,6 +111,8 @@ extern void cpu_hotplug_disable(void); +@@ -113,6 +113,8 @@ extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); void clear_tasks_mm_cpumask(int cpu); int cpu_down(unsigned int cpu); @@ -31,7 +31,7 @@ index 5041357d0297..3403eab853b7 100644 #else /* CONFIG_HOTPLUG_CPU */ -@@ -122,6 +124,9 @@ static inline int cpus_read_trylock(void) { return true; } +@@ -124,6 +126,9 @@ static inline int cpus_read_trylock(void) { return true; } static inline void lockdep_assert_cpus_held(void) { } static inline void cpu_hotplug_disable(void) { } static inline void cpu_hotplug_enable(void) { } @@ -42,7 +42,7 @@ index 5041357d0297..3403eab853b7 100644 /* Wrappers which go away once all code is converted */ diff --git a/kernel/cpu.c b/kernel/cpu.c -index dc250ec2c096..f684f41492d3 100644 +index 5d65eae893bd..e1efb98a56de 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -281,6 +281,21 @@ static int cpu_hotplug_disabled; @@ -68,10 +68,10 @@ index dc250ec2c096..f684f41492d3 100644 void cpus_read_lock(void) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index eb752804e8cf..516f05702550 100644 +index 434fd8946629..a8e9283f018c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7204,6 +7204,7 @@ void migrate_disable(void) +@@ -7211,6 +7211,7 @@ void migrate_disable(void) } preempt_disable(); @@ -79,7 +79,7 @@ index eb752804e8cf..516f05702550 100644 migrate_disable_update_cpus_allowed(p); p->migrate_disable = 1; -@@ -7269,12 +7270,15 @@ void migrate_enable(void) +@@ -7276,12 +7277,15 @@ void migrate_enable(void) arg.task = p; arg.dest_cpu = dest_cpu; diff --git a/kernel/patches-4.19.x-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch b/kernel/patches-4.19.x-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch index 1ffddc0d0..16da2c950 100644 --- a/kernel/patches-4.19.x-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch +++ b/kernel/patches-4.19.x-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch @@ -1,7 +1,7 @@ -From e93174d8da86d81922b37dd559f026f1eb4cafb8 Mon Sep 17 00:00:00 2001 +From 0add084e2fc599e34a02b6d87f1657895c25bac2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:56:42 +0200 -Subject: [PATCH 117/269] trace: Add migrate-disabled counter to tracing output +Subject: [PATCH 117/268] trace: Add migrate-disabled counter to tracing output Signed-off-by: Thomas Gleixner --- @@ -25,10 +25,10 @@ index 78a010e19ed4..0403d9696944 100644 #define TRACE_EVENT_TYPE_MAX \ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index c65cea71d1ee..0af14953d52d 100644 +index 1bd7a758583b..10843c80cffb 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2146,6 +2146,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, +@@ -2149,6 +2149,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | (test_preempt_need_resched() ? 
TRACE_FLAG_PREEMPT_RESCHED : 0); @@ -37,7 +37,7 @@ index c65cea71d1ee..0af14953d52d 100644 } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); -@@ -3349,9 +3351,10 @@ static void print_lat_help_header(struct seq_file *m) +@@ -3352,9 +3354,10 @@ static void print_lat_help_header(struct seq_file *m) "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" @@ -52,7 +52,7 @@ index c65cea71d1ee..0af14953d52d 100644 static void print_event_info(struct trace_buffer *buf, struct seq_file *m) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index f94be0c2827b..acdb2c2067c6 100644 +index 7345f5f8f3fe..6455e2ca7987 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -188,6 +188,8 @@ static int trace_define_common_fields(void) diff --git a/kernel/patches-4.19.x-rt/0118-lockdep-Make-it-RT-aware.patch b/kernel/patches-4.19.x-rt/0118-lockdep-Make-it-RT-aware.patch index 2678553ac..0e47b83e1 100644 --- a/kernel/patches-4.19.x-rt/0118-lockdep-Make-it-RT-aware.patch +++ b/kernel/patches-4.19.x-rt/0118-lockdep-Make-it-RT-aware.patch @@ -1,7 +1,7 @@ -From 1a31bace22b513efaa0864bd1d32d7d4c698a618 Mon Sep 17 00:00:00 2001 +From 3b768b5a33bd0ddea3c1eb9523a725be70d798a3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 18:51:23 +0200 -Subject: [PATCH 118/269] lockdep: Make it RT aware +Subject: [PATCH 118/268] lockdep: Make it RT aware teach lockdep that we don't really do softirqs on -RT. diff --git a/kernel/patches-4.19.x-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch b/kernel/patches-4.19.x-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch index 299c47093..7b6a1dfa0 100644 --- a/kernel/patches-4.19.x-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch +++ b/kernel/patches-4.19.x-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch @@ -1,7 +1,7 @@ -From f0dbaae62eb8d03e46818d0babb5889b3a5ce6eb Mon Sep 17 00:00:00 2001 +From 6929ef433a0ceeecc81b41d3cedf4008023ebec7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 29 Nov 2011 20:18:22 -0500 -Subject: [PATCH 119/269] tasklet: Prevent tasklets from going into infinite +Subject: [PATCH 119/268] tasklet: Prevent tasklets from going into infinite spin in RT When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, diff --git a/kernel/patches-4.19.x-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch b/kernel/patches-4.19.x-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch index ee4970952..50022f507 100644 --- a/kernel/patches-4.19.x-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch +++ b/kernel/patches-4.19.x-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch @@ -1,7 +1,7 @@ -From dcfab76d9eab264a1e79cc42713a004d2ef7658b Mon Sep 17 00:00:00 2001 +From fc9facec00715571d089cdefb406c72a8b05e8a3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 13 Nov 2011 17:17:09 +0100 -Subject: [PATCH 120/269] softirq: Check preemption after reenabling interrupts +Subject: [PATCH 120/268] softirq: Check preemption after reenabling interrupts raise_softirq_irqoff() disables interrupts and wakes the softirq daemon, but after reenabling interrupts there is no preemption check, @@ -116,7 +116,7 @@ index 86a709954f5a..9c069ef83d6d 100644 return 0; } diff --git a/net/core/dev.c b/net/core/dev.c -index 3bcec116a5f2..3362d8897058 100644 +index 138951d28643..48c4dc728d1b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2726,6 +2726,7 
@@ static void __netif_reschedule(struct Qdisc *q) diff --git a/kernel/patches-4.19.x-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch b/kernel/patches-4.19.x-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch index f15595b88..2c4befc48 100644 --- a/kernel/patches-4.19.x-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch +++ b/kernel/patches-4.19.x-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch @@ -1,7 +1,7 @@ -From 7a6ae7f96331bdaeeac96006086d01805ca48612 Mon Sep 17 00:00:00 2001 +From d77fb7d27741bbe41b0f531f90d2db7a8a88c265 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 13:59:17 +0200 -Subject: [PATCH 121/269] softirq: Disable softirq stacks for RT +Subject: [PATCH 121/268] softirq: Disable softirq stacks for RT Disable extra stacks for softirqs. We want to preempt softirqs and having them on special IRQ-stack does not make this easier. @@ -119,10 +119,10 @@ index 713670e6d13d..5dfc715343f9 100644 #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S -index 617df50a11d9..ce2a6587ed11 100644 +index c90e00db5c13..7b29f2c10d01 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S -@@ -1043,6 +1043,7 @@ bad_gs: +@@ -1059,6 +1059,7 @@ bad_gs: jmp 2b .previous @@ -130,7 +130,7 @@ index 617df50a11d9..ce2a6587ed11 100644 /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(do_softirq_own_stack) pushq %rbp -@@ -1053,6 +1054,7 @@ ENTRY(do_softirq_own_stack) +@@ -1069,6 +1070,7 @@ ENTRY(do_softirq_own_stack) leaveq ret ENDPROC(do_softirq_own_stack) diff --git a/kernel/patches-4.19.x-rt/0122-softirq-Split-softirq-locks.patch b/kernel/patches-4.19.x-rt/0122-softirq-Split-softirq-locks.patch index 729db5e9f..414f76a14 100644 --- a/kernel/patches-4.19.x-rt/0122-softirq-Split-softirq-locks.patch +++ b/kernel/patches-4.19.x-rt/0122-softirq-Split-softirq-locks.patch @@ -1,7 +1,7 @@ -From 35e1d70c2ede4d34ff411570acf377f7ffe77e70 Mon Sep 17 00:00:00 2001 +From 3957a5e25779f87d825a2309f3da9084dfacae01 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Oct 2012 14:20:47 +0100 -Subject: [PATCH 122/269] softirq: Split softirq locks +Subject: [PATCH 122/268] softirq: Split softirq locks The 3.x RT series removed the split softirq implementation in favour of pushing softirq processing into the context of the thread which @@ -198,7 +198,7 @@ index ad44849fba2e..7ecccccbd358 100644 #define PF_EXITING 0x00000004 /* Getting shut down */ #define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ diff --git a/init/main.c b/init/main.c -index e083fac08aed..1647cb052be5 100644 +index 020972fed117..4a7471606e53 100644 --- a/init/main.c +++ b/init/main.c @@ -561,6 +561,7 @@ asmlinkage __visible void __init start_kernel(void) diff --git a/kernel/patches-4.19.x-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch b/kernel/patches-4.19.x-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch index 42f334e5a..4e980a3df 100644 --- a/kernel/patches-4.19.x-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch +++ b/kernel/patches-4.19.x-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch @@ -1,7 +1,7 @@ -From e4b4f2fba2b81120beca06cd1c49f37ceb8bd9c2 Mon Sep 17 00:00:00 2001 +From 88bee7f8c3b942350cdafe85c78387c056a2d049 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 16 Jun 2017 19:03:16 +0200 -Subject: [PATCH 123/269] net/core: use local_bh_disable() in netif_rx_ni() +Subject: [PATCH 123/268] net/core: use local_bh_disable() in netif_rx_ni() In 2004 
netif_rx_ni() gained a preempt_disable() section around netif_rx() and its do_softirq() + testing for it. The do_softirq() part @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index 3362d8897058..b8208b940b5d 100644 +index 48c4dc728d1b..abaf8a73403b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4526,11 +4526,9 @@ int netif_rx_ni(struct sk_buff *skb) diff --git a/kernel/patches-4.19.x-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch b/kernel/patches-4.19.x-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch index 8e2f91255..1b1d9407c 100644 --- a/kernel/patches-4.19.x-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch +++ b/kernel/patches-4.19.x-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch @@ -1,7 +1,7 @@ -From 68c9fb7ded900fff5f4e0a41978b36eb36292c66 Mon Sep 17 00:00:00 2001 +From b4d0cc11ac3bdc7d27daab0b5481b1587eadd044 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 Jan 2012 13:01:27 +0100 -Subject: [PATCH 124/269] genirq: Allow disabling of softirq processing in irq +Subject: [PATCH 124/268] genirq: Allow disabling of softirq processing in irq thread context The processing of softirqs in irq thread context is a performance gain @@ -68,10 +68,10 @@ index c9bffda04a45..73d3146db74d 100644 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index d2270f61d335..ba5bba5f1ffd 100644 +index af2a8757abfb..69b4bfd4654c 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -973,7 +973,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) +@@ -978,7 +978,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) atomic_inc(&desc->threads_handled); irq_finalize_oneshot(desc, action); @@ -88,7 +88,7 @@ index d2270f61d335..ba5bba5f1ffd 100644 return ret; } -@@ -1483,6 +1491,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) +@@ -1488,6 +1496,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } diff --git a/kernel/patches-4.19.x-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/kernel/patches-4.19.x-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch index 4e6061e88..7873a517d 100644 --- a/kernel/patches-4.19.x-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch +++ b/kernel/patches-4.19.x-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch @@ -1,7 +1,7 @@ -From 5b5c9a38190fcf09aad69449f6552598a2502bf8 Mon Sep 17 00:00:00 2001 +From a5b5fd887096b76347cf769dae55c87dbef87422 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jan 2016 16:34:17 +0100 -Subject: [PATCH 125/269] softirq: split timer softirqs out of ksoftirqd +Subject: [PATCH 125/268] softirq: split timer softirqs out of ksoftirqd The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with timer wakeup which can not happen in hardirq context. 
The prio has been diff --git a/kernel/patches-4.19.x-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch b/kernel/patches-4.19.x-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch index afb33b2d9..89d3293c9 100644 --- a/kernel/patches-4.19.x-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch +++ b/kernel/patches-4.19.x-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch @@ -1,7 +1,7 @@ -From f76ac7c02f06f8b40b041c7b9ff9bc13c55bb353 Mon Sep 17 00:00:00 2001 +From 352ba4a234b354e8dffc9bf8ce2f76159d635c81 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 18 Feb 2019 13:19:59 +0100 -Subject: [PATCH 126/269] softirq: Avoid "local_softirq_pending" messages if +Subject: [PATCH 126/268] softirq: Avoid "local_softirq_pending" messages if ksoftirqd is blocked If the ksoftirqd thread has a softirq pending and is blocked on the diff --git a/kernel/patches-4.19.x-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch b/kernel/patches-4.19.x-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch index 310285f8c..da75a4682 100644 --- a/kernel/patches-4.19.x-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch +++ b/kernel/patches-4.19.x-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch @@ -1,7 +1,7 @@ -From 35b95587b8a912221d7eb0bdbb7aefb126c7db5d Mon Sep 17 00:00:00 2001 +From 9abf3c7d8b43d087c3154991c899d8547e677332 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Feb 2019 16:49:29 +0100 -Subject: [PATCH 127/269] softirq: Avoid "local_softirq_pending" messages if +Subject: [PATCH 127/268] softirq: Avoid "local_softirq_pending" messages if task is in cpu_chill() If the softirq thread enters cpu_chill() then ->state is UNINTERRUPTIBLE diff --git a/kernel/patches-4.19.x-rt/0128-rtmutex-trylock-is-okay-on-RT.patch b/kernel/patches-4.19.x-rt/0128-rtmutex-trylock-is-okay-on-RT.patch index 6128ba19f..c68165fab 100644 --- a/kernel/patches-4.19.x-rt/0128-rtmutex-trylock-is-okay-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0128-rtmutex-trylock-is-okay-on-RT.patch @@ -1,7 +1,7 @@ -From 86d0b19c922c5c25ec598f869e859be148c058e2 Mon Sep 17 00:00:00 2001 +From 9e7dadefc501fae6061a5523691c87e41a02086b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 2 Dec 2015 11:34:07 +0100 -Subject: [PATCH 128/269] rtmutex: trylock is okay on -RT +Subject: [PATCH 128/268] rtmutex: trylock is okay on -RT non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. 
On -RT we don't run softirqs in IRQ context but in thread context so it is diff --git a/kernel/patches-4.19.x-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/kernel/patches-4.19.x-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch index 41a023ec1..e8c66a560 100644 --- a/kernel/patches-4.19.x-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch +++ b/kernel/patches-4.19.x-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch @@ -1,7 +1,7 @@ -From 8e56a215d6f5df86b3cfcf2386facd511db3d0ed Mon Sep 17 00:00:00 2001 +From 4e0f711dde32659589c68d7bb50274bfdda538cb Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 15 Sep 2016 10:51:27 +0200 -Subject: [PATCH 129/269] fs/nfs: turn rmdir_sem into a semaphore +Subject: [PATCH 129/268] fs/nfs: turn rmdir_sem into a semaphore The RW semaphore had a reader side which used the _non_owner version because it most likely took the reader lock in one thread and released it diff --git a/kernel/patches-4.19.x-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch b/kernel/patches-4.19.x-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch index 7138c83b7..723f68420 100644 --- a/kernel/patches-4.19.x-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch +++ b/kernel/patches-4.19.x-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch @@ -1,7 +1,7 @@ -From 915b60215e529acc7c55ded1a85af2ad92a5c9c3 Mon Sep 17 00:00:00 2001 +From 1d0178f658356c2001dd5144ac3ab7974d32b872 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 10 Jun 2011 11:04:15 +0200 -Subject: [PATCH 130/269] rtmutex: Handle the various new futex race conditions +Subject: [PATCH 130/268] rtmutex: Handle the various new futex race conditions RT opens a few new interesting race conditions in the rtmutex/futex combo due to futex hash bucket lock being a 'sleeping' spinlock and @@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner 3 files changed, 94 insertions(+), 21 deletions(-) diff --git a/kernel/futex.c b/kernel/futex.c -index fadd9bff6e3c..be06626b29d2 100644 +index ccf933ac2997..b2a90c66d8f4 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2146,6 +2146,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, @@ -35,7 +35,7 @@ index fadd9bff6e3c..be06626b29d2 100644 } else if (ret) { /* * rt_mutex_start_proxy_lock() detected a -@@ -3194,7 +3204,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3224,7 +3234,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, struct hrtimer_sleeper timeout, *to = NULL; struct futex_pi_state *pi_state = NULL; struct rt_mutex_waiter rt_waiter; @@ -44,7 +44,7 @@ index fadd9bff6e3c..be06626b29d2 100644 union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; -@@ -3252,20 +3262,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3282,20 +3292,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); @@ -111,7 +111,7 @@ index fadd9bff6e3c..be06626b29d2 100644 /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { -@@ -3274,7 +3319,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3304,7 +3349,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * did a lock-steal - fix up the PI-state in that case. 
*/ if (q.pi_state && (q.pi_state->owner != current)) { @@ -121,7 +121,7 @@ index fadd9bff6e3c..be06626b29d2 100644 ret = fixup_pi_state_owner(uaddr2, &q, current); if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { pi_state = q.pi_state; -@@ -3285,7 +3331,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3315,7 +3361,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * the requeue_pi() code acquired for us. */ put_pi_state(q.pi_state); @@ -130,7 +130,7 @@ index fadd9bff6e3c..be06626b29d2 100644 } } else { struct rt_mutex *pi_mutex; -@@ -3299,7 +3345,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3329,7 +3375,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); diff --git a/kernel/patches-4.19.x-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch b/kernel/patches-4.19.x-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch index 1ce398ac7..abd26bacd 100644 --- a/kernel/patches-4.19.x-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch +++ b/kernel/patches-4.19.x-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch @@ -1,7 +1,7 @@ -From c1664acee8627620a0406cc55b13d81c710f2bac Mon Sep 17 00:00:00 2001 +From c3c07a68fab785f20de5eaa7034a2df733074865 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 131/269] futex: Fix bug on when a requeued RT task times out +Subject: [PATCH 131/268] futex: Fix bug on when a requeued RT task times out Requeue with timeout causes a bug with PREEMPT_RT_FULL. diff --git a/kernel/patches-4.19.x-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/kernel/patches-4.19.x-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch index 9c9520667..959a43cb8 100644 --- a/kernel/patches-4.19.x-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch +++ b/kernel/patches-4.19.x-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch @@ -1,7 +1,7 @@ -From 03de38c7dbb4653aa5f13353b834b6be244a727d Mon Sep 17 00:00:00 2001 +From eaaf66fa04a418e1d9e6b587eb089e3977b3b7e5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Mar 2013 11:17:42 +0100 -Subject: [PATCH 132/269] futex: Ensure lock/unlock symetry versus pi_lock and +Subject: [PATCH 132/268] futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock In exit_pi_state_list() we have the following locking construct: @@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+) diff --git a/kernel/futex.c b/kernel/futex.c -index be06626b29d2..eeb3e16fb9ec 100644 +index b2a90c66d8f4..4d6501d689b5 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -918,7 +918,9 @@ void exit_pi_state_list(struct task_struct *curr) diff --git a/kernel/patches-4.19.x-rt/0133-pid.h-include-atomic.h.patch b/kernel/patches-4.19.x-rt/0133-pid.h-include-atomic.h.patch index 69f4a69ab..ca309deaa 100644 --- a/kernel/patches-4.19.x-rt/0133-pid.h-include-atomic.h.patch +++ b/kernel/patches-4.19.x-rt/0133-pid.h-include-atomic.h.patch @@ -1,7 +1,7 @@ -From fab65ac89d2148c60793f1043b3391b8431674d1 Mon Sep 17 00:00:00 2001 +From 921ce8878aa0e4f3ab10bf9026cd1129c9055393 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jul 2015 19:43:56 +0300 -Subject: [PATCH 133/269] pid.h: include atomic.h +Subject: [PATCH 133/268] pid.h: include atomic.h 
This patch fixes build error: CC kernel/pid_namespace.o diff --git a/kernel/patches-4.19.x-rt/0134-arm-include-definition-for-cpumask_t.patch b/kernel/patches-4.19.x-rt/0134-arm-include-definition-for-cpumask_t.patch index c2458b92f..2b2e4484e 100644 --- a/kernel/patches-4.19.x-rt/0134-arm-include-definition-for-cpumask_t.patch +++ b/kernel/patches-4.19.x-rt/0134-arm-include-definition-for-cpumask_t.patch @@ -1,7 +1,7 @@ -From 3286a3abb2234e5ecf7605154781fbd762b3d726 Mon Sep 17 00:00:00 2001 +From 83b8d6b7af22014f1cc6001b8509627485bef74a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 22 Dec 2016 17:28:33 +0100 -Subject: [PATCH 134/269] arm: include definition for cpumask_t +Subject: [PATCH 134/268] arm: include definition for cpumask_t This definition gets pulled in by other files. With the (later) split of RCU and spinlock.h it won't compile anymore. diff --git a/kernel/patches-4.19.x-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/kernel/patches-4.19.x-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch index 45bf57a30..21a205b61 100644 --- a/kernel/patches-4.19.x-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch +++ b/kernel/patches-4.19.x-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch @@ -1,7 +1,7 @@ -From 55274d88157f847bb93b54d4b3c0d569995b8443 Mon Sep 17 00:00:00 2001 +From 5f5987421a2880669ee5a3fa75d1435cef7d3118 Mon Sep 17 00:00:00 2001 From: "Wolfgang M. Reimer" Date: Tue, 21 Jul 2015 16:20:07 +0200 -Subject: [PATCH 135/269] locking: locktorture: Do NOT include rwlock.h +Subject: [PATCH 135/268] locking: locktorture: Do NOT include rwlock.h directly Including rwlock.h directly will cause kernel builds to fail diff --git a/kernel/patches-4.19.x-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch b/kernel/patches-4.19.x-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch index 0bbdc1cd3..3455460d0 100644 --- a/kernel/patches-4.19.x-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch +++ b/kernel/patches-4.19.x-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch @@ -1,7 +1,7 @@ -From 84d0c68fcaa44acc03d15941d982f4a0157903d0 Mon Sep 17 00:00:00 2001 +From 49e5af76f13c77609d54ea56c3b9e0c51e68d2e7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 9 Jun 2011 11:43:52 +0200 -Subject: [PATCH 136/269] rtmutex: Add rtmutex_lock_killable() +Subject: [PATCH 136/268] rtmutex: Add rtmutex_lock_killable() Add "killable" type to rtmutex. We need this since rtmutex are used as "normal" mutexes which do use this type. diff --git a/kernel/patches-4.19.x-rt/0137-rtmutex-Make-lock_killable-work.patch b/kernel/patches-4.19.x-rt/0137-rtmutex-Make-lock_killable-work.patch index 59d804455..b9c48472b 100644 --- a/kernel/patches-4.19.x-rt/0137-rtmutex-Make-lock_killable-work.patch +++ b/kernel/patches-4.19.x-rt/0137-rtmutex-Make-lock_killable-work.patch @@ -1,7 +1,7 @@ -From 05fb36753dd6a8fb6b5af57e77d7f195083d3348 Mon Sep 17 00:00:00 2001 +From ad096777cd7f279ffd9083c8f410331955d1816a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 1 Apr 2017 12:50:59 +0200 -Subject: [PATCH 137/269] rtmutex: Make lock_killable work +Subject: [PATCH 137/268] rtmutex: Make lock_killable work Locking an rt mutex killable does not work because signal handling is restricted to TASK_INTERRUPTIBLE. 
diff --git a/kernel/patches-4.19.x-rt/0138-spinlock-Split-the-lock-types-header.patch b/kernel/patches-4.19.x-rt/0138-spinlock-Split-the-lock-types-header.patch index 3aa67f2d2..ab7e7b39c 100644 --- a/kernel/patches-4.19.x-rt/0138-spinlock-Split-the-lock-types-header.patch +++ b/kernel/patches-4.19.x-rt/0138-spinlock-Split-the-lock-types-header.patch @@ -1,7 +1,7 @@ -From 8eee663cf2becdb10a170336c2cf3fba5fe3be80 Mon Sep 17 00:00:00 2001 +From a963738794f761dbf9138b2bce910ddc33d8f750 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 19:34:01 +0200 -Subject: [PATCH 138/269] spinlock: Split the lock types header +Subject: [PATCH 138/268] spinlock: Split the lock types header Split raw_spinlock into its own file and the remaining spinlock_t into its own non-RT header. The non-RT header will be replaced later by sleeping diff --git a/kernel/patches-4.19.x-rt/0139-rtmutex-Avoid-include-hell.patch b/kernel/patches-4.19.x-rt/0139-rtmutex-Avoid-include-hell.patch index 5a0d3eee1..b8e9080a1 100644 --- a/kernel/patches-4.19.x-rt/0139-rtmutex-Avoid-include-hell.patch +++ b/kernel/patches-4.19.x-rt/0139-rtmutex-Avoid-include-hell.patch @@ -1,7 +1,7 @@ -From a006197b4fa5fcec0fd8bee40072cf420689c354 Mon Sep 17 00:00:00 2001 +From 750ec04c4e7ab26c993aa398b5ce6a282d6e9f31 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 20:06:39 +0200 -Subject: [PATCH 139/269] rtmutex: Avoid include hell +Subject: [PATCH 139/268] rtmutex: Avoid include hell Include only the required raw types. This avoids pulling in the complete spinlock header which in turn requires rtmutex.h at some point. diff --git a/kernel/patches-4.19.x-rt/0140-rbtree-don-t-include-the-rcu-header.patch b/kernel/patches-4.19.x-rt/0140-rbtree-don-t-include-the-rcu-header.patch index 17adb1488..6d29940e9 100644 --- a/kernel/patches-4.19.x-rt/0140-rbtree-don-t-include-the-rcu-header.patch +++ b/kernel/patches-4.19.x-rt/0140-rbtree-don-t-include-the-rcu-header.patch @@ -1,7 +1,7 @@ -From 3465bbb3bbbf562cd3d67f1c2f387eaa48a1af70 Mon Sep 17 00:00:00 2001 +From d1b0b3ad3b5dd89c0ec4ca6abffc706b47d60305 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 16:56:02 +0100 -Subject: [PATCH 140/269] rbtree: don't include the rcu header +Subject: [PATCH 140/268] rbtree: don't include the rcu header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @@ -103,7 +103,7 @@ index 000000000000..7066962a4379 + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 0539f55bf7b3..63cd0a1a99a0 100644 +index 87eafcb3312f..b73715c3c3c2 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -42,6 +42,7 @@ @@ -114,7 +114,7 @@ index 0539f55bf7b3..63cd0a1a99a0 100644 #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) -@@ -371,54 +372,6 @@ static inline void rcu_preempt_sleep_check(void) { } +@@ -369,54 +370,6 @@ static inline void rcu_preempt_sleep_check(void) { } ((typeof(*p) __force __kernel *)(________p1)); \ }) diff --git a/kernel/patches-4.19.x-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/kernel/patches-4.19.x-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch index 1251e1280..bef16f85a 100644 --- a/kernel/patches-4.19.x-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch +++ b/kernel/patches-4.19.x-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch @@ -1,7 +1,7 @@ -From 28e2025df13c6a1c66fae452e91d26f8d2755460 Mon Sep 17 00:00:00 2001 +From 
827993141f8e1aad6255a651f570a2e77b2649aa Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 16:14:22 +0200 -Subject: [PATCH 141/269] rtmutex: Provide rt_mutex_slowlock_locked() +Subject: [PATCH 141/268] rtmutex: Provide rt_mutex_slowlock_locked() This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt. diff --git a/kernel/patches-4.19.x-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch b/kernel/patches-4.19.x-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch index 965c9be31..afd064e2a 100644 --- a/kernel/patches-4.19.x-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch +++ b/kernel/patches-4.19.x-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch @@ -1,7 +1,7 @@ -From cc9444912602fb283e5e75dc9ca36ee98cf8d0e9 Mon Sep 17 00:00:00 2001 +From b3ba05bf82300eb35078a61e5e33a1c12c9b8e6f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 16:36:39 +0200 -Subject: [PATCH 142/269] rtmutex: export lockdep-less version of rt_mutex's +Subject: [PATCH 142/268] rtmutex: export lockdep-less version of rt_mutex's lock, trylock and unlock Required for lock implementation ontop of rtmutex. diff --git a/kernel/patches-4.19.x-rt/0143-rtmutex-add-sleeping-lock-implementation.patch b/kernel/patches-4.19.x-rt/0143-rtmutex-add-sleeping-lock-implementation.patch index 603ed5791..b5570d37e 100644 --- a/kernel/patches-4.19.x-rt/0143-rtmutex-add-sleeping-lock-implementation.patch +++ b/kernel/patches-4.19.x-rt/0143-rtmutex-add-sleeping-lock-implementation.patch @@ -1,7 +1,7 @@ -From 162034b085d74f4c4131bf4dc0c229a4c971cfae Mon Sep 17 00:00:00 2001 +From cfc52d28d5405bb2efe7c9d06ebc5920852245ad Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:11:19 +0200 -Subject: [PATCH 143/269] rtmutex: add sleeping lock implementation +Subject: [PATCH 143/268] rtmutex: add sleeping lock implementation Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior create mode 100644 include/linux/spinlock_types_rt.h diff --git a/include/linux/kernel.h b/include/linux/kernel.h -index d6aac75b51ba..e3f1a7c3b953 100644 +index 3d83ebb302cf..d81a153df451 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -259,6 +259,9 @@ extern int _cond_resched(void); @@ -395,7 +395,7 @@ index 000000000000..3e3d8c5f7a9a + +#endif diff --git a/kernel/fork.c b/kernel/fork.c -index 8a9241afefb0..f62ae61064c7 100644 +index 247b08eb66c8..96297e71019c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -895,6 +895,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) @@ -407,10 +407,10 @@ index 8a9241afefb0..f62ae61064c7 100644 account_kernel_stack(tsk, 1); diff --git a/kernel/futex.c b/kernel/futex.c -index eeb3e16fb9ec..2c5a5e180223 100644 +index 4d6501d689b5..fe90164aa6ec 100644 --- a/kernel/futex.c +++ b/kernel/futex.c -@@ -1474,6 +1474,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ +@@ -1476,6 +1476,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ struct task_struct *new_owner; bool postunlock = false; DEFINE_WAKE_Q(wake_q); @@ -435,7 +435,7 @@ index eeb3e16fb9ec..2c5a5e180223 100644 return ret; } -@@ -2853,7 +2854,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, +@@ -2870,7 +2871,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, goto no_block; } @@ -444,7 +444,7 @@ index 
eeb3e16fb9ec..2c5a5e180223 100644 /* * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not -@@ -3233,7 +3234,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3263,7 +3264,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ @@ -1143,7 +1143,7 @@ index 5955ad2aa2a8..6fcf0a3e180d 100644 #ifdef CONFIG_DEBUG_RT_MUTEXES # include "rtmutex-debug.h" diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 516f05702550..e699500aea26 100644 +index a8e9283f018c..868d3395c3cf 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -401,9 +401,15 @@ static bool set_nr_if_polling(struct task_struct *p) diff --git a/kernel/patches-4.19.x-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch b/kernel/patches-4.19.x-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch index 36b22d9f6..96298795f 100644 --- a/kernel/patches-4.19.x-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch +++ b/kernel/patches-4.19.x-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch @@ -1,7 +1,7 @@ -From b2eccb42878894e44f005029aa9b2fc9962d9093 Mon Sep 17 00:00:00 2001 +From 3f4998a83b09b0003d56f8f6a9932bcdc38ddfa9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:17:03 +0200 -Subject: [PATCH 144/269] rtmutex: add mutex implementation based on rtmutex +Subject: [PATCH 144/268] rtmutex: add mutex implementation based on rtmutex Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/patches-4.19.x-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch b/kernel/patches-4.19.x-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch index fcb7b11a6..d83658936 100644 --- a/kernel/patches-4.19.x-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch +++ b/kernel/patches-4.19.x-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch @@ -1,7 +1,7 @@ -From d8e44c235bb3238fc1848c72b906014f1d9a5fb1 Mon Sep 17 00:00:00 2001 +From 7db979724ca86664b3bc0b4448eb05fcc06eb226 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:28:34 +0200 -Subject: [PATCH 145/269] rtmutex: add rwsem implementation based on rtmutex +Subject: [PATCH 145/268] rtmutex: add rwsem implementation based on rtmutex The RT specific R/W semaphore implementation restricts the number of readers to one because a writer cannot block on multiple readers and inherit its diff --git a/kernel/patches-4.19.x-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch b/kernel/patches-4.19.x-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch index 02a983455..f98b8109a 100644 --- a/kernel/patches-4.19.x-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch +++ b/kernel/patches-4.19.x-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch @@ -1,7 +1,7 @@ -From d49ee1d88e89db7c1a404171e67553b7695c349c Mon Sep 17 00:00:00 2001 +From 4902321bc0e6616144d8a98ddadff7f48f0ecebc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:18:06 +0200 -Subject: [PATCH 146/269] rtmutex: add rwlock implementation based on rtmutex +Subject: [PATCH 146/268] rtmutex: add rwlock implementation based on rtmutex The implementation is bias-based, similar to the rwsem implementation. 
diff --git a/kernel/patches-4.19.x-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch b/kernel/patches-4.19.x-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch index adbbbab00..069804916 100644 --- a/kernel/patches-4.19.x-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch +++ b/kernel/patches-4.19.x-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch @@ -1,7 +1,7 @@ -From f4e21a9f84eb9919949bfe5763eb96637b90bb1e Mon Sep 17 00:00:00 2001 +From 647b6c69a2a6ddfeb44274f9e73f00ceebc3abff Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 11 Jan 2019 21:16:31 +0100 -Subject: [PATCH 147/269] rtmutex/rwlock: preserve state like a sleeping lock +Subject: [PATCH 147/268] rtmutex/rwlock: preserve state like a sleeping lock The rwlock is spinning while acquiring a lock. Therefore it must become a sleeping lock on RT and preserve its task state while sleeping and diff --git a/kernel/patches-4.19.x-rt/0148-rtmutex-wire-up-RT-s-locking.patch b/kernel/patches-4.19.x-rt/0148-rtmutex-wire-up-RT-s-locking.patch index 151cf7766..f2f76ecb3 100644 --- a/kernel/patches-4.19.x-rt/0148-rtmutex-wire-up-RT-s-locking.patch +++ b/kernel/patches-4.19.x-rt/0148-rtmutex-wire-up-RT-s-locking.patch @@ -1,7 +1,7 @@ -From 145de90802b872003bf17064f49d5b1ea94f1a5f Mon Sep 17 00:00:00 2001 +From 36b9c9892b97509dee373fb324375d3c91b057b3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:31:14 +0200 -Subject: [PATCH 148/269] rtmutex: wire up RT's locking +Subject: [PATCH 148/268] rtmutex: wire up RT's locking Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/patches-4.19.x-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/kernel/patches-4.19.x-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch index c7cb5e1c6..5420e9870 100644 --- a/kernel/patches-4.19.x-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch +++ b/kernel/patches-4.19.x-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch @@ -1,7 +1,7 @@ -From cb5d05fc6f3f2a23c0dc2d3cdf925e62d8e9e13f Mon Sep 17 00:00:00 2001 +From 587ca9ed2107d61e00da8de96a3e9b978ebc3841 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 12 Oct 2017 17:34:38 +0200 -Subject: [PATCH 149/269] rtmutex: add ww_mutex addon for mutex-rt +Subject: [PATCH 149/268] rtmutex: add ww_mutex addon for mutex-rt Signed-off-by: Sebastian Andrzej Siewior --- diff --git a/kernel/patches-4.19.x-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch b/kernel/patches-4.19.x-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch index 3430fe1c3..eef706c9e 100644 --- a/kernel/patches-4.19.x-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch +++ b/kernel/patches-4.19.x-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch @@ -1,7 +1,7 @@ -From 77032b07bcce84656ba960fea1a786fda5dcd81a Mon Sep 17 00:00:00 2001 +From 0714c0e1fc16782acb7f0db5ae6f50d6b30ec26f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 14:58:57 +0200 -Subject: [PATCH 150/269] kconfig: Add PREEMPT_RT_FULL +Subject: [PATCH 150/268] kconfig: Add PREEMPT_RT_FULL Introduce the final symbol for PREEMPT_RT_FULL. 
diff --git a/kernel/patches-4.19.x-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/kernel/patches-4.19.x-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch index f13969df9..6b422085c 100644 --- a/kernel/patches-4.19.x-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch +++ b/kernel/patches-4.19.x-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch @@ -1,7 +1,7 @@ -From 810f1d5d210b1101d5b93300358d6362861ea392 Mon Sep 17 00:00:00 2001 +From 4cd478418b45517f4dfd003715b1b3af02880d5a Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 13 Nov 2017 12:56:53 -0500 -Subject: [PATCH 151/269] locking/rt-mutex: fix deadlock in device mapper / +Subject: [PATCH 151/268] locking/rt-mutex: fix deadlock in device mapper / block-IO When some block device driver creates a bio and submits it to another diff --git a/kernel/patches-4.19.x-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch b/kernel/patches-4.19.x-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch index f28a3c08c..e2580ace0 100644 --- a/kernel/patches-4.19.x-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch +++ b/kernel/patches-4.19.x-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch @@ -1,7 +1,7 @@ -From 9c3afee65f743bf1492e76f16139111e10d8f205 Mon Sep 17 00:00:00 2001 +From 46adf7b6e8e7cc3956f5181cc5ef01a76fbc70da Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 4 Jan 2019 15:33:21 -0500 -Subject: [PATCH 152/269] locking/rt-mutex: Flush block plug on __down_read() +Subject: [PATCH 152/268] locking/rt-mutex: Flush block plug on __down_read() __down_read() bypasses the rtmutex frontend to call rt_mutex_slowlock_locked() directly, and thus it needs to call diff --git a/kernel/patches-4.19.x-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch b/kernel/patches-4.19.x-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch index ac45429ab..7f25aed67 100644 --- a/kernel/patches-4.19.x-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch +++ b/kernel/patches-4.19.x-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch @@ -1,7 +1,7 @@ -From 4a9a885ab4f7e220568aa7c19704f1f6b020f545 Mon Sep 17 00:00:00 2001 +From 58b55a9d70bdab31ff9d6119ce0958ae8c7234c0 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 16 Nov 2017 16:48:48 +0100 -Subject: [PATCH 153/269] locking/rtmutex: re-init the wait_lock in +Subject: [PATCH 153/268] locking/rtmutex: re-init the wait_lock in rt_mutex_init_proxy_locked() We could provide a key-class for the lockdep (and fixup all callers) or diff --git a/kernel/patches-4.19.x-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/kernel/patches-4.19.x-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch index 86fbca9f9..ce422bc04 100644 --- a/kernel/patches-4.19.x-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ b/kernel/patches-4.19.x-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -1,7 +1,7 @@ -From de7eff6fda53e683a83289d9c0c0a2d774fbfe92 Mon Sep 17 00:00:00 2001 +From 91b6c0acd7435a12a250d41c7c7cc9c6f38cddae Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 18:21:04 +0200 -Subject: [PATCH 154/269] ptrace: fix ptrace vs tasklist_lock race +Subject: [PATCH 154/268] ptrace: fix ptrace vs tasklist_lock race As explained by Alexander Fyodorov : @@ -99,10 +99,10 @@ index 1797fd3c8cbb..25e9a40f9576 100644 * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in 
places that are safe. The return diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index 21fec73d45d4..9c8d6f9f3a3a 100644 +index fc0d667f5792..536a917ac6c0 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c -@@ -175,7 +175,14 @@ static bool ptrace_freeze_traced(struct task_struct *task) +@@ -176,7 +176,14 @@ static bool ptrace_freeze_traced(struct task_struct *task) spin_lock_irq(&task->sighand->siglock); if (task_is_traced(task) && !__fatal_signal_pending(task)) { @@ -119,7 +119,7 @@ index 21fec73d45d4..9c8d6f9f3a3a 100644 } spin_unlock_irq(&task->sighand->siglock); diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index e699500aea26..14eb51dae23d 100644 +index 868d3395c3cf..b2149a7ed3cd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1349,6 +1349,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, diff --git a/kernel/patches-4.19.x-rt/0155-rtmutex-annotate-sleeping-lock-context.patch b/kernel/patches-4.19.x-rt/0155-rtmutex-annotate-sleeping-lock-context.patch index 97418efde..0547794cc 100644 --- a/kernel/patches-4.19.x-rt/0155-rtmutex-annotate-sleeping-lock-context.patch +++ b/kernel/patches-4.19.x-rt/0155-rtmutex-annotate-sleeping-lock-context.patch @@ -1,7 +1,7 @@ -From 2a9b009589ed8b11c6c94e2af70c3d6fc4c957b8 Mon Sep 17 00:00:00 2001 +From bc76479aeddec4b75a6e18bf57b6ac3df6b6ce97 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 21 Sep 2017 14:25:13 +0200 -Subject: [PATCH 155/269] rtmutex: annotate sleeping lock context +Subject: [PATCH 155/268] rtmutex: annotate sleeping lock context The RCU code complains on schedule() within a rcu_readlock() section. The valid scenario on -RT is if a sleeping is held. In order to suppress @@ -249,10 +249,10 @@ index a97c20ea9bce..564e3927e7b0 100644 !t->rcu_read_unlock_special.b.blocked) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 14eb51dae23d..a5226728e407 100644 +index b2149a7ed3cd..ce1cb23cb78f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7309,4 +7309,49 @@ void migrate_enable(void) +@@ -7316,4 +7316,49 @@ void migrate_enable(void) preempt_enable(); } EXPORT_SYMBOL(migrate_enable); diff --git a/kernel/patches-4.19.x-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch b/kernel/patches-4.19.x-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch index 417953fee..f902648ad 100644 --- a/kernel/patches-4.19.x-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch +++ b/kernel/patches-4.19.x-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch @@ -1,7 +1,7 @@ -From 09cc5496ae17c924c25e80d5a300901957c44b54 Mon Sep 17 00:00:00 2001 +From 2e8a1d88c6613f72f219e3cc24178658f6abed52 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 5 Jul 2018 14:44:51 +0200 -Subject: [PATCH 156/269] sched/migrate_disable: fallback to preempt_disable() +Subject: [PATCH 156/268] sched/migrate_disable: fallback to preempt_disable() instead barrier() On SMP + !RT migrate_disable() is still around. 
It is not part of spin_lock() @@ -93,7 +93,7 @@ index 8f0bb5f6d39e..a023e1ba5d8f 100644 # endif #endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index a5226728e407..fb205b1ec799 100644 +index ce1cb23cb78f..36f791ff52bc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1031,7 +1031,7 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma @@ -123,7 +123,7 @@ index a5226728e407..fb205b1ec799 100644 if (__migrate_disabled(p)) { p->migrate_disable_update = 1; goto out; -@@ -7165,7 +7165,7 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7172,7 +7172,7 @@ const u32 sched_prio_to_wmult[40] = { #undef CREATE_TRACE_POINTS @@ -132,7 +132,7 @@ index a5226728e407..fb205b1ec799 100644 static inline void update_nr_migratory(struct task_struct *p, long delta) -@@ -7313,45 +7313,44 @@ EXPORT_SYMBOL(migrate_enable); +@@ -7320,45 +7320,44 @@ EXPORT_SYMBOL(migrate_enable); #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) void migrate_disable(void) { @@ -186,7 +186,7 @@ index a5226728e407..fb205b1ec799 100644 EXPORT_SYMBOL(migrate_enable); #endif diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c -index 34c27afae009..cb6ad6fd2320 100644 +index 5027158d3908..dd6c364d6f01 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -982,7 +982,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, diff --git a/kernel/patches-4.19.x-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/kernel/patches-4.19.x-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch index a5c135502..82014fca4 100644 --- a/kernel/patches-4.19.x-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch +++ b/kernel/patches-4.19.x-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch @@ -1,7 +1,7 @@ -From e283cad9ed8ce6e508399dc21fde2645ff2a9259 Mon Sep 17 00:00:00 2001 +From 0d77c9cbc5377be77ff4b40a1ce75de234d6179d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 4 Aug 2017 17:40:42 +0200 -Subject: [PATCH 157/269] locking: don't check for __LINUX_SPINLOCK_TYPES_H on +Subject: [PATCH 157/268] locking: don't check for __LINUX_SPINLOCK_TYPES_H on -RT archs Upstream uses arch_spinlock_t within spinlock_t and requests that diff --git a/kernel/patches-4.19.x-rt/0158-rcu-Frob-softirq-test.patch b/kernel/patches-4.19.x-rt/0158-rcu-Frob-softirq-test.patch index e9c931860..4bd8b9ac5 100644 --- a/kernel/patches-4.19.x-rt/0158-rcu-Frob-softirq-test.patch +++ b/kernel/patches-4.19.x-rt/0158-rcu-Frob-softirq-test.patch @@ -1,7 +1,7 @@ -From 0a4604cc3cc194643ed11ab6909612b9bed4b4ad Mon Sep 17 00:00:00 2001 +From b7863852fbe6ca45e5d867b15f392c1ee3412a5c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 13 Aug 2011 00:23:17 +0200 -Subject: [PATCH 158/269] rcu: Frob softirq test +Subject: [PATCH 158/268] rcu: Frob softirq test With RT_FULL we get the below wreckage: diff --git a/kernel/patches-4.19.x-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch b/kernel/patches-4.19.x-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch index 41805695c..e446b3137 100644 --- a/kernel/patches-4.19.x-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch +++ b/kernel/patches-4.19.x-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch @@ -1,7 +1,7 @@ -From dd8eae9da2e22bd7b41cea43792b107b3deb3fd7 Mon Sep 17 00:00:00 2001 +From 4eb76fc1dccdec1b6b64cb5be42a2fe21d1e3e67 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Oct 2011 11:59:38 -0700 -Subject: [PATCH 159/269] rcu: Merge RCU-bh into RCU-preempt 
+Subject: [PATCH 159/268] rcu: Merge RCU-bh into RCU-preempt The Linux kernel has long RCU-bh read-side critical sections that intolerably increase scheduling latency under mainline's RCU-bh rules, @@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner 7 files changed, 73 insertions(+), 2 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 63cd0a1a99a0..60a9b5feefe2 100644 +index b73715c3c3c2..241a4a9577a0 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -56,7 +56,11 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); @@ -49,7 +49,7 @@ index 63cd0a1a99a0..60a9b5feefe2 100644 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void synchronize_sched(void); void rcu_barrier_tasks(void); -@@ -263,7 +267,14 @@ extern struct lockdep_map rcu_sched_lock_map; +@@ -261,7 +265,14 @@ extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); @@ -64,7 +64,7 @@ index 63cd0a1a99a0..60a9b5feefe2 100644 int rcu_read_lock_sched_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -@@ -663,10 +674,14 @@ static inline void rcu_read_unlock(void) +@@ -661,10 +672,14 @@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); @@ -79,7 +79,7 @@ index 63cd0a1a99a0..60a9b5feefe2 100644 } /* -@@ -676,10 +691,14 @@ static inline void rcu_read_lock_bh(void) +@@ -674,10 +689,14 @@ static inline void rcu_read_lock_bh(void) */ static inline void rcu_read_unlock_bh(void) { @@ -155,7 +155,7 @@ index 4d04683c31b2..808cce9a5d43 100644 #ifdef CONFIG_RCU_NOCB_CPU diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c -index c596c6f1e457..7d2a615601e7 100644 +index 0b7af7e2bcbb..e95d121efc80 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -434,6 +434,7 @@ static struct rcu_torture_ops rcu_ops = { diff --git a/kernel/patches-4.19.x-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch b/kernel/patches-4.19.x-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch index 4abe02f10..50d3d92b3 100644 --- a/kernel/patches-4.19.x-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch +++ b/kernel/patches-4.19.x-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch @@ -1,7 +1,7 @@ -From 435eba4b4298b15db7304d4b60e313d95f9b004f Mon Sep 17 00:00:00 2001 +From 7dd717c039fab874d3827b2a76502589997db0f7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 5 Oct 2011 11:45:18 -0700 -Subject: [PATCH 160/269] rcu: Make ksoftirqd do RCU quiescent states +Subject: [PATCH 160/268] rcu: Make ksoftirqd do RCU quiescent states Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable to network-based denial-of-service attacks. This patch therefore diff --git a/kernel/patches-4.19.x-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch b/kernel/patches-4.19.x-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch index c3f02ce94..35b5c1ae2 100644 --- a/kernel/patches-4.19.x-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch +++ b/kernel/patches-4.19.x-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch @@ -1,7 +1,7 @@ -From ca691ed27290645375a66795b1d87fb910501211 Mon Sep 17 00:00:00 2001 +From 9fec7184287147b427b5a3a3cb0a8c1eaf546f17 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Mon, 4 Nov 2013 13:21:10 -0800 -Subject: [PATCH 161/269] rcu: Eliminate softirq processing from rcutree +Subject: [PATCH 161/268] rcu: Eliminate softirq processing from rcutree Running RCU out of softirq is a problem for some workloads that would like to manage RCU core processing independently of other softirq work, diff --git a/kernel/patches-4.19.x-rt/0162-srcu-use-cpu_online-instead-custom-check.patch b/kernel/patches-4.19.x-rt/0162-srcu-use-cpu_online-instead-custom-check.patch index eafc6f037..5f4dfc3e2 100644 --- a/kernel/patches-4.19.x-rt/0162-srcu-use-cpu_online-instead-custom-check.patch +++ b/kernel/patches-4.19.x-rt/0162-srcu-use-cpu_online-instead-custom-check.patch @@ -1,7 +1,7 @@ -From cf507028c7a29d61fc47c6209aeca2d9d7cd0876 Mon Sep 17 00:00:00 2001 +From 65dde8c4bc16a1dde81f4aac939c46b6a5fe3c14 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Sep 2017 14:43:41 +0200 -Subject: [PATCH 162/269] srcu: use cpu_online() instead custom check +Subject: [PATCH 162/268] srcu: use cpu_online() instead custom check The current check via srcu_online is slightly racy because after looking at srcu_online there could be an interrupt that interrupted us long diff --git a/kernel/patches-4.19.x-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch b/kernel/patches-4.19.x-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch index e3658cc81..52cbf2ea5 100644 --- a/kernel/patches-4.19.x-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch +++ b/kernel/patches-4.19.x-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch @@ -1,7 +1,7 @@ -From 162767bbf4dfe16744f93ead7a5c938defc00489 Mon Sep 17 00:00:00 2001 +From 938c5d8e0a8dce8e3781b26dfb9c8f60a81d2072 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 12 Oct 2017 18:37:12 +0200 -Subject: [PATCH 163/269] srcu: replace local_irqsave() with a locallock +Subject: [PATCH 163/268] srcu: replace local_irqsave() with a locallock There are two instances which disable interrupts in order to become a stable this_cpu_ptr() pointer. 
The restore part is coupled with diff --git a/kernel/patches-4.19.x-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/kernel/patches-4.19.x-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch index bbe5a9de0..c7771bad4 100644 --- a/kernel/patches-4.19.x-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch +++ b/kernel/patches-4.19.x-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch @@ -1,7 +1,7 @@ -From f723e17e9826ed2e03a4b4c40c575ea2e2bf2c56 Mon Sep 17 00:00:00 2001 +From 12c9b9d7ba61f8ce4f88d27f7888b3952e4a37ae Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Wed, 12 Oct 2016 11:21:14 -0500 -Subject: [PATCH 164/269] rcu: enable rcu_normal_after_boot by default for RT +Subject: [PATCH 164/268] rcu: enable rcu_normal_after_boot by default for RT The forcing of an expedited grace period is an expensive and very RT-application unfriendly operation, as it forcibly preempts all running diff --git a/kernel/patches-4.19.x-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch b/kernel/patches-4.19.x-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch index 7acb9a394..187908603 100644 --- a/kernel/patches-4.19.x-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch +++ b/kernel/patches-4.19.x-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch @@ -1,7 +1,7 @@ -From ccd76e8feed9271e97bc207e13fce803567e1017 Mon Sep 17 00:00:00 2001 +From b49f07e955da380f07112ea70807f067ff8f58d6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 28 Jul 2011 13:32:57 +0200 -Subject: [PATCH 165/269] tty/serial/omap: Make the locking RT aware +Subject: [PATCH 165/268] tty/serial/omap: Make the locking RT aware The lock is a sleeping lock and local_irq_save() is not the optimsation we are looking for. Redo it to make it work on -RT and diff --git a/kernel/patches-4.19.x-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch b/kernel/patches-4.19.x-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch index db6500f64..60f426d6c 100644 --- a/kernel/patches-4.19.x-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch @@ -1,7 +1,7 @@ -From 9ad06fff0efb4629430d5ced37c81e4f3ef040bf Mon Sep 17 00:00:00 2001 +From 2e24a03516d7f9ec59e6566116447f590378de1b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 8 Jan 2013 21:36:51 +0100 -Subject: [PATCH 166/269] tty/serial/pl011: Make the locking work on RT +Subject: [PATCH 166/268] tty/serial/pl011: Make the locking work on RT The lock is a sleeping lock and local_irq_save() is not the optimsation we are looking for. Redo it to make it work on -RT and non-RT. 
diff --git a/kernel/patches-4.19.x-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch b/kernel/patches-4.19.x-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch index c445ed404..c43fd55ca 100644 --- a/kernel/patches-4.19.x-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch +++ b/kernel/patches-4.19.x-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch @@ -1,7 +1,7 @@ -From e30b0dc820111e11ecc71383d20682d2eee77061 Mon Sep 17 00:00:00 2001 +From 44737bfdb1bdf0012da1f5a8ceea5d865ad2d7cc Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Mon, 24 Sep 2018 10:29:01 +0200 -Subject: [PATCH 167/269] tty: serial: pl011: explicitly initialize the flags +Subject: [PATCH 167/268] tty: serial: pl011: explicitly initialize the flags variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 diff --git a/kernel/patches-4.19.x-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch b/kernel/patches-4.19.x-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch index 6ffd03ec2..3851bc154 100644 --- a/kernel/patches-4.19.x-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch +++ b/kernel/patches-4.19.x-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch @@ -1,7 +1,7 @@ -From 0a6ea176915e05db911401e89a925ee948f4434f Mon Sep 17 00:00:00 2001 +From a8c8d56e51832376b4f5ebc2328cff44af8d17c4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 14 Dec 2011 13:05:54 +0100 -Subject: [PATCH 168/269] rt: Improve the serial console PASS_LIMIT +Subject: [PATCH 168/268] rt: Improve the serial console PASS_LIMIT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit diff --git a/kernel/patches-4.19.x-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/kernel/patches-4.19.x-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch index 9218696f3..f94bca77c 100644 --- a/kernel/patches-4.19.x-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch +++ b/kernel/patches-4.19.x-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch @@ -1,7 +1,7 @@ -From 511eaf0e0ecbd9898b7f680f08ab0636062f3c7e Mon Sep 17 00:00:00 2001 +From b746b54e8838d94983e63144613ce2762b2efaa2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Apr 2016 16:55:02 +0200 -Subject: [PATCH 169/269] tty: serial: 8250: don't take the trylock during oops +Subject: [PATCH 169/268] tty: serial: 8250: don't take the trylock during oops An oops with irqs off (panic() from irqsafe hrtimer like the watchdog timer) will lead to a lockdep warning on each invocation and as such diff --git a/kernel/patches-4.19.x-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch b/kernel/patches-4.19.x-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch index ba7ee281a..b8fbbf09e 100644 --- a/kernel/patches-4.19.x-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch +++ b/kernel/patches-4.19.x-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch @@ -1,7 +1,7 @@ -From 7b2e3123b8a2c8f1df0aa040b4c58d2f443fa8a5 Mon Sep 17 00:00:00 2001 +From 659ec146a8ae4a93cb3db931421eebfb5aef1c58 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 23 Nov 2016 16:29:32 +0100 -Subject: [PATCH 170/269] locking/percpu-rwsem: Remove preempt_disable variants +Subject: [PATCH 170/268] locking/percpu-rwsem: Remove preempt_disable variants Effective revert commit: diff --git a/kernel/patches-4.19.x-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch 
b/kernel/patches-4.19.x-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch index db596df00..dbe5b6c0a 100644 --- a/kernel/patches-4.19.x-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch +++ b/kernel/patches-4.19.x-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch @@ -1,7 +1,7 @@ -From 28f91f849d8485292f7b25ce6a2ceae9fe18fb4d Mon Sep 17 00:00:00 2001 +From 072dae00e2d88ef93bfa2c80b4be0dad7ac8b15b Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Tue, 15 May 2012 13:53:56 +0800 -Subject: [PATCH 171/269] mm: Protect activate_mm() by +Subject: [PATCH 171/268] mm: Protect activate_mm() by preempt_[disable&enable]_rt() User preempt_*_rt instead of local_irq_*_rt or otherwise there will be diff --git a/kernel/patches-4.19.x-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch b/kernel/patches-4.19.x-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch index 0d9b01181..e73035424 100644 --- a/kernel/patches-4.19.x-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch +++ b/kernel/patches-4.19.x-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch @@ -1,7 +1,7 @@ -From bbbfae78f8bad17199822dcfb994d1c927de5c32 Mon Sep 17 00:00:00 2001 +From 97bc78094bf8bd0789398abb1bfdefef58b8b0a2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Sep 2017 12:32:34 +0200 -Subject: [PATCH 172/269] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD +Subject: [PATCH 172/268] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD init Commit 3d375d78593c ("mm: update callers to use HASH_ZERO flag") removed @@ -15,10 +15,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 11 insertions(+) diff --git a/fs/dcache.c b/fs/dcache.c -index cb515f183482..7e15f1bff5ea 100644 +index 6e0022326afe..10225a9135fb 100644 --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -3058,6 +3058,8 @@ __setup("dhash_entries=", set_dhash_entries); +@@ -3060,6 +3060,8 @@ __setup("dhash_entries=", set_dhash_entries); static void __init dcache_init_early(void) { @@ -27,7 +27,7 @@ index cb515f183482..7e15f1bff5ea 100644 /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. */ -@@ -3074,11 +3076,16 @@ static void __init dcache_init_early(void) +@@ -3076,11 +3078,16 @@ static void __init dcache_init_early(void) NULL, 0, 0); @@ -44,7 +44,7 @@ index cb515f183482..7e15f1bff5ea 100644 /* * A constructor could be added for stable state like the lists, * but it is probably not worth it because of the cache nature -@@ -3102,6 +3109,10 @@ static void __init dcache_init(void) +@@ -3104,6 +3111,10 @@ static void __init dcache_init(void) NULL, 0, 0); diff --git a/kernel/patches-4.19.x-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/kernel/patches-4.19.x-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch index 6e927e56d..8ee05d2b0 100644 --- a/kernel/patches-4.19.x-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch +++ b/kernel/patches-4.19.x-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch @@ -1,7 +1,7 @@ -From 2f25e633c3f100305735735e8f7728a335395f94 Mon Sep 17 00:00:00 2001 +From 4a8d6c41cb7db68eb7c40595089bbc166c5d6f2a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 20 Oct 2017 11:29:53 +0200 -Subject: [PATCH 173/269] fs/dcache: disable preemption on i_dir_seq's write +Subject: [PATCH 173/268] fs/dcache: disable preemption on i_dir_seq's write side i_dir_seq is an opencoded seqcounter. 
Based on the code it looks like we @@ -24,10 +24,10 @@ Signed-off-by: Sebastian Andrzej Siewior 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/fs/dcache.c b/fs/dcache.c -index 7e15f1bff5ea..173b53b536f0 100644 +index 10225a9135fb..dcde8ffe384c 100644 --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2400,9 +2400,10 @@ EXPORT_SYMBOL(d_rehash); +@@ -2404,9 +2404,10 @@ EXPORT_SYMBOL(d_rehash); static inline unsigned start_dir_add(struct inode *dir) { @@ -40,7 +40,7 @@ index 7e15f1bff5ea..173b53b536f0 100644 return n; cpu_relax(); } -@@ -2410,7 +2411,8 @@ static inline unsigned start_dir_add(struct inode *dir) +@@ -2414,7 +2415,8 @@ static inline unsigned start_dir_add(struct inode *dir) static inline void end_dir_add(struct inode *dir, unsigned n) { @@ -50,7 +50,7 @@ index 7e15f1bff5ea..173b53b536f0 100644 } static void d_wait_lookup(struct dentry *dentry) -@@ -2443,7 +2445,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, +@@ -2447,7 +2449,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, retry: rcu_read_lock(); @@ -59,7 +59,7 @@ index 7e15f1bff5ea..173b53b536f0 100644 r_seq = read_seqbegin(&rename_lock); dentry = __d_lookup_rcu(parent, name, &d_seq); if (unlikely(dentry)) { -@@ -2471,7 +2473,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, +@@ -2475,7 +2477,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, } hlist_bl_lock(b); @@ -114,10 +114,10 @@ index 0fb590d79f30..cd95874a1952 100644 } diff --git a/include/linux/fs.h b/include/linux/fs.h -index 7b6084854bfe..6782a83a8d4f 100644 +index d4e1b43a53c3..72749feed0e3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -669,7 +669,7 @@ struct inode { +@@ -678,7 +678,7 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; diff --git a/kernel/patches-4.19.x-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch b/kernel/patches-4.19.x-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch index d9e669185..a7b8945f8 100644 --- a/kernel/patches-4.19.x-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch +++ b/kernel/patches-4.19.x-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch @@ -1,7 +1,7 @@ -From cef566ebb92c429f8d12735d50bf7d6772daa4dc Mon Sep 17 00:00:00 2001 +From c2d337965b344aacba20864e4ddcb6272c6d40e6 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Mon, 7 May 2018 08:58:57 -0500 -Subject: [PATCH 174/269] squashfs: make use of local lock in multi_cpu +Subject: [PATCH 174/268] squashfs: make use of local lock in multi_cpu decompressor Currently, the squashfs multi_cpu decompressor makes use of diff --git a/kernel/patches-4.19.x-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch b/kernel/patches-4.19.x-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch index 2774e13e3..e6dbbbea6 100644 --- a/kernel/patches-4.19.x-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch +++ b/kernel/patches-4.19.x-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch @@ -1,7 +1,7 @@ -From 63284d578bc862d28f5f85f74fdc9fdadc90bea3 Mon Sep 17 00:00:00 2001 +From 29f4a4c9e93f54fc4db7a32b3963a3e5cf931839 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Tue, 17 Feb 2015 09:37:44 +0100 -Subject: [PATCH 175/269] thermal: Defer thermal wakups to threads +Subject: [PATCH 175/268] thermal: Defer thermal wakups to threads On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will call schedule while we run in irq context. 
diff --git a/kernel/patches-4.19.x-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch b/kernel/patches-4.19.x-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch index 6fbed2ef9..dfbe8f52d 100644 --- a/kernel/patches-4.19.x-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch +++ b/kernel/patches-4.19.x-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch @@ -1,7 +1,7 @@ -From ac8e13bf3ba7c4ef2587d4b8932ca56d30ca4841 Mon Sep 17 00:00:00 2001 +From 9b5de25508fdcf3dfaccbe5a2c99832a22508f21 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 11 Dec 2018 15:10:33 +0100 -Subject: [PATCH 176/269] x86/fpu: Disable preemption around local_bh_disable() +Subject: [PATCH 176/268] x86/fpu: Disable preemption around local_bh_disable() __fpu__restore_sig() restores the content of the FPU state in the CPUs and in order to avoid concurency it disbles BH. On !RT it also disables diff --git a/kernel/patches-4.19.x-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch b/kernel/patches-4.19.x-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch index f893c3102..c83174656 100644 --- a/kernel/patches-4.19.x-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch @@ -1,7 +1,7 @@ -From 364aac82cf51da276aaf325fbcc1d837b41ebd6d Mon Sep 17 00:00:00 2001 +From 84494b284beb71d7beb044cebab91eee663d083c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Jul 2011 16:35:35 +0200 -Subject: [PATCH 177/269] fs/epoll: Do not disable preemption on RT +Subject: [PATCH 177/268] fs/epoll: Do not disable preemption on RT ep_call_nested() takes a sleeping lock so we can't disable preemption. The light version is enough since ep_call_nested() doesn't mind beeing diff --git a/kernel/patches-4.19.x-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch b/kernel/patches-4.19.x-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch index c5e8b74c7..8ff1db0c8 100644 --- a/kernel/patches-4.19.x-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch +++ b/kernel/patches-4.19.x-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch @@ -1,7 +1,7 @@ -From 27414c4ed0a59bb7044e708938c07d3141da2f38 Mon Sep 17 00:00:00 2001 +From d8ebbf146ccc6acd85fd1ef0e0902b6862e20dbc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Jul 2011 11:39:36 +0200 -Subject: [PATCH 178/269] mm/vmalloc: Another preempt disable region which +Subject: [PATCH 178/268] mm/vmalloc: Another preempt disable region which sucks Avoid the preempt disable version of get_cpu_var(). The inner-lock should diff --git a/kernel/patches-4.19.x-rt/0179-block-mq-use-cpu_light.patch b/kernel/patches-4.19.x-rt/0179-block-mq-use-cpu_light.patch index 83d65236d..a4cd22224 100644 --- a/kernel/patches-4.19.x-rt/0179-block-mq-use-cpu_light.patch +++ b/kernel/patches-4.19.x-rt/0179-block-mq-use-cpu_light.patch @@ -1,7 +1,7 @@ -From 42ff48e7b8242871b11a0c7c5e8753c702c8aee5 Mon Sep 17 00:00:00 2001 +From 4997e907787c2348986768e3f3ad13f6276c19fa Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 9 Apr 2014 10:37:23 +0200 -Subject: [PATCH 179/269] block: mq: use cpu_light() +Subject: [PATCH 179/268] block: mq: use cpu_light() there is a might sleep splat because get_cpu() disables preemption and later we grab a lock. As a workaround for this we use get_cpu_light(). 
diff --git a/kernel/patches-4.19.x-rt/0180-block-mq-do-not-invoke-preempt_disable.patch b/kernel/patches-4.19.x-rt/0180-block-mq-do-not-invoke-preempt_disable.patch index 82ea47664..5d268adc2 100644 --- a/kernel/patches-4.19.x-rt/0180-block-mq-do-not-invoke-preempt_disable.patch +++ b/kernel/patches-4.19.x-rt/0180-block-mq-do-not-invoke-preempt_disable.patch @@ -1,7 +1,7 @@ -From 1574b433606302c16705ba46441b23c6f286e3a0 Mon Sep 17 00:00:00 2001 +From 58883e4aa517102ac5ff7360efd99eac09818b8a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 180/269] block/mq: do not invoke preempt_disable() +Subject: [PATCH 180/268] block/mq: do not invoke preempt_disable() preempt_disable() and get_cpu() don't play well together with the sleeping locks it tries to allocate later. @@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c -index b0d0b74cf5a6..430037cda971 100644 +index fa984527b1ae..8d00d8dc9e59 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -570,7 +570,7 @@ static void __blk_mq_complete_request(struct request *rq) diff --git a/kernel/patches-4.19.x-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch b/kernel/patches-4.19.x-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch index 3564ecb61..f03749ffc 100644 --- a/kernel/patches-4.19.x-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch +++ b/kernel/patches-4.19.x-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch @@ -1,7 +1,7 @@ -From 9ec5d3b932b407e0b6780392ddb1f7f2fe1251e4 Mon Sep 17 00:00:00 2001 +From 51879ee35d8ba158a433bdb91e441c4ce9c51e01 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Jan 2015 15:10:08 +0100 -Subject: [PATCH 181/269] block/mq: don't complete requests via IPI +Subject: [PATCH 181/268] block/mq: don't complete requests via IPI The IPI runs in hardirq context and there are sleeping locks. This patch moves the completion into a workqueue. 
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior 4 files changed, 30 insertions(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c -index eb8b52241453..581bf704154a 100644 +index 33488b1426b7..709a4108f78c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -189,6 +189,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq) @@ -29,7 +29,7 @@ index eb8b52241453..581bf704154a 100644 rq->q = q; rq->__sector = (sector_t) -1; diff --git a/block/blk-mq.c b/block/blk-mq.c -index 430037cda971..9560ebae322d 100644 +index 8d00d8dc9e59..9df82adf428b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -320,6 +320,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, diff --git a/kernel/patches-4.19.x-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch b/kernel/patches-4.19.x-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch index bd6ec9e39..d7eebea76 100644 --- a/kernel/patches-4.19.x-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch +++ b/kernel/patches-4.19.x-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch @@ -1,7 +1,7 @@ -From 6c971609e903127436e633a14252b0f3cf42c919 Mon Sep 17 00:00:00 2001 +From ea45edac555bc904634e0a24666a413fd8f45ce8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 6 Apr 2010 16:51:31 +0200 -Subject: [PATCH 182/269] md: raid5: Make raid5_percpu handling RT aware +Subject: [PATCH 182/268] md: raid5: Make raid5_percpu handling RT aware __raid_run_ops() disables preemption with get_cpu() around the access to the raid5_percpu variables. That causes scheduling while atomic @@ -19,7 +19,7 @@ Tested-by: Udo van den Heuvel 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c -index ae38895c44b2..abc559dc516f 100644 +index f237d6f30752..adec2947c3e1 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) @@ -43,7 +43,7 @@ index ae38895c44b2..abc559dc516f 100644 } static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) -@@ -6803,6 +6805,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) +@@ -6811,6 +6813,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) __func__, cpu); return -ENOMEM; } @@ -51,7 +51,7 @@ index ae38895c44b2..abc559dc516f 100644 return 0; } -@@ -6813,7 +6816,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) +@@ -6821,7 +6824,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) conf->percpu = alloc_percpu(struct raid5_percpu); if (!conf->percpu) return -ENOMEM; diff --git a/kernel/patches-4.19.x-rt/0183-rt-Introduce-cpu_chill.patch b/kernel/patches-4.19.x-rt/0183-rt-Introduce-cpu_chill.patch index 902762262..4d63aee27 100644 --- a/kernel/patches-4.19.x-rt/0183-rt-Introduce-cpu_chill.patch +++ b/kernel/patches-4.19.x-rt/0183-rt-Introduce-cpu_chill.patch @@ -1,7 +1,7 @@ -From 70f8f6e166aff0215e6e440d9365f8ce0ade2336 Mon Sep 17 00:00:00 2001 +From 0374fcc1623d37d71ef922ad3b9f2b6d401ff11f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 20:51:03 +0100 -Subject: [PATCH 183/269] rt: Introduce cpu_chill() +Subject: [PATCH 183/268] rt: Introduce cpu_chill() Retry loops on RT might loop forever when the modifying side was preempted. Add cpu_chill() to replace cpu_relax(). 
cpu_chill() diff --git a/kernel/patches-4.19.x-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch b/kernel/patches-4.19.x-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch index 93d0dc1d8..8ac87e653 100644 --- a/kernel/patches-4.19.x-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch +++ b/kernel/patches-4.19.x-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch @@ -1,7 +1,7 @@ -From 420f45d08b300f698438e0a208f03e0f89aa8009 Mon Sep 17 00:00:00 2001 +From 176a3497716d87ec778211490d443f51dbcd9bc9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Feb 2019 16:59:15 +0100 -Subject: [PATCH 184/269] hrtimer: Don't lose state in cpu_chill() +Subject: [PATCH 184/268] hrtimer: Don't lose state in cpu_chill() In cpu_chill() the state is set to TASK_UNINTERRUPTIBLE and a timer is programmed. On return the state is always TASK_RUNNING which means we diff --git a/kernel/patches-4.19.x-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch b/kernel/patches-4.19.x-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch index e0a10d096..fe5635d78 100644 --- a/kernel/patches-4.19.x-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch +++ b/kernel/patches-4.19.x-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch @@ -1,7 +1,7 @@ -From 39c4c7819a0377ee59a1197664454bc54012907b Mon Sep 17 00:00:00 2001 +From aa5a624b58a513730b1c7b73a1a2a9d1e52265b7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 12:31:10 +0100 -Subject: [PATCH 185/269] hrtimer: cpu_chill(): save task state in +Subject: [PATCH 185/268] hrtimer: cpu_chill(): save task state in ->saved_state() In the previous change I saved the current task state on stack. This was diff --git a/kernel/patches-4.19.x-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/kernel/patches-4.19.x-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch index d82e8df3b..d517c366f 100644 --- a/kernel/patches-4.19.x-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch +++ b/kernel/patches-4.19.x-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch @@ -1,7 +1,7 @@ -From 3933bc43d3be58eb86a118b1bd147cd4a2c9b33d Mon Sep 17 00:00:00 2001 +From 34c9d1a4c3123b2571722e9f3d04cca17841acc1 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 13 Mar 2018 13:49:16 +0100 -Subject: [PATCH 186/269] block: blk-mq: move blk_queue_usage_counter_release() +Subject: [PATCH 186/268] block: blk-mq: move blk_queue_usage_counter_release() into process context | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 @@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c -index 581bf704154a..0a651b442cec 100644 +index 709a4108f78c..6c1421ea14df 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -968,12 +968,21 @@ void blk_queue_exit(struct request_queue *q) @@ -77,7 +77,7 @@ index 581bf704154a..0a651b442cec 100644 } static void blk_rq_timed_out_timer(struct timer_list *t) -@@ -1066,6 +1075,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, +@@ -1070,6 +1079,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); init_waitqueue_head(&q->mq_freeze_wq); @@ -85,7 +85,7 @@ index 581bf704154a..0a651b442cec 100644 /* * Init percpu_ref in atomic mode so that it's faster to shutdown. 
-@@ -3956,6 +3966,8 @@ int __init blk_dev_init(void) +@@ -3960,6 +3970,8 @@ int __init blk_dev_init(void) if (!kblockd_workqueue) panic("Failed to create kblockd\n"); diff --git a/kernel/patches-4.19.x-rt/0187-block-Use-cpu_chill-for-retry-loops.patch b/kernel/patches-4.19.x-rt/0187-block-Use-cpu_chill-for-retry-loops.patch index 8b194178d..cfe5a1c56 100644 --- a/kernel/patches-4.19.x-rt/0187-block-Use-cpu_chill-for-retry-loops.patch +++ b/kernel/patches-4.19.x-rt/0187-block-Use-cpu_chill-for-retry-loops.patch @@ -1,7 +1,7 @@ -From 608d51b75238d882851b21f980b37aa54d26620e Mon Sep 17 00:00:00 2001 +From 6860d2d566e69688c552d8044e68c006c52f1893 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 20 Dec 2012 18:28:26 +0100 -Subject: [PATCH 187/269] block: Use cpu_chill() for retry loops +Subject: [PATCH 187/268] block: Use cpu_chill() for retry loops Retry loops on RT might loop forever when the modifying side was preempted. Steven also observed a live lock when there was a diff --git a/kernel/patches-4.19.x-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch b/kernel/patches-4.19.x-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch index 1375f0ab1..d6170c86a 100644 --- a/kernel/patches-4.19.x-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch +++ b/kernel/patches-4.19.x-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch @@ -1,7 +1,7 @@ -From 4e8f4b38754fe437338d35cde5fafd8bfa53aaa3 Mon Sep 17 00:00:00 2001 +From 1ab863bb2d985228b7bec14117d4467e44058b03 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 21:00:34 +0100 -Subject: [PATCH 188/269] fs: dcache: Use cpu_chill() in trylock loops +Subject: [PATCH 188/268] fs: dcache: Use cpu_chill() in trylock loops Retry loops on RT might loop forever when the modifying side was preempted. Use cpu_chill() instead of cpu_relax() to let the system diff --git a/kernel/patches-4.19.x-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch b/kernel/patches-4.19.x-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch index f7a3cfe77..bdd41ae92 100644 --- a/kernel/patches-4.19.x-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch +++ b/kernel/patches-4.19.x-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch @@ -1,7 +1,7 @@ -From 128245989afa7b20f2b7e7fc43727086cce5bf13 Mon Sep 17 00:00:00 2001 +From b51485a019f68d142abc91c7b4aae67bab9dc76f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 21:10:04 +0100 -Subject: [PATCH 189/269] net: Use cpu_chill() instead of cpu_relax() +Subject: [PATCH 189/268] net: Use cpu_chill() instead of cpu_relax() Retry loops on RT might loop forever when the modifying side was preempted. 
Use cpu_chill() instead of cpu_relax() to let the system @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c -index a0d295478e69..ce1bfcbbda45 100644 +index d98fcf926166..e99b69846aeb 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -63,6 +63,7 @@ @@ -44,7 +44,7 @@ index a0d295478e69..ce1bfcbbda45 100644 } prb_close_block(pkc, pbd, po, status); diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c -index 63c8d107adcf..671f8ad38864 100644 +index 0b347f46b2f4..f395f06031bc 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -34,6 +34,7 @@ diff --git a/kernel/patches-4.19.x-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/kernel/patches-4.19.x-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch index c8e75b182..9675c4fa4 100644 --- a/kernel/patches-4.19.x-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch +++ b/kernel/patches-4.19.x-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch @@ -1,7 +1,7 @@ -From 0e5745ddcc9a0454ba787dfcb0da5e9753b787dc Mon Sep 17 00:00:00 2001 +From 39d3c3e68c29022f49c912be08823922d6864dce Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 14 Sep 2016 14:35:49 +0200 -Subject: [PATCH 190/269] fs/dcache: use swait_queue instead of waitqueue +Subject: [PATCH 190/268] fs/dcache: use swait_queue instead of waitqueue __d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock() which disables preemption. As a workaround convert it to swait. @@ -35,10 +35,10 @@ index 3925a7bfc74d..33f7723fb83e 100644 cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); diff --git a/fs/dcache.c b/fs/dcache.c -index 173b53b536f0..7cb44c7218a4 100644 +index dcde8ffe384c..b2a00f3ff7df 100644 --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2417,21 +2417,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n) +@@ -2421,21 +2421,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n) static void d_wait_lookup(struct dentry *dentry) { @@ -74,7 +74,7 @@ index 173b53b536f0..7cb44c7218a4 100644 { unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); -@@ -2546,7 +2549,7 @@ void __d_lookup_done(struct dentry *dentry) +@@ -2550,7 +2553,7 @@ void __d_lookup_done(struct dentry *dentry) hlist_bl_lock(b); dentry->d_flags &= ~DCACHE_PAR_LOOKUP; __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); @@ -163,7 +163,7 @@ index ce9100b5604d..839bfa76f41e 100644 status = -EBUSY; spin_lock(&dentry->d_lock); diff --git a/fs/proc/base.c b/fs/proc/base.c -index 81d77b15b347..2c0ac4338e17 100644 +index f999e8bd3771..bf9476600c73 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1872,7 +1872,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, @@ -176,7 +176,7 @@ index 81d77b15b347..2c0ac4338e17 100644 if (IS_ERR(child)) goto end_instantiate; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c -index d65390727541..abd4d1632e7c 100644 +index 7325baa8f9d4..31f25ff3999f 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -677,7 +677,7 @@ static bool proc_sys_fill_cache(struct file *file, @@ -189,7 +189,7 @@ index d65390727541..abd4d1632e7c 100644 if (IS_ERR(child)) return false; diff --git a/include/linux/dcache.h b/include/linux/dcache.h -index ef4b70f64f33..be6ab83705aa 100644 +index 0880baefd85f..8b4d6c8c1f7f 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -105,7 +105,7 @@ struct dentry { diff --git 
a/kernel/patches-4.19.x-rt/0191-workqueue-Use-normal-rcu.patch b/kernel/patches-4.19.x-rt/0191-workqueue-Use-normal-rcu.patch index 4f38270ca..801a7c283 100644 --- a/kernel/patches-4.19.x-rt/0191-workqueue-Use-normal-rcu.patch +++ b/kernel/patches-4.19.x-rt/0191-workqueue-Use-normal-rcu.patch @@ -1,7 +1,7 @@ -From e29f4dc4c3456a8de27d079dc97e6489b05b61b0 Mon Sep 17 00:00:00 2001 +From e3c37d47f9a87accb9845a5c0289a011017e0e3f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 24 Jul 2013 15:26:54 +0200 -Subject: [PATCH 191/269] workqueue: Use normal rcu +Subject: [PATCH 191/268] workqueue: Use normal rcu There is no need for sched_rcu. The undocumented reason why sched_rcu is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 52 insertions(+), 43 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 0280deac392e..ca8014edaa84 100644 +index cd8b61bded78..88d7db5e0105 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -127,7 +127,7 @@ enum { @@ -210,7 +210,7 @@ index 0280deac392e..ca8014edaa84 100644 return false; } -@@ -3341,7 +3346,7 @@ static void rcu_free_pool(struct rcu_head *rcu) +@@ -3344,7 +3349,7 @@ static void rcu_free_pool(struct rcu_head *rcu) * put_unbound_pool - put a worker_pool * @pool: worker_pool to put * @@ -219,7 +219,7 @@ index 0280deac392e..ca8014edaa84 100644 * safe manner. get_unbound_pool() calls this function on its failure path * and this function should be able to release pools which went through, * successfully or not, init_worker_pool(). -@@ -3395,8 +3400,8 @@ static void put_unbound_pool(struct worker_pool *pool) +@@ -3398,8 +3403,8 @@ static void put_unbound_pool(struct worker_pool *pool) del_timer_sync(&pool->idle_timer); del_timer_sync(&pool->mayday_timer); @@ -230,7 +230,7 @@ index 0280deac392e..ca8014edaa84 100644 } /** -@@ -3503,14 +3508,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work) +@@ -3506,14 +3511,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work) put_unbound_pool(pool); mutex_unlock(&wq_pool_mutex); @@ -247,7 +247,7 @@ index 0280deac392e..ca8014edaa84 100644 } /** -@@ -4195,7 +4200,7 @@ void destroy_workqueue(struct workqueue_struct *wq) +@@ -4198,7 +4203,7 @@ void destroy_workqueue(struct workqueue_struct *wq) * The base ref is never dropped on per-cpu pwqs. Directly * schedule RCU free. */ @@ -256,7 +256,7 @@ index 0280deac392e..ca8014edaa84 100644 } else { /* * We're the sole accessor of @wq at this point. 
Directly -@@ -4305,7 +4310,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) +@@ -4308,7 +4313,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) struct pool_workqueue *pwq; bool ret; @@ -266,7 +266,7 @@ index 0280deac392e..ca8014edaa84 100644 if (cpu == WORK_CPU_UNBOUND) cpu = smp_processor_id(); -@@ -4316,7 +4322,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) +@@ -4319,7 +4325,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); ret = !list_empty(&pwq->delayed_works); @@ -276,7 +276,7 @@ index 0280deac392e..ca8014edaa84 100644 return ret; } -@@ -4342,15 +4349,15 @@ unsigned int work_busy(struct work_struct *work) +@@ -4345,15 +4352,15 @@ unsigned int work_busy(struct work_struct *work) if (work_pending(work)) ret |= WORK_BUSY_PENDING; @@ -296,7 +296,7 @@ index 0280deac392e..ca8014edaa84 100644 return ret; } -@@ -4534,7 +4541,7 @@ void show_workqueue_state(void) +@@ -4537,7 +4544,7 @@ void show_workqueue_state(void) unsigned long flags; int pi; @@ -305,7 +305,7 @@ index 0280deac392e..ca8014edaa84 100644 pr_info("Showing busy workqueues and worker pools:\n"); -@@ -4599,7 +4606,7 @@ void show_workqueue_state(void) +@@ -4602,7 +4609,7 @@ void show_workqueue_state(void) touch_nmi_watchdog(); } @@ -314,7 +314,7 @@ index 0280deac392e..ca8014edaa84 100644 } /* used to show worker information through /proc/PID/{comm,stat,status} */ -@@ -4986,16 +4993,16 @@ bool freeze_workqueues_busy(void) +@@ -4989,16 +4996,16 @@ bool freeze_workqueues_busy(void) * nr_active is monotonically decreasing. It's safe * to peek without lock. */ @@ -334,7 +334,7 @@ index 0280deac392e..ca8014edaa84 100644 } out_unlock: mutex_unlock(&wq_pool_mutex); -@@ -5190,7 +5197,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, +@@ -5193,7 +5200,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, const char *delim = ""; int node, written = 0; @@ -344,7 +344,7 @@ index 0280deac392e..ca8014edaa84 100644 for_each_node(node) { written += scnprintf(buf + written, PAGE_SIZE - written, "%s%d:%d", delim, node, -@@ -5198,7 +5206,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, +@@ -5201,7 +5209,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, delim = " "; } written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); diff --git a/kernel/patches-4.19.x-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch b/kernel/patches-4.19.x-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch index 3b99a937e..26449eef4 100644 --- a/kernel/patches-4.19.x-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch +++ b/kernel/patches-4.19.x-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch @@ -1,7 +1,7 @@ -From 693d52e4cc082c2aafb8154ee7581e38f4c584d3 Mon Sep 17 00:00:00 2001 +From a760e3d3db919ed90f2a71a124ff2ba1239752f4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:42:26 +0200 -Subject: [PATCH 192/269] workqueue: Use local irq lock instead of irq disable +Subject: [PATCH 192/268] workqueue: Use local irq lock instead of irq disable regions Use a local_irq_lock as a replacement for irq off regions. 
We keep the @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 30 insertions(+), 15 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index ca8014edaa84..1e8b2ff804e3 100644 +index 88d7db5e0105..d168a5581c7f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -49,6 +49,7 @@ @@ -149,7 +149,7 @@ index ca8014edaa84..1e8b2ff804e3 100644 } /** -@@ -2999,7 +3014,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) +@@ -3002,7 +3017,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) /* tell other tasks trying to grab @work to back off */ mark_work_canceling(work); @@ -158,7 +158,7 @@ index ca8014edaa84..1e8b2ff804e3 100644 /* * This allows canceling during early boot. We know that @work -@@ -3060,10 +3075,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); +@@ -3063,10 +3078,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); */ bool flush_delayed_work(struct delayed_work *dwork) { @@ -171,7 +171,7 @@ index ca8014edaa84..1e8b2ff804e3 100644 return flush_work(&dwork->work); } EXPORT_SYMBOL(flush_delayed_work); -@@ -3101,7 +3116,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork) +@@ -3104,7 +3119,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork) return false; set_work_pool_and_clear_pending(work, get_work_pool_id(work)); diff --git a/kernel/patches-4.19.x-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch b/kernel/patches-4.19.x-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch index 6d700c3f1..92cb4f014 100644 --- a/kernel/patches-4.19.x-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch +++ b/kernel/patches-4.19.x-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch @@ -1,7 +1,7 @@ -From d874f4bd157934c3b8f5f30c0291b9716f86e849 Mon Sep 17 00:00:00 2001 +From 07649b01e48cbe190f7deb2cc1498ab8734ebc47 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 1 Jul 2013 11:02:42 +0200 -Subject: [PATCH 193/269] workqueue: Prevent workqueue versus ata-piix livelock +Subject: [PATCH 193/268] workqueue: Prevent workqueue versus ata-piix livelock An Intel i7 system regularly detected rcu_preempt stalls after the kernel was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no @@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 1e8b2ff804e3..f6551d189ca4 100644 +index d168a5581c7f..0a11d2f64424 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -50,6 +50,7 @@ diff --git a/kernel/patches-4.19.x-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch b/kernel/patches-4.19.x-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch index 1485b5ebb..848d6d45a 100644 --- a/kernel/patches-4.19.x-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch +++ b/kernel/patches-4.19.x-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch @@ -1,7 +1,7 @@ -From 4452796adea3514d123d9e41188dfcfc86adc6d0 Mon Sep 17 00:00:00 2001 +From 362bcb09ec2183fe4a7d43abf3924ffe9be6f071 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jun 2011 19:47:03 +0200 -Subject: [PATCH 194/269] sched: Distangle worker accounting from rqlock +Subject: [PATCH 194/268] sched: Distangle worker accounting from rqlock The worker accounting for cpu bound workers is plugged into the core scheduler code and the wakeup code. 
This is not a hard requirement and @@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior 3 files changed, 47 insertions(+), 100 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index fb205b1ec799..1cd1abc45097 100644 +index 36f791ff52bc..08052198031a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1704,10 +1704,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl @@ -169,7 +169,7 @@ index fb205b1ec799..1cd1abc45097 100644 EXPORT_SYMBOL(schedule); diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index f6551d189ca4..bf7be926ce5f 100644 +index 0a11d2f64424..aa39924bd3b5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -843,43 +843,32 @@ static void wake_up_worker(struct worker_pool *pool) diff --git a/kernel/patches-4.19.x-rt/0195-debugobjects-Make-RT-aware.patch b/kernel/patches-4.19.x-rt/0195-debugobjects-Make-RT-aware.patch index 067e08778..25851a9bd 100644 --- a/kernel/patches-4.19.x-rt/0195-debugobjects-Make-RT-aware.patch +++ b/kernel/patches-4.19.x-rt/0195-debugobjects-Make-RT-aware.patch @@ -1,7 +1,7 @@ -From bfbfd69e3adaeffcc546f391f1f039dd715b2d57 Mon Sep 17 00:00:00 2001 +From f4fdeabd2e1324863707187f288ee4c544046192 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:41:35 +0200 -Subject: [PATCH 195/269] debugobjects: Make RT aware +Subject: [PATCH 195/268] debugobjects: Make RT aware Avoid filling the pool / allocating memory with irqs off(). diff --git a/kernel/patches-4.19.x-rt/0196-seqlock-Prevent-rt-starvation.patch b/kernel/patches-4.19.x-rt/0196-seqlock-Prevent-rt-starvation.patch index ce7014c25..39c293be0 100644 --- a/kernel/patches-4.19.x-rt/0196-seqlock-Prevent-rt-starvation.patch +++ b/kernel/patches-4.19.x-rt/0196-seqlock-Prevent-rt-starvation.patch @@ -1,7 +1,7 @@ -From 62e2b0613933b1d4557d86f4557375a9ee647fa7 Mon Sep 17 00:00:00 2001 +From b3d4335aafea80587783e32061db060077554d7c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Feb 2012 12:03:30 +0100 -Subject: [PATCH 196/269] seqlock: Prevent rt starvation +Subject: [PATCH 196/268] seqlock: Prevent rt starvation If a low prio writer gets preempted while holding the seqlock write locked, a high prio reader spins forever on RT. 
diff --git a/kernel/patches-4.19.x-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/kernel/patches-4.19.x-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch index 3e9ff2643..4f8a791cb 100644 --- a/kernel/patches-4.19.x-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch +++ b/kernel/patches-4.19.x-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch @@ -1,7 +1,7 @@ -From b1572dc20a39a216ac1fbb36998f32af0f79b9ae Mon Sep 17 00:00:00 2001 +From a8814418fedb85297ffed1a4688372b20533d462 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 18 Feb 2015 16:05:28 +0100 -Subject: [PATCH 197/269] sunrpc: Make svc_xprt_do_enqueue() use +Subject: [PATCH 197/268] sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light() |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 diff --git a/kernel/patches-4.19.x-rt/0198-net-Use-skbufhead-with-raw-lock.patch b/kernel/patches-4.19.x-rt/0198-net-Use-skbufhead-with-raw-lock.patch index d119b921c..27f893f21 100644 --- a/kernel/patches-4.19.x-rt/0198-net-Use-skbufhead-with-raw-lock.patch +++ b/kernel/patches-4.19.x-rt/0198-net-Use-skbufhead-with-raw-lock.patch @@ -1,7 +1,7 @@ -From 4893c0317fda3cc20eac3b4bbfcdd808ef3db828 Mon Sep 17 00:00:00 2001 +From 74e3131e8138baa49cf40c50b4b433c3323a00be Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Jul 2011 15:38:34 +0200 -Subject: [PATCH 198/269] net: Use skbufhead with raw lock +Subject: [PATCH 198/268] net: Use skbufhead with raw lock Use the rps lock as rawlock so we can keep irq-off regions. It looks low latency. However we can't kfree() from this context therefore we defer this @@ -27,7 +27,7 @@ index 8c2fec0bcb26..384c63ecb9ae 100644 }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h -index 820903ceac4f..f7f3abb41acb 100644 +index 28baccb1efd5..b4412944db54 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -287,6 +287,7 @@ struct sk_buff_head { @@ -38,7 +38,7 @@ index 820903ceac4f..f7f3abb41acb 100644 }; struct sk_buff; -@@ -1702,6 +1703,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) +@@ -1704,6 +1705,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) __skb_queue_head_init(list); } @@ -52,7 +52,7 @@ index 820903ceac4f..f7f3abb41acb 100644 struct lock_class_key *class) { diff --git a/net/core/dev.c b/net/core/dev.c -index b8208b940b5d..327a985bf0c7 100644 +index abaf8a73403b..616429a4715c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -217,14 +217,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) diff --git a/kernel/patches-4.19.x-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/kernel/patches-4.19.x-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch index 53def4411..f230f4ecc 100644 --- a/kernel/patches-4.19.x-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch @@ -1,7 +1,7 @@ -From e6cdcf7dbf2aa921c55ed19673c775491efc2a75 Mon Sep 17 00:00:00 2001 +From 0978e6214358cf3fcc499500916fc616b6ba1193 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jan 2016 15:55:02 +0100 -Subject: [PATCH 199/269] net: move xmit_recursion to per-task variable on -RT +Subject: [PATCH 199/268] net: move xmit_recursion to per-task variable on -RT A softirq on -RT can be preempted. 
That means one task is in __dev_queue_xmit(), gets preempted and another task may enter @@ -197,7 +197,7 @@ index a023e1ba5d8f..a9a5edfa9689 100644 int pagefault_disabled; #ifdef CONFIG_MMU diff --git a/net/core/dev.c b/net/core/dev.c -index 327a985bf0c7..ee90223959fc 100644 +index 616429a4715c..1a8677236939 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3537,8 +3537,10 @@ static void skb_update_prio(struct sk_buff *skb) diff --git a/kernel/patches-4.19.x-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/kernel/patches-4.19.x-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch index c42b0e78b..de1976885 100644 --- a/kernel/patches-4.19.x-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch +++ b/kernel/patches-4.19.x-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch @@ -1,7 +1,7 @@ -From 0ba4f1b56a7639a293956b84416566f0211c8c77 Mon Sep 17 00:00:00 2001 +From 3e228646d2f374c2f898558bf902fd53df73ff03 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jan 2016 15:39:05 +0100 -Subject: [PATCH 200/269] net: provide a way to delegate processing a softirq +Subject: [PATCH 200/268] net: provide a way to delegate processing a softirq to ksoftirqd If the NET_RX uses up all of his budget it moves the following NAPI @@ -71,7 +71,7 @@ index 27a4bb2303d0..25bcf2f2714b 100644 * This function must run with irqs disabled! */ diff --git a/net/core/dev.c b/net/core/dev.c -index ee90223959fc..da95705ccb67 100644 +index 1a8677236939..0da36fb20153 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6382,7 +6382,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) diff --git a/kernel/patches-4.19.x-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/kernel/patches-4.19.x-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch index 8d4e95701..0db85147e 100644 --- a/kernel/patches-4.19.x-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch +++ b/kernel/patches-4.19.x-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch @@ -1,7 +1,7 @@ -From 9e7513a103f18db66ffaf2bcfd13c834cba602d7 Mon Sep 17 00:00:00 2001 +From aff3a2f1f04ebcf3d036033357d6ff327dc77384 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 30 Mar 2016 13:36:29 +0200 -Subject: [PATCH 201/269] net: dev: always take qdisc's busylock in +Subject: [PATCH 201/268] net: dev: always take qdisc's busylock in __dev_xmit_skb() The root-lock is dropped before dev_hard_start_xmit() is invoked and after @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 4 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c -index da95705ccb67..351e81f8a72d 100644 +index 0da36fb20153..305bf1240e8a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3465,7 +3465,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, diff --git a/kernel/patches-4.19.x-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch b/kernel/patches-4.19.x-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch index 62e3e6179..e83e3d7d1 100644 --- a/kernel/patches-4.19.x-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch +++ b/kernel/patches-4.19.x-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch @@ -1,7 +1,7 @@ -From 8f5f7360b52bbe5081ba3204a2004f6fdeb75114 Mon Sep 17 00:00:00 2001 +From d501fb01e4c03fe9bad99083b085f38d2332df8d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 14 Sep 2016 17:36:35 +0200 -Subject: [PATCH 202/269] net/Qdisc: use a 
seqlock instead seqcount +Subject: [PATCH 202/268] net/Qdisc: use a seqlock instead seqcount The seqcount disables preemption on -RT while it is held which can't remove. Also we don't want the reader to spin for ages if the writer is diff --git a/kernel/patches-4.19.x-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch b/kernel/patches-4.19.x-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch index cf4e2292b..8f002d828 100644 --- a/kernel/patches-4.19.x-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch +++ b/kernel/patches-4.19.x-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch @@ -1,7 +1,7 @@ -From de40c876cec758a0735fda3a4dffd05924f12a4b Mon Sep 17 00:00:00 2001 +From 0538ba3efa31e5af2fefbd88fe76103981d512cf Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 31 Aug 2016 17:21:56 +0200 -Subject: [PATCH 203/269] net: add back the missing serialization in +Subject: [PATCH 203/268] net: add back the missing serialization in ip_send_unicast_reply() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 diff --git a/kernel/patches-4.19.x-rt/0204-net-add-a-lock-around-icmp_sk.patch b/kernel/patches-4.19.x-rt/0204-net-add-a-lock-around-icmp_sk.patch index f4d24b875..51b76fedc 100644 --- a/kernel/patches-4.19.x-rt/0204-net-add-a-lock-around-icmp_sk.patch +++ b/kernel/patches-4.19.x-rt/0204-net-add-a-lock-around-icmp_sk.patch @@ -1,7 +1,7 @@ -From c35d9dd75bf9f6d2e39202e23d04a8850172240f Mon Sep 17 00:00:00 2001 +From 7b8e2481727fd1d4a6b292964b2d8af45a53e0b7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 31 Aug 2016 17:54:09 +0200 -Subject: [PATCH 204/269] net: add a lock around icmp_sk() +Subject: [PATCH 204/268] net: add a lock around icmp_sk() It looks like the this_cpu_ptr() access in icmp_sk() is protected with local_bh_disable(). To avoid missing serialization in -RT I am adding diff --git a/kernel/patches-4.19.x-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/kernel/patches-4.19.x-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch index 7fc11ad5d..1453a76c3 100644 --- a/kernel/patches-4.19.x-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch +++ b/kernel/patches-4.19.x-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch @@ -1,7 +1,7 @@ -From bdd2169d3d5cc93fcaca144c2166ac375331e25d Mon Sep 17 00:00:00 2001 +From 17c4aca36cbb68d9df8945b5a1eadfca5dbad225 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 6 Dec 2016 17:50:30 -0500 -Subject: [PATCH 205/269] net: Have __napi_schedule_irqoff() disable interrupts +Subject: [PATCH 205/268] net: Have __napi_schedule_irqoff() disable interrupts on RT A customer hit a crash where the napi sd->poll_list became corrupted. 
@@ -52,7 +52,7 @@ index b6a75296eb46..946875cae933 100644 static inline bool napi_disable_pending(struct napi_struct *n) { diff --git a/net/core/dev.c b/net/core/dev.c -index 351e81f8a72d..50fe1e3ee26d 100644 +index 305bf1240e8a..d86972449f63 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5952,6 +5952,7 @@ bool napi_schedule_prep(struct napi_struct *n) diff --git a/kernel/patches-4.19.x-rt/0206-irqwork-push-most-work-into-softirq-context.patch b/kernel/patches-4.19.x-rt/0206-irqwork-push-most-work-into-softirq-context.patch index eb9be8bcc..5c0dfdcb3 100644 --- a/kernel/patches-4.19.x-rt/0206-irqwork-push-most-work-into-softirq-context.patch +++ b/kernel/patches-4.19.x-rt/0206-irqwork-push-most-work-into-softirq-context.patch @@ -1,7 +1,7 @@ -From 01a7f110c5d6b059012d7f6cf4c1b3af79253a7c Mon Sep 17 00:00:00 2001 +From 778b3f11b6a4fbea6c57ac0482509d90187009c9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 23 Jun 2015 15:32:51 +0200 -Subject: [PATCH 206/269] irqwork: push most work into softirq context +Subject: [PATCH 206/268] irqwork: push most work into softirq context Initially we defered all irqwork into softirq because we didn't want the latency spikes if perf or another user was busy and delayed the RT task. @@ -22,13 +22,13 @@ Mike Galbraith, hard and soft variant] Signed-off-by: Sebastian Andrzej Siewior --- - include/linux/irq_work.h | 8 ++++++ - kernel/irq_work.c | 60 +++++++++++++++++++++++++++++++--------- + include/linux/irq_work.h | 8 +++++ + kernel/irq_work.c | 75 ++++++++++++++++++++++++++++++---------- kernel/rcu/tree.c | 1 + kernel/sched/topology.c | 1 + kernel/time/tick-sched.c | 1 + kernel/time/timer.c | 2 ++ - 6 files changed, 60 insertions(+), 13 deletions(-) + 6 files changed, 70 insertions(+), 18 deletions(-) diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index b11fcdfd0770..0c50559987c5 100644 @@ -55,7 +55,7 @@ index b11fcdfd0770..0c50559987c5 100644 + #endif /* _LINUX_IRQ_WORK_H */ diff --git a/kernel/irq_work.c b/kernel/irq_work.c -index 6b7cdf17ccf8..7b41d9aa3e9b 100644 +index 73288914ed5e..2940622da5b3 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -17,6 +17,7 @@ @@ -66,43 +66,13 @@ index 6b7cdf17ccf8..7b41d9aa3e9b 100644 #include -@@ -64,6 +65,8 @@ void __weak arch_irq_work_raise(void) - */ - bool irq_work_queue_on(struct irq_work *work, int cpu) +@@ -57,29 +58,35 @@ void __weak arch_irq_work_raise(void) + } + + /* Enqueue on current CPU, work must already be claimed and preempt disabled */ +-static void __irq_work_queue_local(struct irq_work *work) ++static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list) { -+ struct llist_head *list; -+ - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(cpu)); - -@@ -76,7 +79,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) - if (!irq_work_claim(work)) - return false; - -- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) -+ list = &per_cpu(lazy_list, cpu); -+ else -+ list = &per_cpu(raised_list, cpu); -+ -+ if (llist_add(&work->llnode, list)) - arch_send_call_function_single_ipi(cpu); - - #else /* #ifdef CONFIG_SMP */ -@@ -89,6 +97,9 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) - /* Enqueue the irq work @work on the current CPU */ - bool irq_work_queue(struct irq_work *work) - { -+ struct llist_head *list; -+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); -+ - /* Only queue 
if not already pending */ - if (!irq_work_claim(work)) - return false; -@@ -96,13 +107,15 @@ bool irq_work_queue(struct irq_work *work) - /* Queue the entry and raise the IPI if needed. */ - preempt_disable(); - - /* If the work is "lazy", handle it from next tick if any */ - if (work->flags & IRQ_WORK_LAZY) { - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && @@ -110,19 +80,74 @@ index 6b7cdf17ccf8..7b41d9aa3e9b 100644 - arch_irq_work_raise(); - } else { - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) -+ lazy_work = work->flags & IRQ_WORK_LAZY; +- arch_irq_work_raise(); +- } ++ bool empty; + -+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) ++ empty = llist_add(&work->llnode, list); ++ ++ if (empty && ++ (!(work->flags & IRQ_WORK_LAZY) || ++ tick_nohz_tick_stopped())) ++ arch_irq_work_raise(); + } + + /* Enqueue the irq work @work on the current CPU */ + bool irq_work_queue(struct irq_work *work) + { ++ struct llist_head *list; ++ + /* Only queue if not already pending */ + if (!irq_work_claim(work)) + return false; + + /* Queue the entry and raise the IPI if needed. */ + preempt_disable(); +- __irq_work_queue_local(work); ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) + list = this_cpu_ptr(&lazy_list); + else + list = this_cpu_ptr(&raised_list); + -+ if (llist_add(&work->llnode, list)) { -+ if (!lazy_work || tick_nohz_tick_stopped()) - arch_irq_work_raise(); - } ++ __irq_work_queue_local(work, list); + preempt_enable(); -@@ -119,9 +132,8 @@ bool irq_work_needs_cpu(void) + return true; +@@ -98,6 +105,9 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) + return irq_work_queue(work); + + #else /* CONFIG_SMP: */ ++ struct llist_head *list; ++ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); ++ + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(cpu)); + +@@ -106,13 +116,21 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) + return false; + + preempt_disable(); ++ ++ lazy_work = work->flags & IRQ_WORK_LAZY; ++ ++ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) ++ list = &per_cpu(lazy_list, cpu); ++ else ++ list = &per_cpu(raised_list, cpu); ++ + if (cpu != smp_processor_id()) { + /* Arch remote IPI send/receive backend aren't NMI safe */ + WARN_ON_ONCE(in_nmi()); +- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) ++ if (llist_add(&work->llnode, list)) + arch_send_call_function_single_ipi(cpu); + } else { +- __irq_work_queue_local(work); ++ __irq_work_queue_local(work, list); + } + preempt_enable(); + +@@ -128,9 +146,8 @@ bool irq_work_needs_cpu(void) raised = this_cpu_ptr(&raised_list); lazy = this_cpu_ptr(&lazy_list); @@ -134,7 +159,7 @@ index 6b7cdf17ccf8..7b41d9aa3e9b 100644 /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); -@@ -135,8 +147,12 @@ static void irq_work_run_list(struct llist_head *list) +@@ -144,8 +161,12 @@ static void irq_work_run_list(struct llist_head *list) struct llist_node *llnode; unsigned long flags; @@ -148,7 +173,7 @@ index 6b7cdf17ccf8..7b41d9aa3e9b 100644 if (llist_empty(list)) return; -@@ -168,7 +184,16 @@ static void irq_work_run_list(struct llist_head *list) +@@ -177,7 +198,16 @@ static void irq_work_run_list(struct llist_head *list) void irq_work_run(void) { irq_work_run_list(this_cpu_ptr(&raised_list)); @@ -166,7 +191,7 @@ index 6b7cdf17ccf8..7b41d9aa3e9b 100644 } EXPORT_SYMBOL_GPL(irq_work_run); -@@ -178,8 +203,17 @@ void 
irq_work_tick(void) +@@ -187,8 +217,17 @@ void irq_work_tick(void) if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) irq_work_run_list(raised); diff --git a/kernel/patches-4.19.x-rt/0207-printk-Make-rt-aware.patch b/kernel/patches-4.19.x-rt/0207-printk-Make-rt-aware.patch index 4d4b87b22..8b1d1d75f 100644 --- a/kernel/patches-4.19.x-rt/0207-printk-Make-rt-aware.patch +++ b/kernel/patches-4.19.x-rt/0207-printk-Make-rt-aware.patch @@ -1,7 +1,7 @@ -From 4d49bcfa2103be6571f2f53e06e8fa71d49feb9b Mon Sep 17 00:00:00 2001 +From fe95afa24ff0e274547225ca1b67655341576a90 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Sep 2012 14:50:37 +0200 -Subject: [PATCH 207/269] printk: Make rt aware +Subject: [PATCH 207/268] printk: Make rt aware Drop the lock before calling the console driver and do not disable interrupts while printing to a serial console. diff --git a/kernel/patches-4.19.x-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/kernel/patches-4.19.x-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch index fd480ad8a..bd74f70c4 100644 --- a/kernel/patches-4.19.x-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch +++ b/kernel/patches-4.19.x-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch @@ -1,7 +1,7 @@ -From 160a19dcfe1a664e430a678562901a32630f7ee2 Mon Sep 17 00:00:00 2001 +From 465429cd60203ac29544c5597dc2048c699d9573 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 19 May 2016 17:45:27 +0200 -Subject: [PATCH 208/269] kernel/printk: Don't try to print from IRQ/NMI region +Subject: [PATCH 208/268] kernel/printk: Don't try to print from IRQ/NMI region On -RT we try to acquire sleeping locks which might lead to warnings from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on diff --git a/kernel/patches-4.19.x-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch b/kernel/patches-4.19.x-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch index da49f51d3..b271aeed2 100644 --- a/kernel/patches-4.19.x-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch +++ b/kernel/patches-4.19.x-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch @@ -1,7 +1,7 @@ -From bf31931f09583088100f40d4c4b255571cc72578 Mon Sep 17 00:00:00 2001 +From 3b86d75b24e224d91346205c94d36a90d0daa695 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 21 Mar 2013 19:01:05 +0100 -Subject: [PATCH 209/269] printk: Drop the logbuf_lock more often +Subject: [PATCH 209/268] printk: Drop the logbuf_lock more often The lock is hold with irgs off. The latency drops 500us+ on my arm bugs with a "full" buffer after executing "dmesg" on the shell. 
diff --git a/kernel/patches-4.19.x-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch b/kernel/patches-4.19.x-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch index e379a2146..4b94ac23f 100644 --- a/kernel/patches-4.19.x-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch +++ b/kernel/patches-4.19.x-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch @@ -1,7 +1,7 @@ -From 740bf3655673f2b77230957eb21238798aa0b203 Mon Sep 17 00:00:00 2001 +From e6b9951b4a32e58860c21d934897143be2c8e60c Mon Sep 17 00:00:00 2001 From: "Yadi.hu" Date: Wed, 10 Dec 2014 10:32:09 +0800 -Subject: [PATCH 210/269] ARM: enable irq in translation/section permission +Subject: [PATCH 210/268] ARM: enable irq in translation/section permission fault handlers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 diff --git a/kernel/patches-4.19.x-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch b/kernel/patches-4.19.x-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch index 45139f702..2af6f21be 100644 --- a/kernel/patches-4.19.x-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch +++ b/kernel/patches-4.19.x-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch @@ -1,7 +1,7 @@ -From 9179df818d04fdf3d3cc195a5d19fac4b4c904f1 Mon Sep 17 00:00:00 2001 +From 7363a96960b8c2b297273a23b1a23af568962cfb Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 11 Feb 2016 11:54:00 -0600 -Subject: [PATCH 211/269] genirq: update irq_set_irqchip_state documentation +Subject: [PATCH 211/268] genirq: update irq_set_irqchip_state documentation On -rt kernels, the use of migrate_disable()/migrate_enable() is sufficient to guarantee a task isn't moved to another CPU. Update the @@ -14,10 +14,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index ba5bba5f1ffd..48c2690070f3 100644 +index 69b4bfd4654c..aafe2256bd39 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -2277,7 +2277,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); +@@ -2282,7 +2282,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); * This call sets the internal irqchip state of an interrupt, * depending on the value of @which. 
* diff --git a/kernel/patches-4.19.x-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/kernel/patches-4.19.x-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch index b183d9b0b..c25a99ec1 100644 --- a/kernel/patches-4.19.x-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch +++ b/kernel/patches-4.19.x-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch @@ -1,7 +1,7 @@ -From 7635f97cb803db25caa49d5fd48ecb46672272d9 Mon Sep 17 00:00:00 2001 +From d3e608bd6ba544ecdb306b66afccd053e55f81c6 Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 11 Feb 2016 11:54:01 -0600 -Subject: [PATCH 212/269] KVM: arm/arm64: downgrade preempt_disable()d region +Subject: [PATCH 212/268] KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable() kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating @@ -23,10 +23,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c -index 1415e36fed3d..8d8caad49eb6 100644 +index 02bac8abd206..d36802fe2825 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c -@@ -709,7 +709,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +@@ -712,7 +712,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * involves poking the GIC, which must be done in a * non-preemptible context. */ @@ -35,7 +35,7 @@ index 1415e36fed3d..8d8caad49eb6 100644 kvm_pmu_flush_hwstate(vcpu); -@@ -758,7 +758,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +@@ -761,7 +761,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); local_irq_enable(); @@ -44,7 +44,7 @@ index 1415e36fed3d..8d8caad49eb6 100644 continue; } -@@ -836,7 +836,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +@@ -839,7 +839,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) /* Exit types that need handling before we can be preempted */ handle_exit_early(vcpu, run, ret); diff --git a/kernel/patches-4.19.x-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/kernel/patches-4.19.x-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch index 10bb1711e..d3699a4f1 100644 --- a/kernel/patches-4.19.x-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch +++ b/kernel/patches-4.19.x-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch @@ -1,7 +1,7 @@ -From 25f8f6ec0e7c56b6029b247d513eec0ba512da9b Mon Sep 17 00:00:00 2001 +From 6b294425c012e6e77ff2f11ff45f6828b9e33eb2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 25 Jul 2018 14:02:38 +0200 -Subject: [PATCH 213/269] arm64: fpsimd: use preemp_disable in addition to +Subject: [PATCH 213/268] arm64: fpsimd: use preemp_disable in addition to local_bh_disable() In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). 
The diff --git a/kernel/patches-4.19.x-rt/0214-kgdb-serial-Short-term-workaround.patch b/kernel/patches-4.19.x-rt/0214-kgdb-serial-Short-term-workaround.patch index 8f65bd685..5276508fd 100644 --- a/kernel/patches-4.19.x-rt/0214-kgdb-serial-Short-term-workaround.patch +++ b/kernel/patches-4.19.x-rt/0214-kgdb-serial-Short-term-workaround.patch @@ -1,7 +1,7 @@ -From b9a4d200f0fc873f1ad960b730b283ea779c74a4 Mon Sep 17 00:00:00 2001 +From 8daad04cf92b929c682cb97f546645e4301b5567 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 28 Jul 2011 12:42:23 -0500 -Subject: [PATCH 214/269] kgdb/serial: Short term workaround +Subject: [PATCH 214/268] kgdb/serial: Short term workaround On 07/27/2011 04:37 PM, Thomas Gleixner wrote: > - KGDB (not yet disabled) is reportedly unusable on -rt right now due diff --git a/kernel/patches-4.19.x-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch b/kernel/patches-4.19.x-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch index 4e3a81ff6..32116ed7a 100644 --- a/kernel/patches-4.19.x-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch +++ b/kernel/patches-4.19.x-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch @@ -1,7 +1,7 @@ -From 65880324093a78662b662259e6d79ad55ac8a4bf Mon Sep 17 00:00:00 2001 +From 29d5795cda74974977e2766d6c2a34da4924cc77 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Sat, 30 Jul 2011 21:55:53 -0500 -Subject: [PATCH 215/269] sysfs: Add /sys/kernel/realtime entry +Subject: [PATCH 215/268] sysfs: Add /sys/kernel/realtime entry Add a /sys/kernel entry to indicate that the kernel is a realtime kernel. diff --git a/kernel/patches-4.19.x-rt/0216-mm-rt-kmap_atomic-scheduling.patch b/kernel/patches-4.19.x-rt/0216-mm-rt-kmap_atomic-scheduling.patch index 465c28745..5990428e6 100644 --- a/kernel/patches-4.19.x-rt/0216-mm-rt-kmap_atomic-scheduling.patch +++ b/kernel/patches-4.19.x-rt/0216-mm-rt-kmap_atomic-scheduling.patch @@ -1,7 +1,7 @@ -From e8dfb76eeb36e00d6827406f9b0d110eee60a084 Mon Sep 17 00:00:00 2001 +From eea2f1ac38ce1affeba31e7e4b1e79d475e1078d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Jul 2011 10:43:51 +0200 -Subject: [PATCH 216/269] mm, rt: kmap_atomic scheduling +Subject: [PATCH 216/268] mm, rt: kmap_atomic scheduling In fact, with migrate_disable() existing one could play games with kmap_atomic. You could save/restore the kmap_atomic slots on context @@ -30,7 +30,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins 7 files changed, 88 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c -index d3e593eb189f..84afe55625f8 100644 +index 020efe0f9614..5d0c975559ad 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -38,6 +38,7 @@ @@ -41,7 +41,7 @@ index d3e593eb189f..84afe55625f8 100644 #include #include -@@ -198,6 +199,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) +@@ -205,6 +206,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) } EXPORT_SYMBOL_GPL(start_thread); @@ -77,7 +77,7 @@ index d3e593eb189f..84afe55625f8 100644 /* * switch_to(x,y) should switch tasks from x to y. 
-@@ -267,6 +297,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) +@@ -274,6 +304,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_to_extra(prev_p, next_p); diff --git a/kernel/patches-4.19.x-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch b/kernel/patches-4.19.x-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch index e7224911e..1c041f260 100644 --- a/kernel/patches-4.19.x-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch +++ b/kernel/patches-4.19.x-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch @@ -1,7 +1,7 @@ -From c22bb5db4da4e6b17aa8a6387ffcd503dea51ec5 Mon Sep 17 00:00:00 2001 +From 65b251a960fc18a5a2e8b01902c4a1b4f76930e5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 17:09:55 +0100 -Subject: [PATCH 217/269] x86/highmem: Add a "already used pte" check +Subject: [PATCH 217/268] x86/highmem: Add a "already used pte" check This is a copy from kmap_atomic_prot(). diff --git a/kernel/patches-4.19.x-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch b/kernel/patches-4.19.x-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch index eae79a306..d3a0c858c 100644 --- a/kernel/patches-4.19.x-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch +++ b/kernel/patches-4.19.x-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch @@ -1,7 +1,7 @@ -From fba4ff7b8883d22067b9453a1d158c520f067b70 Mon Sep 17 00:00:00 2001 +From 953d5ce85b3a6927cc557f046c1fd2ae21345d46 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 21:37:27 +0100 -Subject: [PATCH 218/269] arm/highmem: Flush tlb on unmap +Subject: [PATCH 218/268] arm/highmem: Flush tlb on unmap The tlb should be flushed on unmap and thus make the mapping entry invalid. This is only done in the non-debug case which does not look diff --git a/kernel/patches-4.19.x-rt/0219-arm-Enable-highmem-for-rt.patch b/kernel/patches-4.19.x-rt/0219-arm-Enable-highmem-for-rt.patch index 621a0ec00..05d8f1fe5 100644 --- a/kernel/patches-4.19.x-rt/0219-arm-Enable-highmem-for-rt.patch +++ b/kernel/patches-4.19.x-rt/0219-arm-Enable-highmem-for-rt.patch @@ -1,7 +1,7 @@ -From 1a0e06d9a75c6d9d6ec21e345030430e78e81a84 Mon Sep 17 00:00:00 2001 +From 85b324f15d13d7122adee8404b78b5fc692e7ece Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Feb 2013 11:03:11 +0100 -Subject: [PATCH 219/269] arm: Enable highmem for rt +Subject: [PATCH 219/268] arm: Enable highmem for rt fixup highmem for ARM. diff --git a/kernel/patches-4.19.x-rt/0220-scsi-fcoe-Make-RT-aware.patch b/kernel/patches-4.19.x-rt/0220-scsi-fcoe-Make-RT-aware.patch index b59acb33c..3f9c504b6 100644 --- a/kernel/patches-4.19.x-rt/0220-scsi-fcoe-Make-RT-aware.patch +++ b/kernel/patches-4.19.x-rt/0220-scsi-fcoe-Make-RT-aware.patch @@ -1,7 +1,7 @@ -From f4644bebeab291324244e2cb3d957c692cec7168 Mon Sep 17 00:00:00 2001 +From 51b529bfe4f44e5e7a13a52273aca383cf831e3c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 12 Nov 2011 14:00:48 +0100 -Subject: [PATCH 220/269] scsi/fcoe: Make RT aware. +Subject: [PATCH 220/268] scsi/fcoe: Make RT aware. Do not disable preemption while taking sleeping locks. All user look safe for migrate_diable() only. 
diff --git a/kernel/patches-4.19.x-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch b/kernel/patches-4.19.x-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch index c74f8ad4a..8c981bb9c 100644 --- a/kernel/patches-4.19.x-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch +++ b/kernel/patches-4.19.x-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch @@ -1,7 +1,7 @@ -From 3f5be0658bbd8160961eec6f903d89aad36f03f1 Mon Sep 17 00:00:00 2001 +From ead629ca3a3d22e77e5a0fa3203542549b2a060b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Nov 2011 18:19:27 +0100 -Subject: [PATCH 221/269] x86: crypto: Reduce preempt disabled regions +Subject: [PATCH 221/268] x86: crypto: Reduce preempt disabled regions Restrict the preempt disabled regions to the actual floating point operations and enable preemption for the administrative actions. diff --git a/kernel/patches-4.19.x-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch b/kernel/patches-4.19.x-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch index ef6d5f4bf..e044ea222 100644 --- a/kernel/patches-4.19.x-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch +++ b/kernel/patches-4.19.x-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch @@ -1,7 +1,7 @@ -From e17c7ea4fb043fe1d4e89e4a42ff80b20d157f12 Mon Sep 17 00:00:00 2001 +From 61a4f04386766f585ad475f63f249bacc7d22278 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 21 Feb 2014 17:24:04 +0100 -Subject: [PATCH 222/269] crypto: Reduce preempt disabled regions, more algos +Subject: [PATCH 222/268] crypto: Reduce preempt disabled regions, more algos Don Estabrook reported | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() diff --git a/kernel/patches-4.19.x-rt/0223-crypto-limit-more-FPU-enabled-sections.patch b/kernel/patches-4.19.x-rt/0223-crypto-limit-more-FPU-enabled-sections.patch index 65e5c34c2..982085bac 100644 --- a/kernel/patches-4.19.x-rt/0223-crypto-limit-more-FPU-enabled-sections.patch +++ b/kernel/patches-4.19.x-rt/0223-crypto-limit-more-FPU-enabled-sections.patch @@ -1,7 +1,7 @@ -From da94fdf57dbc4e55dd359d103c8f61cc2811f47c Mon Sep 17 00:00:00 2001 +From 6555c5da9c6903d1fb1af6a7feaabfc3415f393e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 30 Nov 2017 13:40:10 +0100 -Subject: [PATCH 223/269] crypto: limit more FPU-enabled sections +Subject: [PATCH 223/268] crypto: limit more FPU-enabled sections MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @@ -68,11 +68,11 @@ index dce7c5d39c2f..6194160b7fbc 100644 } diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h -index a9caac9d4a72..18b31f22ca5d 100644 +index b56d504af654..e51c7094075d 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h -@@ -25,6 +25,7 @@ extern void __kernel_fpu_begin(void); - extern void __kernel_fpu_end(void); +@@ -20,6 +20,7 @@ + */ extern void kernel_fpu_begin(void); extern void kernel_fpu_end(void); +extern void kernel_fpu_resched(void); @@ -80,10 +80,10 @@ index a9caac9d4a72..18b31f22ca5d 100644 /* diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c -index 2ea85b32421a..6914dc569d1e 100644 +index 2e5003fef51a..768c53767bb2 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c -@@ -138,6 +138,18 @@ void kernel_fpu_end(void) +@@ -136,6 +136,18 @@ void kernel_fpu_end(void) } EXPORT_SYMBOL_GPL(kernel_fpu_end); diff --git 
a/kernel/patches-4.19.x-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch b/kernel/patches-4.19.x-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch index 7264edc0c..44e2f7bfe 100644 --- a/kernel/patches-4.19.x-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch +++ b/kernel/patches-4.19.x-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch @@ -1,7 +1,7 @@ -From d46edae98108392143e56a64ada43af295b537a9 Mon Sep 17 00:00:00 2001 +From ceff40742ef6d19e8620a89092524380f46ea9e9 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 11 Jul 2018 17:14:47 +0200 -Subject: [PATCH 224/269] crypto: scompress - serialize RT percpu scratch +Subject: [PATCH 224/268] crypto: scompress - serialize RT percpu scratch buffer access with a local lock | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:974 diff --git a/kernel/patches-4.19.x-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/kernel/patches-4.19.x-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch index 669fbef4b..949948ae3 100644 --- a/kernel/patches-4.19.x-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch +++ b/kernel/patches-4.19.x-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch @@ -1,7 +1,7 @@ -From b1616c1d9f52000a3614707e3c3ffe2b63c5fde9 Mon Sep 17 00:00:00 2001 +From 147f9e0fa93fb03bce31ee35f51c29a8cda94666 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 18:52:00 +0200 -Subject: [PATCH 225/269] crypto: cryptd - add a lock instead +Subject: [PATCH 225/268] crypto: cryptd - add a lock instead preempt_disable/local_bh_disable cryptd has a per-CPU lock which protected with local_bh_disable() and diff --git a/kernel/patches-4.19.x-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch b/kernel/patches-4.19.x-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch index 2263b74f8..f640cf962 100644 --- a/kernel/patches-4.19.x-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch +++ b/kernel/patches-4.19.x-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch @@ -1,7 +1,7 @@ -From c3ce683225b678190d7c42bd8bc695ad74595ac8 Mon Sep 17 00:00:00 2001 +From a58cde5b2641d0fa53a57bfc89eb238ab8e83a21 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 226/269] panic: skip get_random_bytes for RT_FULL in +Subject: [PATCH 226/268] panic: skip get_random_bytes for RT_FULL in init_oops_id Disable on -RT. If this is invoked from irq-context we will have problems diff --git a/kernel/patches-4.19.x-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch b/kernel/patches-4.19.x-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch index 23fee6867..df385ec6c 100644 --- a/kernel/patches-4.19.x-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch +++ b/kernel/patches-4.19.x-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch @@ -1,7 +1,7 @@ -From 3daaf6574c9be1128d8384deff5de6c53bc2712f Mon Sep 17 00:00:00 2001 +From 39c8ab81df7e133ea2a8fee04ac9b52566029a02 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 16 Dec 2010 14:25:18 +0100 -Subject: [PATCH 227/269] x86: stackprotector: Avoid random pool on rt +Subject: [PATCH 227/268] x86: stackprotector: Avoid random pool on rt CPU bringup calls into the random pool to initialize the stack canary. 
During boot that works nicely even on RT as the might sleep diff --git a/kernel/patches-4.19.x-rt/0228-random-Make-it-work-on-rt.patch b/kernel/patches-4.19.x-rt/0228-random-Make-it-work-on-rt.patch index 09eb38f87..54f5088e5 100644 --- a/kernel/patches-4.19.x-rt/0228-random-Make-it-work-on-rt.patch +++ b/kernel/patches-4.19.x-rt/0228-random-Make-it-work-on-rt.patch @@ -1,7 +1,7 @@ -From 5310182891f60d9a88c1abbc7512eca69f680a99 Mon Sep 17 00:00:00 2001 +From fee18c5914434c940ccc463c9185e6153a6a2266 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 21 Aug 2012 20:38:50 +0200 -Subject: [PATCH 228/269] random: Make it work on rt +Subject: [PATCH 228/268] random: Make it work on rt Delegate the random insertion to the forced threaded interrupt handler. Store the return IP of the hard interrupt handler in the irq @@ -20,10 +20,10 @@ Signed-off-by: Thomas Gleixner 7 files changed, 26 insertions(+), 10 deletions(-) diff --git a/drivers/char/random.c b/drivers/char/random.c -index c75b6cdf0053..4c20da67edd5 100644 +index 0a84b7f468ad..75ae2d9e8720 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c -@@ -1229,28 +1229,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) +@@ -1232,28 +1232,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) return *ptr; } @@ -58,7 +58,7 @@ index c75b6cdf0053..4c20da67edd5 100644 fast_mix(fast_pool); add_interrupt_bench(cycles); diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c -index 748a1c4172a6..4258244fa314 100644 +index 8e923e70e594..e77716a62351 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -112,10 +112,12 @@ int hv_post_message(union hv_connection_id connection_id, @@ -145,10 +145,10 @@ index 38554bc35375..06a80bbf78af 100644 if (!noirqdebug) note_interrupt(desc, retval); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 48c2690070f3..9d7be2c33d19 100644 +index aafe2256bd39..7f4041357d2f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -1079,6 +1079,12 @@ static int irq_thread(void *data) +@@ -1084,6 +1084,12 @@ static int irq_thread(void *data) if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); diff --git a/kernel/patches-4.19.x-rt/0230-cpu-hotplug-Implement-CPU-pinning.patch b/kernel/patches-4.19.x-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch similarity index 94% rename from kernel/patches-4.19.x-rt/0230-cpu-hotplug-Implement-CPU-pinning.patch rename to kernel/patches-4.19.x-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch index 702b05839..ff12dec28 100644 --- a/kernel/patches-4.19.x-rt/0230-cpu-hotplug-Implement-CPU-pinning.patch +++ b/kernel/patches-4.19.x-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch @@ -1,7 +1,7 @@ -From d4c787bcf728f34398550a7ad54acb389cd41654 Mon Sep 17 00:00:00 2001 +From d9742afe3b864bbca10b951c7ed0b8b972aee020 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Jul 2017 17:31:20 +0200 -Subject: [PATCH 230/269] cpu/hotplug: Implement CPU pinning +Subject: [PATCH 229/268] cpu/hotplug: Implement CPU pinning Signed-off-by: Thomas Gleixner --- @@ -22,7 +22,7 @@ index 76e6cdafb992..0445d5c7ced0 100644 int migrate_disable_atomic; # endif diff --git a/kernel/cpu.c b/kernel/cpu.c -index f684f41492d3..3340c4f873ad 100644 +index e1efb98a56de..4f9121244618 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -75,6 +75,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { diff --git a/kernel/patches-4.19.x-rt/0229-random-avoid-preempt_disable-ed-section.patch 
b/kernel/patches-4.19.x-rt/0229-random-avoid-preempt_disable-ed-section.patch deleted file mode 100644 index cfaf35113..000000000 --- a/kernel/patches-4.19.x-rt/0229-random-avoid-preempt_disable-ed-section.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 58450ccb54ddabe50f8c0990f4ea69f7cdaabdac Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 12 May 2017 15:46:17 +0200 -Subject: [PATCH 229/269] random: avoid preempt_disable()ed section - -extract_crng() will use sleeping locks while in a preempt_disable() -section due to get_cpu_var(). -Work around it with local_locks. - -Cc: stable-rt@vger.kernel.org # where it applies to -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/char/random.c | 11 +++++++---- - 1 file changed, 7 insertions(+), 4 deletions(-) - -diff --git a/drivers/char/random.c b/drivers/char/random.c -index 4c20da67edd5..91c1972b6a17 100644 ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -265,6 +265,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -2223,6 +2224,7 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_ - * at any point prior. - */ - static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); -+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock); - u64 get_random_u64(void) - { - u64 ret; -@@ -2243,7 +2245,7 @@ u64 get_random_u64(void) - warn_unseeded_randomness(&previous); - - use_lock = READ_ONCE(crng_init) < 2; -- batch = &get_cpu_var(batched_entropy_u64); -+ batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64); - if (use_lock) - read_lock_irqsave(&batched_entropy_reset_lock, flags); - if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { -@@ -2253,12 +2255,13 @@ u64 get_random_u64(void) - ret = batch->entropy_u64[batch->position++]; - if (use_lock) - read_unlock_irqrestore(&batched_entropy_reset_lock, flags); -- put_cpu_var(batched_entropy_u64); -+ put_locked_var(batched_entropy_u64_lock, batched_entropy_u64); - return ret; - } - EXPORT_SYMBOL(get_random_u64); - - static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); -+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock); - u32 get_random_u32(void) - { - u32 ret; -@@ -2273,7 +2276,7 @@ u32 get_random_u32(void) - warn_unseeded_randomness(&previous); - - use_lock = READ_ONCE(crng_init) < 2; -- batch = &get_cpu_var(batched_entropy_u32); -+ batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32); - if (use_lock) - read_lock_irqsave(&batched_entropy_reset_lock, flags); - if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { -@@ -2283,7 +2286,7 @@ u32 get_random_u32(void) - ret = batch->entropy_u32[batch->position++]; - if (use_lock) - read_unlock_irqrestore(&batched_entropy_reset_lock, flags); -- put_cpu_var(batched_entropy_u32); -+ put_locked_var(batched_entropy_u32_lock, batched_entropy_u32); - return ret; - } - EXPORT_SYMBOL(get_random_u32); --- -2.20.1 - diff --git a/kernel/patches-4.19.x-rt/0231-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/kernel/patches-4.19.x-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch similarity index 87% rename from kernel/patches-4.19.x-rt/0231-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch rename to kernel/patches-4.19.x-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch index 939c76a05..504637ace 100644 --- a/kernel/patches-4.19.x-rt/0231-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch +++ 
b/kernel/patches-4.19.x-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch @@ -1,7 +1,7 @@ -From 579810b4daa730ec872b6c1e8940d5ab6625bb44 Mon Sep 17 00:00:00 2001 +From 514b9e065d13e89df397678ec72ecec1c7228363 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 19 Aug 2018 08:28:35 +0200 -Subject: [PATCH 231/269] sched: Allow pinned user tasks to be awakened to the +Subject: [PATCH 230/268] sched: Allow pinned user tasks to be awakened to the CPU they pinned Since commit 7af443ee16976 ("sched/core: Require cpu_active() in @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 1cd1abc45097..960271e088ab 100644 +index 08052198031a..33e81e7be168 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -904,7 +904,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) diff --git a/kernel/patches-4.19.x-rt/0232-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/kernel/patches-4.19.x-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch similarity index 94% rename from kernel/patches-4.19.x-rt/0232-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch rename to kernel/patches-4.19.x-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch index cb313022a..e7be53b0e 100644 --- a/kernel/patches-4.19.x-rt/0232-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch +++ b/kernel/patches-4.19.x-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch @@ -1,7 +1,7 @@ -From e8484e1a8250b915f8da072e0693769465f9e956 Mon Sep 17 00:00:00 2001 +From 7dba9624ee317e72ab248f8fa3e7a8f7cd97fc84 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 4 Aug 2017 18:31:00 +0200 -Subject: [PATCH 232/269] hotplug: duct-tape RT-rwlock usage for non-RT +Subject: [PATCH 231/268] hotplug: duct-tape RT-rwlock usage for non-RT This type is only available on -RT. We need to craft something for non-RT. Since the only migrate_disable() user is -RT only, there is no @@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/kernel/cpu.c b/kernel/cpu.c -index 3340c4f873ad..ad2d23d9fee2 100644 +index 4f9121244618..3c2135e8cbac 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -75,7 +75,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { diff --git a/kernel/patches-4.19.x-rt/0233-net-Remove-preemption-disabling-in-netif_rx.patch b/kernel/patches-4.19.x-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch similarity index 92% rename from kernel/patches-4.19.x-rt/0233-net-Remove-preemption-disabling-in-netif_rx.patch rename to kernel/patches-4.19.x-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch index 692cf668a..677bd8740 100644 --- a/kernel/patches-4.19.x-rt/0233-net-Remove-preemption-disabling-in-netif_rx.patch +++ b/kernel/patches-4.19.x-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch @@ -1,7 +1,7 @@ -From d11da9d22d701a9a3e48a6ce8b2e94bfb3c922c2 Mon Sep 17 00:00:00 2001 +From 4b04560187beea2dbdfcc16696f52d7a6f17593a Mon Sep 17 00:00:00 2001 From: Priyanka Jain Date: Thu, 17 May 2012 09:35:11 +0530 -Subject: [PATCH 233/269] net: Remove preemption disabling in netif_rx() +Subject: [PATCH 232/268] net: Remove preemption disabling in netif_rx() 1)enqueue_to_backlog() (called from netif_rx) should be bind to a particluar CPU. 
This can be achieved by @@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index 50fe1e3ee26d..0c7238cc6ae2 100644 +index d86972449f63..cdf356fe054c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4484,7 +4484,7 @@ static int netif_rx_internal(struct sk_buff *skb) diff --git a/kernel/patches-4.19.x-rt/0234-net-Another-local_irq_disable-kmalloc-headache.patch b/kernel/patches-4.19.x-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch similarity index 93% rename from kernel/patches-4.19.x-rt/0234-net-Another-local_irq_disable-kmalloc-headache.patch rename to kernel/patches-4.19.x-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch index ae355fe1e..3df430461 100644 --- a/kernel/patches-4.19.x-rt/0234-net-Another-local_irq_disable-kmalloc-headache.patch +++ b/kernel/patches-4.19.x-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch @@ -1,7 +1,7 @@ -From c82cf443e33d996e2ec0d6ea914dbb03c9540f12 Mon Sep 17 00:00:00 2001 +From 53f0e68188d4f6d6b4cfae7150446199bafc00cb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 26 Sep 2012 16:21:08 +0200 -Subject: [PATCH 234/269] net: Another local_irq_disable/kmalloc headache +Subject: [PATCH 233/268] net: Another local_irq_disable/kmalloc headache Replace it by a local lock. Though that's pretty inefficient :( diff --git a/kernel/patches-4.19.x-rt/0235-net-core-protect-users-of-napi_alloc_cache-against-r.patch b/kernel/patches-4.19.x-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch similarity index 96% rename from kernel/patches-4.19.x-rt/0235-net-core-protect-users-of-napi_alloc_cache-against-r.patch rename to kernel/patches-4.19.x-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch index eb1381cd3..bacb97bf3 100644 --- a/kernel/patches-4.19.x-rt/0235-net-core-protect-users-of-napi_alloc_cache-against-r.patch +++ b/kernel/patches-4.19.x-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch @@ -1,7 +1,7 @@ -From aee85b9563699974c6712aa097ca316a0ad1949b Mon Sep 17 00:00:00 2001 +From 4e22e8fa0088f0fe1d6b2c2649fcfd195cccb384 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 15 Jan 2016 16:33:34 +0100 -Subject: [PATCH 235/269] net/core: protect users of napi_alloc_cache against +Subject: [PATCH 234/268] net/core: protect users of napi_alloc_cache against reentrance On -RT the code running in BH can not be moved to another CPU so CPU diff --git a/kernel/patches-4.19.x-rt/0236-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch b/kernel/patches-4.19.x-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch similarity index 93% rename from kernel/patches-4.19.x-rt/0236-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch rename to kernel/patches-4.19.x-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch index 9aac89aa8..5fc841265 100644 --- a/kernel/patches-4.19.x-rt/0236-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch +++ b/kernel/patches-4.19.x-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch @@ -1,7 +1,7 @@ -From 0cabd4b2f5b341ccb079e8a59ec58999bd69ed9b Mon Sep 17 00:00:00 2001 +From 61b1586ad82dc40ec6c262a76c390cfe1b64d256 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 11:18:08 +0100 -Subject: [PATCH 236/269] net: netfilter: Serialize xt_write_recseq sections on +Subject: [PATCH 235/268] net: netfilter: Serialize xt_write_recseq sections on RT The netfilter code relies only on 
the implicit semantics of @@ -55,7 +55,7 @@ index 9077b3ebea08..1710f2aff350 100644 /* diff --git a/net/netfilter/core.c b/net/netfilter/core.c -index dc240cb47ddf..9bd8f062ebc1 100644 +index 93aaec3a54ec..b364cf8e5776 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -20,6 +20,7 @@ diff --git a/kernel/patches-4.19.x-rt/0237-net-Add-a-mutex-around-devnet_rename_seq.patch b/kernel/patches-4.19.x-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch similarity index 95% rename from kernel/patches-4.19.x-rt/0237-net-Add-a-mutex-around-devnet_rename_seq.patch rename to kernel/patches-4.19.x-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch index 520b21947..91b6545b3 100644 --- a/kernel/patches-4.19.x-rt/0237-net-Add-a-mutex-around-devnet_rename_seq.patch +++ b/kernel/patches-4.19.x-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch @@ -1,7 +1,7 @@ -From 7beec1c3857d0010fff01b209cbb4fa4c6674c1b Mon Sep 17 00:00:00 2001 +From 8292ac83590c63465d9f1eb234358dc12148d06d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Mar 2013 18:06:20 +0100 -Subject: [PATCH 237/269] net: Add a mutex around devnet_rename_seq +Subject: [PATCH 236/268] net: Add a mutex around devnet_rename_seq On RT write_seqcount_begin() disables preemption and device_rename() allocates memory with GFP_KERNEL and grabs later the sysfs_mutex @@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index 0c7238cc6ae2..848937d85a41 100644 +index cdf356fe054c..63b3058dd172 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -195,6 +195,7 @@ static unsigned int napi_gen_id = NR_CPUS; diff --git a/kernel/patches-4.19.x-rt/0238-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch b/kernel/patches-4.19.x-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch similarity index 94% rename from kernel/patches-4.19.x-rt/0238-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch rename to kernel/patches-4.19.x-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch index 1b9d60d8e..470589499 100644 --- a/kernel/patches-4.19.x-rt/0238-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch +++ b/kernel/patches-4.19.x-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch @@ -1,7 +1,7 @@ -From fdee0604e425474b4b3ba2935764f5b995764ba4 Mon Sep 17 00:00:00 2001 +From 25b09445ad17e5bff831abafb029bcc3373b3e85 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Mon, 16 Apr 2012 15:01:56 +0800 -Subject: [PATCH 238/269] lockdep: selftest: Only do hardirq context test for +Subject: [PATCH 237/268] lockdep: selftest: Only do hardirq context test for raw spinlock On -rt there is no softirq context any more and rwlock is sleepable, diff --git a/kernel/patches-4.19.x-rt/0239-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/kernel/patches-4.19.x-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch similarity index 97% rename from kernel/patches-4.19.x-rt/0239-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch rename to kernel/patches-4.19.x-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch index 9839b5a94..748617ca0 100644 --- a/kernel/patches-4.19.x-rt/0239-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch +++ b/kernel/patches-4.19.x-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch @@ -1,7 +1,7 @@ -From 726d6192b03ebe0886b0592a3cb6e071b84f9580 Mon Sep 17 00:00:00 2001 +From 
05624840469b292b98e66584cfbe5234cc37a2fa Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Wed, 28 Jan 2015 13:08:45 -0600 -Subject: [PATCH 239/269] lockdep: selftest: fix warnings due to missing +Subject: [PATCH 238/268] lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals "lockdep: Selftest: Only do hardirq context test for raw spinlock" diff --git a/kernel/patches-4.19.x-rt/0240-sched-Add-support-for-lazy-preemption.patch b/kernel/patches-4.19.x-rt/0239-sched-Add-support-for-lazy-preemption.patch similarity index 94% rename from kernel/patches-4.19.x-rt/0240-sched-Add-support-for-lazy-preemption.patch rename to kernel/patches-4.19.x-rt/0239-sched-Add-support-for-lazy-preemption.patch index 311164b31..4a7207f2c 100644 --- a/kernel/patches-4.19.x-rt/0240-sched-Add-support-for-lazy-preemption.patch +++ b/kernel/patches-4.19.x-rt/0239-sched-Add-support-for-lazy-preemption.patch @@ -1,7 +1,7 @@ -From ccc79764a3c2281d5d0f7e15ba4628bceabd7a37 Mon Sep 17 00:00:00 2001 +From ee6a6c573101060336e51ddacf0faa38099ddd9d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Oct 2012 18:50:54 +0100 -Subject: [PATCH 240/269] sched: Add support for lazy preemption +Subject: [PATCH 239/268] sched: Add support for lazy preemption It has become an obsession to mitigate the determinism vs. throughput loss of RT. Looking at the mainline semantics of preemption points @@ -244,7 +244,7 @@ index 907d72b3ba95..306567f72a3e 100644 prompt "Preemption Model" default PREEMPT_NONE diff --git a/kernel/cpu.c b/kernel/cpu.c -index ad2d23d9fee2..46118ba36e3e 100644 +index 3c2135e8cbac..22a479fb720d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -304,11 +304,13 @@ void pin_current_cpu(void) @@ -262,7 +262,7 @@ index ad2d23d9fee2..46118ba36e3e 100644 __read_rt_unlock(cpuhp_pin); goto again; diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 960271e088ab..6d06dd682cd5 100644 +index 33e81e7be168..7831756f2097 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -493,6 +493,48 @@ void resched_curr(struct rq *rq) @@ -394,7 +394,7 @@ index 960271e088ab..6d06dd682cd5 100644 /* * The idle tasks have their own, simple scheduling class: */ -@@ -7183,6 +7259,7 @@ void migrate_disable(void) +@@ -7190,6 +7266,7 @@ void migrate_disable(void) } preempt_disable(); @@ -402,7 +402,7 @@ index 960271e088ab..6d06dd682cd5 100644 pin_current_cpu(); migrate_disable_update_cpus_allowed(p); -@@ -7250,6 +7327,7 @@ void migrate_enable(void) +@@ -7257,6 +7334,7 @@ void migrate_enable(void) arg.dest_cpu = dest_cpu; unpin_current_cpu(); @@ -410,7 +410,7 @@ index 960271e088ab..6d06dd682cd5 100644 preempt_enable(); stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); tlb_migrate_finish(p->mm); -@@ -7258,6 +7336,7 @@ void migrate_enable(void) +@@ -7265,6 +7343,7 @@ void migrate_enable(void) } } unpin_current_cpu(); @@ -419,10 +419,10 @@ index 960271e088ab..6d06dd682cd5 100644 } EXPORT_SYMBOL(migrate_enable); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index c17d63b06026..3b29a0b6748a 100644 +index 0048a32a3b4d..2cca09d59019 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4017,7 +4017,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4021,7 +4021,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { @@ -431,7 +431,7 @@ index c17d63b06026..3b29a0b6748a 100644 /* * The current task ran long 
enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -4041,7 +4041,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4045,7 +4045,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) @@ -440,7 +440,7 @@ index c17d63b06026..3b29a0b6748a 100644 } static void -@@ -4183,7 +4183,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) +@@ -4187,7 +4187,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. */ if (queued) { @@ -449,7 +449,7 @@ index c17d63b06026..3b29a0b6748a 100644 return; } /* -@@ -4367,7 +4367,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) +@@ -4371,7 +4371,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -458,7 +458,7 @@ index c17d63b06026..3b29a0b6748a 100644 } static __always_inline -@@ -5063,7 +5063,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) +@@ -5067,7 +5067,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) @@ -467,7 +467,7 @@ index c17d63b06026..3b29a0b6748a 100644 return; } hrtick_start(rq, delta); -@@ -6639,7 +6639,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ +@@ -6643,7 +6643,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: @@ -476,7 +476,7 @@ index c17d63b06026..3b29a0b6748a 100644 /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -9726,7 +9726,7 @@ static void task_fork_fair(struct task_struct *p) +@@ -9734,7 +9734,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); @@ -485,7 +485,7 @@ index c17d63b06026..3b29a0b6748a 100644 } se->vruntime -= cfs_rq->min_vruntime; -@@ -9750,7 +9750,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) +@@ -9758,7 +9758,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) @@ -509,7 +509,7 @@ index 68de18405857..12a12be6770b 100644 /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index dd6ae39957ce..58d3972ae0d4 100644 +index 49ae30da28ee..f7c1c262457f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1638,6 +1638,15 @@ extern void reweight_task(struct task_struct *p, int prio); @@ -529,10 +529,10 @@ index dd6ae39957ce..58d3972ae0d4 100644 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 0af14953d52d..02a29282b828 100644 +index 10843c80cffb..d51fc6825a6b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2134,6 +2134,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, +@@ -2137,6 +2137,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; @@ -540,7 +540,7 @@ index 0af14953d52d..02a29282b828 100644 entry->pid = (tsk) ? 
tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -@@ -2144,7 +2145,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, +@@ -2147,7 +2148,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | @@ -550,7 +550,7 @@ index 0af14953d52d..02a29282b828 100644 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0; -@@ -3346,15 +3348,17 @@ get_total_entries(struct trace_buffer *buf, +@@ -3349,15 +3351,17 @@ get_total_entries(struct trace_buffer *buf, static void print_lat_help_header(struct seq_file *m) { @@ -577,7 +577,7 @@ index 0af14953d52d..02a29282b828 100644 } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) -@@ -3392,15 +3396,17 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file +@@ -3395,15 +3399,17 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", tgid ? tgid_space : space); diff --git a/kernel/patches-4.19.x-rt/0241-ftrace-Fix-trace-header-alignment.patch b/kernel/patches-4.19.x-rt/0240-ftrace-Fix-trace-header-alignment.patch similarity index 90% rename from kernel/patches-4.19.x-rt/0241-ftrace-Fix-trace-header-alignment.patch rename to kernel/patches-4.19.x-rt/0240-ftrace-Fix-trace-header-alignment.patch index 92d7dce6a..f302aa3e7 100644 --- a/kernel/patches-4.19.x-rt/0241-ftrace-Fix-trace-header-alignment.patch +++ b/kernel/patches-4.19.x-rt/0240-ftrace-Fix-trace-header-alignment.patch @@ -1,7 +1,7 @@ -From 9dc4f4dc93a57dce9f30fb429753a23f0e339749 Mon Sep 17 00:00:00 2001 +From b5f1bda181e054ae5afcfb2a2fb671ec16ea211f Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 16 Oct 2016 05:08:30 +0200 -Subject: [PATCH 241/269] ftrace: Fix trace header alignment +Subject: [PATCH 240/268] ftrace: Fix trace header alignment Line up helper arrows to the right column. @@ -14,10 +14,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 02a29282b828..fb2ff2dfd134 100644 +index d51fc6825a6b..2ce8428d0f45 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -3348,17 +3348,17 @@ get_total_entries(struct trace_buffer *buf, +@@ -3351,17 +3351,17 @@ get_total_entries(struct trace_buffer *buf, static void print_lat_help_header(struct seq_file *m) { diff --git a/kernel/patches-4.19.x-rt/0242-x86-Support-for-lazy-preemption.patch b/kernel/patches-4.19.x-rt/0241-x86-Support-for-lazy-preemption.patch similarity index 94% rename from kernel/patches-4.19.x-rt/0242-x86-Support-for-lazy-preemption.patch rename to kernel/patches-4.19.x-rt/0241-x86-Support-for-lazy-preemption.patch index 181aa082b..35ee7545c 100644 --- a/kernel/patches-4.19.x-rt/0242-x86-Support-for-lazy-preemption.patch +++ b/kernel/patches-4.19.x-rt/0241-x86-Support-for-lazy-preemption.patch @@ -1,7 +1,7 @@ -From 85dc65ec7e8efc7a7842a1c52b964fe3a5f3214e Mon Sep 17 00:00:00 2001 +From d6770ae0dff21e273f805126b99ad09e602ad27f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Nov 2012 11:03:47 +0100 -Subject: [PATCH 242/269] x86: Support for lazy preemption +Subject: [PATCH 241/268] x86: Support for lazy preemption Implement the x86 pieces for lazy preempt. 
@@ -29,10 +29,10 @@ index 1b05ae86bdde..736e369e141b 100644 select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c -index ec46ee700791..fbb14008bd43 100644 +index 91676b0d2d4c..3b5e41d9b29d 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c -@@ -133,7 +133,7 @@ static long syscall_trace_enter(struct pt_regs *regs) +@@ -134,7 +134,7 @@ static long syscall_trace_enter(struct pt_regs *regs) #define EXIT_TO_USERMODE_LOOP_FLAGS \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ @@ -41,7 +41,7 @@ index ec46ee700791..fbb14008bd43 100644 static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) { -@@ -148,7 +148,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) +@@ -149,7 +149,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) /* We have work to do. */ local_irq_enable(); @@ -51,10 +51,10 @@ index ec46ee700791..fbb14008bd43 100644 #ifdef ARCH_RT_DELAYS_SIGNAL_SEND diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S -index fbbf1ba57ec6..0169c257cfff 100644 +index b5c2b1091b18..83d43eb2f556 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S -@@ -764,8 +764,25 @@ END(ret_from_exception) +@@ -766,8 +766,25 @@ END(ret_from_exception) ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) .Lneed_resched: @@ -81,10 +81,10 @@ index fbbf1ba57ec6..0169c257cfff 100644 jz restore_all_kernel call preempt_schedule_irq diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S -index ce2a6587ed11..d01d68de64ae 100644 +index 7b29f2c10d01..23dda6f4a69f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S -@@ -706,7 +706,23 @@ retint_kernel: +@@ -708,7 +708,23 @@ retint_kernel: btl $9, EFLAGS(%rsp) /* were interrupts off? 
*/ jnc 1f 0: cmpl $0, PER_CPU_VAR(__preempt_count) diff --git a/kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-properly-check-against-preempt-mask.patch b/kernel/patches-4.19.x-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch similarity index 88% rename from kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-properly-check-against-preempt-mask.patch rename to kernel/patches-4.19.x-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch index 5ec769fa0..e5f9a32e0 100644 --- a/kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-properly-check-against-preempt-mask.patch +++ b/kernel/patches-4.19.x-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch @@ -1,7 +1,7 @@ -From d35f3a1ee1cf19c8b8aefe555a8af80a5f5b8fe1 Mon Sep 17 00:00:00 2001 +From 5e63d7fdebaf84c9c726a49717401f852d254be4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 18 Feb 2019 16:57:09 +0100 -Subject: [PATCH 243/269] x86: lazy-preempt: properly check against +Subject: [PATCH 242/268] x86: lazy-preempt: properly check against preempt-mask should_resched() should check against preempt_offset after unmasking the diff --git a/kernel/patches-4.19.x-rt/0244-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch b/kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch similarity index 84% rename from kernel/patches-4.19.x-rt/0244-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch rename to kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch index 563e670d6..99df63d1c 100644 --- a/kernel/patches-4.19.x-rt/0244-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch +++ b/kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch @@ -1,7 +1,7 @@ -From 48a22e409d7de1904f5577d83d0c8f9cb69ce766 Mon Sep 17 00:00:00 2001 +From 1061769ef29ec710d0bf7711fe2d26545802a9e0 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 14:53:49 +0100 -Subject: [PATCH 244/269] x86: lazy-preempt: use proper return label on +Subject: [PATCH 243/268] x86: lazy-preempt: use proper return label on 32bit-x86 The lazy-preempt uses the wrong return label in case preemption isn't @@ -16,10 +16,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S -index 0169c257cfff..e6f61c813baf 100644 +index 83d43eb2f556..0b25d2efdb87 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S -@@ -773,15 +773,15 @@ ENTRY(resume_kernel) +@@ -775,15 +775,15 @@ ENTRY(resume_kernel) # atleast preempt count == 0 ? 
cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) diff --git a/kernel/patches-4.19.x-rt/0245-arm-Add-support-for-lazy-preemption.patch b/kernel/patches-4.19.x-rt/0244-arm-Add-support-for-lazy-preemption.patch similarity index 97% rename from kernel/patches-4.19.x-rt/0245-arm-Add-support-for-lazy-preemption.patch rename to kernel/patches-4.19.x-rt/0244-arm-Add-support-for-lazy-preemption.patch index dc625b966..5b444498e 100644 --- a/kernel/patches-4.19.x-rt/0245-arm-Add-support-for-lazy-preemption.patch +++ b/kernel/patches-4.19.x-rt/0244-arm-Add-support-for-lazy-preemption.patch @@ -1,7 +1,7 @@ -From 4b5643e59aaece3f42def2a9ea0fe2dd07cab601 Mon Sep 17 00:00:00 2001 +From a60760dfedf815d50588123352546ba525d3adac Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 31 Oct 2012 12:04:11 +0100 -Subject: [PATCH 245/269] arm: Add support for lazy preemption +Subject: [PATCH 244/268] arm: Add support for lazy preemption Implement the arm pieces for lazy preempt. @@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner 6 files changed, 33 insertions(+), 8 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 91f4f80a6f24..cba596677f6e 100644 +index e122dd212ab3..9413ad933336 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -90,6 +90,7 @@ config ARM diff --git a/kernel/patches-4.19.x-rt/0246-powerpc-Add-support-for-lazy-preemption.patch b/kernel/patches-4.19.x-rt/0245-powerpc-Add-support-for-lazy-preemption.patch similarity index 98% rename from kernel/patches-4.19.x-rt/0246-powerpc-Add-support-for-lazy-preemption.patch rename to kernel/patches-4.19.x-rt/0245-powerpc-Add-support-for-lazy-preemption.patch index 807e40149..92781db2d 100644 --- a/kernel/patches-4.19.x-rt/0246-powerpc-Add-support-for-lazy-preemption.patch +++ b/kernel/patches-4.19.x-rt/0245-powerpc-Add-support-for-lazy-preemption.patch @@ -1,7 +1,7 @@ -From 0f9163aaaab913d5d2fe2dc92e8c82e588eef09b Mon Sep 17 00:00:00 2001 +From 5cd763fb8c07928da73bfd294ac7ede226d022d9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Nov 2012 10:14:11 +0100 -Subject: [PATCH 246/269] powerpc: Add support for lazy preemption +Subject: [PATCH 245/268] powerpc: Add support for lazy preemption Implement the powerpc pieces for lazy preempt. diff --git a/kernel/patches-4.19.x-rt/0247-arch-arm64-Add-lazy-preempt-support.patch b/kernel/patches-4.19.x-rt/0246-arch-arm64-Add-lazy-preempt-support.patch similarity index 96% rename from kernel/patches-4.19.x-rt/0247-arch-arm64-Add-lazy-preempt-support.patch rename to kernel/patches-4.19.x-rt/0246-arch-arm64-Add-lazy-preempt-support.patch index ae97a17d4..8479af84b 100644 --- a/kernel/patches-4.19.x-rt/0247-arch-arm64-Add-lazy-preempt-support.patch +++ b/kernel/patches-4.19.x-rt/0246-arch-arm64-Add-lazy-preempt-support.patch @@ -1,7 +1,7 @@ -From 896a4ed8a9134811455719d2bc0ba8e5248c5a0f Mon Sep 17 00:00:00 2001 +From e8dc1790091506a4fad530f2fc6da1428bf2f698 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Thu, 14 May 2015 17:52:17 +0200 -Subject: [PATCH 247/269] arch/arm64: Add lazy preempt support +Subject: [PATCH 246/268] arch/arm64: Add lazy preempt support arm64 is missing support for PREEMPT_RT. The main feature which is lacking is support for lazy preemption. 
The arch-specific entry code, @@ -20,7 +20,7 @@ Signed-off-by: Anders Roxell 5 files changed, 17 insertions(+), 5 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 1b1a0e95c751..418a75d30f5c 100644 +index 8790a29d0af4..4a4db69c5e9a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -140,6 +140,7 @@ config ARM64 @@ -71,7 +71,7 @@ index cb2c10a8f0a8..f1820f7318b6 100644 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_NOHZ) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c -index 323aeb5f2fe6..7edd5a2668ea 100644 +index 92fba851ce53..844c71bc865b 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -41,6 +41,7 @@ int main(void) diff --git a/kernel/patches-4.19.x-rt/0248-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/kernel/patches-4.19.x-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch similarity index 95% rename from kernel/patches-4.19.x-rt/0248-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch rename to kernel/patches-4.19.x-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch index 1d33f820d..b7189551a 100644 --- a/kernel/patches-4.19.x-rt/0248-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch +++ b/kernel/patches-4.19.x-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch @@ -1,7 +1,7 @@ -From 221c555911b760b4e7b8712860fe2368dd85d4e2 Mon Sep 17 00:00:00 2001 +From 8169023aa101ae83455a3d6ffa9d92c9a22260cc Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 16 Oct 2016 05:11:54 +0200 -Subject: [PATCH 248/269] connector/cn_proc: Protect send_msg() with a local +Subject: [PATCH 247/268] connector/cn_proc: Protect send_msg() with a local lock on RT |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931 diff --git a/kernel/patches-4.19.x-rt/0249-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/kernel/patches-4.19.x-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch similarity index 90% rename from kernel/patches-4.19.x-rt/0249-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch rename to kernel/patches-4.19.x-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch index 4330ccf73..7d29907e1 100644 --- a/kernel/patches-4.19.x-rt/0249-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch +++ b/kernel/patches-4.19.x-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch @@ -1,7 +1,7 @@ -From 666113236b467b8463b3a9f1976d21bd61e8f88e Mon Sep 17 00:00:00 2001 +From dcd28a1256254829109ceff081064f369eb310f8 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 31 Mar 2016 04:08:28 +0200 -Subject: [PATCH 249/269] drivers/block/zram: Replace bit spinlocks with +Subject: [PATCH 248/268] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt They're nondeterministic, and lead to ___might_sleep() splats in -rt. 
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 41 insertions(+) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c -index a65505db09e5..f35eccc43558 100644 +index 70cbd0ee1b07..42de45ebfb43 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -53,6 +53,40 @@ static size_t huge_class_size; @@ -67,7 +67,7 @@ index a65505db09e5..f35eccc43558 100644 static inline bool init_done(struct zram *zram) { -@@ -900,6 +935,8 @@ static DEVICE_ATTR_RO(io_stat); +@@ -901,6 +936,8 @@ static DEVICE_ATTR_RO(io_stat); static DEVICE_ATTR_RO(mm_stat); static DEVICE_ATTR_RO(debug_stat); @@ -76,7 +76,7 @@ index a65505db09e5..f35eccc43558 100644 static void zram_meta_free(struct zram *zram, u64 disksize) { size_t num_pages = disksize >> PAGE_SHIFT; -@@ -930,6 +967,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) +@@ -931,6 +968,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) if (!huge_class_size) huge_class_size = zs_huge_class_size(zram->mem_pool); diff --git a/kernel/patches-4.19.x-rt/0250-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/kernel/patches-4.19.x-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch similarity index 89% rename from kernel/patches-4.19.x-rt/0250-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch rename to kernel/patches-4.19.x-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch index d7f82ee52..de0721ac8 100644 --- a/kernel/patches-4.19.x-rt/0250-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch +++ b/kernel/patches-4.19.x-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch @@ -1,7 +1,7 @@ -From 84d4ca0b3c56c0dbc248508726c5f69cbf14d0cc Mon Sep 17 00:00:00 2001 +From 7ebc2fcbf40e267f5fc4785c62b5325e77dad1ce Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 20 Oct 2016 11:15:22 +0200 -Subject: [PATCH 250/269] drivers/zram: Don't disable preemption in +Subject: [PATCH 249/268] drivers/zram: Don't disable preemption in zcomp_stream_get/put() In v4.7, the driver switched to percpu compression streams, disabling @@ -66,10 +66,10 @@ index 41c1002a7d7d..d424eafcbf8e 100644 /* dynamic per-device compression frontend */ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c -index f35eccc43558..b2a347b8b517 100644 +index 42de45ebfb43..ffa3e9d67571 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c -@@ -1026,6 +1026,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, +@@ -1027,6 +1027,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, unsigned long handle; unsigned int size; void *src, *dst; @@ -77,7 +77,7 @@ index f35eccc43558..b2a347b8b517 100644 if (zram_wb_enabled(zram)) { zram_slot_lock(zram, index); -@@ -1060,6 +1061,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, +@@ -1061,6 +1062,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, size = zram_get_obj_size(zram, index); @@ -85,7 +85,7 @@ index f35eccc43558..b2a347b8b517 100644 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); if (size == PAGE_SIZE) { dst = kmap_atomic(page); -@@ -1067,14 +1069,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, +@@ -1068,14 +1070,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, kunmap_atomic(dst); ret = 0; } else { diff --git 
a/kernel/patches-4.19.x-rt/0251-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch b/kernel/patches-4.19.x-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch similarity index 90% rename from kernel/patches-4.19.x-rt/0251-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch rename to kernel/patches-4.19.x-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch index 51bd20e91..8a207c84b 100644 --- a/kernel/patches-4.19.x-rt/0251-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch +++ b/kernel/patches-4.19.x-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch @@ -1,7 +1,7 @@ -From 24fddbe29940c9217a8e2f5e9443ca29f941281a Mon Sep 17 00:00:00 2001 +From fd52008a2b0352c7b7ce12ab4e30805d94564ab3 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 23 Aug 2017 11:57:29 +0200 -Subject: [PATCH 251/269] drivers/zram: fix zcomp_stream_get() +Subject: [PATCH 250/268] drivers/zram: fix zcomp_stream_get() smp_processor_id() use in preemptible code Use get_local_ptr() instead this_cpu_ptr() to avoid a warning regarding diff --git a/kernel/patches-4.19.x-rt/0252-tpm_tis-fix-stall-after-iowrite-s.patch b/kernel/patches-4.19.x-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch similarity index 95% rename from kernel/patches-4.19.x-rt/0252-tpm_tis-fix-stall-after-iowrite-s.patch rename to kernel/patches-4.19.x-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch index 6338f0652..a435a8959 100644 --- a/kernel/patches-4.19.x-rt/0252-tpm_tis-fix-stall-after-iowrite-s.patch +++ b/kernel/patches-4.19.x-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch @@ -1,7 +1,7 @@ -From e17cfb4da190f56567819460296b640854ef8af0 Mon Sep 17 00:00:00 2001 +From 110d89555fb0f82f8491bbcf96736254479f125f Mon Sep 17 00:00:00 2001 From: Haris Okanovic Date: Tue, 15 Aug 2017 15:13:08 -0500 -Subject: [PATCH 252/269] tpm_tis: fix stall after iowrite*()s +Subject: [PATCH 251/268] tpm_tis: fix stall after iowrite*()s ioread8() operations to TPM MMIO addresses can stall the cpu when immediately following a sequence of iowrite*()'s to the same region. 
diff --git a/kernel/patches-4.19.x-rt/0253-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch b/kernel/patches-4.19.x-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch similarity index 96% rename from kernel/patches-4.19.x-rt/0253-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch rename to kernel/patches-4.19.x-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch index 027c4bc96..176336cb0 100644 --- a/kernel/patches-4.19.x-rt/0253-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch @@ -1,7 +1,7 @@ -From 2e143bef6376db39d9e876eae3e3f1f718ff0b23 Mon Sep 17 00:00:00 2001 +From 0a3f47112dded98a022bfe83cc49f4b601110324 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Fri, 28 Sep 2018 21:03:51 +0000 -Subject: [PATCH 253/269] watchdog: prevent deferral of watchdogd wakeup on RT +Subject: [PATCH 252/268] watchdog: prevent deferral of watchdogd wakeup on RT When PREEMPT_RT_FULL is enabled, all hrtimer expiry functions are deferred for execution into the context of ktimersoftd unless otherwise diff --git a/kernel/patches-4.19.x-rt/0254-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch b/kernel/patches-4.19.x-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch similarity index 92% rename from kernel/patches-4.19.x-rt/0254-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch rename to kernel/patches-4.19.x-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch index 9ee35e474..adc3f65fa 100644 --- a/kernel/patches-4.19.x-rt/0254-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch +++ b/kernel/patches-4.19.x-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch @@ -1,7 +1,7 @@ -From 14ab946c30ebc65a97dd2a3a68f5f1bb0bfb8c7a Mon Sep 17 00:00:00 2001 +From 5f676ed79ba9cc4ec81db474c5928fa2e0131bc3 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 27 Feb 2016 08:09:11 +0100 -Subject: [PATCH 254/269] drm,radeon,i915: Use preempt_disable/enable_rt() +Subject: [PATCH 253/268] drm,radeon,i915: Use preempt_disable/enable_rt() where recommended DRM folks identified the spots, so use them. 
@@ -36,7 +36,7 @@ index 29877969310d..f65817c51c2a 100644 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c -index 9d3ac8b981da..bde228c7739a 100644 +index d8e2d7b3b836..072b831aaf4f 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1813,6 +1813,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, diff --git a/kernel/patches-4.19.x-rt/0255-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch b/kernel/patches-4.19.x-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch similarity index 97% rename from kernel/patches-4.19.x-rt/0255-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch rename to kernel/patches-4.19.x-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch index c22488c0b..76a59bb28 100644 --- a/kernel/patches-4.19.x-rt/0255-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch +++ b/kernel/patches-4.19.x-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch @@ -1,7 +1,7 @@ -From fa836f911e7122a32cf1d934a9736497b5dee45d Mon Sep 17 00:00:00 2001 +From 510af70547824cb615012d83872da2e69ced62ed Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 27 Feb 2016 09:01:42 +0100 -Subject: [PATCH 255/269] drm,i915: Use local_lock/unlock_irq() in +Subject: [PATCH 254/268] drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end() [ 8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918 diff --git a/kernel/patches-4.19.x-rt/0256-drm-i915-disable-tracing-on-RT.patch b/kernel/patches-4.19.x-rt/0255-drm-i915-disable-tracing-on-RT.patch similarity index 92% rename from kernel/patches-4.19.x-rt/0256-drm-i915-disable-tracing-on-RT.patch rename to kernel/patches-4.19.x-rt/0255-drm-i915-disable-tracing-on-RT.patch index 729dceb0a..c44b45bd0 100644 --- a/kernel/patches-4.19.x-rt/0256-drm-i915-disable-tracing-on-RT.patch +++ b/kernel/patches-4.19.x-rt/0255-drm-i915-disable-tracing-on-RT.patch @@ -1,7 +1,7 @@ -From 0d087f448e0154cd673da85a57d305bb17f43f48 Mon Sep 17 00:00:00 2001 +From 680c563b9d44305039cfce83a00de477d11d2c9e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 6 Dec 2018 09:52:20 +0100 -Subject: [PATCH 256/269] drm/i915: disable tracing on -RT +Subject: [PATCH 255/268] drm/i915: disable tracing on -RT Luca Abeni reported this: | BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003 diff --git a/kernel/patches-4.19.x-rt/0257-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/kernel/patches-4.19.x-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch similarity index 89% rename from kernel/patches-4.19.x-rt/0257-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch rename to kernel/patches-4.19.x-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch index 8640dd0ae..65ae9303f 100644 --- a/kernel/patches-4.19.x-rt/0257-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch +++ b/kernel/patches-4.19.x-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch @@ -1,7 +1,7 @@ -From 1e0d82558c60f1e889452550fe5766802e54c9bc Mon Sep 17 00:00:00 2001 +From 26dabed799217a8f8f8b23777f78341813d319b5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 19 Dec 2018 10:47:02 +0100 -Subject: [PATCH 257/269] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with +Subject: [PATCH 256/268] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS 
with NOTRACE The order of the header files is important. If this header file is diff --git a/kernel/patches-4.19.x-rt/0258-cgroups-use-simple-wait-in-css_release.patch b/kernel/patches-4.19.x-rt/0257-cgroups-use-simple-wait-in-css_release.patch similarity index 90% rename from kernel/patches-4.19.x-rt/0258-cgroups-use-simple-wait-in-css_release.patch rename to kernel/patches-4.19.x-rt/0257-cgroups-use-simple-wait-in-css_release.patch index bc0b65a05..91267668b 100644 --- a/kernel/patches-4.19.x-rt/0258-cgroups-use-simple-wait-in-css_release.patch +++ b/kernel/patches-4.19.x-rt/0257-cgroups-use-simple-wait-in-css_release.patch @@ -1,7 +1,7 @@ -From 12874386b3141dd4afa5b6e4aee17e99f529f37e Mon Sep 17 00:00:00 2001 +From bbb2678c2b60796f1abfd72bfe5492ee96013605 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 13 Feb 2015 15:52:24 +0100 -Subject: [PATCH 258/269] cgroups: use simple wait in css_release() +Subject: [PATCH 257/268] cgroups: use simple wait in css_release() To avoid: |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 @@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h -index 6002275937f5..ba64953d53d9 100644 +index a6090154b2ab..46a706e2ba35 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -20,6 +20,7 @@ @@ -54,7 +54,7 @@ index 6002275937f5..ba64953d53d9 100644 /* diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c -index 63dae7e0ccae..4377e0fd8827 100644 +index 81441117f611..7b536796daf8 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4628,10 +4628,10 @@ static void css_free_rwork_fn(struct work_struct *work) @@ -70,7 +70,7 @@ index 63dae7e0ccae..4377e0fd8827 100644 struct cgroup_subsys *ss = css->ss; struct cgroup *cgrp = css->cgroup; -@@ -4691,8 +4691,8 @@ static void css_release(struct percpu_ref *ref) +@@ -4693,8 +4693,8 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); @@ -81,7 +81,7 @@ index 63dae7e0ccae..4377e0fd8827 100644 } static void init_and_link_css(struct cgroup_subsys_state *css, -@@ -5414,6 +5414,7 @@ static int __init cgroup_wq_init(void) +@@ -5420,6 +5420,7 @@ static int __init cgroup_wq_init(void) */ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); BUG_ON(!cgroup_destroy_wq); diff --git a/kernel/patches-4.19.x-rt/0259-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/kernel/patches-4.19.x-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch similarity index 98% rename from kernel/patches-4.19.x-rt/0259-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch rename to kernel/patches-4.19.x-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch index 82c41f985..9809b5679 100644 --- a/kernel/patches-4.19.x-rt/0259-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch +++ b/kernel/patches-4.19.x-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch @@ -1,7 +1,7 @@ -From 3bf07cd523e1ceabae1252c9c286b5fa88608994 Mon Sep 17 00:00:00 2001 +From 660cd6e4eb91ed508585d0ec7bdf19c87a17f1e0 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 8 Jan 2017 09:32:25 +0100 -Subject: [PATCH 259/269] cpuset: Convert callback_lock to raw_spinlock_t +Subject: [PATCH 258/268] cpuset: Convert callback_lock to raw_spinlock_t The two commits below add up to a cpuset might_sleep() splat for RT: diff --git 
a/kernel/patches-4.19.x-rt/0260-apparmor-use-a-locallock-instead-preempt_disable.patch b/kernel/patches-4.19.x-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch
similarity index 95%
rename from kernel/patches-4.19.x-rt/0260-apparmor-use-a-locallock-instead-preempt_disable.patch
rename to kernel/patches-4.19.x-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch
index 0c5246307..04b82bd10 100644
--- a/kernel/patches-4.19.x-rt/0260-apparmor-use-a-locallock-instead-preempt_disable.patch
+++ b/kernel/patches-4.19.x-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch
@@ -1,7 +1,7 @@
-From f03e611745700fad514b850296eab0b098b3c12d Mon Sep 17 00:00:00 2001
+From 871cfd9e5e7aafaf8b0bcbbf0a4c4336e250fc27 Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior
 Date: Wed, 11 Oct 2017 17:43:49 +0200
-Subject: [PATCH 260/269] apparmor: use a locallock instead preempt_disable()
+Subject: [PATCH 259/268] apparmor: use a locallock instead preempt_disable()
 
 get_buffers() disables preemption which acts as a lock for the per-CPU
 variable. Since we can't disable preemption here on RT, a local_lock is
diff --git a/kernel/patches-4.19.x-rt/0261-workqueue-Prevent-deadlock-stall-on-RT.patch b/kernel/patches-4.19.x-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch
similarity index 97%
rename from kernel/patches-4.19.x-rt/0261-workqueue-Prevent-deadlock-stall-on-RT.patch
rename to kernel/patches-4.19.x-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch
index a37a49945..2c7b9649e 100644
--- a/kernel/patches-4.19.x-rt/0261-workqueue-Prevent-deadlock-stall-on-RT.patch
+++ b/kernel/patches-4.19.x-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch
@@ -1,7 +1,7 @@
-From 936c037e636229e54d45ea6887e110d47d891059 Mon Sep 17 00:00:00 2001
+From 0a4863ce60343bab3b2dd5a078a1c4b9d6a74284 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner
 Date: Fri, 27 Jun 2014 16:24:52 +0200
-Subject: [PATCH 261/269] workqueue: Prevent deadlock/stall on RT
+Subject: [PATCH 260/268] workqueue: Prevent deadlock/stall on RT
 
 Austin reported a XFS deadlock/stall on RT where scheduled work gets
 never exececuted and tasks are waiting for each other for ever.
@@ -42,7 +42,7 @@ Cc: Steven Rostedt
  2 files changed, 51 insertions(+), 15 deletions(-)
 
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 6d06dd682cd5..d2a475e00af8 100644
+index 7831756f2097..91a9b2556fb0 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -3569,9 +3569,8 @@ void __noreturn do_task_dead(void)
@@ -67,7 +67,7 @@ index 6d06dd682cd5..d2a475e00af8 100644
  * If we are going to sleep and we have plugged IO queued,
  * make sure to submit it to avoid deadlocks.
 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index bf7be926ce5f..84397c2a4465 100644
+index aa39924bd3b5..12137825bf5a 100644
 --- a/kernel/workqueue.c
 +++ b/kernel/workqueue.c
 @@ -125,6 +125,11 @@ enum {
diff --git a/kernel/patches-4.19.x-rt/0262-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/kernel/patches-4.19.x-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch
similarity index 95%
rename from kernel/patches-4.19.x-rt/0262-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch
rename to kernel/patches-4.19.x-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index bf453a098..5ae2136e9 100644
--- a/kernel/patches-4.19.x-rt/0262-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/kernel/patches-4.19.x-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -1,7 +1,7 @@
-From d05a6a9bf872f14f98543e61c6ef160307078b7c Mon Sep 17 00:00:00 2001
+From c5297a99ba9d1c0b75c3650cc5b18cbecec62838 Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner
 Date: Fri, 3 Jul 2009 08:44:56 -0500
-Subject: [PATCH 262/269] signals: Allow rt tasks to cache one sigqueue struct
+Subject: [PATCH 261/268] signals: Allow rt tasks to cache one sigqueue struct
 
 To avoid allocation allow rt tasks to cache one sigqueue struct in
 task struct.
@@ -54,10 +54,10 @@ index 5c0964dc805a..47d4161d1104 100644
 	spin_unlock(&sighand->siglock);
 diff --git a/kernel/fork.c b/kernel/fork.c
-index f62ae61064c7..1cd87e9c9f17 100644
+index 96297e71019c..aa4905338ff4 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1802,6 +1802,7 @@ static __latent_entropy struct task_struct *copy_process(
+@@ -1827,6 +1827,7 @@ static __latent_entropy struct task_struct *copy_process(
 	spin_lock_init(&p->alloc_lock);
 
 	init_sigpending(&p->pending);
@@ -66,7 +66,7 @@ index f62ae61064c7..1cd87e9c9f17 100644
 	p->utime = p->stime = p->gtime = 0;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 diff --git a/kernel/signal.c b/kernel/signal.c
-index 57c48b3d1491..367e10c919d1 100644
+index 56edb0580a3a..ac32b4f41d24 100644
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
 @@ -19,6 +19,7 @@
diff --git a/kernel/patches-4.19.x-rt/0263-Add-localversion-for-RT-release.patch b/kernel/patches-4.19.x-rt/0262-Add-localversion-for-RT-release.patch
similarity index 76%
rename from kernel/patches-4.19.x-rt/0263-Add-localversion-for-RT-release.patch
rename to kernel/patches-4.19.x-rt/0262-Add-localversion-for-RT-release.patch
index 6d7a761dd..a52728ea5 100644
--- a/kernel/patches-4.19.x-rt/0263-Add-localversion-for-RT-release.patch
+++ b/kernel/patches-4.19.x-rt/0262-Add-localversion-for-RT-release.patch
@@ -1,7 +1,7 @@
-From 7b48c4366f0f483bb81cc05f7f427176bff52bf8 Mon Sep 17 00:00:00 2001
+From d3d4bb55c2d289271ff369309d3f8675e651165d Mon Sep 17 00:00:00 2001
 From: Thomas Gleixner
 Date: Fri, 8 Jul 2011 20:25:16 +0200
-Subject: [PATCH 263/269] Add localversion for -RT release
+Subject: [PATCH 262/268] Add localversion for -RT release
 
 Signed-off-by: Thomas Gleixner
 ---
diff --git a/kernel/patches-4.19.x-rt/0264-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch b/kernel/patches-4.19.x-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
similarity index 96%
rename from kernel/patches-4.19.x-rt/0264-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
rename to kernel/patches-4.19.x-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
index 4de5728b2..1bb51863e 100644
--- a/kernel/patches-4.19.x-rt/0264-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
+++ b/kernel/patches-4.19.x-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
@@ -1,7 +1,7 @@
-From 8c6c7ae29703351a50e4ab8c71d130f8c7c06c91 Mon Sep 17 00:00:00 2001
+From c9f120d53a7fa150f6f26dcfdc6edbde3e42d9ec Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior
 Date: Tue, 26 Mar 2019 18:31:54 +0100
-Subject: [PATCH 264/269] powerpc/pseries/iommu: Use a locallock instead
+Subject: [PATCH 263/268] powerpc/pseries/iommu: Use a locallock instead
  local_irq_save()
 
 The locallock protects the per-CPU variable tce_page. The function
diff --git a/kernel/patches-4.19.x-rt/0265-powerpc-reshuffle-TIF-bits.patch b/kernel/patches-4.19.x-rt/0264-powerpc-reshuffle-TIF-bits.patch
similarity index 97%
rename from kernel/patches-4.19.x-rt/0265-powerpc-reshuffle-TIF-bits.patch
rename to kernel/patches-4.19.x-rt/0264-powerpc-reshuffle-TIF-bits.patch
index d9d26ea34..d5879e52f 100644
--- a/kernel/patches-4.19.x-rt/0265-powerpc-reshuffle-TIF-bits.patch
+++ b/kernel/patches-4.19.x-rt/0264-powerpc-reshuffle-TIF-bits.patch
@@ -1,7 +1,7 @@
-From 9b0199e0f5b4e5782a4588e31d4db3e75aa3bbff Mon Sep 17 00:00:00 2001
+From a55a83a5c20bf641d7ded63b53e940f54c16778f Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior
 Date: Fri, 22 Mar 2019 17:15:58 +0100
-Subject: [PATCH 265/269] powerpc: reshuffle TIF bits
+Subject: [PATCH 264/268] powerpc: reshuffle TIF bits
 
 Powerpc32/64 does not compile because TIF_SYSCALL_TRACE's bit is higher
 than 15 and the assembly instructions don't expect that.
diff --git a/kernel/patches-4.19.x-rt/0266-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch b/kernel/patches-4.19.x-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
similarity index 94%
rename from kernel/patches-4.19.x-rt/0266-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
rename to kernel/patches-4.19.x-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
index 622ace242..ee735d527 100644
--- a/kernel/patches-4.19.x-rt/0266-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
+++ b/kernel/patches-4.19.x-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
-From 0c32e5dfe4724c249b8eda0c9194e96c4c7bf003 Mon Sep 17 00:00:00 2001
+From a42ebff10207ebde6b4075a9d6ad13d2c9544f79 Mon Sep 17 00:00:00 2001
 From: Julien Grall
 Date: Wed, 13 Mar 2019 11:40:34 +0000
-Subject: [PATCH 266/269] tty/sysrq: Convert show_lock to raw_spinlock_t
+Subject: [PATCH 265/268] tty/sysrq: Convert show_lock to raw_spinlock_t
 
 Systems which don't provide arch_trigger_cpumask_backtrace() will
 invoke showacpu() from a smp_call_function() function which is invoked
diff --git a/kernel/patches-4.19.x-rt/0267-drm-i915-Don-t-disable-interrupts-independently-of-t.patch b/kernel/patches-4.19.x-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch
similarity index 93%
rename from kernel/patches-4.19.x-rt/0267-drm-i915-Don-t-disable-interrupts-independently-of-t.patch
rename to kernel/patches-4.19.x-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch
index 1e7f184ae..6e2149d0f 100644
--- a/kernel/patches-4.19.x-rt/0267-drm-i915-Don-t-disable-interrupts-independently-of-t.patch
+++ b/kernel/patches-4.19.x-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch
@@ -1,7 +1,7 @@
-From 4f5c0777eb039305fafbfdf628f44cd4192d7dd8 Mon Sep 17 00:00:00 2001
+From f182d4e0b74c93b0e62a427ed236247d584d3e1a Mon Sep 17 00:00:00 2001
 From: Sebastian Andrzej Siewior
 Date: Wed, 10 Apr 2019 11:01:37 +0200
-Subject: [PATCH 267/269] drm/i915: Don't disable interrupts independently of
+Subject: [PATCH 266/268] drm/i915: Don't disable interrupts independently of
  the lock
 
 The locks (timeline->lock and rq->lock) need to be taken with disabled
diff --git a/kernel/patches-4.19.x-rt/0268-sched-completion-Fix-a-lockup-in-wait_for_completion.patch b/kernel/patches-4.19.x-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch
similarity index 95%
rename from kernel/patches-4.19.x-rt/0268-sched-completion-Fix-a-lockup-in-wait_for_completion.patch
rename to kernel/patches-4.19.x-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch
index 33d012c5e..7b06827f1 100644
--- a/kernel/patches-4.19.x-rt/0268-sched-completion-Fix-a-lockup-in-wait_for_completion.patch
+++ b/kernel/patches-4.19.x-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch
@@ -1,7 +1,7 @@
-From 3fedc60594022bd98689b88034899528d221db8d Mon Sep 17 00:00:00 2001
+From e331096b94e8826b34dbbfc482bcb357e0ec2963 Mon Sep 17 00:00:00 2001
 From: Corey Minyard
 Date: Thu, 9 May 2019 14:33:20 -0500
-Subject: [PATCH 268/269] sched/completion: Fix a lockup in
+Subject: [PATCH 267/268] sched/completion: Fix a lockup in
  wait_for_completion()
 
 Consider following race:
diff --git a/kernel/patches-4.19.x-rt/0269-Linux-4.19.37-rt20-REBASE.patch b/kernel/patches-4.19.x-rt/0268-Linux-4.19.50-rt22-REBASE.patch
similarity index 64%
rename from kernel/patches-4.19.x-rt/0269-Linux-4.19.37-rt20-REBASE.patch
rename to kernel/patches-4.19.x-rt/0268-Linux-4.19.50-rt22-REBASE.patch
index c1f04bf74..c0b6c34e6 100644
--- a/kernel/patches-4.19.x-rt/0269-Linux-4.19.37-rt20-REBASE.patch
+++ b/kernel/patches-4.19.x-rt/0268-Linux-4.19.50-rt22-REBASE.patch
@@ -1,19 +1,19 @@
-From febb7083d474aead8166900edeb557681119dcc4 Mon Sep 17 00:00:00 2001
+From 5c15b7623e1aa08b31d5b4fc5db7d6b7a9b5fb78 Mon Sep 17 00:00:00 2001
 From: "Steven Rostedt (VMware)"
 Date: Fri, 24 May 2019 14:22:06 -0400
-Subject: [PATCH 269/269] Linux 4.19.37-rt20 REBASE
+Subject: [PATCH 268/268] Linux 4.19.50-rt22 REBASE
 
 ---
  localversion-rt | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/localversion-rt b/localversion-rt
-index 1199ebade17b..e095ab819714 100644
+index 1199ebade17b..c29508d21914 100644
 --- a/localversion-rt
 +++ b/localversion-rt
 @@ -1 +1 @@
 --rt16
-+-rt20
++-rt22
-- 
2.20.1